author     Jinjie Ruan <ruanjinjie@huawei.com>    2025-08-15 11:06:28 +0800
committer  Will Deacon <will@kernel.org>          2025-09-11 15:55:34 +0100
commit     77c1953946391e38c1e5120230f8df14f85219a7
tree       566fd2318789aaff1294ffc64804d1a2f8a0b0e4
parent     ee776d68ba47cc8e2022f8c2218f1891a1244197
arm64: entry: Rework arm64_preempt_schedule_irq()
The generic entry code has the form:
| raw_irqentry_exit_cond_resched()
| {
| 	if (!preempt_count()) {
| 		...
| 		if (need_resched())
| 			preempt_schedule_irq();
| 	}
| }
In preparation for moving arm64 over to the generic entry code, align
the structure of the arm64 code with raw_irqentry_exit_cond_resched().
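After this rework, the arm64 call site takes the matching shape (see the
diff below):

| if (arm64_preempt_schedule_irq())
| 	preempt_schedule_irq();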
Reviewed-by: Ada Couprie Diaz <ada.coupriediaz@arm.com>
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
-rw-r--r--  arch/arm64/kernel/entry-common.c  17
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 93c95fc51cc0..dd7903f371ad 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -294,10 +294,10 @@ DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
 #define need_irq_preemption()	(IS_ENABLED(CONFIG_PREEMPTION))
 #endif
 
-static void __sched arm64_preempt_schedule_irq(void)
+static inline bool arm64_preempt_schedule_irq(void)
 {
 	if (!need_irq_preemption())
-		return;
+		return false;
 
 	/*
 	 * Note: thread_info::preempt_count includes both thread_info::count
@@ -305,7 +305,7 @@ static void __sched arm64_preempt_schedule_irq(void)
 	 * preempt_count().
 	 */
 	if (READ_ONCE(current_thread_info()->preempt_count) != 0)
-		return;
+		return false;
 
 	/*
 	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
@@ -314,7 +314,7 @@ static void __sched arm64_preempt_schedule_irq(void)
 	 * DAIF we must have handled an NMI, so skip preemption.
 	 */
 	if (system_uses_irq_prio_masking() && read_sysreg(daif))
-		return;
+		return false;
 
 	/*
 	 * Preempting a task from an IRQ means we leave copies of PSTATE
@@ -324,8 +324,10 @@ static void __sched arm64_preempt_schedule_irq(void)
 	 * Only allow a task to be preempted once cpufeatures have been
 	 * enabled.
 	 */
-	if (system_capabilities_finalized())
-		preempt_schedule_irq();
+	if (!system_capabilities_finalized())
+		return false;
+
+	return true;
 }
 
 static void do_interrupt_handler(struct pt_regs *regs,
@@ -699,7 +701,8 @@ static __always_inline void __el1_irq(struct pt_regs *regs,
 	do_interrupt_handler(regs, handler);
 	irq_exit_rcu();
 
-	arm64_preempt_schedule_irq();
+	if (arm64_preempt_schedule_irq())
+		preempt_schedule_irq();
 
 	exit_to_kernel_mode(regs, state);
 }
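For reference, a sketch of arm64_preempt_schedule_irq() as it reads after
this patch, reconstructed from the hunks above (comment bodies are
abbreviated here; see the diff for the full text):

static inline bool arm64_preempt_schedule_irq(void)
{
	if (!need_irq_preemption())
		return false;

	/* thread_info::preempt_count covers both ::count and ::need_resched */
	if (READ_ONCE(current_thread_info()->preempt_count) != 0)
		return false;

	/* anything set in DAIF under pNMI means an NMI was handled: skip */
	if (system_uses_irq_prio_masking() && read_sysreg(daif))
		return false;

	/* only preempt once cpufeatures have been finalized */
	if (!system_capabilities_finalized())
		return false;

	return true;
}

The helper is now a pure predicate: __el1_irq() decides and performs the
preemption itself, mirroring the check/act split in the generic
raw_irqentry_exit_cond_resched():

	if (arm64_preempt_schedule_irq())
		preempt_schedule_irq();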