author     Jinjie Ruan <ruanjinjie@huawei.com>  2025-08-15 11:06:30 +0800
committer  Will Deacon <will@kernel.org>        2025-09-11 15:55:34 +0100
commit     3c973c51bfbaf356367afa46b94f9100a7d672f2 (patch)
tree       40243e8a59344dbe42436ed3be041d8de8a8b8de /kernel/entry/common.c
parent     c74c44c6ae207e196c4c31c4a243abb0811a5974 (diff)
entry: Add arch_irqentry_exit_need_resched() for arm64
Compared to the generic entry code, ARM64 does additional checks when
deciding to reschedule on return from interrupt. So introduce
arch_irqentry_exit_need_resched() in the need_resched() condition of the
generic raw_irqentry_exit_cond_resched(), with a NOP default. This will
allow ARM64 to implement the architecture specific version for switching
over to the generic entry code.

Suggested-by: Ada Couprie Diaz <ada.coupriediaz@arm.com>
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Suggested-by: Kevin Brodsky <kevin.brodsky@arm.com>
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
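For context on how an architecture is expected to hook into this, here is a minimal, hypothetical sketch; it is not part of this patch and not the actual arm64 implementation. The header location (asm/preempt.h) and the helper arch_irqs_preemption_blocked() are assumptions for illustration only; what is real is the override pattern the patch relies on: the architecture provides its own static inline plus a same-named #define so the generic fallback in the diff below is not compiled in.

/* arch/<arch>/include/asm/preempt.h -- hypothetical location */

#define arch_irqentry_exit_need_resched arch_irqentry_exit_need_resched
static inline bool arch_irqentry_exit_need_resched(void)
{
	/*
	 * Hypothetical arch-specific condition: veto preemption on IRQ
	 * return while architecture state forbids it.
	 * arch_irqs_preemption_blocked() is a made-up stand-in for the
	 * real checks an architecture would perform.
	 */
	return !arch_irqs_preemption_blocked();
}

On architectures that do not opt in, the default added below simply returns true, so need_resched() && arch_irqentry_exit_need_resched() reduces to the original need_resched() check.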
Diffstat (limited to 'kernel/entry/common.c')
-rw-r--r--  kernel/entry/common.c | 16 +++++++++++++++-
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index 408d28b5179d..f62e1d1b2063 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -143,6 +143,20 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
	return ret;
}
+/**
+ * arch_irqentry_exit_need_resched - Architecture specific need resched function
+ *
+ * Invoked from raw_irqentry_exit_cond_resched() to check if resched is needed.
+ * Defaults return true.
+ *
+ * The main purpose is to permit arch to avoid preemption of a task from an IRQ.
+ */
+static inline bool arch_irqentry_exit_need_resched(void);
+
+#ifndef arch_irqentry_exit_need_resched
+static inline bool arch_irqentry_exit_need_resched(void) { return true; }
+#endif
+
void raw_irqentry_exit_cond_resched(void)
{
	if (!preempt_count()) {
@@ -150,7 +164,7 @@ void raw_irqentry_exit_cond_resched(void)
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
-		if (need_resched())
+		if (need_resched() && arch_irqentry_exit_need_resched())
			preempt_schedule_irq();
	}
}
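To see the effect of the new condition in isolation, here is a small, self-contained user-space sketch (not kernel code): need_resched() and the arch hook are stubbed out, and it only demonstrates that preemption on IRQ exit now requires both checks to agree.

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the kernel helpers used in the hunk above. */
static bool need_resched(void)                    { return true; }
static bool arch_irqentry_exit_need_resched(void) { return false; } /* arch vetoes preemption */

static void cond_resched_on_irq_exit(void)
{
	if (need_resched() && arch_irqentry_exit_need_resched())
		puts("preempt_schedule_irq(): switch to the woken task");
	else
		puts("no preemption: return to the interrupted context");
}

int main(void)
{
	cond_resched_on_irq_exit();	/* prints the "no preemption" branch */
	return 0;
}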