author    Frederic Weisbecker <frederic@kernel.org>  2023-11-14 14:38:40 -0500
committer Peter Zijlstra <peterz@infradead.org>  2023-11-15 09:57:52 +0100
commit    194600008d5c43b5a4ba98c4b81633397e34ffad (patch)
tree      4421046692e3eac2559f69bf9df605e1bc9db072 /kernel/sched/core.c
parent    dd5403869a40595eb953f12e8cd2bb57bb88bb67 (diff)
sched/timers: Explain why idle task schedules out on remote timer enqueue
Trying to avoid that didn't bring much value after testing; add a comment
explaining why instead.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Rafael J. Wysocki <rafael@kernel.org>
Link: https://lkml.kernel.org/r/20231114193840.4041-3-frederic@kernel.org
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c  22
1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f5f4495d1768..2de77a6d5ef8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1131,6 +1131,28 @@ static void wake_up_idle_cpu(int cpu)
if (cpu == smp_processor_id())
return;
+ /*
+  * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
+  * part of the idle loop. This forces an exit from the idle loop
+  * and a round trip to schedule(). Now this could be optimized
+  * because a simple new idle loop iteration is enough to
+  * re-evaluate the next tick. That would require some re-ordering
+  * of the tick nohz functions so that they follow
+  * TIF_POLLING_NRFLAG clearing:
+  *
+  * - On most archs, a simple fetch_or on ti::flags with a
+  *   "0" value would be enough to know if an IPI needs to be sent.
+  *
+  * - x86 needs to perform a last need_resched() check between
+  *   monitor and mwait, which doesn't take timers into account.
+  *   There, a dedicated TIF_TIMER flag would be required to
+  *   fetch_or here and be checked along with TIF_NEED_RESCHED
+  *   before mwait().
+  *
+  * However, remote timer enqueue is not such a frequent event,
+  * and testing of the above solutions didn't show much benefit.
+  */
if (set_nr_and_not_polling(rq->idle))
smp_send_reschedule(cpu);
else
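
For context (not part of this patch), the set_nr_and_not_polling() helper the
hunk calls is a short fetch_or() wrapper; a sketch of how it looks in
kernel/sched/core.c around this commit, on an SMP build where the architecture
defines TIF_POLLING_NRFLAG:

static inline bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);

	/*
	 * Atomically set TIF_NEED_RESCHED and fetch the previous flags.
	 * One RMW tells the caller whether the idle task was polling on
	 * its flags (it will notice the bit itself, no IPI needed) or
	 * was in the non-polling part of the idle loop (send the IPI).
	 */
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

This is why the caller above only does smp_send_reschedule(cpu) when the
helper returns true: a polling idle CPU is already watching ti::flags.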
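
For reference, a minimal sketch of the optimization the new comment rejects,
on the "most archs" path. This is hypothetical code that is not in the tree,
and the helper name is made up for illustration; it assumes a re-ordered tick
nohz path where a fresh idle-loop iteration re-evaluates the next tick:

static inline bool remote_timer_needs_ipi(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);

	/*
	 * fetch_or with a "0" value changes nothing but, unlike a plain
	 * load, is a fully ordered RMW against the idle task's own flag
	 * updates, so the polling test below cannot see a stale value.
	 * If the idle task is polling, it re-iterates the idle loop and
	 * picks up the new timer without TIF_NEED_RESCHED or an IPI.
	 */
	return !(fetch_or(&ti->flags, 0) & _TIF_POLLING_NRFLAG);
}

As the comment notes, this alone wouldn't cover x86, whose final
need_resched() check between monitor and mwait ignores timers; a dedicated
TIF_TIMER flag would have to be set here and rechecked before mwait().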