author		Frederic Weisbecker <frederic@kernel.org>	2018-06-28 18:29:41 +0200
committer	Ingo Molnar <mingo@kernel.org>	2018-07-03 09:17:28 +0200
commit		d9c0ffcabd6aae7ff1e34e8078354c13bb9f1183 (patch)
tree		2cea6fe70aebc13c688665e927ebee6c12100dc4 /kernel/sched/core.c
parent		021c91791a5e7e85c567452f1be3e4c2c6cb6063 (diff)
sched/nohz: Skip remote tick on idle task entirely
Some people have reported that the warning in sched_tick_remote() occasionally triggers, especially under rcutorture pressure:

	WARNING: CPU: 11 PID: 906 at kernel/sched/core.c:3138 sched_tick_remote+0xb6/0xc0
	Modules linked in:
	CPU: 11 PID: 906 Comm: kworker/u32:3 Not tainted 4.18.0-rc2+ #1
	Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1 04/01/2014
	Workqueue: events_unbound sched_tick_remote
	RIP: 0010:sched_tick_remote+0xb6/0xc0
	Code: e8 0f 06 b8 00 c6 03 00 fb eb 9d 8b 43 04 85 c0 75 8d 48 8b 83 e0 0a 00 00 48 85 c0 75 81 eb 88 48 89 df e8 bc fe ff ff eb aa <0f> 0b eb c5 66 0f 1f 44 00 00 bf 17 00 00 00 e8 b6 2e fe ff 0f b6
	Call Trace:
	 process_one_work+0x1df/0x3b0
	 worker_thread+0x44/0x3d0
	 kthread+0xf3/0x130
	 ? set_worker_desc+0xb0/0xb0
	 ? kthread_create_worker_on_cpu+0x70/0x70
	 ret_from_fork+0x35/0x40

This happens when the remote tick applies to an idle task. Usually the idle_cpu() check avoids that, but it is performed before we lock the runqueue and is therefore racy. It was intended to be that way in order to avoid useless runqueue locking, since the idle task's tick callback is a no-op.

Now if the racy check slips out of our hands and we end up remotely ticking an idle task, the empty task_tick_idle() is harmless. Still, it won't pass the WARN_ON_ONCE() test that ensures rq_clock_task() is not too far from curr->se.exec_start, because update_curr_idle() doesn't update the exec_start value the way the other scheduler policies do. Hence the reported false positive.

So let's add another check, performed while the rq is locked, to make sure we don't remotely tick an idle task. The lockless idle_cpu() check still applies, to avoid unnecessary rq lock contention.

Reported-by: Jacek Tomaka <jacekt@dug.com>
Reported-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reported-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1530203381-31234-1-git-send-email-frederic@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
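The fix follows a common pattern: a cheap lockless pre-check skips the lock entirely in the common case, and an authoritative re-check under the lock catches the race. Below is a minimal userspace sketch of that pattern, not kernel code; the names cpu_state, cpu_looks_idle and remote_tick are hypothetical stand-ins, using pthreads in place of the rq lock.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct cpu_state {
		pthread_mutex_t lock;
		bool idle;	/* may be flipped by another thread at any time */
	};

	/* Racy fast path: read without the lock, analogous to idle_cpu(). */
	static bool cpu_looks_idle(struct cpu_state *cs)
	{
		return cs->idle;	/* may be stale by the time we act on it */
	}

	static void remote_tick(struct cpu_state *cs)
	{
		/* Lockless check: avoid taking the lock at all in the idle case. */
		if (cpu_looks_idle(cs))
			return;

		pthread_mutex_lock(&cs->lock);

		/*
		 * Re-check under the lock: the CPU may have gone idle between
		 * the racy check above and the lock acquisition. This mirrors
		 * the is_idle_task(curr) test added by the patch.
		 */
		if (cs->idle)
			goto out_unlock;

		printf("ticking a busy CPU\n");	/* stand-in for task_tick() */

	out_unlock:
		pthread_mutex_unlock(&cs->lock);
	}

	int main(void)
	{
		struct cpu_state cs = { .lock = PTHREAD_MUTEX_INITIALIZER, .idle = false };

		remote_tick(&cs);	/* prints "ticking a busy CPU" */
		cs.idle = true;
		remote_tick(&cs);	/* skipped by the lockless fast path */
		return 0;
	}

In this single-threaded demo the race never fires, but the structure is the point: the unlocked read keeps the common case cheap, and the locked re-check makes the decision authoritative.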
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	36
1 file changed, 21 insertions(+), 15 deletions(-)
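For orientation before the raw hunks, here is roughly how the body of sched_tick_remote() reads once this patch is applied. It is reconstructed from the diff below and its context lines only; the dwork declaration, the comment lines falling between the two hunks, and the 1Hz requeue call are elided because the diff does not show them.

	static void sched_tick_remote(struct work_struct *work)
	{
		/* ... dwork declaration elided (above the first hunk) ... */
		struct tick_work *twork = container_of(dwork, struct tick_work, work);
		int cpu = twork->cpu;
		struct rq *rq = cpu_rq(cpu);
		struct task_struct *curr;
		struct rq_flags rf;
		u64 delta;

		/*
		 * Handle the tick only if it appears the remote CPU is running in full
		 * ... (comment lines between the two hunks elided) ...
		 * statistics and checks timeslices in a time-independent way, regardless
		 * of when exactly it is running.
		 */
		if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu))
			goto out_requeue;

		rq_lock_irq(rq, &rf);
		curr = rq->curr;
		if (is_idle_task(curr))		/* the new re-check, now under the rq lock */
			goto out_unlock;

		update_rq_clock(rq);
		delta = rq_clock_task(rq) - curr->se.exec_start;

		/*
		 * Make sure the next tick runs within a reasonable
		 * amount of time.
		 */
		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
		curr->sched_class->task_tick(rq, curr, 0);

	out_unlock:
		rq_unlock_irq(rq, &rf);
	out_requeue:
		/* ... 1Hz requeue elided (below the second hunk) ... */
	}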
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 78d8facba456..22fce36426c0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3113,7 +3113,9 @@ static void sched_tick_remote(struct work_struct *work)
struct tick_work *twork = container_of(dwork, struct tick_work, work);
int cpu = twork->cpu;
struct rq *rq = cpu_rq(cpu);
+ struct task_struct *curr;
struct rq_flags rf;
+ u64 delta;
/*
* Handle the tick only if it appears the remote CPU is running in full
@@ -3122,24 +3124,28 @@ static void sched_tick_remote(struct work_struct *work)
* statistics and checks timeslices in a time-independent way, regardless
* of when exactly it is running.
*/
- if (!idle_cpu(cpu) && tick_nohz_tick_stopped_cpu(cpu)) {
- struct task_struct *curr;
- u64 delta;
+ if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu))
+ goto out_requeue;
- rq_lock_irq(rq, &rf);
- update_rq_clock(rq);
- curr = rq->curr;
- delta = rq_clock_task(rq) - curr->se.exec_start;
+ rq_lock_irq(rq, &rf);
+ curr = rq->curr;
+ if (is_idle_task(curr))
+ goto out_unlock;
- /*
- * Make sure the next tick runs within a reasonable
- * amount of time.
- */
- WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
- curr->sched_class->task_tick(rq, curr, 0);
- rq_unlock_irq(rq, &rf);
- }
+ update_rq_clock(rq);
+ delta = rq_clock_task(rq) - curr->se.exec_start;
+
+ /*
+ * Make sure the next tick runs within a reasonable
+ * amount of time.
+ */
+ WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+ curr->sched_class->task_tick(rq, curr, 0);
+
+out_unlock:
+ rq_unlock_irq(rq, &rf);
+out_requeue:
/*
* Run the remote tick once per second (1Hz). This arbitrary
* frequency is large enough to avoid overload but short enough