From 6731da9e0ff33d8c5f340705d118a27d3b817d1e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 9 Sep 2020 14:14:34 -0700 Subject: rcu-tasks: Mark variables static The n_heavy_reader_attempts, n_heavy_reader_updates, and n_heavy_reader_ofl_updates variables are not used outside of their translation unit, so this commit marks them static. Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 05d3e1375e4c..978508ec39c1 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -745,9 +745,9 @@ static DEFINE_PER_CPU(bool, trc_ipi_to_cpu); // The number of detections of task quiescent state relying on // heavyweight readers executing explicit memory barriers. -unsigned long n_heavy_reader_attempts; -unsigned long n_heavy_reader_updates; -unsigned long n_heavy_reader_ofl_updates; +static unsigned long n_heavy_reader_attempts; +static unsigned long n_heavy_reader_updates; +static unsigned long n_heavy_reader_ofl_updates; void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func); DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace, -- cgit From 2393a613d2e3da35bd73ee55d9dca0fb04810955 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 9 Sep 2020 21:36:34 -0700 Subject: rcu-tasks: Use more aggressive polling for RCU Tasks Trace The RCU Tasks Trace grace periods are too slow, as in 40x slower than those of RCU Tasks. This is due to my having assumed a one-second grace period was OK, and thus not having optimized any further. This commit provides the first step in this optimization process, namely by allowing the task_list scan backoff interval to be specified on a per-flavor basis, and then speeding up the scans for RCU Tasks Trace. However, kernels built with CONFIG_TASKS_TRACE_RCU_READ_MB=y continue to use the old slower backoff, consistent with that Kconfig option's goal of reducing IPIs. Link: https://lore.kernel.org/bpf/CAADnVQK_AiX+S_L_A4CQWT11XyveppBbQSQgH_qWGyzu_E8Yeg@mail.gmail.com/ Reported-by: Alexei Starovoitov Cc: Daniel Borkmann Cc: Jiri Olsa Cc: Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 978508ec39c1..ad8c4f3f44d2 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -28,6 +28,7 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp); * @kthread_ptr: This flavor's grace-period/callback-invocation kthread. * @gp_func: This flavor's grace-period-wait function. * @gp_state: Grace period's most recent state transition (debugging). + * @init_fract: Initial backoff sleep interval. * @gp_jiffies: Time of last @gp_state transition. * @gp_start: Most recent grace-period start in jiffies. * @n_gps: Number of grace periods completed since boot. @@ -48,6 +49,7 @@ struct rcu_tasks { struct wait_queue_head cbs_wq; raw_spinlock_t cbs_lock; int gp_state; + int init_fract; unsigned long gp_jiffies; unsigned long gp_start; unsigned long n_gps; @@ -329,8 +331,10 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) */ lastreport = jiffies; - /* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */ - fract = 10; + // Start off with initial wait and slowly back off to 1 HZ wait. 
+ fract = rtp->init_fract; + if (fract > HZ) + fract = HZ; for (;;) { bool firstreport; @@ -553,6 +557,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks); static int __init rcu_spawn_tasks_kthread(void) { + rcu_tasks.init_fract = 10; rcu_tasks.pregp_func = rcu_tasks_pregp_step; rcu_tasks.pertask_func = rcu_tasks_pertask; rcu_tasks.postscan_func = rcu_tasks_postscan; @@ -1163,6 +1168,13 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace); static int __init rcu_spawn_tasks_trace_kthread(void) { + if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) { + rcu_tasks_trace.init_fract = 10; + } else { + rcu_tasks_trace.init_fract = HZ / 5; + if (rcu_tasks_trace.init_fract <= 0) + rcu_tasks_trace.init_fract = 1; + } rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step; rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask; rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan; -- cgit From 574de8766f6efa0757f3c7ac15c9eb29a4636861 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 9 Sep 2020 21:51:09 -0700 Subject: rcu-tasks: Selectively enable more RCU Tasks Trace IPIs Many workloads are quite sensitive to IPIs, and such workloads should build kernels with CONFIG_TASKS_TRACE_RCU_READ_MB=y to prevent RCU Tasks Trace from using them under normal conditions. However, other workloads are quite happy to permit more IPIs if doing so makes BPF program updates go faster. This commit therefore sets the default value for the rcupdate.rcu_task_ipi_delay kernel parameter to zero for kernels that have been built with CONFIG_TASKS_TRACE_RCU_READ_MB=n, while retaining the old default of (HZ / 2) for kernels that have indicated an aversion to IPIs via CONFIG_TASKS_TRACE_RCU_READ_MB=y. Link: https://lore.kernel.org/bpf/CAADnVQK_AiX+S_L_A4CQWT11XyveppBbQSQgH_qWGyzu_E8Yeg@mail.gmail.com/ Reported-by: Alexei Starovoitov Cc: Daniel Borkmann Cc: Jiri Olsa Cc: Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index ad8c4f3f44d2..2b4df237b598 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -83,7 +83,7 @@ static struct rcu_tasks rt_name = \ DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu); /* Avoid IPIing CPUs early in the grace period. */ -#define RCU_TASK_IPI_DELAY (HZ / 2) +#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0) static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY; module_param(rcu_task_ipi_delay, int, 0644); @@ -916,7 +916,8 @@ static void trc_wait_for_one_reader(struct task_struct *t, // If currently running, send an IPI, either way, add to list. trc_add_holdout(t, bhp); - if (task_curr(t) && time_after(jiffies, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) { + if (task_curr(t) && + time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) { // The task is currently running, so try IPIing it. cpu = task_cpu(t); -- cgit
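The polling patch above makes the grace-period kthread rescan its holdout list much more aggressively for RCU Tasks Trace. As a rough illustration, here is a minimal standalone C sketch (not kernel code) of that backoff schedule. The HZ value is an assumption, and the per-scan sleep of HZ/fract jiffies with fract decaying toward 1 is inferred from the "back off to 1 HZ wait" comment rather than shown in the diffs above.

/* backoff_sketch.c: illustrative only; HZ and the decay schedule are assumptions. */
#include <stdio.h>

#define HZ 1000				/* assumed CONFIG_HZ for this example */

static void show_backoff(const char *flavor, int init_fract)
{
	int fract = init_fract > HZ ? HZ : init_fract;	/* same clamp as the patch */

	printf("%s (init_fract = %d):\n", flavor, init_fract);
	for (int scan = 1; scan <= 5; scan++) {
		/* Assumed: each holdout scan sleeps HZ/fract jiffies... */
		printf("  scan %d: sleep %d jiffies\n", scan, HZ / fract);
		/* ...and fract decays toward 1, that is, toward one-second sleeps. */
		if (fract > 1)
			fract--;
	}
}

int main(void)
{
	show_backoff("RCU Tasks", 10);		/* rcu_tasks.init_fract = 10 */
	show_backoff("RCU Tasks Trace", HZ / 5);	/* READ_MB=n setting above */
	return 0;
}

With init_fract = 10 the first scan waits 100 jiffies, whereas init_fract = HZ / 5 makes the first scans wait only a handful of jiffies, which is where the RCU Tasks Trace speedup comes from.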
From 4fe192dfbe5ba9780df699d411aa4f25ba24cf61 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 9 Sep 2020 22:05:41 -0700 Subject: rcu-tasks: Shorten per-grace-period sleep for RCU Tasks Trace The various RCU tasks flavors currently wait 100 milliseconds between each grace period in order to prevent CPU-bound loops and to favor efficiency over latency. However, RCU Tasks Trace needs to have a grace-period latency of roughly 25 milliseconds, which is completely infeasible given the 100-millisecond per-grace-period sleep. This commit therefore reduces this sleep duration to 5 milliseconds (or one jiffy, whichever is longer) in kernels built with CONFIG_TASKS_TRACE_RCU_READ_MB=n. Link: https://lore.kernel.org/bpf/CAADnVQK_AiX+S_L_A4CQWT11XyveppBbQSQgH_qWGyzu_E8Yeg@mail.gmail.com/ Reported-by: Alexei Starovoitov Cc: Daniel Borkmann Cc: Jiri Olsa Cc: Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 2b4df237b598..a0eaed522277 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -28,6 +28,7 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp); * @kthread_ptr: This flavor's grace-period/callback-invocation kthread. * @gp_func: This flavor's grace-period-wait function. * @gp_state: Grace period's most recent state transition (debugging). + * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping. * @init_fract: Initial backoff sleep interval. * @gp_jiffies: Time of last @gp_state transition. * @gp_start: Most recent grace-period start in jiffies. @@ -49,6 +50,7 @@ struct rcu_tasks { struct wait_queue_head cbs_wq; raw_spinlock_t cbs_lock; int gp_state; + int gp_sleep; int init_fract; unsigned long gp_jiffies; unsigned long gp_start; @@ -233,7 +235,7 @@ static int __noreturn rcu_tasks_kthread(void *arg) cond_resched(); } /* Paranoid sleep to keep this from entering a tight loop */ - schedule_timeout_idle(HZ/10); + schedule_timeout_idle(rtp->gp_sleep); set_tasks_gp_state(rtp, RTGS_WAIT_CBS); } @@ -557,6 +559,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks); static int __init rcu_spawn_tasks_kthread(void) { + rcu_tasks.gp_sleep = HZ / 10; rcu_tasks.init_fract = 10; rcu_tasks.pregp_func = rcu_tasks_pregp_step; rcu_tasks.pertask_func = rcu_tasks_pertask; @@ -690,6 +693,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude); static int __init rcu_spawn_tasks_rude_kthread(void) { + rcu_tasks_rude.gp_sleep = HZ / 10; rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude); return 0; } @@ -1170,8 +1174,12 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace); static int __init rcu_spawn_tasks_trace_kthread(void) { if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) { + rcu_tasks_trace.gp_sleep = HZ / 10; rcu_tasks_trace.init_fract = 10; } else { + rcu_tasks_trace.gp_sleep = HZ / 200; + if (rcu_tasks_trace.gp_sleep <= 0) + rcu_tasks_trace.gp_sleep = 1; rcu_tasks_trace.init_fract = HZ / 5; if (rcu_tasks_trace.init_fract <= 0) rcu_tasks_trace.init_fract = 1; -- cgit
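Before moving on, here is a compact sketch of how the two tunables introduced above are chosen for RCU Tasks Trace, written as an ordinary userspace C program so the arithmetic can be checked directly. The HZ values are assumptions for illustration; the point is the one-jiffy clamp, which keeps HZ / 200 from rounding down to zero on low-HZ kernels.

/* tuning_sketch.c: mirrors the rcu_spawn_tasks_trace_kthread() logic above; not kernel code. */
#include <stdio.h>

struct tuning {
	int gp_sleep;	/* jiffies slept between grace periods */
	int init_fract;	/* initial holdout-scan backoff divisor */
};

static struct tuning pick_trace_tuning(int hz, int read_mb)
{
	struct tuning t;

	if (read_mb) {			/* CONFIG_TASKS_TRACE_RCU_READ_MB=y */
		t.gp_sleep = hz / 10;	/* keep the old 100-millisecond pause */
		t.init_fract = 10;
	} else {			/* CONFIG_TASKS_TRACE_RCU_READ_MB=n */
		t.gp_sleep = hz / 200;	/* aim for a 5-millisecond pause... */
		if (t.gp_sleep <= 0)
			t.gp_sleep = 1;	/* ...but never less than one jiffy */
		t.init_fract = hz / 5;
		if (t.init_fract <= 0)
			t.init_fract = 1;
	}
	return t;
}

int main(void)
{
	int hz_values[] = { 100, 250, 1000 };	/* assumed example CONFIG_HZ values */

	for (int i = 0; i < 3; i++) {
		struct tuning t = pick_trace_tuning(hz_values[i], 0);
		printf("HZ=%4d: gp_sleep=%d jiffies, init_fract=%d\n",
		       hz_values[i], t.gp_sleep, t.init_fract);
	}
	return 0;
}

At HZ=100, for example, HZ / 200 evaluates to zero, so without the clamp the kthread's between-grace-period sleep would vanish and it would run in a tight loop; the check against <= 0 turns that into a single-jiffy sleep instead.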
From ba3a86e47232ad9f76160929f33ac9c64e4d0567 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 14 Sep 2020 15:44:37 -0700 Subject: rcu-tasks: Fix grace-period/unlock race in RCU Tasks Trace The more intense grace-period processing resulting from the 50x RCU Tasks Trace grace-period speedups exposed the following race condition: o Task A running on CPU 0 executes rcu_read_lock_trace(), entering a read-side critical section. o When Task A eventually invokes rcu_read_unlock_trace() to exit its read-side critical section, this function notes that the ->trc_reader_special.s flag is zero and will therefore set ->trc_reader_nesting to zero using WRITE_ONCE(). But before that happens... o The RCU Tasks Trace grace-period kthread running on some other CPU interrogates Task A, but this fails because this task is currently running. This kthread therefore sends an IPI to CPU 0. o CPU 0 receives the IPI, and thus invokes trc_read_check_handler(). Because Task A has not yet cleared its ->trc_reader_nesting counter, this function sees that Task A is still within its read-side critical section. This function therefore sets the ->trc_reader_special.b.need_qs flag, AKA the .need_qs flag. Except that Task A has already checked the .need_qs flag, which is part of the ->trc_reader_special.s flag. The .need_qs flag therefore remains set until Task A's next rcu_read_unlock_trace(). o Task A now invokes synchronize_rcu_tasks_trace(), which cannot start a new grace period until the current grace period completes, and thus cannot return until after that time. But Task A's .need_qs flag is still set, which prevents the current grace period from completing. And because Task A is blocked, it will never execute rcu_read_unlock_trace() until its call to synchronize_rcu_tasks_trace() returns. We are therefore deadlocked. This race is improbable, but 80 hours of rcutorture made it happen twice. The race was possible before the grace-period speedup, but roughly 50x less probable. Several thousand hours of rcutorture would have been necessary to have a reasonable chance of making this happen before this 50x speedup. This commit therefore eliminates this deadlock by setting ->trc_reader_nesting to a large negative number before checking the .need_qs flag and zeroing (or decrementing with respect to its initial value) ->trc_reader_nesting. For its part, the IPI handler's trc_read_check_handler() function adds a check for negative values, deferring evaluation of the task in this case. Taken together, these changes avoid this deadlock scenario. Fixes: 276c410448db ("rcu-tasks: Split ->trc_reader_need_end") Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: Jiri Olsa Cc: Cc: # 5.7.x Signed-off-by: Paul E. McKenney --- include/linux/rcupdate_trace.h | 4 ++++ kernel/rcu/tasks.h | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h index d9015aac78c6..a6a6a3acab5a 100644 --- a/include/linux/rcupdate_trace.h +++ b/include/linux/rcupdate_trace.h @@ -50,6 +50,7 @@ static inline void rcu_read_lock_trace(void) struct task_struct *t = current; WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1); + barrier(); if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb) smp_mb(); // Pairs with update-side barriers @@ -72,6 +73,9 @@ static inline void rcu_read_unlock_trace(void) rcu_lock_release(&rcu_trace_lock_map); nesting = READ_ONCE(t->trc_reader_nesting) - 1; + barrier(); // Critical section before disabling. + // Disable IPI-based setting of .need_qs. + WRITE_ONCE(t->trc_reader_nesting, INT_MIN); if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) { WRITE_ONCE(t->trc_reader_nesting, nesting); return; // We assume shallow reader nesting. diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index a0eaed522277..e583a2d47374 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -830,6 +830,12 @@ static void trc_read_check_handler(void *t_in) WRITE_ONCE(t->trc_reader_checked, true); goto reset_ipi; } + // If we are racing with an rcu_read_unlock_trace(), try again later. + if (unlikely(t->trc_reader_nesting < 0)) { + if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end))) + wake_up(&trc_wait); + goto reset_ipi; + } WRITE_ONCE(t->trc_reader_checked, true); // Get here if the task is in a read-side critical section. Set -- cgit From 592031cc10858be4adb10f6c0f2608f6f21824aa Mon Sep 17 00:00:00 2001 From: "Paul E.
McKenney" Date: Tue, 15 Sep 2020 14:03:34 -0700 Subject: rcu-tasks: Fix low-probability task_struct leak When rcu_tasks_trace_postgp() function detects an RCU Tasks Trace CPU stall, it adds all tasks blocking the current grace period to a list, invoking get_task_struct() on each to prevent them from being freed while on the list. It then traverses that list, printing stall-warning messages for each one that is still blocking the current grace period and removing it from the list. The list removal invokes the matching put_task_struct(). This of course means that in the admittedly unlikely event that some task executes its outermost rcu_read_unlock_trace() in the meantime, it won't be removed from the list and put_task_struct() won't be executing, resulting in a task_struct leak. This commit therefore makes the list removal and put_task_struct() unconditional, stopping the leak. Note further that this bug can occur only after an RCU Tasks Trace CPU stall warning, which by default only happens after a grace period has extended for ten minutes (yes, not a typo, minutes). Fixes: 4593e772b502 ("rcu-tasks: Add stall warnings for RCU Tasks Trace") Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: Jiri Olsa Cc: Cc: # 5.7.x Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index e583a2d47374..fcd9c25f1140 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -1092,11 +1092,11 @@ static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp) if (READ_ONCE(t->trc_reader_special.b.need_qs)) trc_add_holdout(t, &holdouts); firstreport = true; - list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) - if (READ_ONCE(t->trc_reader_special.b.need_qs)) { + list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) { + if (READ_ONCE(t->trc_reader_special.b.need_qs)) show_stalled_task_trace(t, &firstreport); - trc_del_holdout(t); - } + trc_del_holdout(t); // Release task_struct reference. + } if (firstreport) pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n"); show_stalled_ipi_trace(); -- cgit From f747c7e15d7bc71a967a94ceda686cf2460b69e8 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 15 Sep 2020 14:27:38 -0700 Subject: rcu-tasks: Enclose task-list scan in rcu_read_lock() The rcu_tasks_trace_postgp() function uses for_each_process_thread() to scan the task list without the benefit of RCU read-side protection, which can result in use-after-free errors on task_struct structures. This error was missed because the TRACE01 rcutorture scenario enables lockdep, but also builds with CONFIG_PREEMPT_NONE=y. In this situation, preemption is disabled everywhere, so lockdep thinks everywhere can be a legitimate RCU reader. This commit therefore adds the needed rcu_read_lock() and rcu_read_unlock(). Note that this bug can occur only after an RCU Tasks Trace CPU stall warning, which by default only happens after a grace period has extended for ten minutes (yes, not a typo, minutes). Fixes: 4593e772b502 ("rcu-tasks: Add stall warnings for RCU Tasks Trace") Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: Jiri Olsa Cc: Cc: # 5.7.x Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tasks.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index fcd9c25f1140..d5d9f2d03e8a 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -1088,9 +1088,11 @@ static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp) if (ret) break; // Count reached zero. // Stall warning time, so make a list of the offenders. + rcu_read_lock(); for_each_process_thread(g, t) if (READ_ONCE(t->trc_reader_special.b.need_qs)) trc_add_holdout(t, &holdouts); + rcu_read_unlock(); firstreport = true; list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) { if (READ_ONCE(t->trc_reader_special.b.need_qs)) -- cgit
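Taken together, the last two fixes leave the stall-warning path of rcu_tasks_trace_postgp() looking roughly as follows. This is a reconstruction from the two diffs above, with the surrounding code omitted, so it is an illustrative excerpt rather than a compilable unit.

	// Stall warning time, so make a list of the offenders.
	rcu_read_lock();	// Task-list scan now runs under RCU protection.
	for_each_process_thread(g, t)
		if (READ_ONCE(t->trc_reader_special.b.need_qs))
			trc_add_holdout(t, &holdouts);	// Takes a task_struct reference.
	rcu_read_unlock();
	firstreport = true;
	list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
		if (READ_ONCE(t->trc_reader_special.b.need_qs))
			show_stalled_task_trace(t, &firstreport);
		trc_del_holdout(t); // Release task_struct reference unconditionally.
	}
	if (firstreport)
		pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n");
	show_stalled_ipi_trace();

The reference is now dropped whether or not the task is still blocking the grace period, which closes the task_struct leak, and the scan itself can no longer race with task exit, which closes the use-after-free.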