 kernel/rcu/rcu.h         |  2 ++
 kernel/rcu/tasks.h       | 36 ++++++++++++++++++++++++++----------
 kernel/rcu/tree.c        | 24 ++++++++++++++++++++++++
 kernel/rcu/tree.h        |  2 ++
 kernel/rcu/tree_plugin.h | 18 ++++++++++++++++++
 5 files changed, 72 insertions(+), 10 deletions(-)
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index e1089fdf8626..296f9262d119 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -501,6 +501,7 @@ void srcutorture_get_gp_data(enum rcutorture_type test_type,
#endif

#ifdef CONFIG_TINY_RCU
+static inline bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) { return false; }
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
@@ -510,6 +511,7 @@ static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
#else /* #ifdef CONFIG_TINY_RCU */
+bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 4147857007d7..a9e8ecb10860 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -806,22 +806,38 @@ reset_ipi:
/* Callback function for scheduler to check locked-down task. */
static bool trc_inspect_reader(struct task_struct *t, void *arg)
{
- if (task_curr(t))
- return false; // It is running, so decline to inspect it.
+ int cpu = task_cpu(t);
+ bool in_qs = false;
+
+ if (task_curr(t)) {
+ // If no chance of heavyweight readers, do it the hard way.
+ if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
+ return false;
+
+ // If heavyweight readers are enabled on the remote task,
+ // we can inspect its state even though it is currently running.
+ // However, we cannot safely change its state.
+ if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
+ return false; // No quiescent state, do it the hard way.
+ in_qs = true;
+ } else {
+ in_qs = likely(!t->trc_reader_nesting);
+ }

// Mark as checked. Because this is called from the grace-period
// kthread, also remove the task from the holdout list.
t->trc_reader_checked = true;
trc_del_holdout(t);

- // If the task is in a read-side critical section, set up its
- // state so that it will awaken the grace-period kthread upon
- // exit from that critical section.
- if (unlikely(t->trc_reader_nesting)) {
- atomic_inc(&trc_n_readers_need_end); // One more to wait on.
- WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
- WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
- }
+ if (in_qs)
+ return true; // Already in quiescent state, done!
+
+ // The task is in a read-side critical section, so set up its
+ // state so that it will awaken the grace-period kthread upon exit
+ // from that critical section.
+ atomic_inc(&trc_n_readers_need_end); // One more to wait on.
+ WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
+ WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
return true;
}
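
Above, trc_inspect_reader() now inspects even a running task, but only when
CONFIG_TASKS_TRACE_RCU_READ_MB guarantees that readers bracket their
->trc_reader_nesting updates with full memory barriers; otherwise it must
decline and fall back to the IPI path. A minimal userspace sketch of that
reader-side discipline, using C11 atomics in place of smp_mb() and with
illustrative names (reader_nesting, heavyweight_read_lock/unlock) that are
not kernel symbols:

    #include <stdatomic.h>

    static _Atomic int reader_nesting; /* stands in for t->trc_reader_nesting */

    static void heavyweight_read_lock(void)
    {
            atomic_fetch_add_explicit(&reader_nesting, 1, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst); /* models smp_mb() */
    }

    static void heavyweight_read_unlock(void)
    {
            atomic_thread_fence(memory_order_seq_cst); /* models smp_mb() */
            atomic_fetch_sub_explicit(&reader_nesting, 1, memory_order_relaxed);
    }

    int main(void)
    {
            heavyweight_read_lock();
            /* read-side critical section: inspectors see nonzero nesting */
            heavyweight_read_unlock();
            return 0;
    }

The fences are what let a remote inspector trust the nesting count of a task
that is still running; without them, a zero value could be a stale read.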
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 0bbcbf398169..573fd78a7bca 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -252,6 +252,7 @@ static void rcu_dynticks_eqs_enter(void)
* critical sections, and we also must force ordering with the
* next idle sojourn.
*/
+ rcu_dynticks_task_trace_enter(); // Before ->dynticks update!
seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
// RCU is no longer watching. Better be in extended quiescent state!
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
@@ -278,6 +279,7 @@ static void rcu_dynticks_eqs_exit(void)
*/
seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
// RCU is now watching. Better not be in an extended quiescent state!
+ rcu_dynticks_task_trace_exit(); // After ->dynticks update!
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
!(seq & RCU_DYNTICK_CTRL_CTR));
if (seq & RCU_DYNTICK_CTRL_MASK) {
@@ -350,6 +352,28 @@ static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
}

/*
+ * Return true if the referenced integer is zero while the specified
+ * CPU remains within a single extended quiescent state.
+ */
+bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
+{
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+ int snap;
+
+ // If not quiescent, force back to earlier extended quiescent state.
+ snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
+ RCU_DYNTICK_CTRL_CTR);
+
+ smp_rmb(); // Order ->dynticks and *vp reads.
+ if (READ_ONCE(*vp))
+ return false; // Non-zero, so report failure.
+ smp_rmb(); // Order *vp read and ->dynticks re-read.
+
+ // If still in the same extended quiescent state, we are good!
+ return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
+}
+
+/*
* Set the special (bottom) bit of the specified CPU so that it
* will take special action (such as flushing its TLB) on the
* next exit from an extended quiescent state. Returns true if
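
The new rcu_dynticks_zero_in_eqs() above is a double-snapshot check: take a
snapshot of ->dynticks forced back to its quiescent form, read the target
value between two read barriers, then confirm the counter never moved. A
hedged standalone C11 model of the same idiom, where eqs_ctr (an illustrative
name, odd meaning active and even meaning extended quiescent state) stands in
for the masked ->dynticks counter:

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic unsigned int eqs_ctr; /* odd: active; even: in EQS */

    /* True if *vp stayed zero while eqs_ctr sat in one even (EQS) value. */
    static bool zero_in_eqs_model(_Atomic int *vp)
    {
            /* Force the snapshot to even, as the kernel forces its
             * snapshot back to the earlier quiescent state. */
            unsigned int snap =
                    atomic_load_explicit(&eqs_ctr, memory_order_relaxed) & ~1u;

            atomic_thread_fence(memory_order_seq_cst); /* models smp_rmb() */
            if (atomic_load_explicit(vp, memory_order_relaxed))
                    return false; /* non-zero: report failure */
            atomic_thread_fence(memory_order_seq_cst); /* models smp_rmb() */

            /* Unchanged counter: one uninterrupted EQS covered the read. */
            return snap == atomic_load_explicit(&eqs_ctr, memory_order_relaxed);
    }

    int main(void)
    {
            static _Atomic int nesting;
            return zero_in_eqs_model(&nesting) ? 0 : 1;
    }

Clearing the low bit from the snapshot mirrors the kernel's masking of
RCU_DYNTICK_CTRL_CTR: if the CPU was not quiescent, or left quiescence even
briefly, the final comparison fails and the caller falls back to the IPI.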
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 9dc2ec021da5..29ba79989802 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -454,6 +454,8 @@ static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(void);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);
+static void rcu_dynticks_task_trace_enter(void);
+static void rcu_dynticks_task_trace_exit(void);

/* Forward declarations for tree_stall.h */
static void record_gp_stall_check_time(void);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 37e02812d18f..4cef7e3bca69 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2552,3 +2552,21 @@ static void rcu_dynticks_task_exit(void)
WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}
+
+/* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
+static void rcu_dynticks_task_trace_enter(void)
+{
+#ifdef CONFIG_TASKS_TRACE_RCU
+ if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
+ current->trc_reader_special.b.need_mb = true;
+#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
+}
+
+/* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
+static void rcu_dynticks_task_trace_exit(void)
+{
+#ifdef CONFIG_TASKS_TRACE_RCU
+ if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
+ current->trc_reader_special.b.need_mb = false;
+#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
+}