author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-07-02 09:04:27 -0700
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-08-30 16:02:42 -0700
commit    82fcecfa81855924cc69f3078113cf63dd6c2964 (patch)
tree      d0265b21d92811b8ae717047264573049a3a7e74 /kernel
parent    65cfe3583b612a22e12fba9a7bbd2d37ca5ad941 (diff)
rcu: Update comments and help text for no more RCU-bh updaters
This commit updates comments and help text to account for the fact that
RCU-bh update-side functions are now simple wrappers for their RCU or
RCU-sched counterparts.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
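For context, the "simple wrapper" shape the message refers to looks roughly
like the sketch below. The definitions at this point in the series live in
include/linux/rcupdate.h; the bodies here are illustrative assumptions, not
the verbatim source:

    /*
     * Illustrative sketch only: the RCU-bh update-side functions reduced
     * to wrappers around the consolidated RCU flavor. Bodies are assumed,
     * not copied from the tree at this commit.
     */
    static inline void synchronize_rcu_bh(void)
    {
            synchronize_rcu();      /* One grace period now serves both. */
    }

    static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
    {
            call_rcu(head, func);   /* Queue on the consolidated flavor. */
    }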
Diffstat (limited to 'kernel')
 kernel/rcu/Kconfig  | 10 +++++-----
 kernel/rcu/tree.c   | 17 +++++++++--------
 kernel/rcu/update.c |  2 +-
 3 files changed, 15 insertions(+), 14 deletions(-)
diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
index 9210379c0353..a0b7f0103ca9 100644
--- a/kernel/rcu/Kconfig
+++ b/kernel/rcu/Kconfig
@@ -229,11 +229,11 @@ config RCU_NOCB_CPU
CPUs specified at boot time by the rcu_nocbs parameter.
For each such CPU, a kthread ("rcuox/N") will be created to
invoke callbacks, where the "N" is the CPU being offloaded,
- and where the "x" is "b" for RCU-bh, "p" for RCU-preempt, and
- "s" for RCU-sched. Nothing prevents this kthread from running
- on the specified CPUs, but (1) the kthreads may be preempted
- between each callback, and (2) affinity or cgroups can be used
- to force the kthreads to run on whatever set of CPUs is desired.
+ and where the "x" is "p" for RCU-preempt and "s" for RCU-sched.
+ Nothing prevents this kthread from running on the specified
+ CPUs, but (1) the kthreads may be preempted between each
+ callback, and (2) affinity or cgroups can be used to force
+ the kthreads to run on whatever set of CPUs is desired.
Say Y here if you want to help to debug reduced OS jitter.
Say N here if you are unsure.
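As a concrete illustration of the help text above (the CPU list is a
made-up example, not taken from the patch):

    # Boot-time example, assuming CONFIG_RCU_NOCB_CPU=y:
    #     rcu_nocbs=1-3
    #
    # With RCU-bh folded into RCU, the offloaded-callback kthreads for
    # those CPUs would be named along the lines of:
    #     rcuop/1 rcuop/2 rcuop/3    ("p" for RCU-preempt)
    #     rcuos/1 rcuos/2 rcuos/3    ("s" for RCU-sched)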
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index aedf81a0abd8..158c58d47b07 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -565,7 +565,8 @@ unsigned long rcu_sched_get_gp_seq(void)
EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq);
/*
- * Return the number of RCU-bh GPs completed thus far for debug & stats.
+ * Return the number of RCU GPs completed thus far for debug & stats.
+ * This is a transitional API and will soon be removed.
*/
unsigned long rcu_bh_get_gp_seq(void)
{
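	/*
	 * Body elided by the hunk. A sketch under the assumption that the
	 * transitional implementation forwards to the consolidated RCU
	 * flavor's grace-period sequence (rcu_state_p is assumed here):
	 */
	return READ_ONCE(rcu_state_p->gp_seq);
}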
@@ -3069,13 +3070,13 @@ void kfree_call_rcu(struct rcu_head *head,
EXPORT_SYMBOL_GPL(kfree_call_rcu);
/*
- * Because a context switch is a grace period for RCU-sched and RCU-bh,
- * any blocking grace-period wait automatically implies a grace period
- * if there is only one CPU online at any point time during execution
- * of either synchronize_sched() or synchronize_rcu_bh(). It is OK to
- * occasionally incorrectly indicate that there are multiple CPUs online
- * when there was in fact only one the whole time, as this just adds
- * some overhead: RCU still operates correctly.
+ * Because a context switch is a grace period for RCU-sched, any blocking
+ * grace-period wait automatically implies a grace period if there
+ * is only one CPU online at any point in time during execution of either
+ * synchronize_sched() or synchronize_rcu_bh(). It is OK to occasionally
+ * incorrectly indicate that there are multiple CPUs online when there
+ * was in fact only one the whole time, as this just adds some overhead:
+ * RCU still operates correctly.
*/
static int rcu_blocking_is_gp(void)
{
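	/*
	 * Body elided by the hunk; in this era of the code, the check the
	 * comment above describes reduces to a single online-CPU test,
	 * roughly:
	 */
	might_sleep();	/* Check for RCU read-side critical section. */
	return num_online_cpus() <= 1;
}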
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 39cb23d22109..9ea87d0aa386 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -298,7 +298,7 @@ EXPORT_SYMBOL_GPL(rcu_read_lock_held);
*
* Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
*
- * Note that rcu_read_lock() is disallowed if the CPU is either idle or
+ * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
* offline from an RCU perspective, so check for those as well.
*/
int rcu_read_lock_bh_held(void)
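The hunk stops at the signature; a sketch of the checks the comment above
describes (the body is not part of this patch, so treat the details as
assumptions):

    int rcu_read_lock_bh_held(void)
    {
            if (!debug_lockdep_rcu_enabled())
                    return 1;       /* Lockdep inactive: err toward "held". */
            if (!rcu_is_watching())
                    return 0;       /* CPU is idle from RCU's perspective. */
            if (!rcu_lockdep_current_cpu_online())
                    return 0;       /* CPU is offline from RCU's perspective. */
            return in_softirq() || irqs_disabled();
    }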