author     Paul E. McKenney <paulmck@linux.ibm.com>   2019-07-15 06:06:40 -0700
committer  Paul E. McKenney <paulmck@linux.ibm.com>   2019-08-13 14:38:24 -0700
commit     296181d78df9892e08e794f2a9a4d2c38f9acedb (patch)
tree       ebec54a8019bec324e60c76ac6980f80d4823f0f /kernel/rcu/tree_plugin.h
parent     1d5a81c18dc68fc38a52e8dab1992a043a358927 (diff)
rcu/nocb: Reduce __call_rcu_nocb_wake() leaf rcu_node ->lock contention
Currently, __call_rcu_nocb_wake() advances callbacks each time that it
detects excessive numbers of callbacks, though only if it succeeds in
conditionally acquiring its leaf rcu_node structure's ->lock.  Despite
the conditional acquisition of ->lock, this does increase contention.
This commit therefore avoids advancing callbacks unless there are
callbacks in ->cblist whose grace period has completed and advancing
has not yet been done during this jiffy.

Note that this decision does not take the presence of new callbacks
into account.  That is because on this code path, there will always be
at least one new callback, namely the one we just enqueued.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
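To make the gating easier to follow outside the kernel sources, here is a
minimal user-space sketch of the same "at most once per jiffy, and only when
it can make progress" decision.  It is a toy model, not the kernel code (the
real change is in the diff below); names such as fake_jiffies, last_adv_time,
have_done_cbs(), and advance_cbs() are illustrative stand-ins for jiffies,
rdp->nocb_gp_adv_time, the rcu_segcblist_nextgp()/rcu_seq_done() check, and
rcu_advance_cbs_nowake() respectively.

/*
 * Toy user-space model of the gating added by this commit, for
 * illustration only.  None of the names below are kernel symbols.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long fake_jiffies;          /* stands in for the jiffies counter */
static unsigned long last_adv_time = ~0UL;  /* stands in for rdp->nocb_gp_adv_time */

/* Pretend predicate: does the callback list hold callbacks whose GP is done? */
static bool have_done_cbs(void)
{
        return fake_jiffies % 3 == 0;
}

static void advance_cbs(void)
{
        printf("advanced callbacks at jiffy %lu\n", fake_jiffies);
}

/* Called on the "many callbacks queued" path. */
static void on_excess_callbacks(void)
{
        unsigned long j = fake_jiffies;

        /* Advance at most once per jiffy, and only if it can make progress. */
        if (j != last_adv_time && have_done_cbs()) {
                advance_cbs();
                last_adv_time = j;
        }
}

int main(void)
{
        for (fake_jiffies = 0; fake_jiffies < 10; fake_jiffies++) {
                on_excess_callbacks();
                on_excess_callbacks();  /* a second burst in the same jiffy is skipped */
        }
        return 0;
}

The second call per jiffy in main() shows the intended effect: repeated
bursts of enqueues within the same jiffy skip the advance, which in the
kernel avoids repeatedly attempting the leaf rcu_node ->lock acquisition.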
Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--  kernel/rcu/tree_plugin.h  13
1 files changed, 10 insertions, 3 deletions
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index f6f23a16bd64..f56fb4e97a8e 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1872,6 +1872,8 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
                                  unsigned long flags)
                                  __releases(rdp->nocb_lock)
 {
+        unsigned long cur_gp_seq;
+        unsigned long j;
         long len;
         struct task_struct *t;
@@ -1900,12 +1902,17 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
         } else if (len > rdp->qlen_last_fqs_check + qhimark) {
                 /* ... or if many callbacks queued. */
                 rdp->qlen_last_fqs_check = len;
-                if (rdp->nocb_cb_sleep ||
-                    !rcu_segcblist_ready_cbs(&rdp->cblist)) {
+                j = jiffies;
+                if (j != rdp->nocb_gp_adv_time &&
+                    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
+                    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
                         rcu_advance_cbs_nowake(rdp->mynode, rdp);
+                        rdp->nocb_gp_adv_time = j;
+                }
+                if (rdp->nocb_cb_sleep ||
+                    !rcu_segcblist_ready_cbs(&rdp->cblist))
                         wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
                                            TPS("WakeOvfIsDeferred"));
-                }
                 rcu_nocb_unlock_irqrestore(rdp, flags);
         } else {
                 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));