commit f6a12f34a448cc8a624070fd365c29c890138a48
Author:    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2016-01-30 17:57:35 -0800
Committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2016-03-31 13:34:08 -0700
Tree:      c7fc5c50f1bf0c5af3b6a7d5f2dc61f43a4cfca3
Parent:    d40a4f09a448382961fa9b1a2f7d4f34813f0273
rcu: Enforce expedited-GP fairness via funnel wait queue
The current mutex-based funnel-locking approach used by expedited grace periods is subject to severe unfairness. The problem arises when a few tasks, making a path from leaves to root, all wake up before other tasks do. A new task can then follow this path all the way to the root, which needlessly delays tasks whose grace period is done, but who do not happen to acquire the lock quickly enough.

This commit avoids this problem by maintaining per-rcu_node wait queues, along with a per-rcu_node counter that tracks the latest grace period sought by an earlier task to visit this node. If that grace period would satisfy the current task, the task waits on the current rcu_node structure, using a pair of wait queues provided for that purpose, instead of proceeding up the tree. This decouples the awakening of old tasks from the arrival of new tasks.

If the wakeups prove to be a bottleneck, additional kthreads can be brought to bear for that purpose.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
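This page shows only the tree_plugin.h portion of the commit; the funnel-wait machinery itself (exp_funnel_lock(), rcu_exp_wake()) is defined elsewhere in the patch. As a rough illustration of the idea, here is a minimal user-space sketch using POSIX threads. All names in it (struct exp_node, exp_seq_rq, funnel_wait(), funnel_wake()) are illustrative stand-ins rather than the kernel's API, and a single condition variable stands in for the kernel's pair of per-node wait queues:

/*
 * Minimal user-space sketch of the funnel-wait idea.  Nodes must be
 * initialized with pthread_mutex_init()/pthread_cond_init() before use.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct exp_node {
	pthread_mutex_t lock;
	pthread_cond_t wq;		/* stands in for the wait-queue pair */
	unsigned long exp_seq_rq;	/* latest GP sequence requested here */
	struct exp_node *parent;	/* NULL at the root */
};

/* Latest completed grace-period sequence, published by the GP driver. */
static unsigned long exp_seq_done;
static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;

static bool gp_done(unsigned long s)
{
	bool done;

	pthread_mutex_lock(&done_lock);
	done = exp_seq_done >= s;
	pthread_mutex_unlock(&done_lock);
	return done;
}

/*
 * Walk from a leaf toward the root.  If an earlier task already
 * requested a grace period that satisfies us, park on this node's
 * wait queue rather than climbing further.  Returns true if our
 * grace period completed while we waited, false if we reached the
 * root and must drive the grace period ourselves.
 */
static bool funnel_wait(struct exp_node *rnp, unsigned long s)
{
	for (; rnp; rnp = rnp->parent) {
		if (gp_done(s))
			return true;
		pthread_mutex_lock(&rnp->lock);
		if (rnp->exp_seq_rq >= s) {
			/* An earlier task will cover us: wait here. */
			while (!gp_done(s))
				pthread_cond_wait(&rnp->wq, &rnp->lock);
			pthread_mutex_unlock(&rnp->lock);
			return true;
		}
		rnp->exp_seq_rq = s;	/* record request for later tasks */
		pthread_mutex_unlock(&rnp->lock);
	}
	return false;	/* caller now takes the GP-driver mutex */
}

/* The GP driver publishes completion, then awakens each node's waiters. */
static void funnel_wake(struct exp_node *nodes[], size_t n, unsigned long s)
{
	size_t i;

	pthread_mutex_lock(&done_lock);
	if (exp_seq_done < s)
		exp_seq_done = s;
	pthread_mutex_unlock(&done_lock);
	for (i = 0; i < n; i++) {
		pthread_mutex_lock(&nodes[i]->lock);
		pthread_cond_broadcast(&nodes[i]->wq);
		pthread_mutex_unlock(&nodes[i]->lock);
	}
}

In this sketch funnel_wait() plays the role of exp_funnel_lock() and funnel_wake() that of rcu_exp_wake(). Because completion is published before any broadcast, and each broadcast is serialized against waiters by the per-node lock, a parked task cannot check gp_done(), see false, and then miss its wakeup. This is the decoupling of old-task wakeups from new-task arrivals that the commit message describes.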
Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--  kernel/rcu/tree_plugin.h | 16 +++++-----------
1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 36e94aed38a7..c82c3640493f 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -738,8 +738,6 @@ static void sync_rcu_exp_handler(void *info)
  */
 void synchronize_rcu_expedited(void)
 {
-	struct rcu_node *rnp;
-	struct rcu_node *rnp_unlock;
 	struct rcu_state *rsp = rcu_state_p;
 	unsigned long s;
 
@@ -752,8 +750,7 @@ void synchronize_rcu_expedited(void)
 	s = rcu_exp_gp_seq_snap(rsp);
 	trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
 
-	rnp_unlock = exp_funnel_lock(rsp, s);
-	if (rnp_unlock == NULL)
+	if (exp_funnel_lock(rsp, s))
 		return;  /* Someone else did our work for us. */
 
 	rcu_exp_gp_seq_start(rsp);
@@ -763,16 +760,13 @@ void synchronize_rcu_expedited(void)
 	sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);
 
 	/* Wait for snapshotted ->blkd_tasks lists to drain. */
-	rnp = rcu_get_root(rsp);
 	synchronize_sched_expedited_wait(rsp);
-
-	/* Clean up and exit. */
 	rcu_exp_gp_seq_end(rsp);
 	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
-	mutex_unlock(&rnp_unlock->exp_funnel_mutex);
-	trace_rcu_exp_funnel_lock(rsp->name, rnp_unlock->level,
-				  rnp_unlock->grplo, rnp_unlock->grphi,
-				  TPS("rel"));
+	rcu_exp_wake(rsp, s);
+
+	trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
+	mutex_unlock(&rsp->exp_mutex);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
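For readability, here is the post-patch body of synchronize_rcu_expedited() as assembled from the hunks above; code falling between hunks is elided and marked with "...":

void synchronize_rcu_expedited(void)
{
	struct rcu_state *rsp = rcu_state_p;
	unsigned long s;
	...
	s = rcu_exp_gp_seq_snap(rsp);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));

	if (exp_funnel_lock(rsp, s))
		return;  /* Someone else did our work for us. */

	rcu_exp_gp_seq_start(rsp);
	...
	sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	synchronize_sched_expedited_wait(rsp);
	rcu_exp_gp_seq_end(rsp);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
	rcu_exp_wake(rsp, s);

	trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
	mutex_unlock(&rsp->exp_mutex);
}

Note the ordering: the grace period is marked ended and traced before the tasks parked by exp_funnel_lock() are awakened via rcu_exp_wake(), and rsp->exp_mutex, which now serializes the actual driving of expedited grace periods, is released only after the wakeups have been issued.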