author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2015-04-15 12:08:22 -0700
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2015-05-27 12:59:01 -0700
commit    eab128e8305f2bc4c91406031aab26d86fecced6 (patch)
tree      80aec9d6b5d0d3cc999e3a3bd02cf68e211cc6cf /kernel/rcu
parent    7d0ae8086b828311250c6afdf800b568ac9bd693 (diff)
rcu: Modulate grace-period slow init to normalize delay
Currently, the larger the gp_init_delay boot parameter, the slower
rcutorture will sequence through grace periods. This commit avoids this
issue by decreasing the probability of slowing initialization of a given
grace period as the degree of slowness increases.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
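As a sanity check on the arithmetic (a standalone userspace sketch, not
part of the patch; the rcu_num_nodes value of 4 and the sample delays are
assumed for illustration), the following program shows that with the new
modulus the average initialization slowdown per grace period works out to
1/PER_RCU_NODE_PERIOD jiffies regardless of the gp_init_delay setting:

	#include <stdio.h>

	#define PER_RCU_NODE_PERIOD 3	/* Grace periods between delays, as in the patch. */

	int main(void)
	{
		int rcu_num_nodes = 4;		/* Assumed combining-tree size. */
		int delays[] = { 1, 2, 5, 10 };	/* Assumed gp_init_delay settings. */

		for (int i = 0; i < 4; i++) {
			int gp_init_delay = delays[i];

			/*
			 * A matching grace period sleeps gp_init_delay jiffies
			 * at each of rcu_num_nodes nodes, and such a grace
			 * period now occurs only once per rcu_num_nodes *
			 * PER_RCU_NODE_PERIOD * gp_init_delay grace periods.
			 */
			double avg = (double)(rcu_num_nodes * gp_init_delay) /
				     (rcu_num_nodes * PER_RCU_NODE_PERIOD * gp_init_delay);
			printf("gp_init_delay=%2d -> average %.3f jiffies per grace period\n",
			       gp_init_delay, avg);
		}
		return 0;
	}

Every line prints 0.333, illustrating the commit's claim that the overall
slowdown stays constant; under the old modulus (without the gp_init_delay
factor) the average would instead be gp_init_delay / PER_RCU_NODE_PERIOD,
growing linearly with the delay.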
Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/tree.c | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 0628df155970..c34422d92aa9 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -169,7 +169,17 @@ module_param(gp_init_delay, int, 0644);
#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
static const int gp_init_delay;
#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
-#define PER_RCU_NODE_PERIOD 10 /* Number of grace periods between delays. */
+
+/*
+ * Number of grace periods between delays, normalized by the duration of
+ * the delay. The longer the delay, the more the grace periods between
+ * each delay. The reason for this normalization is that it means that,
+ * for non-zero delays, the overall slowdown of grace periods is constant
+ * regardless of the duration of the delay. This arrangement balances
+ * the need for long delays to increase some race probabilities with the
+ * need for fast grace periods to increase other race probabilities.
+ */
+#define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays. */
/*
 * Track the rcutorture test sequence number and the update version
@@ -1848,7 +1858,8 @@ static int rcu_gp_init(struct rcu_state *rsp)
 		cond_resched_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
 		if (gp_init_delay > 0 &&
-		    !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD)))
+		    !(rsp->gpnum %
+		      (rcu_num_nodes * PER_RCU_NODE_PERIOD * gp_init_delay)))
 			schedule_timeout_uninterruptible(gp_init_delay);
 	}
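For anyone wanting to exercise this path: the delay code is compiled in
only when CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y, and because gp_init_delay
is declared via module_param(gp_init_delay, int, 0644), it can (assuming
the usual rcutree module-name prefix for built-in RCU) be set on the
kernel command line or adjusted at runtime through sysfs:

	rcutree.gp_init_delay=3
	echo 3 > /sys/module/rcutree/parameters/gp_init_delay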