author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-05-15 15:24:41 -0700
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-07-12 15:38:55 -0700
commit    d72193123c81ae6123d108b3be2096f3f13b25a6 (patch)
tree      2ec9801c5c3b7c1f9411ba3a1bc6005d1f154198 /kernel/rcu/rcutorture.c
parent    2e3e5e55010105f9d4351f68e15dbc43402a7794 (diff)
rcutorture: Correctly handle grace-period sequence wrap
The new ->gp_seq grace-period sequence numbers must be shifted down, which gives artifacts when these numbers wrap. This commit therefore enables rcutorture and rcuperf to handle grace-period sequence numbers even if they do wrap. It does this by allowing a special subtraction function to be specified, and this function subtracts before shifting.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
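The shape of such a subtract-before-shift helper is sketched below. This is an illustration of the idea only, not necessarily the kernel's rcu_seq_diff(); the SEQ_CTR_SHIFT name and its value of 2 are assumptions made for the example.

/*
 * Sketch: assume the low SEQ_CTR_SHIFT bits of a grace-period sequence
 * number hold state and the remaining bits count grace periods.
 * Subtracting the raw counters first lets unsigned wraparound cancel
 * out; only afterwards is the result shifted down to a count of
 * elapsed grace periods.
 */
#define SEQ_CTR_SHIFT 2	/* assumed width of the state field */

static unsigned long gp_seq_diff_sketch(unsigned long new, unsigned long old)
{
	return (new - old) >> SEQ_CTR_SHIFT;
}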
Diffstat (limited to 'kernel/rcu/rcutorture.c')
-rw-r--r--  kernel/rcu/rcutorture.c  |  19
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 81fb43530d64..0481c7286875 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -265,6 +265,7 @@ struct rcu_torture_ops {
void (*read_delay)(struct torture_random_state *rrsp);
void (*readunlock)(int idx);
unsigned long (*get_gp_seq)(void);
+ unsigned long (*gp_diff)(unsigned long new, unsigned long old);
void (*deferred_free)(struct rcu_torture *p);
void (*sync)(void);
void (*exp_sync)(void);
@@ -400,6 +401,7 @@ static struct rcu_torture_ops rcu_ops = {
.read_delay = rcu_read_delay,
.readunlock = rcu_torture_read_unlock,
.get_gp_seq = rcu_get_gp_seq,
+ .gp_diff = rcu_seq_diff,
.deferred_free = rcu_torture_deferred_free,
.sync = synchronize_rcu,
.exp_sync = synchronize_rcu_expedited,
@@ -441,6 +443,7 @@ static struct rcu_torture_ops rcu_bh_ops = {
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_bh_torture_read_unlock,
.get_gp_seq = rcu_bh_get_gp_seq,
+ .gp_diff = rcu_seq_diff,
.deferred_free = rcu_bh_torture_deferred_free,
.sync = synchronize_rcu_bh,
.exp_sync = synchronize_rcu_bh_expedited,
@@ -646,6 +649,7 @@ static struct rcu_torture_ops sched_ops = {
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = sched_torture_read_unlock,
.get_gp_seq = rcu_sched_get_gp_seq,
+ .gp_diff = rcu_seq_diff,
.deferred_free = rcu_sched_torture_deferred_free,
.sync = synchronize_sched,
.exp_sync = synchronize_sched_expedited,
@@ -695,6 +699,13 @@ static struct rcu_torture_ops tasks_ops = {
.name = "tasks"
};
+static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
+{
+	if (!cur_ops->gp_diff)
+		return new - old;
+	return cur_ops->gp_diff(new, old);
+}
+
static bool __maybe_unused torturing_tasks(void)
{
return cur_ops == &tasks_ops;
@@ -1127,9 +1138,7 @@ static void rcu_torture_timer(struct timer_list *unused)
rcu_ftrace_dump(DUMP_ALL);
}
__this_cpu_inc(rcu_torture_count[pipe_count]);
-	completed = completed - started;
-	if (completed > ULONG_MAX >> 1)
-		completed = 0; /* Not all gp_seq have full range. */
+	completed = rcutorture_seq_diff(completed, started);
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
@@ -1205,9 +1214,7 @@ rcu_torture_reader(void *arg)
rcu_ftrace_dump(DUMP_ALL);
}
__this_cpu_inc(rcu_torture_count[pipe_count]);
-	completed = completed - started;
-	if (completed > ULONG_MAX >> 1)
-		completed = 0; /* Not all gp_seq have full range. */
+	completed = rcutorture_seq_diff(completed, started);
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
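For context, the stand-alone user-space sketch below (not part of the patch) demonstrates the wrap artifact the commit message describes and why the subtract-before-shift replacement above avoids it. The 2-bit state field and the specific counter values are assumptions chosen for illustration.

#include <stdio.h>

#define SEQ_CTR_SHIFT 2UL	/* assumed: low bits hold grace-period state */

int main(void)
{
	unsigned long old = ~0UL - 3;	/* sequence number just before wrap */
	unsigned long new = 8;		/* sequence number shortly after wrap */

	/* Shifting before subtracting discards the wrap: huge bogus result. */
	unsigned long shift_first = (new >> SEQ_CTR_SHIFT) - (old >> SEQ_CTR_SHIFT);

	/* Subtracting first lets unsigned wraparound cancel out, then shift. */
	unsigned long sub_first = (new - old) >> SEQ_CTR_SHIFT;

	printf("shift first:    %lu\n", shift_first);	/* enormous value */
	printf("subtract first: %lu\n", sub_first);	/* 3 grace periods */
	return 0;
}

Ops that do not supply a .gp_diff method (for example tasks_ops above) simply fall back to plain subtraction in rcutorture_seq_diff().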