Diffstat (limited to 'kernel/rcu/rcutorture.c')
-rw-r--r--  kernel/rcu/rcutorture.c  581
1 file changed, 419 insertions, 162 deletions
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 7567ca8e743c..d26fb1d33ed9 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -51,14 +51,15 @@
#include "rcu.h"
+MODULE_DESCRIPTION("Read-Copy Update module-based torture test facility");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT_1 8 /* Put SRCU index in upper bits. */
-#define RCUTORTURE_RDR_MASK_1 (1 << RCUTORTURE_RDR_SHIFT_1)
-#define RCUTORTURE_RDR_SHIFT_2 9 /* Put SRCU index in upper bits. */
-#define RCUTORTURE_RDR_MASK_2 (1 << RCUTORTURE_RDR_SHIFT_2)
+#define RCUTORTURE_RDR_MASK_1 (0xff << RCUTORTURE_RDR_SHIFT_1)
+#define RCUTORTURE_RDR_SHIFT_2 16 /* Put SRCU index in upper bits. */
+#define RCUTORTURE_RDR_MASK_2 (0xff << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */
@@ -70,6 +71,9 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@
#define RCUTORTURE_MAX_EXTEND \
(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
+#define RCUTORTURE_RDR_ALLBITS \
+ (RCUTORTURE_MAX_EXTEND | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2 | \
+ RCUTORTURE_RDR_MASK_1 | RCUTORTURE_RDR_MASK_2)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */
/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
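A minimal stand-alone sketch (not from the patch) of the widened index packing defined above: a ->readlock() return value now occupies bits 8..15 (reader 1) or 16..23 (reader 2) of the reader-state word instead of a single bit, mirroring the shift/mask arithmetic this patch adds to rcutorture_one_extend():

#include <assert.h>
#include <stdio.h>

#define RCUTORTURE_RDR_SHIFT_1	8			/* reader index 1: bits 8..15 */
#define RCUTORTURE_RDR_MASK_1	(0xff << RCUTORTURE_RDR_SHIFT_1)

/* Pack a (possibly multi-bit) ->readlock() index into the reader state. */
static int pack_idx1(int readstate, int idx)
{
	return readstate | ((idx << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1);
}

/* Recover the index for the matching ->readunlock() call. */
static int unpack_idx1(int readstate)
{
	return (readstate & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1;
}

int main(void)
{
	int rs = pack_idx1(0x01 /* RCUTORTURE_RDR_BH */, 0x5 /* multi-bit SRCU index */);

	assert(unpack_idx1(rs) == 0x5);
	printf("readstate %#x -> index %#x\n", rs, unpack_idx1(rs));
	return 0;
}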
@@ -88,12 +92,20 @@ torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait
torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
torture_param(bool, gp_cond_exp_full, false,
"Use conditional/async full-stateexpedited GP wait primitives");
+torture_param(int, gp_cond_wi, 16 * USEC_PER_SEC / HZ,
+ "Wait interval for normal conditional grace periods, us (default 16 jiffies)");
+torture_param(int, gp_cond_wi_exp, 128,
+ "Wait interval for expedited conditional grace periods, us (default 128 us)");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
+torture_param(int, gp_poll_wi, 16 * USEC_PER_SEC / HZ,
+ "Wait interval for normal polled grace periods, us (default 16 jiffies)");
+torture_param(int, gp_poll_wi_exp, 128,
+ "Wait interval for expedited polled grace periods, us (default 128 us)");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
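Worked example for the new wait-interval defaults above: 16 * USEC_PER_SEC / HZ is just 16 jiffies expressed in microseconds, so with HZ=1000 the normal conditional/polled wait interval (gp_cond_wi, gp_poll_wi) defaults to 16 * 1000000 / 1000 = 16000 us, and with HZ=250 it defaults to 16 * 1000000 / 250 = 64000 us; the expedited variants (gp_cond_wi_exp, gp_poll_wi_exp) default to a flat 128 us.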
@@ -105,8 +117,11 @@ torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
+torture_param(int, preempt_duration, 0, "Preemption duration (ms), zero to disable");
+torture_param(int, preempt_interval, MSEC_PER_SEC, "Interval between preemptions (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
+torture_param(int, reader_flavor, SRCU_READ_FLAVOR_NORMAL, "Reader flavors to use, one per bit.");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
@@ -114,6 +129,7 @@ torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s
torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
+torture_param(int, stall_cpu_repeat, 0, "Number of additional stalls after the first one.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
@@ -143,6 +159,7 @@ static struct task_struct **fwd_prog_tasks;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;
+static struct task_struct *preempt_task;
#define RCU_TORTURE_PIPE_LEN 10
@@ -253,10 +270,13 @@ struct rt_read_seg {
unsigned long rt_delay_ms;
unsigned long rt_delay_us;
bool rt_preempted;
+ int rt_cpu;
+ int rt_end_cpu;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;
+static int rt_read_preempted;
static const char *rcu_torture_writer_state_getname(void)
{
@@ -347,7 +367,8 @@ struct rcu_torture_ops {
void (*read_delay)(struct torture_random_state *rrsp,
struct rt_read_seg *rtrsp);
void (*readunlock)(int idx);
- int (*readlock_held)(void);
+ int (*readlock_held)(void); // lockdep.
+ int (*readlock_nesting)(void); // actual nesting, if available, -1 if not.
unsigned long (*get_gp_seq)(void);
unsigned long (*gp_diff)(unsigned long new, unsigned long old);
void (*deferred_free)(struct rcu_torture *p);
@@ -365,8 +386,6 @@ struct rcu_torture_ops {
bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2);
unsigned long (*get_gp_state)(void);
void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
- unsigned long (*get_gp_completed)(void);
- void (*get_gp_completed_full)(struct rcu_gp_oldstate *rgosp);
unsigned long (*start_gp_poll)(void);
void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
bool (*poll_gp_state)(unsigned long oldstate);
@@ -374,6 +393,8 @@ struct rcu_torture_ops {
bool (*poll_need_2gp)(bool poll, bool poll_full);
void (*cond_sync)(unsigned long oldstate);
void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
+ int poll_active;
+ int poll_active_full;
call_rcu_func_t call;
void (*cb_barrier)(void);
void (*fqs)(void);
@@ -381,12 +402,18 @@ struct rcu_torture_ops {
void (*gp_kthread_dbg)(void);
bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
int (*stall_dur)(void);
+ void (*get_gp_data)(int *flags, unsigned long *gp_seq);
+ void (*gp_slow_register)(atomic_t *rgssp);
+ void (*gp_slow_unregister)(atomic_t *rgssp);
+ bool (*reader_blocked)(void);
long cbflood_max;
int irq_capable;
int can_boost;
int extendables;
int slow_gps;
int no_pi_lock;
+ int debug_objects;
+ int start_poll_irqsoff;
const char *name;
};
@@ -437,10 +464,8 @@ rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
rtrsp->rt_delay_us = shortdelay_us;
}
if (!preempt_count() &&
- !(torture_random(rrsp) % (nrealreaders * 500))) {
+ !(torture_random(rrsp) % (nrealreaders * 500)))
torture_preempt_schedule(); /* QS only if preemptible. */
- rtrsp->rt_preempted = true;
- }
}
static void rcu_torture_read_unlock(int idx)
@@ -448,6 +473,15 @@ static void rcu_torture_read_unlock(int idx)
rcu_read_unlock();
}
+static int rcu_torture_readlock_nesting(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RCU))
+ return rcu_preempt_depth();
+ if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
+ return (preempt_count() & PREEMPT_MASK);
+ return -1;
+}
+
/*
* Update callback in the pipe. This should be invoked after a grace period.
*/
@@ -461,12 +495,13 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
WRITE_ONCE(rp->rtort_chkp, NULL);
smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
}
- i = READ_ONCE(rp->rtort_pipe_count);
+ i = rp->rtort_pipe_count;
if (i > RCU_TORTURE_PIPE_LEN)
i = RCU_TORTURE_PIPE_LEN;
atomic_inc(&rcu_torture_wcount[i]);
WRITE_ONCE(rp->rtort_pipe_count, i + 1);
- if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
+ ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
+ if (i + 1 >= RCU_TORTURE_PIPE_LEN) {
rp->rtort_mbtest = 0;
return true;
}
@@ -536,6 +571,7 @@ static struct rcu_torture_ops rcu_ops = {
.read_delay = rcu_read_delay,
.readunlock = rcu_torture_read_unlock,
.readlock_held = torture_readlock_not_held,
+ .readlock_nesting = rcu_torture_readlock_nesting,
.get_gp_seq = rcu_get_gp_seq,
.gp_diff = rcu_seq_diff,
.deferred_free = rcu_torture_deferred_free,
@@ -547,8 +583,6 @@ static struct rcu_torture_ops rcu_ops = {
.get_comp_state_full = get_completed_synchronize_rcu_full,
.get_gp_state = get_state_synchronize_rcu,
.get_gp_state_full = get_state_synchronize_rcu_full,
- .get_gp_completed = get_completed_synchronize_rcu,
- .get_gp_completed_full = get_completed_synchronize_rcu_full,
.start_gp_poll = start_poll_synchronize_rcu,
.start_gp_poll_full = start_poll_synchronize_rcu_full,
.poll_gp_state = poll_state_synchronize_rcu,
@@ -556,21 +590,31 @@ static struct rcu_torture_ops rcu_ops = {
.poll_need_2gp = rcu_poll_need_2gp,
.cond_sync = cond_synchronize_rcu,
.cond_sync_full = cond_synchronize_rcu_full,
+ .poll_active = NUM_ACTIVE_RCU_POLL_OLDSTATE,
+ .poll_active_full = NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE,
.get_gp_state_exp = get_state_synchronize_rcu,
.start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
.start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
.poll_gp_state_exp = poll_state_synchronize_rcu,
.cond_sync_exp = cond_synchronize_rcu_expedited,
+ .cond_sync_exp_full = cond_synchronize_rcu_expedited_full,
.call = call_rcu_hurry,
.cb_barrier = rcu_barrier,
.fqs = rcu_force_quiescent_state,
- .stats = NULL,
.gp_kthread_dbg = show_rcu_gp_kthreads,
.check_boost_failed = rcu_check_boost_fail,
.stall_dur = rcu_jiffies_till_stall_check,
+ .get_gp_data = rcutorture_get_gp_data,
+ .gp_slow_register = rcu_gp_slow_register,
+ .gp_slow_unregister = rcu_gp_slow_unregister,
+ .reader_blocked = IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)
+ ? has_rcu_reader_blocked
+ : NULL,
.irq_capable = 1,
.can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
.extendables = RCUTORTURE_MAX_EXTEND,
+ .debug_objects = 1,
+ .start_poll_irqsoff = 1,
.name = "rcu"
};
@@ -611,10 +655,8 @@ static struct rcu_torture_ops rcu_busted_ops = {
.sync = synchronize_rcu_busted,
.exp_sync = synchronize_rcu_busted,
.call = call_rcu_busted,
- .cb_barrier = NULL,
- .fqs = NULL,
- .stats = NULL,
.irq_capable = 1,
+ .extendables = RCUTORTURE_MAX_EXTEND,
.name = "busted"
};
@@ -627,12 +669,32 @@ static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;
static struct rcu_torture_ops srcud_ops;
+static void srcu_get_gp_data(int *flags, unsigned long *gp_seq)
+{
+ srcutorture_get_gp_data(srcu_ctlp, flags, gp_seq);
+}
+
static int srcu_torture_read_lock(void)
{
- if (cur_ops == &srcud_ops)
- return srcu_read_lock_nmisafe(srcu_ctlp);
- else
- return srcu_read_lock(srcu_ctlp);
+ int idx;
+ int ret = 0;
+
+ if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) {
+ idx = srcu_read_lock(srcu_ctlp);
+ WARN_ON_ONCE(idx & ~0x1);
+ ret += idx;
+ }
+ if (reader_flavor & SRCU_READ_FLAVOR_NMI) {
+ idx = srcu_read_lock_nmisafe(srcu_ctlp);
+ WARN_ON_ONCE(idx & ~0x1);
+ ret += idx << 1;
+ }
+ if (reader_flavor & SRCU_READ_FLAVOR_LITE) {
+ idx = srcu_read_lock_lite(srcu_ctlp);
+ WARN_ON_ONCE(idx & ~0x1);
+ ret += idx << 2;
+ }
+ return ret;
}
static void
@@ -656,10 +718,13 @@ srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
static void srcu_torture_read_unlock(int idx)
{
- if (cur_ops == &srcud_ops)
- srcu_read_unlock_nmisafe(srcu_ctlp, idx);
- else
- srcu_read_unlock(srcu_ctlp, idx);
+ WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1)));
+ if (reader_flavor & SRCU_READ_FLAVOR_LITE)
+ srcu_read_unlock_lite(srcu_ctlp, (idx & 0x4) >> 2);
+ if (reader_flavor & SRCU_READ_FLAVOR_NMI)
+ srcu_read_unlock_nmisafe(srcu_ctlp, (idx & 0x2) >> 1);
+ if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL))
+ srcu_read_unlock(srcu_ctlp, idx & 0x1);
}
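For reference, a stand-alone sketch (not from the patch; the flavor bit values are assumptions matching SRCU_READ_FLAVOR_NORMAL=0x1, _NMI=0x2, _LITE=0x4) of the index encoding used above: each enabled reader flavor contributes its 0/1 srcu index in its own bit of the combined return value, so the unlock path can peel the flavors off independently:

#include <assert.h>

#define FLAVOR_NORMAL	0x1	/* assumed value of SRCU_READ_FLAVOR_NORMAL */
#define FLAVOR_NMI	0x2	/* assumed value of SRCU_READ_FLAVOR_NMI */
#define FLAVOR_LITE	0x4	/* assumed value of SRCU_READ_FLAVOR_LITE */

/* Combine per-flavor srcu indexes (each 0 or 1) into one return value. */
static int encode_idx(int flavors, int idx_normal, int idx_nmi, int idx_lite)
{
	int ret = 0;

	if (flavors & FLAVOR_NORMAL)
		ret += idx_normal;		/* bit 0 */
	if (flavors & FLAVOR_NMI)
		ret += idx_nmi << 1;		/* bit 1 */
	if (flavors & FLAVOR_LITE)
		ret += idx_lite << 2;		/* bit 2 */
	return ret;
}

int main(void)
{
	int idx = encode_idx(FLAVOR_NORMAL | FLAVOR_LITE, 1, 0, 1);

	assert((idx & 0x1) == 1);		/* normal reader's index */
	assert(((idx & 0x4) >> 2) == 1);	/* lite reader's index */
	return 0;
}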
static int torture_srcu_read_lock_held(void)
@@ -729,15 +794,20 @@ static struct rcu_torture_ops srcu_ops = {
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
.exp_sync = srcu_torture_synchronize_expedited,
+ .same_gp_state = same_state_synchronize_srcu,
+ .get_comp_state = get_completed_synchronize_srcu,
.get_gp_state = srcu_torture_get_gp_state,
.start_gp_poll = srcu_torture_start_gp_poll,
.poll_gp_state = srcu_torture_poll_gp_state,
+ .poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE,
.call = srcu_torture_call,
.cb_barrier = srcu_torture_barrier,
.stats = srcu_torture_stats,
+ .get_gp_data = srcu_get_gp_data,
.cbflood_max = 50000,
.irq_capable = 1,
.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
+ .debug_objects = 1,
.name = "srcu"
};
@@ -767,15 +837,20 @@ static struct rcu_torture_ops srcud_ops = {
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
.exp_sync = srcu_torture_synchronize_expedited,
+ .same_gp_state = same_state_synchronize_srcu,
+ .get_comp_state = get_completed_synchronize_srcu,
.get_gp_state = srcu_torture_get_gp_state,
.start_gp_poll = srcu_torture_start_gp_poll,
.poll_gp_state = srcu_torture_poll_gp_state,
+ .poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE,
.call = srcu_torture_call,
.cb_barrier = srcu_torture_barrier,
.stats = srcu_torture_stats,
+ .get_gp_data = srcu_get_gp_data,
.cbflood_max = 50000,
.irq_capable = 1,
.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
+ .debug_objects = 1,
.name = "srcud"
};
@@ -811,7 +886,7 @@ static void synchronize_rcu_trivial(void)
int cpu;
for_each_online_cpu(cpu) {
- torture_sched_setaffinity(current->pid, cpumask_of(cpu));
+ torture_sched_setaffinity(current->pid, cpumask_of(cpu), true);
WARN_ON_ONCE(raw_smp_processor_id() != cpu);
}
}
@@ -837,8 +912,6 @@ static struct rcu_torture_ops trivial_ops = {
.get_gp_seq = rcu_no_completed,
.sync = synchronize_rcu_trivial,
.exp_sync = synchronize_rcu_trivial,
- .fqs = NULL,
- .stats = NULL,
.irq_capable = 1,
.name = "trivial"
};
@@ -881,8 +954,7 @@ static struct rcu_torture_ops tasks_ops = {
.call = call_rcu_tasks,
.cb_barrier = rcu_barrier_tasks,
.gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread,
- .fqs = NULL,
- .stats = NULL,
+ .get_gp_data = rcu_tasks_get_gp_data,
.irq_capable = 1,
.slow_gps = 1,
.name = "tasks"
@@ -903,11 +975,6 @@ static struct rcu_torture_ops tasks_ops = {
* Definitions for rude RCU-tasks torture testing.
*/
-static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
-{
- call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
-}
-
static struct rcu_torture_ops tasks_rude_ops = {
.ttype = RCU_TASKS_RUDE_FLAVOR,
.init = rcu_sync_torture_init,
@@ -915,15 +982,11 @@ static struct rcu_torture_ops tasks_rude_ops = {
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_torture_read_unlock_trivial,
.get_gp_seq = rcu_no_completed,
- .deferred_free = rcu_tasks_rude_torture_deferred_free,
.sync = synchronize_rcu_tasks_rude,
.exp_sync = synchronize_rcu_tasks_rude,
- .call = call_rcu_tasks_rude,
- .cb_barrier = rcu_barrier_tasks_rude,
.gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
+ .get_gp_data = rcu_tasks_rude_get_gp_data,
.cbflood_max = 50000,
- .fqs = NULL,
- .stats = NULL,
.irq_capable = 1,
.name = "tasks-rude"
};
@@ -973,9 +1036,8 @@ static struct rcu_torture_ops tasks_tracing_ops = {
.call = call_rcu_tasks_trace,
.cb_barrier = rcu_barrier_tasks_trace,
.gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
+ .get_gp_data = rcu_tasks_trace_get_gp_data,
.cbflood_max = 50000,
- .fqs = NULL,
- .stats = NULL,
.irq_capable = 1,
.slow_gps = 1,
.name = "tasks-tracing"
@@ -1050,8 +1112,13 @@ static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *star
// At most one persisted message per boost test.
j = jiffies;
lp = READ_ONCE(last_persist);
- if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
- pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
+ if (time_after(j, lp + mininterval) &&
+ cmpxchg(&last_persist, lp, j) == lp) {
+ if (cpu < 0)
+ pr_info("Boost inversion persisted: QS from all CPUs\n");
+ else
+ pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
+ }
return false; // passed on a technicality
}
VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
@@ -1308,6 +1375,8 @@ static void rcu_torture_write_types(void)
} else if (gp_sync && !cur_ops->sync) {
pr_alert("%s: gp_sync without primitives.\n", __func__);
}
+ pr_alert("%s: Testing %d update types.\n", __func__, nsynctypes);
+ pr_info("%s: gp_cond_wi %d gp_cond_wi_exp %d gp_poll_wi %d gp_poll_wi_exp %d\n", __func__, gp_cond_wi, gp_cond_wi_exp, gp_poll_wi, gp_poll_wi_exp);
}
/*
@@ -1364,13 +1433,20 @@ rcu_torture_writer(void *arg)
int i;
int idx;
int oldnice = task_nice(current);
- struct rcu_gp_oldstate rgo[NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE];
+ struct rcu_gp_oldstate *rgo = NULL;
+ int rgo_size = 0;
struct rcu_torture *rp;
struct rcu_torture *old_rp;
static DEFINE_TORTURE_RANDOM(rand);
+ unsigned long stallsdone = jiffies;
bool stutter_waited;
- unsigned long ulo[NUM_ACTIVE_RCU_POLL_OLDSTATE];
+ unsigned long *ulo = NULL;
+ int ulo_size = 0;
+ // If a new stall test is added, this must be adjusted.
+ if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu)
+ stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) *
+ HZ * (stall_cpu_repeat + 1);
VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
if (!can_expedite)
pr_alert("%s" TORTURE_FLAG
@@ -1387,6 +1463,16 @@ rcu_torture_writer(void *arg)
torture_kthread_stopping("rcu_torture_writer");
return 0;
}
+ if (cur_ops->poll_active > 0) {
+ ulo = kzalloc(cur_ops->poll_active * sizeof(ulo[0]), GFP_KERNEL);
+ if (!WARN_ON(!ulo))
+ ulo_size = cur_ops->poll_active;
+ }
+ if (cur_ops->poll_active_full > 0) {
+ rgo = kzalloc(cur_ops->poll_active_full * sizeof(rgo[0]), GFP_KERNEL);
+ if (!WARN_ON(!rgo))
+ rgo_size = cur_ops->poll_active_full;
+ }
do {
rcu_torture_writer_state = RTWS_FIXED_DELAY;
@@ -1395,6 +1481,7 @@ rcu_torture_writer(void *arg)
if (rp == NULL)
continue;
rp->rtort_pipe_count = 0;
+ ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
rcu_torture_writer_state = RTWS_DELAY;
udelay(torture_random(&rand) & 0x3ff);
rcu_torture_writer_state = RTWS_REPLACE;
@@ -1410,6 +1497,7 @@ rcu_torture_writer(void *arg)
atomic_inc(&rcu_torture_wcount[i]);
WRITE_ONCE(old_rp->rtort_pipe_count,
old_rp->rtort_pipe_count + 1);
+ ASSERT_EXCLUSIVE_WRITER(old_rp->rtort_pipe_count);
// Make sure readers block polled grace periods.
if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
@@ -1421,8 +1509,8 @@ rcu_torture_writer(void *arg)
rcu_torture_writer_state_getname(),
rcu_torture_writer_state,
cookie, cur_ops->get_gp_state());
- if (cur_ops->get_gp_completed) {
- cookie = cur_ops->get_gp_completed();
+ if (cur_ops->get_comp_state) {
+ cookie = cur_ops->get_comp_state();
WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
}
cur_ops->readunlock(idx);
@@ -1436,8 +1524,8 @@ rcu_torture_writer(void *arg)
rcu_torture_writer_state_getname(),
rcu_torture_writer_state,
cpumask_pr_args(cpu_online_mask));
- if (cur_ops->get_gp_completed_full) {
- cur_ops->get_gp_completed_full(&cookie_full);
+ if (cur_ops->get_comp_state_full) {
+ cur_ops->get_comp_state_full(&cookie_full);
WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
}
cur_ops->readunlock(idx);
@@ -1455,7 +1543,8 @@ rcu_torture_writer(void *arg)
case RTWS_COND_GET:
rcu_torture_writer_state = RTWS_COND_GET;
gp_snap = cur_ops->get_gp_state();
- torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
+ torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi,
+ 1000, &rand);
rcu_torture_writer_state = RTWS_COND_SYNC;
cur_ops->cond_sync(gp_snap);
rcu_torture_pipe_update(old_rp);
@@ -1463,7 +1552,8 @@ rcu_torture_writer(void *arg)
case RTWS_COND_GET_EXP:
rcu_torture_writer_state = RTWS_COND_GET_EXP;
gp_snap = cur_ops->get_gp_state_exp();
- torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
+ torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp,
+ 1000, &rand);
rcu_torture_writer_state = RTWS_COND_SYNC_EXP;
cur_ops->cond_sync_exp(gp_snap);
rcu_torture_pipe_update(old_rp);
@@ -1471,7 +1561,8 @@ rcu_torture_writer(void *arg)
case RTWS_COND_GET_FULL:
rcu_torture_writer_state = RTWS_COND_GET_FULL;
cur_ops->get_gp_state_full(&gp_snap_full);
- torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
+ torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi,
+ 1000, &rand);
rcu_torture_writer_state = RTWS_COND_SYNC_FULL;
cur_ops->cond_sync_full(&gp_snap_full);
rcu_torture_pipe_update(old_rp);
@@ -1479,49 +1570,50 @@ rcu_torture_writer(void *arg)
case RTWS_COND_GET_EXP_FULL:
rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL;
cur_ops->get_gp_state_full(&gp_snap_full);
- torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
+ torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp,
+ 1000, &rand);
rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL;
cur_ops->cond_sync_exp_full(&gp_snap_full);
rcu_torture_pipe_update(old_rp);
break;
case RTWS_POLL_GET:
rcu_torture_writer_state = RTWS_POLL_GET;
- for (i = 0; i < ARRAY_SIZE(ulo); i++)
+ for (i = 0; i < ulo_size; i++)
ulo[i] = cur_ops->get_comp_state();
gp_snap = cur_ops->start_gp_poll();
rcu_torture_writer_state = RTWS_POLL_WAIT;
while (!cur_ops->poll_gp_state(gp_snap)) {
gp_snap1 = cur_ops->get_gp_state();
- for (i = 0; i < ARRAY_SIZE(ulo); i++)
+ for (i = 0; i < ulo_size; i++)
if (cur_ops->poll_gp_state(ulo[i]) ||
cur_ops->same_gp_state(ulo[i], gp_snap1)) {
ulo[i] = gp_snap1;
break;
}
- WARN_ON_ONCE(i >= ARRAY_SIZE(ulo));
- torture_hrtimeout_jiffies(torture_random(&rand) % 16,
- &rand);
+ WARN_ON_ONCE(ulo_size > 0 && i >= ulo_size);
+ torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi,
+ 1000, &rand);
}
rcu_torture_pipe_update(old_rp);
break;
case RTWS_POLL_GET_FULL:
rcu_torture_writer_state = RTWS_POLL_GET_FULL;
- for (i = 0; i < ARRAY_SIZE(rgo); i++)
+ for (i = 0; i < rgo_size; i++)
cur_ops->get_comp_state_full(&rgo[i]);
cur_ops->start_gp_poll_full(&gp_snap_full);
rcu_torture_writer_state = RTWS_POLL_WAIT_FULL;
while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
cur_ops->get_gp_state_full(&gp_snap1_full);
- for (i = 0; i < ARRAY_SIZE(rgo); i++)
+ for (i = 0; i < rgo_size; i++)
if (cur_ops->poll_gp_state_full(&rgo[i]) ||
cur_ops->same_gp_state_full(&rgo[i],
&gp_snap1_full)) {
rgo[i] = gp_snap1_full;
break;
}
- WARN_ON_ONCE(i >= ARRAY_SIZE(rgo));
- torture_hrtimeout_jiffies(torture_random(&rand) % 16,
- &rand);
+ WARN_ON_ONCE(rgo_size > 0 && i >= rgo_size);
+ torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi,
+ 1000, &rand);
}
rcu_torture_pipe_update(old_rp);
break;
@@ -1530,8 +1622,8 @@ rcu_torture_writer(void *arg)
gp_snap = cur_ops->start_gp_poll_exp();
rcu_torture_writer_state = RTWS_POLL_WAIT_EXP;
while (!cur_ops->poll_gp_state_exp(gp_snap))
- torture_hrtimeout_jiffies(torture_random(&rand) % 16,
- &rand);
+ torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp,
+ 1000, &rand);
rcu_torture_pipe_update(old_rp);
break;
case RTWS_POLL_GET_EXP_FULL:
@@ -1539,8 +1631,8 @@ rcu_torture_writer(void *arg)
cur_ops->start_gp_poll_exp_full(&gp_snap_full);
rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL;
while (!cur_ops->poll_gp_state_full(&gp_snap_full))
- torture_hrtimeout_jiffies(torture_random(&rand) % 16,
- &rand);
+ torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp,
+ 1000, &rand);
rcu_torture_pipe_update(old_rp);
break;
case RTWS_SYNC:
@@ -1576,13 +1668,14 @@ rcu_torture_writer(void *arg)
!atomic_read(&rcu_fwd_cb_nodelay) &&
!cur_ops->slow_gps &&
!torture_must_stop() &&
- boot_ended)
+ boot_ended &&
+ time_after(jiffies, stallsdone))
for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
if (list_empty(&rcu_tortures[i].rtort_free) &&
- rcu_access_pointer(rcu_torture_current) !=
- &rcu_tortures[i]) {
+ rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) {
tracing_off();
- show_rcu_gp_kthreads();
+ if (cur_ops->gp_kthread_dbg)
+ cur_ops->gp_kthread_dbg();
WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
rcu_ftrace_dump(DUMP_ALL);
}
@@ -1600,6 +1693,8 @@ rcu_torture_writer(void *arg)
pr_alert("%s" TORTURE_FLAG
" Dynamic grace-period expediting was disabled.\n",
torture_type);
+ kfree(ulo);
+ kfree(rgo);
rcu_torture_writer_state = RTWS_STOPPING;
torture_kthread_stopping("rcu_torture_writer");
return 0;
@@ -1663,14 +1758,22 @@ rcu_torture_fakewriter(void *arg)
cur_ops->cond_sync_exp_full(&gp_snap_full);
break;
case RTWS_POLL_GET:
+ if (cur_ops->start_poll_irqsoff)
+ local_irq_disable();
gp_snap = cur_ops->start_gp_poll();
+ if (cur_ops->start_poll_irqsoff)
+ local_irq_enable();
while (!cur_ops->poll_gp_state(gp_snap)) {
torture_hrtimeout_jiffies(torture_random(&rand) % 16,
&rand);
}
break;
case RTWS_POLL_GET_FULL:
+ if (cur_ops->start_poll_irqsoff)
+ local_irq_disable();
cur_ops->start_gp_poll_full(&gp_snap_full);
+ if (cur_ops->start_poll_irqsoff)
+ local_irq_enable();
while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
torture_hrtimeout_jiffies(torture_random(&rand) % 16,
&rand);
@@ -1766,6 +1869,44 @@ static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
}
+// Verify the specified RCUTORTURE_RDR* state.
+#define ROEC_ARGS "%s %s: Current %#x To add %#x To remove %#x preempt_count() %#x\n", __func__, s, curstate, new, old, preempt_count()
+static void rcutorture_one_extend_check(char *s, int curstate, int new, int old, bool insoftirq)
+{
+ if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST_CHK_RDR_STATE))
+ return;
+
+ WARN_ONCE(!(curstate & RCUTORTURE_RDR_IRQ) && irqs_disabled(), ROEC_ARGS);
+ WARN_ONCE((curstate & RCUTORTURE_RDR_IRQ) && !irqs_disabled(), ROEC_ARGS);
+
+ // If CONFIG_PREEMPT_COUNT=n, further checks are unreliable.
+ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
+ return;
+
+ WARN_ONCE((curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) &&
+ !(preempt_count() & SOFTIRQ_MASK), ROEC_ARGS);
+ WARN_ONCE((curstate & (RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED)) &&
+ !(preempt_count() & PREEMPT_MASK), ROEC_ARGS);
+ WARN_ONCE(cur_ops->readlock_nesting &&
+ (curstate & (RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2)) &&
+ cur_ops->readlock_nesting() == 0, ROEC_ARGS);
+
+ // Timer handlers have all sorts of stuff disabled, so ignore
+ // unintended disabling.
+ if (insoftirq)
+ return;
+
+ WARN_ONCE(cur_ops->extendables &&
+ !(curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) &&
+ (preempt_count() & SOFTIRQ_MASK), ROEC_ARGS);
+ WARN_ONCE(cur_ops->extendables &&
+ !(curstate & (RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED)) &&
+ (preempt_count() & PREEMPT_MASK), ROEC_ARGS);
+ WARN_ONCE(cur_ops->readlock_nesting &&
+ !(curstate & (RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2)) &&
+ cur_ops->readlock_nesting() > 0, ROEC_ARGS);
+}
+
/*
* Do one extension of an RCU read-side critical section using the
* current reader state in readstate (set to zero for initial entry
@@ -1775,10 +1916,11 @@ static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
* beginning or end of the critical section and if there was actually a
* change, do a ->read_delay().
*/
-static void rcutorture_one_extend(int *readstate, int newstate,
+static void rcutorture_one_extend(int *readstate, int newstate, bool insoftirq,
struct torture_random_state *trsp,
struct rt_read_seg *rtrsp)
{
+ bool first;
unsigned long flags;
int idxnew1 = -1;
int idxnew2 = -1;
@@ -1787,8 +1929,10 @@ static void rcutorture_one_extend(int *readstate, int newstate,
int statesnew = ~*readstate & newstate;
int statesold = *readstate & ~newstate;
+ first = idxold1 == 0;
WARN_ON_ONCE(idxold2 < 0);
- WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
+ WARN_ON_ONCE(idxold2 & ~RCUTORTURE_RDR_ALLBITS);
+ rcutorture_one_extend_check("before change", idxold1, statesnew, statesold, insoftirq);
rtrsp->rt_readstate = newstate;
/* First, put new protection in place to avoid critical-section gap. */
@@ -1803,9 +1947,24 @@ static void rcutorture_one_extend(int *readstate, int newstate,
if (statesnew & RCUTORTURE_RDR_SCHED)
rcu_read_lock_sched();
if (statesnew & RCUTORTURE_RDR_RCU_1)
- idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1;
+ idxnew1 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1;
if (statesnew & RCUTORTURE_RDR_RCU_2)
- idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2;
+ idxnew2 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_2) & RCUTORTURE_RDR_MASK_2;
+
+ // Complain unless both the old and the new protection are in place.
+ rcutorture_one_extend_check("during change",
+ idxold1 | statesnew, statesnew, statesold, insoftirq);
+
+ // Sample CPU under both sets of protections to reduce confusion.
+ if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)) {
+ int cpu = raw_smp_processor_id();
+ rtrsp->rt_cpu = cpu;
+ if (!first) {
+ rtrsp[-1].rt_end_cpu = cpu;
+ if (cur_ops->reader_blocked)
+ rtrsp[-1].rt_preempted = cur_ops->reader_blocked();
+ }
+ }
/*
* Next, remove old protection, in decreasing order of strength
@@ -1825,7 +1984,7 @@ static void rcutorture_one_extend(int *readstate, int newstate,
if (statesold & RCUTORTURE_RDR_RBH)
rcu_read_unlock_bh();
if (statesold & RCUTORTURE_RDR_RCU_2) {
- cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1);
+ cur_ops->readunlock((idxold2 & RCUTORTURE_RDR_MASK_2) >> RCUTORTURE_RDR_SHIFT_2);
WARN_ON_ONCE(idxnew2 != -1);
idxold2 = 0;
}
@@ -1835,7 +1994,7 @@ static void rcutorture_one_extend(int *readstate, int newstate,
lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
if (lockit)
raw_spin_lock_irqsave(&current->pi_lock, flags);
- cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1);
+ cur_ops->readunlock((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1);
WARN_ON_ONCE(idxnew1 != -1);
idxold1 = 0;
if (lockit)
@@ -1850,16 +2009,14 @@ static void rcutorture_one_extend(int *readstate, int newstate,
if (idxnew1 == -1)
idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
WARN_ON_ONCE(idxnew1 < 0);
- if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1))
- pr_info("Unexpected idxnew1 value of %#x\n", idxnew1);
if (idxnew2 == -1)
idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
WARN_ON_ONCE(idxnew2 < 0);
- WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
*readstate = idxnew1 | idxnew2 | newstate;
WARN_ON_ONCE(*readstate < 0);
- if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1))
- pr_info("Unexpected idxnew2 value of %#x\n", idxnew2);
+ if (WARN_ON_ONCE(*readstate & ~RCUTORTURE_RDR_ALLBITS))
+ pr_info("Unexpected readstate value of %#x\n", *readstate);
+ rcutorture_one_extend_check("after change", *readstate, statesnew, statesold, insoftirq);
}
/* Return the biggest extendables mask given current RCU and boot parameters. */
@@ -1884,7 +2041,7 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
- WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1);
+ WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1); // Can't have reader idx bits.
/* Mostly only one bit (need preemption!), sometimes lots of bits. */
if (!(randmask1 & 0x7))
mask = mask & randmask2;
@@ -1926,7 +2083,7 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
* critical section.
*/
static struct rt_read_seg *
-rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
+rcutorture_loop_extend(int *readstate, bool insoftirq, struct torture_random_state *trsp,
struct rt_read_seg *rtrsp)
{
int i;
@@ -1941,7 +2098,7 @@ rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
for (j = 0; j < i; j++) {
mask = rcutorture_extend_mask(*readstate, trsp);
- rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
+ rcutorture_one_extend(readstate, mask, insoftirq, trsp, &rtrsp[j]);
}
return &rtrsp[j];
}
@@ -1962,6 +2119,7 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
int newstate;
struct rcu_torture *p;
int pipe_count;
+ bool preempted = false;
int readstate = 0;
struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
struct rt_read_seg *rtrsp = &rtseg[0];
@@ -1970,7 +2128,7 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
WARN_ON_ONCE(!rcu_is_watching());
newstate = rcutorture_extend_mask(readstate, trsp);
- rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
+ rcutorture_one_extend(&readstate, newstate, myid < 0, trsp, rtrsp++);
if (checkpolling) {
if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
cookie = cur_ops->get_gp_state();
@@ -1983,17 +2141,18 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
!cur_ops->readlock_held || cur_ops->readlock_held());
if (p == NULL) {
/* Wait for rcu_torture_writer to get underway */
- rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
+ rcutorture_one_extend(&readstate, 0, myid < 0, trsp, rtrsp);
return false;
}
if (p->rtort_mbtest == 0)
atomic_inc(&n_rcu_torture_mberror);
rcu_torture_reader_do_mbchk(myid, p, trsp);
- rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
+ rtrsp = rcutorture_loop_extend(&readstate, myid < 0, trsp, rtrsp);
preempt_disable();
pipe_count = READ_ONCE(p->rtort_pipe_count);
if (pipe_count > RCU_TORTURE_PIPE_LEN) {
- /* Should not happen, but... */
+ // Should not happen in a correct RCU implementation,
+ // happens quite often for torture_type=busted.
pipe_count = RCU_TORTURE_PIPE_LEN;
}
completed = cur_ops->get_gp_seq();
@@ -2026,7 +2185,9 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
rcu_torture_writer_state,
cpumask_pr_args(cpu_online_mask));
}
- rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
+ if (cur_ops->reader_blocked)
+ preempted = cur_ops->reader_blocked();
+ rcutorture_one_extend(&readstate, 0, myid < 0, trsp, rtrsp);
WARN_ON_ONCE(readstate);
// This next splat is expected behavior if leakpointer, especially
// for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
@@ -2038,6 +2199,7 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
err_segs[i++] = *rtrsp1;
rt_read_nsegs = i;
+ rt_read_preempted = preempted;
}
return true;
@@ -2255,10 +2417,8 @@ rcu_torture_stats_print(void)
int __maybe_unused flags = 0;
unsigned long __maybe_unused gp_seq = 0;
- rcutorture_get_gp_data(cur_ops->ttype,
- &flags, &gp_seq);
- srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
- &flags, &gp_seq);
+ if (cur_ops->get_gp_data)
+ cur_ops->get_gp_data(&flags, &gp_seq);
wtp = READ_ONCE(writer_task);
pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
rcu_torture_writer_state_getname(),
@@ -2354,24 +2514,28 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
"test_boost=%d/%d test_boost_interval=%d "
"test_boost_duration=%d shutdown_secs=%d "
"stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
- "stall_cpu_block=%d "
+ "stall_cpu_block=%d stall_cpu_repeat=%d "
"n_barrier_cbs=%d "
"onoff_interval=%d onoff_holdoff=%d "
"read_exit_delay=%d read_exit_burst=%d "
+ "reader_flavor=%x "
"nocbs_nthreads=%d nocbs_toggle=%d "
- "test_nmis=%d\n",
+ "test_nmis=%d "
+ "preempt_duration=%d preempt_interval=%d\n",
torture_type, tag, nrealreaders, nfakewriters,
stat_interval, verbose, test_no_idle_hz, shuffle_interval,
stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
test_boost, cur_ops->can_boost,
test_boost_interval, test_boost_duration, shutdown_secs,
stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
- stall_cpu_block,
+ stall_cpu_block, stall_cpu_repeat,
n_barrier_cbs,
onoff_interval, onoff_holdoff,
read_exit_delay, read_exit_burst,
+ reader_flavor,
nocbs_nthreads, nocbs_toggle,
- test_nmis);
+ test_nmis,
+ preempt_duration, preempt_interval);
}
static int rcutorture_booster_cleanup(unsigned int cpu)
@@ -2409,6 +2573,14 @@ static int rcutorture_booster_init(unsigned int cpu)
WARN_ON_ONCE(!t);
sp.sched_priority = 2;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+#ifdef CONFIG_IRQ_FORCED_THREADING
+ if (force_irqthreads()) {
+ t = per_cpu(ktimerd, cpu);
+ WARN_ON_ONCE(!t);
+ sp.sched_priority = 2;
+ sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+ }
+#endif
}
/* Don't allow time recalculation while creating a new task. */
@@ -2441,21 +2613,14 @@ static struct notifier_block rcu_torture_stall_block = {
/*
* CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
- * induces a CPU stall for the time specified by stall_cpu.
+ * induces a CPU stall for the time specified by stall_cpu. If a new
+ * stall test is added, stallsdone in rcu_torture_writer() must be adjusted.
*/
-static int rcu_torture_stall(void *args)
+static void rcu_torture_stall_one(int rep, int irqsoff)
{
int idx;
- int ret;
unsigned long stop_at;
- VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
- if (rcu_cpu_stall_notifiers) {
- ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block);
- if (ret)
- pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n",
- __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : "");
- }
if (stall_cpu_holdoff > 0) {
VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
@@ -2475,14 +2640,14 @@ static int rcu_torture_stall(void *args)
stop_at = ktime_get_seconds() + stall_cpu;
/* RCU CPU stall is expected behavior in following code. */
idx = cur_ops->readlock();
- if (stall_cpu_irqsoff)
+ if (irqsoff)
local_irq_disable();
else if (!stall_cpu_block)
preempt_disable();
- pr_alert("%s start on CPU %d.\n",
- __func__, raw_smp_processor_id());
- while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
- stop_at))
+ pr_alert("%s start stall episode %d on CPU %d.\n",
+ __func__, rep + 1, raw_smp_processor_id());
+ while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), stop_at) &&
+ !kthread_should_stop())
if (stall_cpu_block) {
#ifdef CONFIG_PREEMPTION
preempt_schedule();
@@ -2492,12 +2657,42 @@ static int rcu_torture_stall(void *args)
} else if (stall_no_softlockup) {
touch_softlockup_watchdog();
}
- if (stall_cpu_irqsoff)
+ if (irqsoff)
local_irq_enable();
else if (!stall_cpu_block)
preempt_enable();
cur_ops->readunlock(idx);
}
+}
+
+/*
+ * CPU-stall kthread. Invokes rcu_torture_stall_one() once, and then as many
+ * additional times as specified by the stall_cpu_repeat module parameter.
+ * Note that stall_cpu_irqsoff is ignored on the second and subsequent
+ * stall.
+ */
+static int rcu_torture_stall(void *args)
+{
+ int i;
+ int repeat = stall_cpu_repeat;
+ int ret;
+
+ VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
+ if (repeat < 0) {
+ repeat = 0;
+ WARN_ON_ONCE(IS_BUILTIN(CONFIG_RCU_TORTURE_TEST));
+ }
+ if (rcu_cpu_stall_notifiers) {
+ ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block);
+ if (ret)
+ pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n",
+ __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : "");
+ }
+ for (i = 0; i <= repeat; i++) {
+ if (kthread_should_stop())
+ break;
+ rcu_torture_stall_one(i, i == 0 ? stall_cpu_irqsoff : 0);
+ }
pr_alert("%s end.\n", __func__);
if (rcu_cpu_stall_notifiers && !ret) {
ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block);
@@ -2614,7 +2809,7 @@ static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
rfcpp = rfp->rcu_fwd_cb_tail;
rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
- WRITE_ONCE(*rfcpp, rfcp);
+ smp_store_release(rfcpp, rfcp);
WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
if (i >= ARRAY_SIZE(rfp->n_launders_hist))
@@ -2663,7 +2858,7 @@ static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
rcu_torture_fwd_prog_cond_resched(freed);
if (tick_nohz_full_enabled()) {
local_irq_save(flags);
- rcu_momentary_dyntick_idle();
+ rcu_momentary_eqs();
local_irq_restore(flags);
}
}
@@ -2813,7 +3008,7 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
if (tick_nohz_full_enabled()) {
local_irq_save(flags);
- rcu_momentary_dyntick_idle();
+ rcu_momentary_eqs();
local_irq_restore(flags);
}
}
@@ -2827,13 +3022,14 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
!shutdown_time_arrived()) {
- WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
- pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
+ if (WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED) && cur_ops->gp_kthread_dbg)
+ cur_ops->gp_kthread_dbg();
+ pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld #online %u\n",
__func__,
stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
n_launders + n_max_cbs - n_launders_cb_snap,
n_launders, n_launders_sa,
- n_max_gps, n_max_cbs, cver, gps);
+ n_max_gps, n_max_cbs, cver, gps, num_online_cpus());
atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
rcu_torture_fwd_cb_hist(rfp);
@@ -2969,12 +3165,12 @@ static int __init rcu_torture_fwd_prog_init(void)
fwd_progress = 0;
return 0;
}
- if (stall_cpu > 0) {
- VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
+ if (stall_cpu > 0 || (preempt_duration > 0 && IS_ENABLED(CONFIG_RCU_NOCB_CPU))) {
+ VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall and/or preemption testing");
fwd_progress = 0;
if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
return -EINVAL; /* In module, can fail back to user. */
- WARN_ON(1); /* Make sure rcutorture notices conflict. */
+ WARN_ON(1); /* Make sure rcutorture scripting notices conflict. */
return 0;
}
if (fwd_progress_holdoff <= 0)
@@ -3035,11 +3231,12 @@ static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
}
/* IPI handler to get callback posted on desired CPU, if online. */
-static void rcu_torture_barrier1cb(void *rcu_void)
+static int rcu_torture_barrier1cb(void *rcu_void)
{
struct rcu_head *rhp = rcu_void;
cur_ops->call(rhp, rcu_torture_barrier_cbf);
+ return 0;
}
/* kthread function to register callbacks used to test RCU barriers. */
@@ -3065,11 +3262,9 @@ static int rcu_torture_barrier_cbs(void *arg)
* The above smp_load_acquire() ensures barrier_phase load
* is ordered before the following ->call().
*/
- if (smp_call_function_single(myid, rcu_torture_barrier1cb,
- &rcu, 1)) {
- // IPI failed, so use direct call from current CPU.
+ if (smp_call_on_cpu(myid, rcu_torture_barrier1cb, &rcu, 1))
cur_ops->call(&rcu, rcu_torture_barrier_cbf);
- }
+
if (atomic_dec_and_test(&barrier_cbs_count))
wake_up(&barrier_wq);
} while (!torture_must_stop());
@@ -3320,6 +3515,35 @@ static void rcutorture_test_nmis(int n)
#endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
}
+// Randomly preempt online CPUs.
+static int rcu_torture_preempt(void *unused)
+{
+ int cpu = -1;
+ DEFINE_TORTURE_RANDOM(rand);
+
+ schedule_timeout_idle(stall_cpu_holdoff);
+ do {
+ // Wait for preempt_interval ms with up to 100us fuzz.
+ torture_hrtimeout_ms(preempt_interval, 100, &rand);
+ // Select online CPU.
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_next(-1, cpu_online_mask);
+ WARN_ON_ONCE(cpu >= nr_cpu_ids);
+ // Move to that CPU, if can't do so, retry later.
+ if (torture_sched_setaffinity(current->pid, cpumask_of(cpu), false))
+ continue;
+ // Preempt at high-ish priority, then reset to normal.
+ sched_set_fifo(current);
+ torture_sched_setaffinity(current->pid, cpu_present_mask, true);
+ mdelay(preempt_duration);
+ sched_set_normal(current, 0);
+ stutter_wait("rcu_torture_preempt");
+ } while (!torture_must_stop());
+ torture_kthread_stopping("rcu_torture_preempt");
+ return 0;
+}
+
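The preemption kthread above picks its victim by simple round-robin over the online CPUs, wrapping back to the first online CPU once cpumask_next() runs off the end; a stand-alone sketch (not from the patch) of that wraparound pattern, with an array of online flags standing in for cpu_online_mask:

#include <stdio.h>

#define NR_CPUS 8

static const int online[NR_CPUS] = { 1, 1, 0, 1, 1, 1, 0, 1 };

/* Analogue of cpumask_next(): first online CPU strictly after @prev, or NR_CPUS. */
static int next_online(int prev)
{
	for (int cpu = prev + 1; cpu < NR_CPUS; cpu++)
		if (online[cpu])
			return cpu;
	return NR_CPUS;
}

int main(void)
{
	int cpu = -1;

	for (int i = 0; i < 12; i++) {
		cpu = next_online(cpu);
		if (cpu >= NR_CPUS)		/* wrap around, as in rcu_torture_preempt() */
			cpu = next_online(-1);
		printf("%d ", cpu);
	}
	printf("\n");
	return 0;
}

In the patch itself, each selected CPU is then held at SCHED_FIFO priority for preempt_duration milliseconds (via mdelay()) once per preempt_interval milliseconds before the kthread drops back to normal priority.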
static enum cpuhp_state rcutor_hp;
static void
@@ -3335,12 +3559,12 @@ rcu_torture_cleanup(void)
pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
cur_ops->cb_barrier();
}
- rcu_gp_slow_unregister(NULL);
+ if (cur_ops->gp_slow_unregister)
+ cur_ops->gp_slow_unregister(NULL);
return;
}
if (!cur_ops) {
torture_cleanup_end();
- rcu_gp_slow_unregister(NULL);
return;
}
@@ -3348,6 +3572,7 @@ rcu_torture_cleanup(void)
if (cur_ops->gp_kthread_dbg)
cur_ops->gp_kthread_dbg();
+ torture_stop_kthread(rcu_torture_preempt, preempt_task);
rcu_torture_read_exit_cleanup();
rcu_torture_barrier_cleanup();
rcu_torture_fwd_prog_cleanup();
@@ -3379,8 +3604,8 @@ rcu_torture_cleanup(void)
fakewriter_tasks = NULL;
}
- rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
- srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
+ if (cur_ops->get_gp_data)
+ cur_ops->get_gp_data(&flags, &gp_seq);
pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
cur_ops->name, (long)gp_seq, flags,
rcutorture_seq_diff(gp_seq, start_gp_seq));
@@ -3410,26 +3635,49 @@ rcu_torture_cleanup(void)
pr_alert("\t: No segments recorded!!!\n");
firsttime = 1;
for (i = 0; i < rt_read_nsegs; i++) {
- pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
+ pr_alert("\t%d: %#4x", i, err_segs[i].rt_readstate);
if (err_segs[i].rt_delay_jiffies != 0) {
pr_cont("%s%ldjiffies", firsttime ? "" : "+",
err_segs[i].rt_delay_jiffies);
firsttime = 0;
}
+ if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)) {
+ pr_cont(" CPU %2d", err_segs[i].rt_cpu);
+ if (err_segs[i].rt_cpu != err_segs[i].rt_end_cpu)
+ pr_cont("->%-2d", err_segs[i].rt_end_cpu);
+ else
+ pr_cont(" ...");
+ }
if (err_segs[i].rt_delay_ms != 0) {
- pr_cont("%s%ldms", firsttime ? "" : "+",
+ pr_cont(" %s%ldms", firsttime ? "" : "+",
err_segs[i].rt_delay_ms);
firsttime = 0;
}
if (err_segs[i].rt_delay_us != 0) {
- pr_cont("%s%ldus", firsttime ? "" : "+",
+ pr_cont(" %s%ldus", firsttime ? "" : "+",
err_segs[i].rt_delay_us);
firsttime = 0;
}
- pr_cont("%s\n",
- err_segs[i].rt_preempted ? "preempted" : "");
+ pr_cont("%s", err_segs[i].rt_preempted ? " preempted" : "");
+ if (err_segs[i].rt_readstate & RCUTORTURE_RDR_BH)
+ pr_cont(" BH");
+ if (err_segs[i].rt_readstate & RCUTORTURE_RDR_IRQ)
+ pr_cont(" IRQ");
+ if (err_segs[i].rt_readstate & RCUTORTURE_RDR_PREEMPT)
+ pr_cont(" PREEMPT");
+ if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RBH)
+ pr_cont(" RBH");
+ if (err_segs[i].rt_readstate & RCUTORTURE_RDR_SCHED)
+ pr_cont(" SCHED");
+ if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RCU_1)
+ pr_cont(" RCU_1");
+ if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RCU_2)
+ pr_cont(" RCU_2");
+ pr_cont("\n");
}
+ if (rt_read_preempted)
+ pr_alert("\tReader was preempted.\n");
}
if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
@@ -3439,10 +3687,10 @@ rcu_torture_cleanup(void)
else
rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
torture_cleanup_end();
- rcu_gp_slow_unregister(&rcu_fwd_cb_nodelay);
+ if (cur_ops->gp_slow_unregister)
+ cur_ops->gp_slow_unregister(NULL);
}
-#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}
@@ -3460,7 +3708,6 @@ static void rcu_torture_err_cb(struct rcu_head *rhp)
*/
pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
-#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
/*
* Verify that double-free causes debug-objects to complain, but only
@@ -3469,39 +3716,43 @@ static void rcu_torture_err_cb(struct rcu_head *rhp)
*/
static void rcu_test_debug_objects(void)
{
-#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
struct rcu_head rh1;
struct rcu_head rh2;
+ int idx;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD)) {
+ pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_%s()\n",
+ KBUILD_MODNAME, cur_ops->name);
+ return;
+ }
+
+ if (WARN_ON_ONCE(cur_ops->debug_objects &&
+ (!cur_ops->call || !cur_ops->cb_barrier)))
+ return;
+
struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
init_rcu_head_on_stack(&rh1);
init_rcu_head_on_stack(&rh2);
- pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
+ pr_alert("%s: WARN: Duplicate call_%s() test starting.\n", KBUILD_MODNAME, cur_ops->name);
/* Try to queue the rh2 pair of callbacks for the same grace period. */
- preempt_disable(); /* Prevent preemption from interrupting test. */
- rcu_read_lock(); /* Make it impossible to finish a grace period. */
- call_rcu_hurry(&rh1, rcu_torture_leak_cb); /* Start grace period. */
- local_irq_disable(); /* Make it harder to start a new grace period. */
- call_rcu_hurry(&rh2, rcu_torture_leak_cb);
- call_rcu_hurry(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
+ idx = cur_ops->readlock(); /* Make it impossible to finish a grace period. */
+ cur_ops->call(&rh1, rcu_torture_leak_cb); /* Start grace period. */
+ cur_ops->call(&rh2, rcu_torture_leak_cb);
+ cur_ops->call(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
if (rhp) {
- call_rcu_hurry(rhp, rcu_torture_leak_cb);
- call_rcu_hurry(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
+ cur_ops->call(rhp, rcu_torture_leak_cb);
+ cur_ops->call(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
}
- local_irq_enable();
- rcu_read_unlock();
- preempt_enable();
+ cur_ops->readunlock(idx);
/* Wait for them all to get done so we can safely return. */
- rcu_barrier();
- pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
+ cur_ops->cb_barrier();
+ pr_alert("%s: WARN: Duplicate call_%s() test complete.\n", KBUILD_MODNAME, cur_ops->name);
destroy_rcu_head_on_stack(&rh1);
destroy_rcu_head_on_stack(&rh2);
kfree(rhp);
-#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
- pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
-#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}
static void rcutorture_sync(void)
@@ -3751,8 +4002,8 @@ rcu_torture_init(void)
nrealreaders = 1;
}
rcu_torture_print_module_parms(cur_ops, "Start of test");
- rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
- srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
+ if (cur_ops->get_gp_data)
+ cur_ops->get_gp_data(&flags, &gp_seq);
start_gp_seq = gp_seq;
pr_alert("%s: Start-test grace-period state: g%ld f%#x\n",
cur_ops->name, (long)gp_seq, flags);
@@ -3918,10 +4169,16 @@ rcu_torture_init(void)
firsterr = rcu_torture_read_exit_init();
if (torture_init_error(firsterr))
goto unwind;
+ if (preempt_duration > 0) {
+ firsterr = torture_create_kthread(rcu_torture_preempt, NULL, preempt_task);
+ if (torture_init_error(firsterr))
+ goto unwind;
+ }
if (object_debug)
rcu_test_debug_objects();
torture_init_end();
- rcu_gp_slow_register(&rcu_fwd_cb_nodelay);
+ if (cur_ops->gp_slow_register && !WARN_ON_ONCE(!cur_ops->gp_slow_unregister))
+ cur_ops->gp_slow_register(&rcu_fwd_cb_nodelay);
return 0;
unwind: