author    Paul E. McKenney <paulmck@kernel.org>    2020-07-01 16:38:16 -0700
committer Paul E. McKenney <paulmck@kernel.org>    2020-08-24 18:38:36 -0700
commit    ee7035d29576dcb59b1191e5f609517cacab1e56 (patch)
tree      72cd20ebd862dd36da3d5a565c5ef14897d75848 /kernel/scftorture.c
parent    dbf83b655a7853bc430af10e9a3e7eb1f4c90f86 (diff)
scftorture: Prevent compiler from reducing race probabilities
Detecting smp_call_function() memory misordering requires close timing,
so it is necessary to have the checks immediately before and after the
call to the smp_call_function*() function under test.  This commit
therefore inserts barrier() calls to prevent the compiler from
optimizing memory-misordering detection down into the zone of extreme
improbability.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
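For context, barrier() is the kernel's compiler-only barrier: it emits no
machine instructions and constrains the compiler, not the CPU.  A minimal
sketch of its usual definition (per include/linux/compiler.h, reproduced
here only for illustration):

	/*
	 * An empty asm with a "memory" clobber forces the compiler to
	 * assume that all of memory may be read or written at this
	 * point, so it cannot hoist, sink, or merge the scfc_in stores
	 * across the smp_call_function*() call under test.  No CPU-level
	 * barrier instruction is emitted.
	 */
	#define barrier() __asm__ __volatile__("" : : : "memory")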
Diffstat (limited to 'kernel/scftorture.c')
-rw-r--r--	kernel/scftorture.c	| 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/kernel/scftorture.c b/kernel/scftorture.c
index 880c2cef13e7..83496810fc48 100644
--- a/kernel/scftorture.c
+++ b/kernel/scftorture.c
@@ -322,6 +322,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
 			scfp->n_single++;
 		if (scfcp) {
 			scfcp->scfc_cpu = cpu;
+			barrier(); // Prevent race-reduction compiler optimizations.
 			scfcp->scfc_in = true;
 		}
 		ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, scfsp->scfs_wait);
@@ -339,8 +340,10 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
 			scfp->n_many_wait++;
 		else
 			scfp->n_many++;
-		if (scfcp)
+		if (scfcp) {
+			barrier(); // Prevent race-reduction compiler optimizations.
 			scfcp->scfc_in = true;
+		}
 		smp_call_function_many(cpu_online_mask, scf_handler, scfcp, scfsp->scfs_wait);
 		break;
 	case SCF_PRIM_ALL:
@@ -348,8 +351,10 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
 			scfp->n_all_wait++;
 		else
 			scfp->n_all++;
-		if (scfcp)
+		if (scfcp) {
+			barrier(); // Prevent race-reduction compiler optimizations.
 			scfcp->scfc_in = true;
+		}
 		smp_call_function(scf_handler, scfcp, scfsp->scfs_wait);
 		break;
 	}
@@ -358,6 +363,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
 			atomic_inc(&n_mb_out_errs); // Leak rather than trash!
 		else
 			kfree(scfcp);
+		barrier(); // Prevent race-reduction compiler optimizations.
 	}
 	if (use_cpus_read_lock)
 		cpus_read_unlock();
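For reference, the check that these barrier() calls tighten lives on the
receiving side of the IPI.  A simplified sketch of that handler
(condensed from the same file; statistics updates, the scf_handler_1()
wrapper, and the non-wait kfree() path are elided):

	static void scf_handler(void *scfc_in)
	{
		struct scf_check *scfcp = scfc_in;

		if (likely(scfcp)) {
			WRITE_ONCE(scfcp->scfc_out, false); // For multiple receivers.
			// If the sender's scfc_in store is not yet visible here,
			// either the CPUs misordered memory or the compiler moved
			// the store away from the call -- hence the new barrier()s.
			if (WARN_ON_ONCE(unlikely(!READ_ONCE(scfcp->scfc_in))))
				atomic_inc(&n_mb_in_errs);
		}
	}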