path: root/Documentation/litmus-tests
author     Linus Torvalds <torvalds@linux-foundation.org>  2023-04-24 12:02:25 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-04-24 12:02:25 -0700
commit     60eb45074234b90333b6241b4fd8d196aa2dfd98 (patch)
tree       e4843847c30bf2e6e3176edaaff73439de706829 /Documentation/litmus-tests
parent     406037351e08dea03735178bf11046da85f00125 (diff)
parent     cc4a29819b0df9f3a2e7e0d5dee0830a3072d5aa (diff)
Merge tag 'lkmm-scripting.2023.04.07a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu
Pull Linux Kernel Memory Model scripting updates from Paul McKenney:

 "This improves litmus-test documentation and improves the ability to
  do before/after tests on the https://github.com/paulmckrcu/litmus repo"

* tag 'lkmm-scripting.2023.04.07a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu: (32 commits)
  tools/memory-model: Remove out-of-date SRCU documentation
  tools/memory-model: Document LKMM test procedure
  tools/memory-model: Use "grep -E" instead of "egrep"
  tools/memory-model: Use "-unroll 0" to keep --hw runs finite
  tools/memory-model: Make judgelitmus.sh handle scripted Result: tag
  tools/memory-model: Add data-race capabilities to judgelitmus.sh
  tools/memory-model: Add checktheselitmus.sh to run specified litmus tests
  tools/memory-model: Repair parseargs.sh header comment
  tools/memory-model: Add "--" to parseargs.sh for additional arguments
  tools/memory-model: Make history-check scripts use mselect7
  tools/memory-model: Make checkghlitmus.sh use mselect7
  tools/memory-model: Fix scripting --jobs argument
  tools/memory-model: Implement --hw support for checkghlitmus.sh
  tools/memory-model: Add -v flag to jingle7 runs
  tools/memory-model: Make runlitmus.sh check for jingle errors
  tools/memory-model: Allow herd to deduce CPU type
  tools/memory-model: Keep assembly-language litmus tests
  tools/memory-model: Move from .AArch64.litmus.out to .litmus.AArch.out
  tools/memory-model: Make runlitmus.sh generate .litmus.out for --hw
  tools/memory-model: Split runlitmus.sh out of checklitmus.sh
  ...
Diffstat (limited to 'Documentation/litmus-tests')
-rw-r--r--  Documentation/litmus-tests/locking/DCL-broken.litmus   54
-rw-r--r--  Documentation/litmus-tests/locking/DCL-fixed.litmus    55
-rw-r--r--  Documentation/litmus-tests/locking/RM-broken.litmus    41
-rw-r--r--  Documentation/litmus-tests/locking/RM-fixed.litmus     41
4 files changed, 191 insertions, 0 deletions
diff --git a/Documentation/litmus-tests/locking/DCL-broken.litmus b/Documentation/litmus-tests/locking/DCL-broken.litmus
new file mode 100644
index 000000000000..bfb7ba4316d6
--- /dev/null
+++ b/Documentation/litmus-tests/locking/DCL-broken.litmus
@@ -0,0 +1,54 @@
+C DCL-broken
+
+(*
+ * Result: Sometimes
+ *
+ * This litmus test demonstrates that more than just locking is required to
+ * correctly implement double-checked locking.
+ *)
+
+{
+ int flag;
+ int data;
+}
+
+P0(int *flag, int *data, spinlock_t *lck)
+{
+ int r0;
+ int r1;
+ int r2;
+
+ r0 = READ_ONCE(*flag);
+ if (r0 == 0) {
+ spin_lock(lck);
+ r1 = READ_ONCE(*flag);
+ if (r1 == 0) {
+ WRITE_ONCE(*data, 1);
+ WRITE_ONCE(*flag, 1);
+ }
+ spin_unlock(lck);
+ }
+ r2 = READ_ONCE(*data);
+}
+
+P1(int *flag, int *data, spinlock_t *lck)
+{
+ int r0;
+ int r1;
+ int r2;
+
+ r0 = READ_ONCE(*flag);
+ if (r0 == 0) {
+ spin_lock(lck);
+ r1 = READ_ONCE(*flag);
+ if (r1 == 0) {
+ WRITE_ONCE(*data, 1);
+ WRITE_ONCE(*flag, 1);
+ }
+ spin_unlock(lck);
+ }
+ r2 = READ_ONCE(*data);
+}
+
+locations [flag;data;0:r0;0:r1;1:r0;1:r1]
+exists (0:r2=0 \/ 1:r2=0)
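
For readers who do not write litmus tests every day, here is a minimal kernel-style C sketch of the pattern DCL-broken models. The helper name broken_get_data(), the DEFINE_SPINLOCK() setup and the lazy-initialization framing are illustrative assumptions, not part of the patch; only the READ_ONCE()/WRITE_ONCE() accesses and the spin_lock()/spin_unlock() pair correspond to the test above.

/*
 * Sketch only: double-checked locking as DCL-broken models it.
 * Names are hypothetical.
 */
static int flag;
static int data;
static DEFINE_SPINLOCK(lck);

static int broken_get_data(void)
{
        if (!READ_ONCE(flag)) {                 /* unordered fast-path check */
                spin_lock(&lck);
                if (!READ_ONCE(flag)) {         /* re-check under the lock */
                        WRITE_ONCE(data, 1);    /* initialize */
                        WRITE_ONCE(flag, 1);    /* publish, with no release ordering */
                }
                spin_unlock(&lck);
        }
        /*
         * Nothing orders this read against the fast-path read of flag, so a
         * caller that saw flag == 1 and skipped the lock may still observe
         * data == 0.  That is the "Sometimes" verdict on the exists clause
         * (0:r2=0 \/ 1:r2=0).
         */
        return READ_ONCE(data);
}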
diff --git a/Documentation/litmus-tests/locking/DCL-fixed.litmus b/Documentation/litmus-tests/locking/DCL-fixed.litmus
new file mode 100644
index 000000000000..d1b60bcb0c8f
--- /dev/null
+++ b/Documentation/litmus-tests/locking/DCL-fixed.litmus
@@ -0,0 +1,55 @@
+C DCL-fixed
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates that double-checked locking can be
+ * reliable given proper use of smp_load_acquire() and smp_store_release()
+ * in addition to the locking.
+ *)
+
+{
+ int flag;
+ int data;
+}
+
+P0(int *flag, int *data, spinlock_t *lck)
+{
+ int r0;
+ int r1;
+ int r2;
+
+ r0 = smp_load_acquire(flag);
+ if (r0 == 0) {
+ spin_lock(lck);
+ r1 = READ_ONCE(*flag);
+ if (r1 == 0) {
+ WRITE_ONCE(*data, 1);
+ smp_store_release(flag, 1);
+ }
+ spin_unlock(lck);
+ }
+ r2 = READ_ONCE(*data);
+}
+
+P1(int *flag, int *data, spinlock_t *lck)
+{
+ int r0;
+ int r1;
+ int r2;
+
+ r0 = smp_load_acquire(flag);
+ if (r0 == 0) {
+ spin_lock(lck);
+ r1 = READ_ONCE(*flag);
+ if (r1 == 0) {
+ WRITE_ONCE(*data, 1);
+ smp_store_release(flag, 1);
+ }
+ spin_unlock(lck);
+ }
+ r2 = READ_ONCE(*data);
+}
+
+locations [flag;data;0:r0;0:r1;1:r0;1:r1]
+exists (0:r2=0 \/ 1:r2=0)
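
Continuing the same hypothetical sketch (same caveats about the invented names), DCL-fixed confines its change to the flag accesses on the lock-free paths: the fast-path check becomes smp_load_acquire() and the publishing store becomes smp_store_release(), so any task that observes flag != 0 also observes the earlier store to data, and the exists clause can no longer fire.

static int fixed_get_data(void)
{
        if (!smp_load_acquire(&flag)) {         /* acquire: orders the later data read */
                spin_lock(&lck);
                if (!READ_ONCE(flag)) {
                        WRITE_ONCE(data, 1);
                        /* release: the data store is visible before flag reads as 1 */
                        smp_store_release(&flag, 1);
                }
                spin_unlock(&lck);
        }
        return READ_ONCE(data);                 /* can no longer be 0: "Never" */
}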
diff --git a/Documentation/litmus-tests/locking/RM-broken.litmus b/Documentation/litmus-tests/locking/RM-broken.litmus
new file mode 100644
index 000000000000..b7ef30cedfe5
--- /dev/null
+++ b/Documentation/litmus-tests/locking/RM-broken.litmus
@@ -0,0 +1,41 @@
+C RM-broken
+
+(*
+ * Result: DEADLOCK
+ *
+ * This litmus test demonstrates that the old "roach motel" approach
+ * to locking, where code can be freely moved into critical sections,
+ * cannot be used in the Linux kernel.
+ *)
+
+{
+ int x;
+ atomic_t y;
+}
+
+P0(int *x, atomic_t *y, spinlock_t *lck)
+{
+ int r2;
+
+ spin_lock(lck);
+ r2 = atomic_inc_return(y);
+ WRITE_ONCE(*x, 1);
+ spin_unlock(lck);
+}
+
+P1(int *x, atomic_t *y, spinlock_t *lck)
+{
+ int r0;
+ int r1;
+ int r2;
+
+ spin_lock(lck);
+ r0 = READ_ONCE(*x);
+ r1 = READ_ONCE(*x);
+ r2 = atomic_inc_return(y);
+ spin_unlock(lck);
+}
+
+locations [x;0:r2;1:r0;1:r1;1:r2]
+filter (1:r0=0 /\ 1:r1=1)
+exists (1:r2=1)
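
The "roach motel" name refers to transformations that let accesses move into a lock-based critical section but never back out. RM-broken models the moved-in result. One way to read it (an interpretation, not wording from the patch) is that P1's two reads of x stand in for a wait loop: the filter clause 1:r0=0 /\ 1:r1=1 keeps only executions in which P1 saw x change from 0 to 1, but with both reads inside P1's critical section P0 can never perform its locked write of x in between, so no execution survives the filter and the test carries Result: DEADLOCK. A kernel-style sketch of the code being modeled, with the wait loop, cpu_relax() and the helper names as illustrative assumptions:

static int x;
static atomic_t y = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lck);

static void writer(void)                        /* P0 */
{
        spin_lock(&lck);
        atomic_inc_return(&y);                  /* P0's r2 in the litmus test */
        WRITE_ONCE(x, 1);
        spin_unlock(&lck);
}

static void broken_waiter(void)                 /* P1 after a "roach motel" move */
{
        spin_lock(&lck);
        while (!READ_ONCE(x))                   /* waits for writer(), but writer() */
                cpu_relax();                    /* needs lck: this never terminates */
        atomic_inc_return(&y);                  /* P1's r2: never reached */
        spin_unlock(&lck);
}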
diff --git a/Documentation/litmus-tests/locking/RM-fixed.litmus b/Documentation/litmus-tests/locking/RM-fixed.litmus
new file mode 100644
index 000000000000..b62817559616
--- /dev/null
+++ b/Documentation/litmus-tests/locking/RM-fixed.litmus
@@ -0,0 +1,41 @@
+C RM-fixed
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates that the old "roach motel" approach
+ * to locking, where code can be freely moved into critical sections,
+ * cannot be used in the Linux kernel.
+ *)
+
+{
+ int x;
+ atomic_t y;
+}
+
+P0(int *x, atomic_t *y, spinlock_t *lck)
+{
+ int r2;
+
+ spin_lock(lck);
+ r2 = atomic_inc_return(y);
+ WRITE_ONCE(*x, 1);
+ spin_unlock(lck);
+}
+
+P1(int *x, atomic_t *y, spinlock_t *lck)
+{
+ int r0;
+ int r1;
+ int r2;
+
+ r0 = READ_ONCE(*x);
+ r1 = READ_ONCE(*x);
+ spin_lock(lck);
+ r2 = atomic_inc_return(y);
+ spin_unlock(lck);
+}
+
+locations [x;0:r2;1:r0;1:r1;1:r2]
+filter (1:r0=0 /\ 1:r1=1)
+exists (1:r2=1)
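
RM-fixed keeps the wait outside the critical section. Once P1 has observed x become 1, P0's increment of y has already executed inside P0's critical section, so P1's own atomic_inc_return() cannot return 1 and the exists clause (1:r2=1) is Never. The corresponding sketch, reusing the illustrative names from above:

static void fixed_waiter(void)                  /* P1 of RM-fixed, sketched */
{
        int r2;

        while (!READ_ONCE(x))                   /* wait before taking the lock */
                cpu_relax();
        spin_lock(&lck);
        r2 = atomic_inc_return(&y);
        spin_unlock(&lck);
        WARN_ON(r2 == 1);                       /* the litmus test's forbidden 1:r2=1 */
}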