Diffstat (limited to 'tools/memory-model')
-rw-r--r--  tools/memory-model/Documentation/README                  |  31
-rw-r--r--  tools/memory-model/Documentation/access-marking.txt      |  34
-rw-r--r--  tools/memory-model/Documentation/explanation.txt         |   2
-rw-r--r--  tools/memory-model/Documentation/glossary.txt            |  32
-rw-r--r--  tools/memory-model/Documentation/herd-representation.txt | 113
-rw-r--r--  tools/memory-model/Documentation/locking.txt             |   5
-rw-r--r--  tools/memory-model/Documentation/ordering.txt            |  22
-rw-r--r--  tools/memory-model/Documentation/recipes.txt             |   4
-rw-r--r--  tools/memory-model/Documentation/references.txt          |   3
-rw-r--r--  tools/memory-model/Documentation/simple.txt              |   6
-rw-r--r--  tools/memory-model/README                                |   4
-rw-r--r--  tools/memory-model/linux-kernel.bell                     |  33
-rw-r--r--  tools/memory-model/linux-kernel.cat                      |  10
-rw-r--r--  tools/memory-model/linux-kernel.cfg                      |   1
-rw-r--r--  tools/memory-model/linux-kernel.def                      | 169
-rw-r--r--  tools/memory-model/lock.cat                              |  62
16 files changed, 388 insertions, 143 deletions
diff --git a/tools/memory-model/Documentation/README b/tools/memory-model/Documentation/README
index db90a26dbdf4..88870b0bceea 100644
--- a/tools/memory-model/Documentation/README
+++ b/tools/memory-model/Documentation/README
@@ -9,6 +9,8 @@ depending on what you know and what you would like to learn. Please note
that the documents later in this list assume that the reader understands
the material provided by documents earlier in this list.
+If you get lost in LKMM-specific terms, glossary.txt might help.
+
o You are new to Linux-kernel concurrency: simple.txt
o You have some background in Linux-kernel concurrency, and would
@@ -21,6 +23,12 @@ o You are familiar with the Linux-kernel concurrency primitives
that you need, and just want to get started with LKMM litmus
tests: litmus-tests.txt
+o You need to locklessly access shared variables that are otherwise
+ protected by a lock: locking.txt
+
+ This locking.txt file expands on the "Locking" section in
+ recipes.txt, but is self-contained.
+
o You are familiar with Linux-kernel concurrency, and would
like a detailed intuitive understanding of LKMM, including
situations involving more than two threads: recipes.txt
@@ -28,12 +36,18 @@ o You are familiar with Linux-kernel concurrency, and would
o You would like a detailed understanding of what your compiler can
and cannot do to control dependencies: control-dependencies.txt
+o You would like to mark concurrent normal accesses to shared
+ variables so that intentional "racy" accesses can be properly
+ documented, especially when you are responding to complaints
+ from KCSAN: access-marking.txt
+
o You are familiar with Linux-kernel concurrency and the use of
LKMM, and would like a quick reference: cheatsheet.txt
o You are familiar with Linux-kernel concurrency and the use
of LKMM, and would like to learn about LKMM's requirements,
- rationale, and implementation: explanation.txt
+ rationale, and implementation: explanation.txt and
+ herd-representation.txt
o You are interested in the publications related to LKMM, including
hardware manuals, academic literature, standards-committee
@@ -47,6 +61,10 @@ DESCRIPTION OF FILES
README
This file.
+access-marking.txt
+ Guidelines for marking intentionally concurrent accesses to
+ shared memory.
+
cheatsheet.txt
Quick-reference guide to the Linux-kernel memory model.
@@ -57,10 +75,21 @@ control-dependencies.txt
explanation.txt
Detailed description of the memory model.
+glossary.txt
+ Brief definitions of LKMM-related terms.
+
+herd-representation.txt
+ The (abstract) representation of the Linux-kernel concurrency
+ primitives in terms of events.
+
litmus-tests.txt
The format, features, capabilities, and limitations of the litmus
tests that LKMM can evaluate.
+locking.txt
+ Rules for accessing lock-protected shared variables outside of
+ their corresponding critical sections.
+
ordering.txt
Overview of the Linux kernel's low-level memory-ordering
primitives by category.
diff --git a/tools/memory-model/Documentation/access-marking.txt b/tools/memory-model/Documentation/access-marking.txt
index 65778222183e..3fbe77fd564a 100644
--- a/tools/memory-model/Documentation/access-marking.txt
+++ b/tools/memory-model/Documentation/access-marking.txt
@@ -6,7 +6,8 @@ normal accesses to shared memory, that is "normal" as in accesses that do
not use read-modify-write atomic operations. It also describes how to
document these accesses, both with comments and with special assertions
processed by the Kernel Concurrency Sanitizer (KCSAN). This discussion
-builds on an earlier LWN article [1].
+builds on an earlier LWN article [1] and Linux Foundation mentorship
+session [2].
ACCESS-MARKING OPTIONS
@@ -24,6 +25,11 @@ The Linux kernel provides the following access-marking options:
4. WRITE_ONCE(), for example, "WRITE_ONCE(a, b);"
The various forms of atomic_set() also fit in here.
+5. __data_racy, for example "int __data_racy a;"
+
+6. KCSAN's negative-marking assertions, ASSERT_EXCLUSIVE_ACCESS()
+ and ASSERT_EXCLUSIVE_WRITER(), are described in the
+ "ACCESS-DOCUMENTATION OPTIONS" section below.
These may be used in combination, as shown in this admittedly improbable
example:
@@ -31,7 +37,7 @@ example:
WRITE_ONCE(a, b + data_race(c + d) + READ_ONCE(e));
Neither plain C-language accesses nor data_race() (#1 and #2 above) place
-any sort of constraint on the compiler's choice of optimizations [2].
+any sort of constraint on the compiler's choice of optimizations [3].
In contrast, READ_ONCE() and WRITE_ONCE() (#3 and #4 above) restrict the
compiler's use of code-motion and common-subexpression optimizations.
Therefore, if a given access is involved in an intentional data race,
@@ -205,6 +211,23 @@ because doing otherwise prevents KCSAN from detecting violations of your
code's synchronization rules.
+Use of __data_racy
+------------------
+
+Adding the __data_racy type qualifier to the declaration of a variable
+causes KCSAN to treat all accesses to that variable as if they were
+enclosed by data_race(). However, __data_racy does not affect the
+compiler, though one could imagine hardened kernel builds treating the
+__data_racy type qualifier as if it were the volatile keyword.
+
+Note well that __data_racy is subject to the same pointer-declaration
+rules as are other type qualifiers such as const and volatile.
+For example:
+
+ int __data_racy *p; // Pointer to data-racy data.
+ int *__data_racy p; // Data-racy pointer to non-data-racy data.
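+
+As an illustrative sketch (the variable name and use case here are
+hypothetical, not taken from existing kernel code), a diagnostic
+counter that is intentionally updated and read without synchronization
+might look like this:
+
+	static int __data_racy n_foo_timeouts;
+
+	n_foo_timeouts++;				// Racy update, tolerated by design.
+	pr_info("timeouts: %d\n", n_foo_timeouts);	// Racy read.
+
+KCSAN treats all of these accesses as if they were enclosed in
+data_race(), but, as noted above, the compiler remains free to apply
+its full range of optimizations to them.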
+
+
ACCESS-DOCUMENTATION OPTIONS
============================
@@ -342,7 +365,7 @@ as follows:
Because foo is read locklessly, all accesses are marked. The purpose
of the ASSERT_EXCLUSIVE_WRITER() is to allow KCSAN to check for a buggy
-concurrent lockless write.
+concurrent write, whether marked or not.
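+
+As a minimal sketch of this pattern (the names foo, foo_lock, and
+update_foo() are assumed here for illustration), the lock-protected
+update might look like this:
+
+	void update_foo(int newval)
+	{
+		spin_lock(&foo_lock);
+		WRITE_ONCE(foo, newval);
+		ASSERT_EXCLUSIVE_WRITER(foo);
+		spin_unlock(&foo_lock);
+	}
+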
Lock-Protected Writes With Heuristic Lockless Reads
@@ -594,5 +617,8 @@ REFERENCES
[1] "Concurrency bugs should fear the big bad data-race detector (part 2)"
https://lwn.net/Articles/816854/
-[2] "Who's afraid of a big bad optimizing compiler?"
+[2] "The Kernel Concurrency Sanitizer"
+ https://www.linuxfoundation.org/webinars/the-kernel-concurrency-sanitizer
+
+[3] "Who's afraid of a big bad optimizing compiler?"
https://lwn.net/Articles/793253/
diff --git a/tools/memory-model/Documentation/explanation.txt b/tools/memory-model/Documentation/explanation.txt
index 6dc8b3642458..34aa3172071b 100644
--- a/tools/memory-model/Documentation/explanation.txt
+++ b/tools/memory-model/Documentation/explanation.txt
@@ -1896,7 +1896,7 @@ following respects:
3. The srcu_down_read() and srcu_up_read() primitives work
exactly like srcu_read_lock() and srcu_read_unlock(), except
- that matching calls don't have to execute on the same CPU.
+ that matching calls don't have to execute within the same context.
(The names are meant to be suggestive of operations on
semaphores.) Since the matching is determined by the domain
pointer and index value, these primitives make it possible for
diff --git a/tools/memory-model/Documentation/glossary.txt b/tools/memory-model/Documentation/glossary.txt
index 6f3d16dbf467..7ead94bffa4e 100644
--- a/tools/memory-model/Documentation/glossary.txt
+++ b/tools/memory-model/Documentation/glossary.txt
@@ -15,14 +15,14 @@ Address Dependency: When the address of a later memory access is computed
3 do_something(p->a);
4 rcu_read_unlock();
- In this case, because the address of "p->a" on line 3 is computed
- from the value returned by the rcu_dereference() on line 2, the
- address dependency extends from that rcu_dereference() to that
- "p->a". In rare cases, optimizing compilers can destroy address
- dependencies. Please see Documentation/RCU/rcu_dereference.rst
- for more information.
+ In this case, because the address of "p->a" on line 3 is computed
+ from the value returned by the rcu_dereference() on line 2, the
+ address dependency extends from that rcu_dereference() to that
+ "p->a". In rare cases, optimizing compilers can destroy address
+ dependencies. Please see Documentation/RCU/rcu_dereference.rst
+ for more information.
- See also "Control Dependency" and "Data Dependency".
+ See also "Control Dependency" and "Data Dependency".
Acquire: With respect to a lock, acquiring that lock, for example,
using spin_lock(). With respect to a non-lock shared variable,
@@ -59,12 +59,12 @@ Control Dependency: When a later store's execution depends on a test
1 if (READ_ONCE(x))
2 WRITE_ONCE(y, 1);
- Here, the control dependency extends from the READ_ONCE() on
- line 1 to the WRITE_ONCE() on line 2. Control dependencies are
- fragile, and can be easily destroyed by optimizing compilers.
- Please see control-dependencies.txt for more information.
+ Here, the control dependency extends from the READ_ONCE() on
+ line 1 to the WRITE_ONCE() on line 2. Control dependencies are
+ fragile, and can be easily destroyed by optimizing compilers.
+ Please see control-dependencies.txt for more information.
- See also "Address Dependency" and "Data Dependency".
+ See also "Address Dependency" and "Data Dependency".
Cycle: Memory-barrier pairing is restricted to a pair of CPUs, as the
name suggests. And in a great many cases, a pair of CPUs is all
@@ -72,10 +72,10 @@ Cycle: Memory-barrier pairing is restricted to a pair of CPUs, as the
extended to additional CPUs, and the result is called a "cycle".
In a cycle, each CPU's ordering interacts with that of the next:
- CPU 0 CPU 1 CPU 2
- WRITE_ONCE(x, 1); WRITE_ONCE(y, 1); WRITE_ONCE(z, 1);
- smp_mb(); smp_mb(); smp_mb();
- r0 = READ_ONCE(y); r1 = READ_ONCE(z); r2 = READ_ONCE(x);
+ CPU 0 CPU 1 CPU 2
+ WRITE_ONCE(x, 1); WRITE_ONCE(y, 1); WRITE_ONCE(z, 1);
+ smp_mb(); smp_mb(); smp_mb();
+ r0 = READ_ONCE(y); r1 = READ_ONCE(z); r2 = READ_ONCE(x);
CPU 0's smp_mb() interacts with that of CPU 1, which interacts
with that of CPU 2, which in turn interacts with that of CPU 0
diff --git a/tools/memory-model/Documentation/herd-representation.txt b/tools/memory-model/Documentation/herd-representation.txt
new file mode 100644
index 000000000000..4e19b4f2a476
--- /dev/null
+++ b/tools/memory-model/Documentation/herd-representation.txt
@@ -0,0 +1,113 @@
+#
+# Legend:
+# R, a Load event
+# W, a Store event
+# F, a Fence event
+# LKR, a Lock-Read event
+# LKW, a Lock-Write event
+# UL, an Unlock event
+# LF, a Lock-Fail event
+# RL, a Read-Locked event
+# RU, a Read-Unlocked event
+# R*, a Load event included in RMW
+# W*, a Store event included in RMW
+# SRCU, a Sleepable-Read-Copy-Update event
+#
+# po, a Program-Order link
+# rmw, a Read-Modify-Write link - every rmw link is a po link
+#
+# By convention, a blank line in a cell means "same as the preceding line".
+#
+# Note that the syntactic representation does not always match the sets and
+# relations in linux-kernel.cat, due to redefinitions in linux-kernel.bell and
+# lock.cat. For example, the po link between LKR and LKW is upgraded to an rmw
+# link, and W[ACQUIRE] events are not included in the Acquire set.
+#
+# Disclaimer. The table includes representations of "add" and "and" operations;
+# corresponding/identical representations of "sub", "inc", "dec" and "or", "xor",
+# "andnot" operations are omitted.
+#
+ ------------------------------------------------------------------------------
+ | C macro | Events |
+ ------------------------------------------------------------------------------
+ | Non-RMW ops | |
+ ------------------------------------------------------------------------------
+ | READ_ONCE | R[ONCE] |
+ | atomic_read | |
+ | WRITE_ONCE | W[ONCE] |
+ | atomic_set | |
+ | smp_load_acquire | R[ACQUIRE] |
+ | atomic_read_acquire | |
+ | smp_store_release | W[RELEASE] |
+ | atomic_set_release | |
+ | smp_store_mb | W[ONCE] ->po F[MB] |
+ | smp_mb | F[MB] |
+ | smp_rmb | F[rmb] |
+ | smp_wmb | F[wmb] |
+ | smp_mb__before_atomic | F[before-atomic] |
+ | smp_mb__after_atomic | F[after-atomic] |
+ | spin_unlock | UL |
+ | spin_is_locked | On success: RL |
+ | | On failure: RU |
+ | smp_mb__after_spinlock | F[after-spinlock] |
+ | smp_mb__after_unlock_lock | F[after-unlock-lock] |
+ | rcu_read_lock | F[rcu-lock] |
+ | rcu_read_unlock | F[rcu-unlock] |
+ | synchronize_rcu | F[sync-rcu] |
+ | rcu_dereference | R[ONCE] |
+ | rcu_assign_pointer | W[RELEASE] |
+ | srcu_read_lock | R[srcu-lock] |
+ | srcu_down_read | |
+ | srcu_read_unlock | W[srcu-unlock] |
+ | srcu_up_read | |
+ | synchronize_srcu | SRCU[sync-srcu] |
+ | smp_mb__after_srcu_read_unlock | F[after-srcu-read-unlock] |
+ ------------------------------------------------------------------------------
+ | RMW ops w/o return value | |
+ ------------------------------------------------------------------------------
+ | atomic_add | R*[NORETURN] ->rmw W*[NORETURN] |
+ | atomic_and | |
+ | spin_lock | LKR ->po LKW |
+ ------------------------------------------------------------------------------
+ | RMW ops w/ return value | |
+ ------------------------------------------------------------------------------
+ | atomic_add_return | R*[MB] ->rmw W*[MB] |
+ | atomic_fetch_add | |
+ | atomic_fetch_and | |
+ | atomic_xchg | |
+ | xchg | |
+ | atomic_add_negative | |
+ | atomic_add_return_relaxed | R*[ONCE] ->rmw W*[ONCE] |
+ | atomic_fetch_add_relaxed | |
+ | atomic_fetch_and_relaxed | |
+ | atomic_xchg_relaxed | |
+ | xchg_relaxed | |
+ | atomic_add_negative_relaxed | |
+ | atomic_add_return_acquire | R*[ACQUIRE] ->rmw W*[ACQUIRE] |
+ | atomic_fetch_add_acquire | |
+ | atomic_fetch_and_acquire | |
+ | atomic_xchg_acquire | |
+ | xchg_acquire | |
+ | atomic_add_negative_acquire | |
+ | atomic_add_return_release | R*[RELEASE] ->rmw W*[RELEASE] |
+ | atomic_fetch_add_release | |
+ | atomic_fetch_and_release | |
+ | atomic_xchg_release | |
+ | xchg_release | |
+ | atomic_add_negative_release | |
+ ------------------------------------------------------------------------------
+ | Conditional RMW ops | |
+ ------------------------------------------------------------------------------
+ | atomic_cmpxchg | On success: R*[MB] ->rmw W*[MB] |
+ | | On failure: R*[MB] |
+ | cmpxchg | |
+ | atomic_add_unless | |
+ | atomic_cmpxchg_relaxed | On success: R*[ONCE] ->rmw W*[ONCE] |
+ | | On failure: R*[ONCE] |
+ | atomic_cmpxchg_acquire | On success: R*[ACQUIRE] ->rmw W*[ACQUIRE] |
+ | | On failure: R*[ACQUIRE] |
+ | atomic_cmpxchg_release | On success: R*[RELEASE] ->rmw W*[RELEASE] |
+ | | On failure: R*[RELEASE] |
+ | spin_trylock | On success: LKR ->po LKW |
+ | | On failure: LF |
+ ------------------------------------------------------------------------------
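+#
+# As an illustrative example (not itself part of the table above), herd7
+# would represent a litmus-test thread such as
+#
+#	P0(int *x, int *y)
+#	{
+#		int r0;
+#
+#		WRITE_ONCE(*x, 1);
+#		smp_mb();
+#		r0 = READ_ONCE(*y);
+#	}
+#
+# as a W[ONCE] event, an F[MB] event, and an R[ONCE] event, linked in
+# that order by po links.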
diff --git a/tools/memory-model/Documentation/locking.txt b/tools/memory-model/Documentation/locking.txt
index 65c898c64a93..d6dc3cc34ab6 100644
--- a/tools/memory-model/Documentation/locking.txt
+++ b/tools/memory-model/Documentation/locking.txt
@@ -1,3 +1,8 @@
+[!] Note:
+ This file expands on the "Locking" section of recipes.txt,
+ focusing on locklessly accessing shared variables that are
+ otherwise protected by a lock.
+
Locking
=======
diff --git a/tools/memory-model/Documentation/ordering.txt b/tools/memory-model/Documentation/ordering.txt
index 9b0949d3f5ec..7ab3744929d8 100644
--- a/tools/memory-model/Documentation/ordering.txt
+++ b/tools/memory-model/Documentation/ordering.txt
@@ -223,7 +223,7 @@ The Linux kernel's compiler barrier is barrier(). This primitive
prohibits compiler code-motion optimizations that might move memory
references across the point in the code containing the barrier(), but
does not constrain hardware memory ordering. For example, this can be
-used to prevent to compiler from moving code across an infinite loop:
+used to prevent the compiler from moving code across an infinite loop:
WRITE_ONCE(x, 1);
while (dontstop)
@@ -274,7 +274,7 @@ different pieces of the concurrent algorithm. The variable stored to
by the smp_store_release(), in this case "y", will normally be used in
an acquire operation in other parts of the concurrent algorithm.
-To see the performance advantages, suppose that the above example read
+To see the performance advantages, suppose that the above example reads
from "x" instead of writing to it. Then an smp_wmb() could not guarantee
ordering, and an smp_mb() would be needed instead:
@@ -394,17 +394,17 @@ from the value returned by the rcu_dereference() or srcu_dereference()
to that subsequent memory access.
A call to rcu_dereference() for a given RCU-protected pointer is
-usually paired with a call to a call to rcu_assign_pointer() for that
-same pointer in much the same way that a call to smp_load_acquire() is
-paired with a call to smp_store_release(). Calls to rcu_dereference()
-and rcu_assign_pointer are often buried in other APIs, for example,
+usually paired with a call to rcu_assign_pointer() for that same pointer
+in much the same way that a call to smp_load_acquire() is paired with
+a call to smp_store_release(). Calls to rcu_dereference() and
+rcu_assign_pointer() are often buried in other APIs, for example,
the RCU list API members defined in include/linux/rculist.h. For more
information, please see the docbook headers in that file, the most
-recent LWN article on the RCU API (https://lwn.net/Articles/777036/),
+recent LWN article on the RCU API (https://lwn.net/Articles/988638/),
and of course the material in Documentation/RCU.
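+
+As a minimal sketch (gp, p, q, and do_something_with() are illustrative
+names, not taken from any particular API), the pairing typically looks
+like this:
+
+	/* Updater: initialize, then publish. */
+	p->a = 1;
+	rcu_assign_pointer(gp, p);
+
+	/* Reader: pick up the pointer, then dereference it. */
+	rcu_read_lock();
+	q = rcu_dereference(gp);
+	if (q)
+		do_something_with(q->a);
+	rcu_read_unlock();
+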
If the pointer value is manipulated between the rcu_dereference()
-that returned it and a later dereference(), please read
+that returned it and a later rcu_dereference(), please read
Documentation/RCU/rcu_dereference.rst. It can also be quite helpful to
review uses in the Linux kernel.
@@ -457,7 +457,7 @@ described earlier in this document.
These operations come in three categories:
o Marked writes, such as WRITE_ONCE() and atomic_set(). These
- primitives required the compiler to emit the corresponding store
+ primitives require the compiler to emit the corresponding store
instructions in the expected execution order, thus suppressing
a number of destructive optimizations. However, they provide no
hardware ordering guarantees, and in fact many CPUs will happily
@@ -465,7 +465,7 @@ o Marked writes, such as WRITE_ONCE() and atomic_set(). These
operations, unless these operations are to the same variable.
o Marked reads, such as READ_ONCE() and atomic_read(). These
- primitives required the compiler to emit the corresponding load
+ primitives require the compiler to emit the corresponding load
instructions in the expected execution order, thus suppressing
a number of destructive optimizations. However, they provide no
hardware ordering guarantees, and in fact many CPUs will happily
@@ -506,7 +506,7 @@ of the old value and the new value.
Unmarked C-language accesses are unordered, and are also subject to
any number of compiler optimizations, many of which can break your
-concurrent code. It is possible to used unmarked C-language accesses for
+concurrent code. It is possible to use unmarked C-language accesses for
shared variables that are subject to concurrent access, but great care
is required on an ongoing basis. The compiler-constraining barrier()
primitive can be helpful, as can the various ordering primitives discussed
diff --git a/tools/memory-model/Documentation/recipes.txt b/tools/memory-model/Documentation/recipes.txt
index 03f58b11c252..52115ee5f393 100644
--- a/tools/memory-model/Documentation/recipes.txt
+++ b/tools/memory-model/Documentation/recipes.txt
@@ -61,6 +61,10 @@ usual) some things to be careful of:
Locking
-------
+[!] Note:
+ locking.txt expands on this section, providing more detail on
+ locklessly accessing lock-protected shared variables.
+
Locking is well-known and straightforward, at least if you don't think
about it too hard. And the basic rule is indeed quite simple: Any CPU that
has acquired a given lock sees any changes previously seen or made by any
diff --git a/tools/memory-model/Documentation/references.txt b/tools/memory-model/Documentation/references.txt
index c5fdfd19df24..d691390620b3 100644
--- a/tools/memory-model/Documentation/references.txt
+++ b/tools/memory-model/Documentation/references.txt
@@ -46,8 +46,7 @@ o ARM Ltd. (Ed.). 2014. "ARM Architecture Reference Manual (ARMv8,
o Imagination Technologies, LTD. 2015. "MIPS(R) Architecture
For Programmers, Volume II-A: The MIPS64(R) Instruction,
- Set Reference Manual". Imagination Technologies,
- LTD. https://imgtec.com/?do-download=4302.
+ Set Reference Manual". Imagination Technologies, LTD.
o Shaked Flur, Kathryn E. Gray, Christopher Pulte, Susmit
Sarkar, Ali Sezgin, Luc Maranget, Will Deacon, and Peter
diff --git a/tools/memory-model/Documentation/simple.txt b/tools/memory-model/Documentation/simple.txt
index 4c789ec8334f..2df148630cdc 100644
--- a/tools/memory-model/Documentation/simple.txt
+++ b/tools/memory-model/Documentation/simple.txt
@@ -134,7 +134,7 @@ Packaged primitives: Sequence locking
Lockless programming is considered by many to be more difficult than
lock-based programming, but there are a few lockless design patterns that
have been built out into an API. One of these APIs is sequence locking.
-Although this APIs can be used in extremely complex ways, there are simple
+Although this API can be used in extremely complex ways, there are simple
and effective ways of using it that avoid the need to pay attention to
memory ordering.
@@ -205,7 +205,7 @@ If you want to keep things simple, use the initialization and read-out
operations from the previous section only when there are no racing
accesses. Otherwise, use only fully ordered operations when accessing
or modifying the variable. This approach guarantees that code prior
-to a given access to that variable will be seen by all CPUs has having
+to a given access to that variable will be seen by all CPUs as having
happened before any code following any later access to that same variable.
Please note that per-CPU functions are not atomic operations and
@@ -266,5 +266,5 @@ More complex use cases
======================
If the alternatives above do not do what you need, please look at the
-recipes-pairs.txt file to peel off the next layer of the memory-ordering
+recipes.txt file to peel off the next layer of the memory-ordering
onion.
diff --git a/tools/memory-model/README b/tools/memory-model/README
index dab38904206a..64c860863aa9 100644
--- a/tools/memory-model/README
+++ b/tools/memory-model/README
@@ -20,7 +20,7 @@ that litmus test to be exercised within the Linux kernel.
REQUIREMENTS
============
-Version 7.52 or higher of the "herd7" and "klitmus7" tools must be
+Version 7.58 or higher of the "herd7" and "klitmus7" tools must be
downloaded separately:
https://github.com/herd/herdtools7
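+
+For example, assuming that the tools are already on your search path,
+the installed version can be checked as follows:
+
+	herd7 -version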
@@ -79,7 +79,7 @@ Several thousand more example litmus tests are available here:
https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/perfbook.git/tree/CodeSamples/formal/herd
https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/perfbook.git/tree/CodeSamples/formal/litmus
-Documentation describing litmus tests and now to use them may be found
+Documentation describing litmus tests and how to use them may be found
here:
tools/memory-model/Documentation/litmus-tests.txt
diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell
index ce068700939c..fe65998002b9 100644
--- a/tools/memory-model/linux-kernel.bell
+++ b/tools/memory-model/linux-kernel.bell
@@ -13,17 +13,18 @@
"Linux-kernel memory consistency model"
-enum Accesses = 'once (*READ_ONCE,WRITE_ONCE*) ||
- 'release (*smp_store_release*) ||
- 'acquire (*smp_load_acquire*) ||
- 'noreturn (* R of non-return RMW *)
-instructions R[{'once,'acquire,'noreturn}]
-instructions W[{'once,'release}]
-instructions RMW[{'once,'acquire,'release}]
+enum Accesses = 'ONCE (*READ_ONCE,WRITE_ONCE*) ||
+ 'RELEASE (*smp_store_release*) ||
+ 'ACQUIRE (*smp_load_acquire*) ||
+ 'NORETURN (* R of non-return RMW *) ||
+ 'MB (*xchg(),cmpxchg(),...*)
+instructions R[Accesses]
+instructions W[Accesses]
+instructions RMW[Accesses]
enum Barriers = 'wmb (*smp_wmb*) ||
'rmb (*smp_rmb*) ||
- 'mb (*smp_mb*) ||
+ 'MB (*smp_mb*) ||
'barrier (*barrier*) ||
'rcu-lock (*rcu_read_lock*) ||
'rcu-unlock (*rcu_read_unlock*) ||
@@ -35,6 +36,17 @@ enum Barriers = 'wmb (*smp_wmb*) ||
'after-srcu-read-unlock (*smp_mb__after_srcu_read_unlock*)
instructions F[Barriers]
+
+(*
+ * Filter out syntactic annotations that do not provide the corresponding
+ * semantic ordering, such as Acquire on a store or Mb on a failed RMW.
+ *)
+let FailedRMW = RMW \ (domain(rmw) | range(rmw))
+let Acquire = ACQUIRE \ W \ FailedRMW
+let Release = RELEASE \ R \ FailedRMW
+let Mb = MB \ FailedRMW
+let Noreturn = NORETURN \ W
+
(* SRCU *)
enum SRCU = 'srcu-lock || 'srcu-unlock || 'sync-srcu
instructions SRCU[SRCU]
@@ -73,7 +85,7 @@ flag ~empty rcu-rscs & (po ; [Sync-srcu] ; po) as invalid-sleep
flag ~empty different-values(srcu-rscs) as srcu-bad-value-match
(* Compute marked and plain memory accesses *)
-let Marked = (~M) | IW | Once | Release | Acquire | domain(rmw) | range(rmw) |
+let Marked = (~M) | IW | ONCE | RELEASE | ACQUIRE | MB | RMW |
LKR | LKW | UL | LF | RL | RU | Srcu-lock | Srcu-unlock
let Plain = M \ Marked
@@ -82,3 +94,6 @@ let carry-dep = (data ; [~ Srcu-unlock] ; rfi)*
let addr = carry-dep ; addr
let ctrl = carry-dep ; ctrl
let data = carry-dep ; data
+
+flag ~empty (if "lkmmv2" then 0 else _)
+ as this-model-requires-variant-higher-than-lkmmv1
diff --git a/tools/memory-model/linux-kernel.cat b/tools/memory-model/linux-kernel.cat
index adf3c4f41229..d7e7bf13c831 100644
--- a/tools/memory-model/linux-kernel.cat
+++ b/tools/memory-model/linux-kernel.cat
@@ -34,6 +34,16 @@ let R4rmb = R \ Noreturn (* Reads for which rmb works *)
let rmb = [R4rmb] ; fencerel(Rmb) ; [R4rmb]
let wmb = [W] ; fencerel(Wmb) ; [W]
let mb = ([M] ; fencerel(Mb) ; [M]) |
+ (*
+ * full-barrier RMWs (successful cmpxchg(), xchg(), etc.) act as
+	 * though they were enclosed by smp_mb().
+	 * The effect of these virtual smp_mb() calls is formalized by adding
+ * Mb tags to the read and write of the operation, and providing
+ * the same ordering as though there were additional po edges
+ * between the Mb tag and the read resp. write.
+ *)
+ ([M] ; po ; [Mb & R]) |
+ ([Mb & W] ; po ; [M]) |
([M] ; fencerel(Before-atomic) ; [RMW] ; po? ; [M]) |
([M] ; po? ; [RMW] ; fencerel(After-atomic) ; [M]) |
([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) |
diff --git a/tools/memory-model/linux-kernel.cfg b/tools/memory-model/linux-kernel.cfg
index 3c8098e99f41..69b04f3aad73 100644
--- a/tools/memory-model/linux-kernel.cfg
+++ b/tools/memory-model/linux-kernel.cfg
@@ -1,6 +1,7 @@
macros linux-kernel.def
bell linux-kernel.bell
model linux-kernel.cat
+variant lkmmv2
graph columns
squished true
showevents noregs
diff --git a/tools/memory-model/linux-kernel.def b/tools/memory-model/linux-kernel.def
index 88a39601f525..49e402782e49 100644
--- a/tools/memory-model/linux-kernel.def
+++ b/tools/memory-model/linux-kernel.def
@@ -6,18 +6,18 @@
// which appeared in ASPLOS 2018.
// ONCE
-READ_ONCE(X) __load{once}(X)
-WRITE_ONCE(X,V) { __store{once}(X,V); }
+READ_ONCE(X) __load{ONCE}(X)
+WRITE_ONCE(X,V) { __store{ONCE}(X,V); }
// Release Acquire and friends
-smp_store_release(X,V) { __store{release}(*X,V); }
-smp_load_acquire(X) __load{acquire}(*X)
-rcu_assign_pointer(X,V) { __store{release}(X,V); }
-rcu_dereference(X) __load{once}(X)
-smp_store_mb(X,V) { __store{once}(X,V); __fence{mb}; }
+smp_store_release(X,V) { __store{RELEASE}(*X,V); }
+smp_load_acquire(X) __load{ACQUIRE}(*X)
+rcu_assign_pointer(X,V) { __store{RELEASE}(X,V); }
+rcu_dereference(X) __load{ONCE}(X)
+smp_store_mb(X,V) { __store{ONCE}(X,V); __fence{MB}; }
// Fences
-smp_mb() { __fence{mb}; }
+smp_mb() { __fence{MB}; }
smp_rmb() { __fence{rmb}; }
smp_wmb() { __fence{wmb}; }
smp_mb__before_atomic() { __fence{before-atomic}; }
@@ -28,14 +28,14 @@ smp_mb__after_srcu_read_unlock() { __fence{after-srcu-read-unlock}; }
barrier() { __fence{barrier}; }
// Exchange
-xchg(X,V) __xchg{mb}(X,V)
-xchg_relaxed(X,V) __xchg{once}(X,V)
-xchg_release(X,V) __xchg{release}(X,V)
-xchg_acquire(X,V) __xchg{acquire}(X,V)
-cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
-cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
-cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
-cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
+xchg(X,V) __xchg{MB}(X,V)
+xchg_relaxed(X,V) __xchg{ONCE}(X,V)
+xchg_release(X,V) __xchg{RELEASE}(X,V)
+xchg_acquire(X,V) __xchg{ACQUIRE}(X,V)
+cmpxchg(X,V,W) __cmpxchg{MB}(X,V,W)
+cmpxchg_relaxed(X,V,W) __cmpxchg{ONCE}(X,V,W)
+cmpxchg_acquire(X,V,W) __cmpxchg{ACQUIRE}(X,V,W)
+cmpxchg_release(X,V,W) __cmpxchg{RELEASE}(X,V,W)
// Spinlocks
spin_lock(X) { __lock(X); }
@@ -63,57 +63,86 @@ atomic_set(X,V) { WRITE_ONCE(*X,V); }
atomic_read_acquire(X) smp_load_acquire(X)
atomic_set_release(X,V) { smp_store_release(X,V); }
-atomic_add(V,X) { __atomic_op(X,+,V); }
-atomic_sub(V,X) { __atomic_op(X,-,V); }
-atomic_inc(X) { __atomic_op(X,+,1); }
-atomic_dec(X) { __atomic_op(X,-,1); }
-
-atomic_add_return(V,X) __atomic_op_return{mb}(X,+,V)
-atomic_add_return_relaxed(V,X) __atomic_op_return{once}(X,+,V)
-atomic_add_return_acquire(V,X) __atomic_op_return{acquire}(X,+,V)
-atomic_add_return_release(V,X) __atomic_op_return{release}(X,+,V)
-atomic_fetch_add(V,X) __atomic_fetch_op{mb}(X,+,V)
-atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{once}(X,+,V)
-atomic_fetch_add_acquire(V,X) __atomic_fetch_op{acquire}(X,+,V)
-atomic_fetch_add_release(V,X) __atomic_fetch_op{release}(X,+,V)
-
-atomic_inc_return(X) __atomic_op_return{mb}(X,+,1)
-atomic_inc_return_relaxed(X) __atomic_op_return{once}(X,+,1)
-atomic_inc_return_acquire(X) __atomic_op_return{acquire}(X,+,1)
-atomic_inc_return_release(X) __atomic_op_return{release}(X,+,1)
-atomic_fetch_inc(X) __atomic_fetch_op{mb}(X,+,1)
-atomic_fetch_inc_relaxed(X) __atomic_fetch_op{once}(X,+,1)
-atomic_fetch_inc_acquire(X) __atomic_fetch_op{acquire}(X,+,1)
-atomic_fetch_inc_release(X) __atomic_fetch_op{release}(X,+,1)
-
-atomic_sub_return(V,X) __atomic_op_return{mb}(X,-,V)
-atomic_sub_return_relaxed(V,X) __atomic_op_return{once}(X,-,V)
-atomic_sub_return_acquire(V,X) __atomic_op_return{acquire}(X,-,V)
-atomic_sub_return_release(V,X) __atomic_op_return{release}(X,-,V)
-atomic_fetch_sub(V,X) __atomic_fetch_op{mb}(X,-,V)
-atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{once}(X,-,V)
-atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{acquire}(X,-,V)
-atomic_fetch_sub_release(V,X) __atomic_fetch_op{release}(X,-,V)
-
-atomic_dec_return(X) __atomic_op_return{mb}(X,-,1)
-atomic_dec_return_relaxed(X) __atomic_op_return{once}(X,-,1)
-atomic_dec_return_acquire(X) __atomic_op_return{acquire}(X,-,1)
-atomic_dec_return_release(X) __atomic_op_return{release}(X,-,1)
-atomic_fetch_dec(X) __atomic_fetch_op{mb}(X,-,1)
-atomic_fetch_dec_relaxed(X) __atomic_fetch_op{once}(X,-,1)
-atomic_fetch_dec_acquire(X) __atomic_fetch_op{acquire}(X,-,1)
-atomic_fetch_dec_release(X) __atomic_fetch_op{release}(X,-,1)
-
-atomic_xchg(X,V) __xchg{mb}(X,V)
-atomic_xchg_relaxed(X,V) __xchg{once}(X,V)
-atomic_xchg_release(X,V) __xchg{release}(X,V)
-atomic_xchg_acquire(X,V) __xchg{acquire}(X,V)
-atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
-atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
-atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
-atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
-
-atomic_sub_and_test(V,X) __atomic_op_return{mb}(X,-,V) == 0
-atomic_dec_and_test(X) __atomic_op_return{mb}(X,-,1) == 0
-atomic_inc_and_test(X) __atomic_op_return{mb}(X,+,1) == 0
-atomic_add_negative(V,X) __atomic_op_return{mb}(X,+,V) < 0
+atomic_add(V,X) { __atomic_op{NORETURN}(X,+,V); }
+atomic_sub(V,X) { __atomic_op{NORETURN}(X,-,V); }
+atomic_and(V,X) { __atomic_op{NORETURN}(X,&,V); }
+atomic_or(V,X) { __atomic_op{NORETURN}(X,|,V); }
+atomic_xor(V,X) { __atomic_op{NORETURN}(X,^,V); }
+atomic_inc(X) { __atomic_op{NORETURN}(X,+,1); }
+atomic_dec(X) { __atomic_op{NORETURN}(X,-,1); }
+atomic_andnot(V,X) { __atomic_op{NORETURN}(X,&~,V); }
+
+atomic_add_return(V,X) __atomic_op_return{MB}(X,+,V)
+atomic_add_return_relaxed(V,X) __atomic_op_return{ONCE}(X,+,V)
+atomic_add_return_acquire(V,X) __atomic_op_return{ACQUIRE}(X,+,V)
+atomic_add_return_release(V,X) __atomic_op_return{RELEASE}(X,+,V)
+atomic_fetch_add(V,X) __atomic_fetch_op{MB}(X,+,V)
+atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{ONCE}(X,+,V)
+atomic_fetch_add_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,+,V)
+atomic_fetch_add_release(V,X) __atomic_fetch_op{RELEASE}(X,+,V)
+
+atomic_fetch_and(V,X) __atomic_fetch_op{MB}(X,&,V)
+atomic_fetch_and_relaxed(V,X) __atomic_fetch_op{ONCE}(X,&,V)
+atomic_fetch_and_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,&,V)
+atomic_fetch_and_release(V,X) __atomic_fetch_op{RELEASE}(X,&,V)
+
+atomic_fetch_or(V,X) __atomic_fetch_op{MB}(X,|,V)
+atomic_fetch_or_relaxed(V,X) __atomic_fetch_op{ONCE}(X,|,V)
+atomic_fetch_or_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,|,V)
+atomic_fetch_or_release(V,X) __atomic_fetch_op{RELEASE}(X,|,V)
+
+atomic_fetch_xor(V,X) __atomic_fetch_op{MB}(X,^,V)
+atomic_fetch_xor_relaxed(V,X) __atomic_fetch_op{ONCE}(X,^,V)
+atomic_fetch_xor_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,^,V)
+atomic_fetch_xor_release(V,X) __atomic_fetch_op{RELEASE}(X,^,V)
+
+atomic_inc_return(X) __atomic_op_return{MB}(X,+,1)
+atomic_inc_return_relaxed(X) __atomic_op_return{ONCE}(X,+,1)
+atomic_inc_return_acquire(X) __atomic_op_return{ACQUIRE}(X,+,1)
+atomic_inc_return_release(X) __atomic_op_return{RELEASE}(X,+,1)
+atomic_fetch_inc(X) __atomic_fetch_op{MB}(X,+,1)
+atomic_fetch_inc_relaxed(X) __atomic_fetch_op{ONCE}(X,+,1)
+atomic_fetch_inc_acquire(X) __atomic_fetch_op{ACQUIRE}(X,+,1)
+atomic_fetch_inc_release(X) __atomic_fetch_op{RELEASE}(X,+,1)
+
+atomic_sub_return(V,X) __atomic_op_return{MB}(X,-,V)
+atomic_sub_return_relaxed(V,X) __atomic_op_return{ONCE}(X,-,V)
+atomic_sub_return_acquire(V,X) __atomic_op_return{ACQUIRE}(X,-,V)
+atomic_sub_return_release(V,X) __atomic_op_return{RELEASE}(X,-,V)
+atomic_fetch_sub(V,X) __atomic_fetch_op{MB}(X,-,V)
+atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{ONCE}(X,-,V)
+atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,-,V)
+atomic_fetch_sub_release(V,X) __atomic_fetch_op{RELEASE}(X,-,V)
+
+atomic_dec_return(X) __atomic_op_return{MB}(X,-,1)
+atomic_dec_return_relaxed(X) __atomic_op_return{ONCE}(X,-,1)
+atomic_dec_return_acquire(X) __atomic_op_return{ACQUIRE}(X,-,1)
+atomic_dec_return_release(X) __atomic_op_return{RELEASE}(X,-,1)
+atomic_fetch_dec(X) __atomic_fetch_op{MB}(X,-,1)
+atomic_fetch_dec_relaxed(X) __atomic_fetch_op{ONCE}(X,-,1)
+atomic_fetch_dec_acquire(X) __atomic_fetch_op{ACQUIRE}(X,-,1)
+atomic_fetch_dec_release(X) __atomic_fetch_op{RELEASE}(X,-,1)
+
+atomic_xchg(X,V) __xchg{MB}(X,V)
+atomic_xchg_relaxed(X,V) __xchg{ONCE}(X,V)
+atomic_xchg_release(X,V) __xchg{RELEASE}(X,V)
+atomic_xchg_acquire(X,V) __xchg{ACQUIRE}(X,V)
+atomic_cmpxchg(X,V,W) __cmpxchg{MB}(X,V,W)
+atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{ONCE}(X,V,W)
+atomic_cmpxchg_acquire(X,V,W) __cmpxchg{ACQUIRE}(X,V,W)
+atomic_cmpxchg_release(X,V,W) __cmpxchg{RELEASE}(X,V,W)
+
+atomic_sub_and_test(V,X) __atomic_op_return{MB}(X,-,V) == 0
+atomic_dec_and_test(X) __atomic_op_return{MB}(X,-,1) == 0
+atomic_inc_and_test(X) __atomic_op_return{MB}(X,+,1) == 0
+atomic_add_negative(V,X) __atomic_op_return{MB}(X,+,V) < 0
+atomic_add_negative_relaxed(V,X) __atomic_op_return{ONCE}(X,+,V) < 0
+atomic_add_negative_acquire(V,X) __atomic_op_return{ACQUIRE}(X,+,V) < 0
+atomic_add_negative_release(V,X) __atomic_op_return{RELEASE}(X,+,V) < 0
+
+atomic_fetch_andnot(V,X) __atomic_fetch_op{MB}(X,&~,V)
+atomic_fetch_andnot_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,&~,V)
+atomic_fetch_andnot_release(V,X) __atomic_fetch_op{RELEASE}(X,&~,V)
+atomic_fetch_andnot_relaxed(V,X) __atomic_fetch_op{ONCE}(X,&~,V)
+
+atomic_add_unless(X,V,W) __atomic_add_unless{MB}(X,V,W)
diff --git a/tools/memory-model/lock.cat b/tools/memory-model/lock.cat
index 53b5a492739d..03c12efed66a 100644
--- a/tools/memory-model/lock.cat
+++ b/tools/memory-model/lock.cat
@@ -54,6 +54,12 @@ flag ~empty LKR \ domain(lk-rmw) as unpaired-LKR
*)
empty ([LKW] ; po-loc ; [LKR]) \ (po-loc ; [UL] ; po-loc) as lock-nest
+(*
+ * In the same way, spin_is_locked() inside a critical section must always
+ * return True (no RU events can be in a critical section for the same lock).
+ *)
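+(*
+ * For example, in a litmus-test fragment such as this (illustrative only):
+ *
+ *	spin_lock(s);
+ *	r0 = spin_is_locked(s);
+ *	spin_unlock(s);
+ *
+ * the spin_is_locked() call generates an RL event, and executions in
+ * which it would return False (an RU event) are forbidden.
+ *)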
+empty ([LKW] ; po-loc ; [RU]) \ (po-loc ; [UL] ; po-loc) as nested-is-locked
+
(* The final value of a spinlock should not be tested *)
flag ~empty [FW] ; loc ; [ALL-LOCKS] as lock-final
@@ -79,42 +85,50 @@ empty ([UNMATCHED-LKW] ; loc ; [UNMATCHED-LKW]) \ id as unmatched-locks
(* rfi for LF events: link each LKW to the LF events in its critical section *)
let rfi-lf = ([LKW] ; po-loc ; [LF]) \ ([LKW] ; po-loc ; [UL] ; po-loc)
-(* rfe for LF events *)
+(* Utility macro to convert a single pair to a single-edge relation *)
+let pair-to-relation p = p ++ 0
+
+(*
+ * If a given LF event e is outside a critical section, it cannot read
+ * internally but it may read from an LKW event in another thread.
+ * Compute the relation containing these possible edges.
+ *)
+let possible-rfe-noncrit-lf e = (LKW * {e}) & loc & ext
+
+(* Compute set of sets of possible rfe edges for LF events *)
let all-possible-rfe-lf =
(*
- * Given an LF event r, compute the possible rfe edges for that event
- * (all those starting from LKW events in other threads),
- * and then convert that relation to a set of single-edge relations.
+ * Convert the possible-rfe-noncrit-lf relation for e
+ * to a set of single edges
*)
- let possible-rfe-lf r =
- let pair-to-relation p = p ++ 0
- in map pair-to-relation ((LKW * {r}) & loc & ext)
- (* Do this for each LF event r that isn't in rfi-lf *)
- in map possible-rfe-lf (LF \ range(rfi-lf))
+ let set-of-singleton-rfe-lf e =
+ map pair-to-relation (possible-rfe-noncrit-lf e)
+ (* Do this for each LF event e that isn't in rfi-lf *)
+ in map set-of-singleton-rfe-lf (LF \ range(rfi-lf))
(* Generate all rf relations for LF events *)
with rfe-lf from cross(all-possible-rfe-lf)
let rf-lf = rfe-lf | rfi-lf
(*
- * RU, i.e., spin_is_locked() returning False, is slightly different.
- * We rely on the memory model to rule out cases where spin_is_locked()
- * within one of the lock's critical sections returns False.
+ * A given RU event e may read internally from the last po-previous UL,
+ * or it may read from a UL event in another thread or the initial write.
+ * Compute the relation containing these possible edges.
*)
-
-(* rfi for RU events: an RU may read from the last po-previous UL *)
-let rfi-ru = ([UL] ; po-loc ; [RU]) \ ([UL] ; po-loc ; [LKW] ; po-loc)
-
-(* rfe for RU events: an RU may read from an external UL or the initial write *)
-let all-possible-rfe-ru =
- let possible-rfe-ru r =
- let pair-to-relation p = p ++ 0
- in map pair-to-relation (((UL | IW) * {r}) & loc & ext)
- in map possible-rfe-ru RU
+let possible-rf-ru e = (((UL * {e}) & po-loc) \
+ ([UL] ; po-loc ; [UL] ; po-loc)) |
+ (((UL | IW) * {e}) & loc & ext)
+
+(* Compute set of sets of possible rf edges for RU events *)
+let all-possible-rf-ru =
+ (* Convert the possible-rf-ru relation for e to a set of single edges *)
+ let set-of-singleton-rf-ru e =
+ map pair-to-relation (possible-rf-ru e)
+ (* Do this for each RU event e *)
+ in map set-of-singleton-rf-ru RU
(* Generate all rf relations for RU events *)
-with rfe-ru from cross(all-possible-rfe-ru)
-let rf-ru = rfe-ru | rfi-ru
+with rf-ru from cross(all-possible-rf-ru)
(* Final rf relation *)
let rf = rf | rf-lf | rf-ru