From 12305abe982740878ecdf9e22852652d8d164855 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 30 Nov 2021 12:44:09 +0100 Subject: kcsan: Refactor reading of instrumented memory Factor out the switch statement reading instrumented memory into a helper read_instrumented_memory(). No functional change. Signed-off-by: Marco Elver Acked-by: Mark Rutland Signed-off-by: Paul E. McKenney --- kernel/kcsan/core.c | 51 +++++++++++++++++---------------------------------- 1 file changed, 17 insertions(+), 34 deletions(-) (limited to 'kernel/kcsan') diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 4b84c8e7884b..6bfd3040f46b 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -325,6 +325,21 @@ static void delay_access(int type) udelay(delay); } +/* + * Reads the instrumented memory for value change detection; value change + * detection is currently done for accesses up to a size of 8 bytes. + */ +static __always_inline u64 read_instrumented_memory(const volatile void *ptr, size_t size) +{ + switch (size) { + case 1: return READ_ONCE(*(const u8 *)ptr); + case 2: return READ_ONCE(*(const u16 *)ptr); + case 4: return READ_ONCE(*(const u32 *)ptr); + case 8: return READ_ONCE(*(const u64 *)ptr); + default: return 0; /* Ignore; we do not diff the values. */ + } +} + void kcsan_save_irqtrace(struct task_struct *task) { #ifdef CONFIG_TRACE_IRQFLAGS @@ -482,23 +497,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned * Read the current value, to later check and infer a race if the data * was modified via a non-instrumented access, e.g. from a device. */ - old = 0; - switch (size) { - case 1: - old = READ_ONCE(*(const u8 *)ptr); - break; - case 2: - old = READ_ONCE(*(const u16 *)ptr); - break; - case 4: - old = READ_ONCE(*(const u32 *)ptr); - break; - case 8: - old = READ_ONCE(*(const u64 *)ptr); - break; - default: - break; /* ignore; we do not diff the values */ - } + old = read_instrumented_memory(ptr, size); /* * Delay this thread, to increase probability of observing a racy @@ -511,23 +510,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned * racy access. */ access_mask = ctx->access_mask; - new = 0; - switch (size) { - case 1: - new = READ_ONCE(*(const u8 *)ptr); - break; - case 2: - new = READ_ONCE(*(const u16 *)ptr); - break; - case 4: - new = READ_ONCE(*(const u32 *)ptr); - break; - case 8: - new = READ_ONCE(*(const u64 *)ptr); - break; - default: - break; /* ignore; we do not diff the values */ - } + new = read_instrumented_memory(ptr, size); diff = old ^ new; if (access_mask) -- cgit From 71f8de7092cb2cf95e3f7055df139118d1445597 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 30 Nov 2021 12:44:10 +0100 Subject: kcsan: Remove redundant zero-initialization of globals They are implicitly zero-initialized, remove explicit initialization. It keeps the upcoming additions to kcsan_ctx consistent with the rest. No functional change intended. Signed-off-by: Marco Elver Acked-by: Mark Rutland Signed-off-by: Paul E. 
McKenney --- kernel/kcsan/core.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'kernel/kcsan') diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 6bfd3040f46b..e34a1710b7bc 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -44,11 +44,6 @@ bool kcsan_enabled; /* Per-CPU kcsan_ctx for interrupts */ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = { - .disable_count = 0, - .atomic_next = 0, - .atomic_nest_count = 0, - .in_flat_atomic = false, - .access_mask = 0, .scoped_accesses = {LIST_POISON1, NULL}, }; -- cgit From 9756f64c8f2d19c0029a5827bda8ac275302ec22 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 30 Nov 2021 12:44:11 +0100 Subject: kcsan: Avoid checking scoped accesses from nested contexts Avoid checking scoped accesses from nested contexts (such as nested interrupts or in scheduler code) which share the same kcsan_ctx. This is to avoid detecting false positive races of accesses in the same thread with currently scoped accesses: consider setting up a watchpoint for a non-scoped (normal) access that also "conflicts" with a current scoped access. In a nested interrupt (or in the scheduler), which shares the same kcsan_ctx, we cannot check scoped accesses set up in the parent context -- simply ignore them in this case. With the introduction of kcsan_ctx::disable_scoped, we can also clean up kcsan_check_scoped_accesses()'s recursion guard, and do not need to modify the list's prev pointer. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- kernel/kcsan/core.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) (limited to 'kernel/kcsan') diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index e34a1710b7bc..bd359f8ee63a 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -204,15 +204,17 @@ check_access(const volatile void *ptr, size_t size, int type, unsigned long ip); static noinline void kcsan_check_scoped_accesses(void) { struct kcsan_ctx *ctx = get_ctx(); - struct list_head *prev_save = ctx->scoped_accesses.prev; struct kcsan_scoped_access *scoped_access; - ctx->scoped_accesses.prev = NULL; /* Avoid recursion. */ + if (ctx->disable_scoped) + return; + + ctx->disable_scoped++; list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) { check_access(scoped_access->ptr, scoped_access->size, scoped_access->type, scoped_access->ip); } - ctx->scoped_accesses.prev = prev_save; + ctx->disable_scoped--; } /* Rules for generic atomic accesses. Called from fast-path. */ @@ -465,6 +467,15 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned goto out; } + /* + * Avoid races of scoped accesses from nested interrupts (or scheduler). + * Assume setting up a watchpoint for a non-scoped (normal) access that + * also conflicts with a current scoped access. In a nested interrupt, + * which shares the context, it would check a conflicting scoped access. + * To avoid, disable scoped access checking. 
+ */ + ctx->disable_scoped++; + /* * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's * runtime is entered for every memory access, and potentially useful @@ -578,6 +589,7 @@ out_unlock: if (!kcsan_interrupt_watcher) local_irq_restore(irq_flags); kcsan_restore_irqtrace(current); + ctx->disable_scoped--; out: user_access_restore(ua_flags); } -- cgit From 69562e4983d93e2791c0bf128b07462afbd7f4dc Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 5 Aug 2021 14:57:45 +0200 Subject: kcsan: Add core support for a subset of weak memory modeling Add support for modeling a subset of weak memory, which will enable detection of a subset of data races due to missing memory barriers. KCSAN's approach to detecting missing memory barriers is based on modeling access reordering, and enabled if `CONFIG_KCSAN_WEAK_MEMORY=y`, which depends on `CONFIG_KCSAN_STRICT=y`. The feature can be enabled or disabled at boot and runtime via the `kcsan.weak_memory` boot parameter. Each memory access for which a watchpoint is set up, is also selected for simulated reordering within the scope of its function (at most 1 in-flight access). We are limited to modeling the effects of "buffering" (delaying the access), since the runtime cannot "prefetch" accesses (therefore no acquire modeling). Once an access has been selected for reordering, it is checked along every other access until the end of the function scope. If an appropriate memory barrier is encountered, the access will no longer be considered for reordering. When the result of a memory operation should be ordered by a barrier, KCSAN can then detect data races where the conflict only occurs as a result of a missing barrier due to reordering accesses. Suggested-by: Dmitry Vyukov Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- kernel/kcsan/core.c | 202 ++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 187 insertions(+), 15 deletions(-) (limited to 'kernel/kcsan') diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index bd359f8ee63a..481f8a524089 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -40,6 +40,13 @@ module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644); module_param_named(skip_watch, kcsan_skip_watch, long, 0644); module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444); +#ifdef CONFIG_KCSAN_WEAK_MEMORY +static bool kcsan_weak_memory = true; +module_param_named(weak_memory, kcsan_weak_memory, bool, 0644); +#else +#define kcsan_weak_memory false +#endif + bool kcsan_enabled; /* Per-CPU kcsan_ctx for interrupts */ @@ -351,6 +358,67 @@ void kcsan_restore_irqtrace(struct task_struct *task) #endif } +static __always_inline int get_kcsan_stack_depth(void) +{ +#ifdef CONFIG_KCSAN_WEAK_MEMORY + return current->kcsan_stack_depth; +#else + BUILD_BUG(); + return 0; +#endif +} + +static __always_inline void add_kcsan_stack_depth(int val) +{ +#ifdef CONFIG_KCSAN_WEAK_MEMORY + current->kcsan_stack_depth += val; +#else + BUILD_BUG(); +#endif +} + +static __always_inline struct kcsan_scoped_access *get_reorder_access(struct kcsan_ctx *ctx) +{ +#ifdef CONFIG_KCSAN_WEAK_MEMORY + return ctx->disable_scoped ? 
NULL : &ctx->reorder_access; +#else + return NULL; +#endif +} + +static __always_inline bool +find_reorder_access(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, + int type, unsigned long ip) +{ + struct kcsan_scoped_access *reorder_access = get_reorder_access(ctx); + + if (!reorder_access) + return false; + + /* + * Note: If accesses are repeated while reorder_access is identical, + * never matches the new access, because !(type & KCSAN_ACCESS_SCOPED). + */ + return reorder_access->ptr == ptr && reorder_access->size == size && + reorder_access->type == type && reorder_access->ip == ip; +} + +static inline void +set_reorder_access(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, + int type, unsigned long ip) +{ + struct kcsan_scoped_access *reorder_access = get_reorder_access(ctx); + + if (!reorder_access || !kcsan_weak_memory) + return; + + reorder_access->ptr = ptr; + reorder_access->size = size; + reorder_access->type = type | KCSAN_ACCESS_SCOPED; + reorder_access->ip = ip; + reorder_access->stack_depth = get_kcsan_stack_depth(); +} + /* * Pull everything together: check_access() below contains the performance * critical operations; the fast-path (including check_access) functions should @@ -389,8 +457,10 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, * The access_mask check relies on value-change comparison. To avoid * reporting a race where e.g. the writer set up the watchpoint, but the * reader has access_mask!=0, we have to ignore the found watchpoint. + * + * reorder_access is never created from an access with access_mask set. */ - if (ctx->access_mask) + if (ctx->access_mask && !find_reorder_access(ctx, ptr, size, type, ip)) return; /* @@ -440,11 +510,13 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0; atomic_long_t *watchpoint; u64 old, new, diff; - unsigned long access_mask; enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE; + bool interrupt_watcher = kcsan_interrupt_watcher; unsigned long ua_flags = user_access_save(); struct kcsan_ctx *ctx = get_ctx(); + unsigned long access_mask = ctx->access_mask; unsigned long irq_flags = 0; + bool is_reorder_access; /* * Always reset kcsan_skip counter in slow-path to avoid underflow; see @@ -467,6 +539,17 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned goto out; } + /* + * The local CPU cannot observe reordering of its own accesses, and + * therefore we need to take care of 2 cases to avoid false positives: + * + * 1. Races of the reordered access with interrupts. To avoid, if + * the current access is reorder_access, disable interrupts. + * 2. Avoid races of scoped accesses from nested interrupts (below). + */ + is_reorder_access = find_reorder_access(ctx, ptr, size, type, ip); + if (is_reorder_access) + interrupt_watcher = false; /* * Avoid races of scoped accesses from nested interrupts (or scheduler). * Assume setting up a watchpoint for a non-scoped (normal) access that @@ -482,7 +565,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned * information is lost if dirtied by KCSAN. 
*/ kcsan_save_irqtrace(current); - if (!kcsan_interrupt_watcher) + if (!interrupt_watcher) local_irq_save(irq_flags); watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write); @@ -503,7 +586,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned * Read the current value, to later check and infer a race if the data * was modified via a non-instrumented access, e.g. from a device. */ - old = read_instrumented_memory(ptr, size); + old = is_reorder_access ? 0 : read_instrumented_memory(ptr, size); /* * Delay this thread, to increase probability of observing a racy @@ -515,8 +598,17 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned * Re-read value, and check if it is as expected; if not, we infer a * racy access. */ - access_mask = ctx->access_mask; - new = read_instrumented_memory(ptr, size); + if (!is_reorder_access) { + new = read_instrumented_memory(ptr, size); + } else { + /* + * Reordered accesses cannot be used for value change detection, + * because the memory location may no longer be accessible and + * could result in a fault. + */ + new = 0; + access_mask = 0; + } diff = old ^ new; if (access_mask) @@ -585,11 +677,20 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned */ remove_watchpoint(watchpoint); atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]); + out_unlock: - if (!kcsan_interrupt_watcher) + if (!interrupt_watcher) local_irq_restore(irq_flags); kcsan_restore_irqtrace(current); ctx->disable_scoped--; + + /* + * Reordered accesses cannot be used for value change detection, + * therefore never consider for reordering if access_mask is set. + * ASSERT_EXCLUSIVE are not real accesses, ignore them as well. + */ + if (!access_mask && !is_assert) + set_reorder_access(ctx, ptr, size, type, ip); out: user_access_restore(ua_flags); } @@ -597,7 +698,6 @@ out: static __always_inline void check_access(const volatile void *ptr, size_t size, int type, unsigned long ip) { - const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0; atomic_long_t *watchpoint; long encoded_watchpoint; @@ -608,12 +708,14 @@ check_access(const volatile void *ptr, size_t size, int type, unsigned long ip) if (unlikely(size == 0)) return; +again: /* * Avoid user_access_save in fast-path: find_watchpoint is safe without * user_access_save, as the address that ptr points to is only used to * check if a watchpoint exists; ptr is never dereferenced. */ - watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write, + watchpoint = find_watchpoint((unsigned long)ptr, size, + !(type & KCSAN_ACCESS_WRITE), &encoded_watchpoint); /* * It is safe to check kcsan_is_enabled() after find_watchpoint in the @@ -627,9 +729,42 @@ check_access(const volatile void *ptr, size_t size, int type, unsigned long ip) else { struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */ - if (unlikely(should_watch(ctx, ptr, size, type))) + if (unlikely(should_watch(ctx, ptr, size, type))) { kcsan_setup_watchpoint(ptr, size, type, ip); - else if (unlikely(ctx->scoped_accesses.prev)) + return; + } + + if (!(type & KCSAN_ACCESS_SCOPED)) { + struct kcsan_scoped_access *reorder_access = get_reorder_access(ctx); + + if (reorder_access) { + /* + * reorder_access check: simulates reordering of + * the access after subsequent operations. + */ + ptr = reorder_access->ptr; + type = reorder_access->type; + ip = reorder_access->ip; + /* + * Upon a nested interrupt, this context's + * reorder_access can be modified (shared ctx). 
+ * We know that upon return, reorder_access is + * always invalidated by setting size to 0 via + * __tsan_func_exit(). Therefore we must read + * and check size after the other fields. + */ + barrier(); + size = READ_ONCE(reorder_access->size); + if (size) + goto again; + } + } + + /* + * Always checked last, right before returning from runtime; + * if reorder_access is valid, checked after it was checked. + */ + if (unlikely(ctx->scoped_accesses.prev)) kcsan_check_scoped_accesses(); } } @@ -916,19 +1051,56 @@ DEFINE_TSAN_VOLATILE_READ_WRITE(8); DEFINE_TSAN_VOLATILE_READ_WRITE(16); /* - * The below are not required by KCSAN, but can still be emitted by the - * compiler. + * Function entry and exit are used to determine the validty of reorder_access. + * Reordering of the access ends at the end of the function scope where the + * access happened. This is done for two reasons: + * + * 1. Artificially limits the scope where missing barriers are detected. + * This minimizes false positives due to uninstrumented functions that + * contain the required barriers but were missed. + * + * 2. Simplifies generating the stack trace of the access. */ void __tsan_func_entry(void *call_pc); -void __tsan_func_entry(void *call_pc) +noinline void __tsan_func_entry(void *call_pc) { + if (!IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY)) + return; + + add_kcsan_stack_depth(1); } EXPORT_SYMBOL(__tsan_func_entry); + void __tsan_func_exit(void); -void __tsan_func_exit(void) +noinline void __tsan_func_exit(void) { + struct kcsan_scoped_access *reorder_access; + + if (!IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY)) + return; + + reorder_access = get_reorder_access(get_ctx()); + if (!reorder_access) + goto out; + + if (get_kcsan_stack_depth() <= reorder_access->stack_depth) { + /* + * Access check to catch cases where write without a barrier + * (supposed release) was last access in function: because + * instrumentation is inserted before the real access, a data + * race due to the write giving up a c-s would only be caught if + * we do the conflicting access after. + */ + check_access(reorder_access->ptr, reorder_access->size, + reorder_access->type, reorder_access->ip); + reorder_access->size = 0; + reorder_access->stack_depth = INT_MIN; + } +out: + add_kcsan_stack_depth(-1); } EXPORT_SYMBOL(__tsan_func_exit); + void __tsan_init(void); void __tsan_init(void) { -- cgit From 0b8b0830ac1419d7250fde31ea78793a03f3db44 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 30 Nov 2021 12:44:13 +0100 Subject: kcsan: Add core memory barrier instrumentation functions Add the core memory barrier instrumentation functions. These invalidate the current in-flight reordered access based on the rules for the respective barrier types and in-flight access type. To obtain barrier instrumentation that can be disabled via __no_kcsan with appropriate compiler-support (and not just with objtool help), barrier instrumentation repurposes __atomic_signal_fence(), instead of inserting explicit calls. Crucially, __atomic_signal_fence() normally does not map to any real instructions, but is still intercepted by fsanitize=thread. As a result, like any other instrumentation done by the compiler, barrier instrumentation can be disabled with __no_kcsan. Unfortunately Clang and GCC currently differ in their __no_kcsan aka __no_sanitize_thread behaviour with respect to builtin atomics (and __tsan_func_{entry,exit}) instrumentation. This is already reflected in Kconfig.kcsan's dependencies for KCSAN_WEAK_MEMORY. 
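A minimal sketch of that mapping (the __KCSAN_BARRIER_TO_SIGNAL_FENCE_* names match the constants consumed by __tsan_atomic_signal_fence() in the diff below; their concrete values and the kcsan_mb()/kcsan_wmb()/kcsan_rmb()/kcsan_release() wrapper names are assumptions about the header-side wiring, not taken from this patch):

	/* Map each barrier type to a distinct, otherwise-unused memory order (values assumed). */
	#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_mb		__ATOMIC_SEQ_CST
	#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_wmb		__ATOMIC_ACQ_REL
	#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_rmb		__ATOMIC_ACQUIRE
	#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_release	__ATOMIC_RELEASE

	/* In instrumented files, kernel barriers additionally expand to one of these: */
	#define kcsan_mb()	__atomic_signal_fence(__KCSAN_BARRIER_TO_SIGNAL_FENCE_mb)
	#define kcsan_wmb()	__atomic_signal_fence(__KCSAN_BARRIER_TO_SIGNAL_FENCE_wmb)
	#define kcsan_rmb()	__atomic_signal_fence(__KCSAN_BARRIER_TO_SIGNAL_FENCE_rmb)
	#define kcsan_release()	__atomic_signal_fence(__KCSAN_BARRIER_TO_SIGNAL_FENCE_release)

With fsanitize=thread instrumentation, each such __atomic_signal_fence() becomes a call to __tsan_atomic_signal_fence(memorder), which dispatches to __kcsan_mb()/__kcsan_wmb()/__kcsan_rmb()/__kcsan_release() as shown in the diff below; in a __no_kcsan function the instrumentation is dropped and the fence emits no code.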
A later change will introduce support for newer versions of Clang that can implement __no_kcsan to also remove the additional instrumentation introduced by KCSAN_WEAK_MEMORY. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- kernel/kcsan/core.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 67 insertions(+), 1 deletion(-) (limited to 'kernel/kcsan') diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 481f8a524089..916060913966 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -939,6 +939,22 @@ void __kcsan_check_access(const volatile void *ptr, size_t size, int type) } EXPORT_SYMBOL(__kcsan_check_access); +#define DEFINE_MEMORY_BARRIER(name, order_before_cond) \ + void __kcsan_##name(void) \ + { \ + struct kcsan_scoped_access *sa = get_reorder_access(get_ctx()); \ + if (!sa) \ + return; \ + if (order_before_cond) \ + sa->size = 0; \ + } \ + EXPORT_SYMBOL(__kcsan_##name) + +DEFINE_MEMORY_BARRIER(mb, true); +DEFINE_MEMORY_BARRIER(wmb, sa->type & (KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND)); +DEFINE_MEMORY_BARRIER(rmb, !(sa->type & KCSAN_ACCESS_WRITE) || (sa->type & KCSAN_ACCESS_COMPOUND)); +DEFINE_MEMORY_BARRIER(release, true); + /* * KCSAN uses the same instrumentation that is emitted by supported compilers * for ThreadSanitizer (TSAN). @@ -1123,10 +1139,19 @@ EXPORT_SYMBOL(__tsan_init); * functions, whose job is to also execute the operation itself. */ +static __always_inline void kcsan_atomic_builtin_memorder(int memorder) +{ + if (memorder == __ATOMIC_RELEASE || + memorder == __ATOMIC_SEQ_CST || + memorder == __ATOMIC_ACQ_REL) + __kcsan_release(); +} + #define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits) \ u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder); \ u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \ { \ + kcsan_atomic_builtin_memorder(memorder); \ if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC, _RET_IP_); \ } \ @@ -1136,6 +1161,7 @@ EXPORT_SYMBOL(__tsan_init); void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder); \ void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) \ { \ + kcsan_atomic_builtin_memorder(memorder); \ if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ check_access(ptr, bits / BITS_PER_BYTE, \ KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC, _RET_IP_); \ @@ -1148,6 +1174,7 @@ EXPORT_SYMBOL(__tsan_init); u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \ u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \ { \ + kcsan_atomic_builtin_memorder(memorder); \ if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ check_access(ptr, bits / BITS_PER_BYTE, \ KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \ @@ -1180,6 +1207,7 @@ EXPORT_SYMBOL(__tsan_init); int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \ u##bits val, int mo, int fail_mo) \ { \ + kcsan_atomic_builtin_memorder(mo); \ if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ check_access(ptr, bits / BITS_PER_BYTE, \ KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \ @@ -1195,6 +1223,7 @@ EXPORT_SYMBOL(__tsan_init); u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \ int mo, int fail_mo) \ { \ + kcsan_atomic_builtin_memorder(mo); \ if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ check_access(ptr, bits / BITS_PER_BYTE, \ KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \ @@ -1226,10 +1255,47 @@ DEFINE_TSAN_ATOMIC_OPS(64); void 
__tsan_atomic_thread_fence(int memorder); void __tsan_atomic_thread_fence(int memorder) { + kcsan_atomic_builtin_memorder(memorder); __atomic_thread_fence(memorder); } EXPORT_SYMBOL(__tsan_atomic_thread_fence); +/* + * In instrumented files, we emit instrumentation for barriers by mapping the + * kernel barriers to an __atomic_signal_fence(), which is interpreted specially + * and otherwise has no relation to a real __atomic_signal_fence(). No known + * kernel code uses __atomic_signal_fence(). + * + * Since fsanitize=thread instrumentation handles __atomic_signal_fence(), which + * are turned into calls to __tsan_atomic_signal_fence(), such instrumentation + * can be disabled via the __no_kcsan function attribute (vs. an explicit call + * which could not). When __no_kcsan is requested, __atomic_signal_fence() + * generates no code. + * + * Note: The result of using __atomic_signal_fence() with KCSAN enabled is + * potentially limiting the compiler's ability to reorder operations; however, + * if barriers were instrumented with explicit calls (without LTO), the compiler + * couldn't optimize much anyway. The result of a hypothetical architecture + * using __atomic_signal_fence() in normal code would be KCSAN false negatives. + */ void __tsan_atomic_signal_fence(int memorder); -void __tsan_atomic_signal_fence(int memorder) { } +noinline void __tsan_atomic_signal_fence(int memorder) +{ + switch (memorder) { + case __KCSAN_BARRIER_TO_SIGNAL_FENCE_mb: + __kcsan_mb(); + break; + case __KCSAN_BARRIER_TO_SIGNAL_FENCE_wmb: + __kcsan_wmb(); + break; + case __KCSAN_BARRIER_TO_SIGNAL_FENCE_rmb: + __kcsan_rmb(); + break; + case __KCSAN_BARRIER_TO_SIGNAL_FENCE_release: + __kcsan_release(); + break; + default: + break; + } +} EXPORT_SYMBOL(__tsan_atomic_signal_fence); -- cgit From 3cc21a531252e6693ee989231d5abee6812cfd71 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 30 Nov 2021 12:44:15 +0100 Subject: kcsan: Call scoped accesses reordered in reports The scoping of an access simply denotes the scope in which it may be reordered. However, in reports, it'll be less confusing to say the access is "reordered". This is more accurate when the race occurred. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- kernel/kcsan/kcsan_test.c | 4 ++-- kernel/kcsan/report.c | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'kernel/kcsan') diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c index 660729238588..6e3c2b8bc608 100644 --- a/kernel/kcsan/kcsan_test.c +++ b/kernel/kcsan/kcsan_test.c @@ -213,9 +213,9 @@ static bool report_matches(const struct expect_report *r) const bool is_atomic = (ty & KCSAN_ACCESS_ATOMIC); const bool is_scoped = (ty & KCSAN_ACCESS_SCOPED); const char *const access_type_aux = - (is_atomic && is_scoped) ? " (marked, scoped)" + (is_atomic && is_scoped) ? " (marked, reordered)" : (is_atomic ? " (marked)" - : (is_scoped ? " (scoped)" : "")); + : (is_scoped ? 
" (reordered)" : "")); if (i == 1) { /* Access 2 */ diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index fc15077991c4..1b0e050bdf6a 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -215,9 +215,9 @@ static const char *get_access_type(int type) if (type & KCSAN_ACCESS_ASSERT) { if (type & KCSAN_ACCESS_SCOPED) { if (type & KCSAN_ACCESS_WRITE) - return "assert no accesses (scoped)"; + return "assert no accesses (reordered)"; else - return "assert no writes (scoped)"; + return "assert no writes (reordered)"; } else { if (type & KCSAN_ACCESS_WRITE) return "assert no accesses"; @@ -240,17 +240,17 @@ static const char *get_access_type(int type) case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC: return "read-write (marked)"; case KCSAN_ACCESS_SCOPED: - return "read (scoped)"; + return "read (reordered)"; case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ATOMIC: - return "read (marked, scoped)"; + return "read (marked, reordered)"; case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE: - return "write (scoped)"; + return "write (reordered)"; case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC: - return "write (marked, scoped)"; + return "write (marked, reordered)"; case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE: - return "read-write (scoped)"; + return "read-write (reordered)"; case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC: - return "read-write (marked, scoped)"; + return "read-write (marked, reordered)"; default: BUG(); } -- cgit From be3f6967ec5947dd7b2f23bf9d42bb2729889618 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 30 Nov 2021 12:44:16 +0100 Subject: kcsan: Show location access was reordered to Also show the location the access was reordered to. An example report: | ================================================================== | BUG: KCSAN: data-race in test_kernel_wrong_memorder / test_kernel_wrong_memorder | | read-write to 0xffffffffc01e61a8 of 8 bytes by task 2311 on cpu 5: | test_kernel_wrong_memorder+0x57/0x90 | access_thread+0x99/0xe0 | kthread+0x2ba/0x2f0 | ret_from_fork+0x22/0x30 | | read-write (reordered) to 0xffffffffc01e61a8 of 8 bytes by task 2310 on cpu 7: | test_kernel_wrong_memorder+0x57/0x90 | access_thread+0x99/0xe0 | kthread+0x2ba/0x2f0 | ret_from_fork+0x22/0x30 | | | +-> reordered to: test_kernel_wrong_memorder+0x80/0x90 | | Reported by Kernel Concurrency Sanitizer on: | CPU: 7 PID: 2310 Comm: access_thread Not tainted 5.14.0-rc1+ #18 | Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014 | ================================================================== Reviewed-by: Boqun Feng Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- kernel/kcsan/report.c | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) (limited to 'kernel/kcsan') diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index 1b0e050bdf6a..67794404042a 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -308,10 +308,12 @@ static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries /* * Skips to the first entry that matches the function of @ip, and then replaces - * that entry with @ip, returning the entries to skip. + * that entry with @ip, returning the entries to skip with @replaced containing + * the replaced entry. 
*/ static int -replace_stack_entry(unsigned long stack_entries[], int num_entries, unsigned long ip) +replace_stack_entry(unsigned long stack_entries[], int num_entries, unsigned long ip, + unsigned long *replaced) { unsigned long symbolsize, offset; unsigned long target_func; @@ -330,6 +332,7 @@ replace_stack_entry(unsigned long stack_entries[], int num_entries, unsigned lon func -= offset; if (func == target_func) { + *replaced = stack_entries[skip]; stack_entries[skip] = ip; return skip; } @@ -342,9 +345,10 @@ fallback: } static int -sanitize_stack_entries(unsigned long stack_entries[], int num_entries, unsigned long ip) +sanitize_stack_entries(unsigned long stack_entries[], int num_entries, unsigned long ip, + unsigned long *replaced) { - return ip ? replace_stack_entry(stack_entries, num_entries, ip) : + return ip ? replace_stack_entry(stack_entries, num_entries, ip, replaced) : get_stack_skipnr(stack_entries, num_entries); } @@ -360,6 +364,14 @@ static int sym_strcmp(void *addr1, void *addr2) return strncmp(buf1, buf2, sizeof(buf1)); } +static void +print_stack_trace(unsigned long stack_entries[], int num_entries, unsigned long reordered_to) +{ + stack_trace_print(stack_entries, num_entries, 0); + if (reordered_to) + pr_err(" |\n +-> reordered to: %pS\n", (void *)reordered_to); +} + static void print_verbose_info(struct task_struct *task) { if (!task) @@ -378,10 +390,12 @@ static void print_report(enum kcsan_value_change value_change, struct other_info *other_info, u64 old, u64 new, u64 mask) { + unsigned long reordered_to = 0; unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 }; int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1); - int skipnr = sanitize_stack_entries(stack_entries, num_stack_entries, ai->ip); + int skipnr = sanitize_stack_entries(stack_entries, num_stack_entries, ai->ip, &reordered_to); unsigned long this_frame = stack_entries[skipnr]; + unsigned long other_reordered_to = 0; unsigned long other_frame = 0; int other_skipnr = 0; /* silence uninit warnings */ @@ -394,7 +408,7 @@ static void print_report(enum kcsan_value_change value_change, if (other_info) { other_skipnr = sanitize_stack_entries(other_info->stack_entries, other_info->num_stack_entries, - other_info->ai.ip); + other_info->ai.ip, &other_reordered_to); other_frame = other_info->stack_entries[other_skipnr]; /* @value_change is only known for the other thread */ @@ -434,10 +448,9 @@ static void print_report(enum kcsan_value_change value_change, other_info->ai.cpu_id); /* Print the other thread's stack trace. */ - stack_trace_print(other_info->stack_entries + other_skipnr, + print_stack_trace(other_info->stack_entries + other_skipnr, other_info->num_stack_entries - other_skipnr, - 0); - + other_reordered_to); if (IS_ENABLED(CONFIG_KCSAN_VERBOSE)) print_verbose_info(other_info->task); @@ -451,9 +464,7 @@ static void print_report(enum kcsan_value_change value_change, get_thread_desc(ai->task_pid), ai->cpu_id); } /* Print stack trace of this thread. */ - stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr, - 0); - + print_stack_trace(stack_entries + skipnr, num_stack_entries - skipnr, reordered_to); if (IS_ENABLED(CONFIG_KCSAN_VERBOSE)) print_verbose_info(current); -- cgit From 7310bd1f3eb9445d96b030457d59d9f84375bdd5 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 30 Nov 2021 12:44:18 +0100 Subject: kcsan: test: Match reordered or normal accesses Due to reordering accesses with weak memory modeling, any access can now appear as "(reordered)". 
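Concretely, reusing the addresses from the example report quoted above, the same conflicting access may be printed either as

	read-write to 0xffffffffc01e61a8 of 8 bytes by task 2310 on cpu 7:

or, if it happened to be the in-flight access selected for reordering, as

	read-write (reordered) to 0xffffffffc01e61a8 of 8 bytes by task 2310 on cpu 7:

so a test's expectation has to accept either form.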
Match any permutation of accesses if CONFIG_KCSAN_WEAK_MEMORY=y, so that we effectively match an access if it is denoted "(reordered)" or not. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- kernel/kcsan/kcsan_test.c | 92 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 63 insertions(+), 29 deletions(-) (limited to 'kernel/kcsan') diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c index 6e3c2b8bc608..ec054879201b 100644 --- a/kernel/kcsan/kcsan_test.c +++ b/kernel/kcsan/kcsan_test.c @@ -151,7 +151,7 @@ struct expect_report { /* Check observed report matches information in @r. */ __no_kcsan -static bool report_matches(const struct expect_report *r) +static bool __report_matches(const struct expect_report *r) { const bool is_assert = (r->access[0].type | r->access[1].type) & KCSAN_ACCESS_ASSERT; bool ret = false; @@ -253,6 +253,40 @@ out: return ret; } +static __always_inline const struct expect_report * +__report_set_scoped(struct expect_report *r, int accesses) +{ + BUILD_BUG_ON(accesses > 3); + + if (accesses & 1) + r->access[0].type |= KCSAN_ACCESS_SCOPED; + else + r->access[0].type &= ~KCSAN_ACCESS_SCOPED; + + if (accesses & 2) + r->access[1].type |= KCSAN_ACCESS_SCOPED; + else + r->access[1].type &= ~KCSAN_ACCESS_SCOPED; + + return r; +} + +__no_kcsan +static bool report_matches_any_reordered(struct expect_report *r) +{ + return __report_matches(__report_set_scoped(r, 0)) || + __report_matches(__report_set_scoped(r, 1)) || + __report_matches(__report_set_scoped(r, 2)) || + __report_matches(__report_set_scoped(r, 3)); +} + +#ifdef CONFIG_KCSAN_WEAK_MEMORY +/* Due to reordering accesses, any access may appear as "(reordered)". */ +#define report_matches report_matches_any_reordered +#else +#define report_matches __report_matches +#endif + /* ===== Test kernels ===== */ static long test_sink; @@ -438,13 +472,13 @@ static noinline void test_kernel_xor_1bit(void) __no_kcsan static void test_basic(struct kunit *test) { - const struct expect_report expect = { + struct expect_report expect = { .access = { { test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, { test_kernel_read, &test_var, sizeof(test_var), 0 }, }, }; - static const struct expect_report never = { + struct expect_report never = { .access = { { test_kernel_read, &test_var, sizeof(test_var), 0 }, { test_kernel_read, &test_var, sizeof(test_var), 0 }, @@ -469,14 +503,14 @@ static void test_basic(struct kunit *test) __no_kcsan static void test_concurrent_races(struct kunit *test) { - const struct expect_report expect = { + struct expect_report expect = { .access = { /* NULL will match any address. 
*/ { test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) }, { test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(0) }, }, }; - static const struct expect_report never = { + struct expect_report never = { .access = { { test_kernel_rmw_array, NULL, 0, 0 }, { test_kernel_rmw_array, NULL, 0, 0 }, @@ -498,13 +532,13 @@ static void test_concurrent_races(struct kunit *test) __no_kcsan static void test_novalue_change(struct kunit *test) { - const struct expect_report expect_rw = { + struct expect_report expect_rw = { .access = { { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, { test_kernel_read, &test_var, sizeof(test_var), 0 }, }, }; - const struct expect_report expect_ww = { + struct expect_report expect_ww = { .access = { { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, @@ -530,13 +564,13 @@ static void test_novalue_change(struct kunit *test) __no_kcsan static void test_novalue_change_exception(struct kunit *test) { - const struct expect_report expect_rw = { + struct expect_report expect_rw = { .access = { { test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, { test_kernel_read, &test_var, sizeof(test_var), 0 }, }, }; - const struct expect_report expect_ww = { + struct expect_report expect_ww = { .access = { { test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, { test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, @@ -556,7 +590,7 @@ static void test_novalue_change_exception(struct kunit *test) __no_kcsan static void test_unknown_origin(struct kunit *test) { - const struct expect_report expect = { + struct expect_report expect = { .access = { { test_kernel_read, &test_var, sizeof(test_var), 0 }, { NULL }, @@ -578,7 +612,7 @@ static void test_unknown_origin(struct kunit *test) __no_kcsan static void test_write_write_assume_atomic(struct kunit *test) { - const struct expect_report expect = { + struct expect_report expect = { .access = { { test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, { test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, @@ -604,7 +638,7 @@ static void test_write_write_assume_atomic(struct kunit *test) __no_kcsan static void test_write_write_struct(struct kunit *test) { - const struct expect_report expect = { + struct expect_report expect = { .access = { { test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE }, { test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE }, @@ -626,7 +660,7 @@ static void test_write_write_struct(struct kunit *test) __no_kcsan static void test_write_write_struct_part(struct kunit *test) { - const struct expect_report expect = { + struct expect_report expect = { .access = { { test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE }, { test_kernel_write_struct_part, &test_struct.val[3], sizeof(test_struct.val[3]), KCSAN_ACCESS_WRITE }, @@ -658,7 +692,7 @@ static void test_read_atomic_write_atomic(struct kunit *test) __no_kcsan static void test_read_plain_atomic_write(struct kunit *test) { - const struct expect_report expect = { + struct expect_report expect = { .access = { { test_kernel_read, &test_var, sizeof(test_var), 0 }, { test_kernel_write_atomic, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC }, @@ -679,7 +713,7 @@ static void 
test_read_plain_atomic_write(struct kunit *test) __no_kcsan static void test_read_plain_atomic_rmw(struct kunit *test) { - const struct expect_report expect = { + struct expect_report expect = { .access = { { test_kernel_read, &test_var, sizeof(test_var), 0 }, { test_kernel_atomic_rmw, &test_var, sizeof(test_var), @@ -701,13 +735,13 @@ static void test_read_plain_atomic_rmw(struct kunit *test) __no_kcsan static void test_zero_size_access(struct kunit *test) { - const struct expect_report expect = { + struct expect_report expect = { .access = { { test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE }, { test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE }, }, }; - const struct expect_report never = { + struct expect_report never = { .access = { { test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE }, { test_kernel_read_struct_zero_size, &test_struct.val[3], 0, 0 }, @@ -741,7 +775,7 @@ static void test_data_race(struct kunit *test) __no_kcsan static void test_assert_exclusive_writer(struct kunit *test) { - const struct expect_report expect = { + struct expect_report expect = { .access = { { test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT }, { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, @@ -759,7 +793,7 @@ static void test_assert_exclusive_writer(struct kunit *test) __no_kcsan static void test_assert_exclusive_access(struct kunit *test) { - const struct expect_report expect = { + struct expect_report expect = { .access = { { test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE }, { test_kernel_read, &test_var, sizeof(test_var), 0 }, @@ -777,19 +811,19 @@ static void test_assert_exclusive_access(struct kunit *test) __no_kcsan static void test_assert_exclusive_access_writer(struct kunit *test) { - const struct expect_report expect_access_writer = { + struct expect_report expect_access_writer = { .access = { { test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE }, { test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT }, }, }; - const struct expect_report expect_access_access = { + struct expect_report expect_access_access = { .access = { { test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE }, { test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE }, }, }; - const struct expect_report never = { + struct expect_report never = { .access = { { test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT }, { test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT }, @@ -813,7 +847,7 @@ static void test_assert_exclusive_access_writer(struct kunit *test) __no_kcsan static void test_assert_exclusive_bits_change(struct kunit *test) { - const struct expect_report expect = { + struct expect_report expect = { .access = { { test_kernel_assert_bits_change, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT }, { test_kernel_change_bits, &test_var, sizeof(test_var), @@ -844,13 +878,13 @@ static void test_assert_exclusive_bits_nochange(struct kunit *test) __no_kcsan static void test_assert_exclusive_writer_scoped(struct kunit *test) { - const struct expect_report expect_start = { + struct expect_report expect_start = { .access = { { test_kernel_assert_writer_scoped, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED 
}, { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, }, }; - const struct expect_report expect_inscope = { + struct expect_report expect_inscope = { .access = { { test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED }, { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, @@ -871,16 +905,16 @@ static void test_assert_exclusive_writer_scoped(struct kunit *test) __no_kcsan static void test_assert_exclusive_access_scoped(struct kunit *test) { - const struct expect_report expect_start1 = { + struct expect_report expect_start1 = { .access = { { test_kernel_assert_access_scoped, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_SCOPED }, { test_kernel_read, &test_var, sizeof(test_var), 0 }, }, }; - const struct expect_report expect_start2 = { + struct expect_report expect_start2 = { .access = { expect_start1.access[0], expect_start1.access[0] }, }; - const struct expect_report expect_inscope = { + struct expect_report expect_inscope = { .access = { { test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_SCOPED }, { test_kernel_read, &test_var, sizeof(test_var), 0 }, @@ -985,7 +1019,7 @@ static void test_atomic_builtins(struct kunit *test) __no_kcsan static void test_1bit_value_change(struct kunit *test) { - const struct expect_report expect = { + struct expect_report expect = { .access = { { test_kernel_read, &test_var, sizeof(test_var), 0 }, { test_kernel_xor_1bit, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) }, -- cgit From 8bc32b34817862c80a8b9a82ceb643294acd3da3 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 30 Nov 2021 12:44:19 +0100 Subject: kcsan: test: Add test cases for memory barrier instrumentation Adds test cases to check that memory barriers are instrumented correctly, and detection of missing memory barriers is working as intended if CONFIG_KCSAN_STRICT=y. Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- kernel/kcsan/kcsan_test.c | 319 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 319 insertions(+) (limited to 'kernel/kcsan') diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c index ec054879201b..5bf94550bcdf 100644 --- a/kernel/kcsan/kcsan_test.c +++ b/kernel/kcsan/kcsan_test.c @@ -16,9 +16,12 @@ #define pr_fmt(fmt) "kcsan_test: " fmt #include +#include +#include #include #include #include +#include #include #include #include @@ -305,6 +308,16 @@ static DEFINE_SEQLOCK(test_seqlock); __no_kcsan static noinline void sink_value(long v) { WRITE_ONCE(test_sink, v); } +/* + * Generates a delay and some accesses that enter the runtime but do not produce + * data races. 
+ */ +static noinline void test_delay(int iter) +{ + while (iter--) + sink_value(READ_ONCE(test_sink)); +} + static noinline void test_kernel_read(void) { sink_value(test_var); } static noinline void test_kernel_write(void) @@ -466,8 +479,219 @@ static noinline void test_kernel_xor_1bit(void) kcsan_nestable_atomic_end(); } +#define TEST_KERNEL_LOCKED(name, acquire, release) \ + static noinline void test_kernel_##name(void) \ + { \ + long *flag = &test_struct.val[0]; \ + long v = 0; \ + if (!(acquire)) \ + return; \ + while (v++ < 100) { \ + test_var++; \ + barrier(); \ + } \ + release; \ + test_delay(10); \ + } + +TEST_KERNEL_LOCKED(with_memorder, + cmpxchg_acquire(flag, 0, 1) == 0, + smp_store_release(flag, 0)); +TEST_KERNEL_LOCKED(wrong_memorder, + cmpxchg_relaxed(flag, 0, 1) == 0, + WRITE_ONCE(*flag, 0)); +TEST_KERNEL_LOCKED(atomic_builtin_with_memorder, + __atomic_compare_exchange_n(flag, &v, 1, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED), + __atomic_store_n(flag, 0, __ATOMIC_RELEASE)); +TEST_KERNEL_LOCKED(atomic_builtin_wrong_memorder, + __atomic_compare_exchange_n(flag, &v, 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED), + __atomic_store_n(flag, 0, __ATOMIC_RELAXED)); + /* ===== Test cases ===== */ +/* + * Tests that various barriers have the expected effect on internal state. Not + * exhaustive on atomic_t operations. Unlike the selftest, also checks for + * too-strict barrier instrumentation; these can be tolerated, because it does + * not cause false positives, but at least we should be aware of such cases. + */ +static void test_barrier_nothreads(struct kunit *test) +{ +#ifdef CONFIG_KCSAN_WEAK_MEMORY + struct kcsan_scoped_access *reorder_access = ¤t->kcsan_ctx.reorder_access; +#else + struct kcsan_scoped_access *reorder_access = NULL; +#endif + arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED; + DEFINE_SPINLOCK(spinlock); + DEFINE_MUTEX(mutex); + atomic_t dummy; + + KCSAN_TEST_REQUIRES(test, reorder_access != NULL); + KCSAN_TEST_REQUIRES(test, IS_ENABLED(CONFIG_SMP)); + +#define __KCSAN_EXPECT_BARRIER(access_type, barrier, order_before, name) \ + do { \ + reorder_access->type = (access_type) | KCSAN_ACCESS_SCOPED; \ + reorder_access->size = sizeof(test_var); \ + barrier; \ + KUNIT_EXPECT_EQ_MSG(test, reorder_access->size, \ + order_before ? 0 : sizeof(test_var), \ + "improperly instrumented type=(" #access_type "): " name); \ + } while (0) +#define KCSAN_EXPECT_READ_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(0, b, o, #b) +#define KCSAN_EXPECT_WRITE_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_WRITE, b, o, #b) +#define KCSAN_EXPECT_RW_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE, b, o, #b) + + /* Force creating a valid entry in reorder_access first. */ + test_var = 0; + while (test_var++ < 1000000 && reorder_access->size != sizeof(test_var)) + __kcsan_check_read(&test_var, sizeof(test_var)); + KUNIT_ASSERT_EQ(test, reorder_access->size, sizeof(test_var)); + + kcsan_nestable_atomic_begin(); /* No watchpoints in called functions. 
*/ + + KCSAN_EXPECT_READ_BARRIER(mb(), true); + KCSAN_EXPECT_READ_BARRIER(wmb(), false); + KCSAN_EXPECT_READ_BARRIER(rmb(), true); + KCSAN_EXPECT_READ_BARRIER(smp_mb(), true); + KCSAN_EXPECT_READ_BARRIER(smp_wmb(), false); + KCSAN_EXPECT_READ_BARRIER(smp_rmb(), true); + KCSAN_EXPECT_READ_BARRIER(dma_wmb(), false); + KCSAN_EXPECT_READ_BARRIER(dma_rmb(), true); + KCSAN_EXPECT_READ_BARRIER(smp_mb__before_atomic(), true); + KCSAN_EXPECT_READ_BARRIER(smp_mb__after_atomic(), true); + KCSAN_EXPECT_READ_BARRIER(smp_mb__after_spinlock(), true); + KCSAN_EXPECT_READ_BARRIER(smp_store_mb(test_var, 0), true); + KCSAN_EXPECT_READ_BARRIER(smp_load_acquire(&test_var), false); + KCSAN_EXPECT_READ_BARRIER(smp_store_release(&test_var, 0), true); + KCSAN_EXPECT_READ_BARRIER(xchg(&test_var, 0), true); + KCSAN_EXPECT_READ_BARRIER(xchg_release(&test_var, 0), true); + KCSAN_EXPECT_READ_BARRIER(xchg_relaxed(&test_var, 0), false); + KCSAN_EXPECT_READ_BARRIER(cmpxchg(&test_var, 0, 0), true); + KCSAN_EXPECT_READ_BARRIER(cmpxchg_release(&test_var, 0, 0), true); + KCSAN_EXPECT_READ_BARRIER(cmpxchg_relaxed(&test_var, 0, 0), false); + KCSAN_EXPECT_READ_BARRIER(atomic_read(&dummy), false); + KCSAN_EXPECT_READ_BARRIER(atomic_read_acquire(&dummy), false); + KCSAN_EXPECT_READ_BARRIER(atomic_set(&dummy, 0), false); + KCSAN_EXPECT_READ_BARRIER(atomic_set_release(&dummy, 0), true); + KCSAN_EXPECT_READ_BARRIER(atomic_add(1, &dummy), false); + KCSAN_EXPECT_READ_BARRIER(atomic_add_return(1, &dummy), true); + KCSAN_EXPECT_READ_BARRIER(atomic_add_return_acquire(1, &dummy), false); + KCSAN_EXPECT_READ_BARRIER(atomic_add_return_release(1, &dummy), true); + KCSAN_EXPECT_READ_BARRIER(atomic_add_return_relaxed(1, &dummy), false); + KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add(1, &dummy), true); + KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_acquire(1, &dummy), false); + KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_release(1, &dummy), true); + KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false); + KCSAN_EXPECT_READ_BARRIER(test_and_set_bit(0, &test_var), true); + KCSAN_EXPECT_READ_BARRIER(test_and_clear_bit(0, &test_var), true); + KCSAN_EXPECT_READ_BARRIER(test_and_change_bit(0, &test_var), true); + KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock(0, &test_var), true); + KCSAN_EXPECT_READ_BARRIER(__clear_bit_unlock(0, &test_var), true); + KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); + KCSAN_EXPECT_READ_BARRIER(arch_spin_lock(&arch_spinlock), false); + KCSAN_EXPECT_READ_BARRIER(arch_spin_unlock(&arch_spinlock), true); + KCSAN_EXPECT_READ_BARRIER(spin_lock(&spinlock), false); + KCSAN_EXPECT_READ_BARRIER(spin_unlock(&spinlock), true); + KCSAN_EXPECT_READ_BARRIER(mutex_lock(&mutex), false); + KCSAN_EXPECT_READ_BARRIER(mutex_unlock(&mutex), true); + + KCSAN_EXPECT_WRITE_BARRIER(mb(), true); + KCSAN_EXPECT_WRITE_BARRIER(wmb(), true); + KCSAN_EXPECT_WRITE_BARRIER(rmb(), false); + KCSAN_EXPECT_WRITE_BARRIER(smp_mb(), true); + KCSAN_EXPECT_WRITE_BARRIER(smp_wmb(), true); + KCSAN_EXPECT_WRITE_BARRIER(smp_rmb(), false); + KCSAN_EXPECT_WRITE_BARRIER(dma_wmb(), true); + KCSAN_EXPECT_WRITE_BARRIER(dma_rmb(), false); + KCSAN_EXPECT_WRITE_BARRIER(smp_mb__before_atomic(), true); + KCSAN_EXPECT_WRITE_BARRIER(smp_mb__after_atomic(), true); + KCSAN_EXPECT_WRITE_BARRIER(smp_mb__after_spinlock(), true); + KCSAN_EXPECT_WRITE_BARRIER(smp_store_mb(test_var, 0), true); + KCSAN_EXPECT_WRITE_BARRIER(smp_load_acquire(&test_var), false); + KCSAN_EXPECT_WRITE_BARRIER(smp_store_release(&test_var, 0), true); + 
KCSAN_EXPECT_WRITE_BARRIER(xchg(&test_var, 0), true); + KCSAN_EXPECT_WRITE_BARRIER(xchg_release(&test_var, 0), true); + KCSAN_EXPECT_WRITE_BARRIER(xchg_relaxed(&test_var, 0), false); + KCSAN_EXPECT_WRITE_BARRIER(cmpxchg(&test_var, 0, 0), true); + KCSAN_EXPECT_WRITE_BARRIER(cmpxchg_release(&test_var, 0, 0), true); + KCSAN_EXPECT_WRITE_BARRIER(cmpxchg_relaxed(&test_var, 0, 0), false); + KCSAN_EXPECT_WRITE_BARRIER(atomic_read(&dummy), false); + KCSAN_EXPECT_WRITE_BARRIER(atomic_read_acquire(&dummy), false); + KCSAN_EXPECT_WRITE_BARRIER(atomic_set(&dummy, 0), false); + KCSAN_EXPECT_WRITE_BARRIER(atomic_set_release(&dummy, 0), true); + KCSAN_EXPECT_WRITE_BARRIER(atomic_add(1, &dummy), false); + KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return(1, &dummy), true); + KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_acquire(1, &dummy), false); + KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_release(1, &dummy), true); + KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_relaxed(1, &dummy), false); + KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add(1, &dummy), true); + KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_acquire(1, &dummy), false); + KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_release(1, &dummy), true); + KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false); + KCSAN_EXPECT_WRITE_BARRIER(test_and_set_bit(0, &test_var), true); + KCSAN_EXPECT_WRITE_BARRIER(test_and_clear_bit(0, &test_var), true); + KCSAN_EXPECT_WRITE_BARRIER(test_and_change_bit(0, &test_var), true); + KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock(0, &test_var), true); + KCSAN_EXPECT_WRITE_BARRIER(__clear_bit_unlock(0, &test_var), true); + KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); + KCSAN_EXPECT_WRITE_BARRIER(arch_spin_lock(&arch_spinlock), false); + KCSAN_EXPECT_WRITE_BARRIER(arch_spin_unlock(&arch_spinlock), true); + KCSAN_EXPECT_WRITE_BARRIER(spin_lock(&spinlock), false); + KCSAN_EXPECT_WRITE_BARRIER(spin_unlock(&spinlock), true); + KCSAN_EXPECT_WRITE_BARRIER(mutex_lock(&mutex), false); + KCSAN_EXPECT_WRITE_BARRIER(mutex_unlock(&mutex), true); + + KCSAN_EXPECT_RW_BARRIER(mb(), true); + KCSAN_EXPECT_RW_BARRIER(wmb(), true); + KCSAN_EXPECT_RW_BARRIER(rmb(), true); + KCSAN_EXPECT_RW_BARRIER(smp_mb(), true); + KCSAN_EXPECT_RW_BARRIER(smp_wmb(), true); + KCSAN_EXPECT_RW_BARRIER(smp_rmb(), true); + KCSAN_EXPECT_RW_BARRIER(dma_wmb(), true); + KCSAN_EXPECT_RW_BARRIER(dma_rmb(), true); + KCSAN_EXPECT_RW_BARRIER(smp_mb__before_atomic(), true); + KCSAN_EXPECT_RW_BARRIER(smp_mb__after_atomic(), true); + KCSAN_EXPECT_RW_BARRIER(smp_mb__after_spinlock(), true); + KCSAN_EXPECT_RW_BARRIER(smp_store_mb(test_var, 0), true); + KCSAN_EXPECT_RW_BARRIER(smp_load_acquire(&test_var), false); + KCSAN_EXPECT_RW_BARRIER(smp_store_release(&test_var, 0), true); + KCSAN_EXPECT_RW_BARRIER(xchg(&test_var, 0), true); + KCSAN_EXPECT_RW_BARRIER(xchg_release(&test_var, 0), true); + KCSAN_EXPECT_RW_BARRIER(xchg_relaxed(&test_var, 0), false); + KCSAN_EXPECT_RW_BARRIER(cmpxchg(&test_var, 0, 0), true); + KCSAN_EXPECT_RW_BARRIER(cmpxchg_release(&test_var, 0, 0), true); + KCSAN_EXPECT_RW_BARRIER(cmpxchg_relaxed(&test_var, 0, 0), false); + KCSAN_EXPECT_RW_BARRIER(atomic_read(&dummy), false); + KCSAN_EXPECT_RW_BARRIER(atomic_read_acquire(&dummy), false); + KCSAN_EXPECT_RW_BARRIER(atomic_set(&dummy, 0), false); + KCSAN_EXPECT_RW_BARRIER(atomic_set_release(&dummy, 0), true); + KCSAN_EXPECT_RW_BARRIER(atomic_add(1, &dummy), false); + KCSAN_EXPECT_RW_BARRIER(atomic_add_return(1, &dummy), true); + 
KCSAN_EXPECT_RW_BARRIER(atomic_add_return_acquire(1, &dummy), false); + KCSAN_EXPECT_RW_BARRIER(atomic_add_return_release(1, &dummy), true); + KCSAN_EXPECT_RW_BARRIER(atomic_add_return_relaxed(1, &dummy), false); + KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add(1, &dummy), true); + KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_acquire(1, &dummy), false); + KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_release(1, &dummy), true); + KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false); + KCSAN_EXPECT_RW_BARRIER(test_and_set_bit(0, &test_var), true); + KCSAN_EXPECT_RW_BARRIER(test_and_clear_bit(0, &test_var), true); + KCSAN_EXPECT_RW_BARRIER(test_and_change_bit(0, &test_var), true); + KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock(0, &test_var), true); + KCSAN_EXPECT_RW_BARRIER(__clear_bit_unlock(0, &test_var), true); + KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); + KCSAN_EXPECT_RW_BARRIER(arch_spin_lock(&arch_spinlock), false); + KCSAN_EXPECT_RW_BARRIER(arch_spin_unlock(&arch_spinlock), true); + KCSAN_EXPECT_RW_BARRIER(spin_lock(&spinlock), false); + KCSAN_EXPECT_RW_BARRIER(spin_unlock(&spinlock), true); + KCSAN_EXPECT_RW_BARRIER(mutex_lock(&mutex), false); + KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&mutex), true); + + kcsan_nestable_atomic_end(); +} + /* Simple test with normal data race. */ __no_kcsan static void test_basic(struct kunit *test) @@ -1039,6 +1263,90 @@ static void test_1bit_value_change(struct kunit *test) KUNIT_EXPECT_TRUE(test, match); } +__no_kcsan +static void test_correct_barrier(struct kunit *test) +{ + struct expect_report expect = { + .access = { + { test_kernel_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) }, + { test_kernel_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) }, + }, + }; + bool match_expect = false; + + test_struct.val[0] = 0; /* init unlocked */ + begin_test_checks(test_kernel_with_memorder, test_kernel_with_memorder); + do { + match_expect = report_matches_any_reordered(&expect); + } while (!end_test_checks(match_expect)); + KUNIT_EXPECT_FALSE(test, match_expect); +} + +__no_kcsan +static void test_missing_barrier(struct kunit *test) +{ + struct expect_report expect = { + .access = { + { test_kernel_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) }, + { test_kernel_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) }, + }, + }; + bool match_expect = false; + + test_struct.val[0] = 0; /* init unlocked */ + begin_test_checks(test_kernel_wrong_memorder, test_kernel_wrong_memorder); + do { + match_expect = report_matches_any_reordered(&expect); + } while (!end_test_checks(match_expect)); + if (IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY)) + KUNIT_EXPECT_TRUE(test, match_expect); + else + KUNIT_EXPECT_FALSE(test, match_expect); +} + +__no_kcsan +static void test_atomic_builtins_correct_barrier(struct kunit *test) +{ + struct expect_report expect = { + .access = { + { test_kernel_atomic_builtin_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) }, + { test_kernel_atomic_builtin_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) }, + }, + }; + bool match_expect = false; + + test_struct.val[0] = 0; /* init unlocked */ + begin_test_checks(test_kernel_atomic_builtin_with_memorder, + test_kernel_atomic_builtin_with_memorder); + do { + match_expect = report_matches_any_reordered(&expect); + } while (!end_test_checks(match_expect)); + KUNIT_EXPECT_FALSE(test, match_expect); +} + +__no_kcsan +static void 
test_atomic_builtins_missing_barrier(struct kunit *test) +{ + struct expect_report expect = { + .access = { + { test_kernel_atomic_builtin_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) }, + { test_kernel_atomic_builtin_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) }, + }, + }; + bool match_expect = false; + + test_struct.val[0] = 0; /* init unlocked */ + begin_test_checks(test_kernel_atomic_builtin_wrong_memorder, + test_kernel_atomic_builtin_wrong_memorder); + do { + match_expect = report_matches_any_reordered(&expect); + } while (!end_test_checks(match_expect)); + if (IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY)) + KUNIT_EXPECT_TRUE(test, match_expect); + else + KUNIT_EXPECT_FALSE(test, match_expect); +} + /* * Generate thread counts for all test cases. Values generated are in interval * [2, 5] followed by exponentially increasing thread counts from 8 to 32. @@ -1088,6 +1396,7 @@ static const void *nthreads_gen_params(const void *prev, char *desc) #define KCSAN_KUNIT_CASE(test_name) KUNIT_CASE_PARAM(test_name, nthreads_gen_params) static struct kunit_case kcsan_test_cases[] = { + KUNIT_CASE(test_barrier_nothreads), KCSAN_KUNIT_CASE(test_basic), KCSAN_KUNIT_CASE(test_concurrent_races), KCSAN_KUNIT_CASE(test_novalue_change), @@ -1112,6 +1421,10 @@ static struct kunit_case kcsan_test_cases[] = { KCSAN_KUNIT_CASE(test_seqlock_noreport), KCSAN_KUNIT_CASE(test_atomic_builtins), KCSAN_KUNIT_CASE(test_1bit_value_change), + KCSAN_KUNIT_CASE(test_correct_barrier), + KCSAN_KUNIT_CASE(test_missing_barrier), + KCSAN_KUNIT_CASE(test_atomic_builtins_correct_barrier), + KCSAN_KUNIT_CASE(test_atomic_builtins_missing_barrier), {}, }; @@ -1176,6 +1489,9 @@ static int test_init(struct kunit *test) observed.nlines = 0; spin_unlock_irqrestore(&observed.lock, flags); + if (strstr(test->name, "nothreads")) + return 0; + if (!torture_init_begin((char *)test->name, 1)) return -EBUSY; @@ -1218,6 +1534,9 @@ static void test_exit(struct kunit *test) struct task_struct **stop_thread; int i; + if (strstr(test->name, "nothreads")) + return; + if (torture_cleanup_begin()) return; -- cgit From 71b0e3aeb28256712945d99ca67b3f5e3ed7e0b1 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Tue, 30 Nov 2021 12:44:21 +0100 Subject: kcsan: selftest: Add test case to check memory barrier instrumentation Memory barrier instrumentation is crucial to avoid false positives. To avoid surprises, run a simple test case in the boot-time selftest to ensure memory barriers are still instrumented correctly. Signed-off-by: Marco Elver Signed-off-by: Paul E. 
McKenney --- kernel/kcsan/Makefile | 2 + kernel/kcsan/selftest.c | 141 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 143 insertions(+) (limited to 'kernel/kcsan') diff --git a/kernel/kcsan/Makefile b/kernel/kcsan/Makefile index c2bb07f5bcc7..ff47e896de3b 100644 --- a/kernel/kcsan/Makefile +++ b/kernel/kcsan/Makefile @@ -11,6 +11,8 @@ CFLAGS_core.o := $(call cc-option,-fno-conserve-stack) \ -fno-stack-protector -DDISABLE_BRANCH_PROFILING obj-y := core.o debugfs.o report.o + +KCSAN_INSTRUMENT_BARRIERS_selftest.o := y obj-$(CONFIG_KCSAN_SELFTEST) += selftest.o CFLAGS_kcsan_test.o := $(CFLAGS_KCSAN) -g -fno-omit-frame-pointer diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c index b4295a3892b7..08c6b84b9ebe 100644 --- a/kernel/kcsan/selftest.c +++ b/kernel/kcsan/selftest.c @@ -7,10 +7,15 @@ #define pr_fmt(fmt) "kcsan: " fmt +#include <linux/atomic.h> +#include <linux/bitops.h> #include <linux/init.h> +#include <linux/kcsan-checks.h> #include <linux/kernel.h> #include <linux/printk.h> #include <linux/random.h> +#include <linux/sched.h> +#include <linux/spinlock.h> #include <linux/types.h> #include "encoding.h" @@ -103,6 +108,141 @@ static bool __init test_matching_access(void) return true; } +/* + * Correct memory barrier instrumentation is critical to avoiding false + * positives: simple test to check at boot certain barriers are always properly + * instrumented. See kcsan_test for a more complete test. + */ +static bool __init test_barrier(void) +{ +#ifdef CONFIG_KCSAN_WEAK_MEMORY + struct kcsan_scoped_access *reorder_access = &current->kcsan_ctx.reorder_access; +#else + struct kcsan_scoped_access *reorder_access = NULL; +#endif + bool ret = true; + arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED; + DEFINE_SPINLOCK(spinlock); + atomic_t dummy; + long test_var; + + if (!reorder_access || !IS_ENABLED(CONFIG_SMP)) + return true; + +#define __KCSAN_CHECK_BARRIER(access_type, barrier, name) \ + do { \ + reorder_access->type = (access_type) | KCSAN_ACCESS_SCOPED; \ + reorder_access->size = 1; \ + barrier; \ + if (reorder_access->size != 0) { \ + pr_err("improperly instrumented type=(" #access_type "): " name "\n"); \ + ret = false; \ + } \ + } while (0) +#define KCSAN_CHECK_READ_BARRIER(b) __KCSAN_CHECK_BARRIER(0, b, #b) +#define KCSAN_CHECK_WRITE_BARRIER(b) __KCSAN_CHECK_BARRIER(KCSAN_ACCESS_WRITE, b, #b) +#define KCSAN_CHECK_RW_BARRIER(b) __KCSAN_CHECK_BARRIER(KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND, b, #b) + + kcsan_nestable_atomic_begin(); /* No watchpoints in called functions. 
*/ + + KCSAN_CHECK_READ_BARRIER(mb()); + KCSAN_CHECK_READ_BARRIER(rmb()); + KCSAN_CHECK_READ_BARRIER(smp_mb()); + KCSAN_CHECK_READ_BARRIER(smp_rmb()); + KCSAN_CHECK_READ_BARRIER(dma_rmb()); + KCSAN_CHECK_READ_BARRIER(smp_mb__before_atomic()); + KCSAN_CHECK_READ_BARRIER(smp_mb__after_atomic()); + KCSAN_CHECK_READ_BARRIER(smp_mb__after_spinlock()); + KCSAN_CHECK_READ_BARRIER(smp_store_mb(test_var, 0)); + KCSAN_CHECK_READ_BARRIER(smp_store_release(&test_var, 0)); + KCSAN_CHECK_READ_BARRIER(xchg(&test_var, 0)); + KCSAN_CHECK_READ_BARRIER(xchg_release(&test_var, 0)); + KCSAN_CHECK_READ_BARRIER(cmpxchg(&test_var, 0, 0)); + KCSAN_CHECK_READ_BARRIER(cmpxchg_release(&test_var, 0, 0)); + KCSAN_CHECK_READ_BARRIER(atomic_set_release(&dummy, 0)); + KCSAN_CHECK_READ_BARRIER(atomic_add_return(1, &dummy)); + KCSAN_CHECK_READ_BARRIER(atomic_add_return_release(1, &dummy)); + KCSAN_CHECK_READ_BARRIER(atomic_fetch_add(1, &dummy)); + KCSAN_CHECK_READ_BARRIER(atomic_fetch_add_release(1, &dummy)); + KCSAN_CHECK_READ_BARRIER(test_and_set_bit(0, &test_var)); + KCSAN_CHECK_READ_BARRIER(test_and_clear_bit(0, &test_var)); + KCSAN_CHECK_READ_BARRIER(test_and_change_bit(0, &test_var)); + KCSAN_CHECK_READ_BARRIER(clear_bit_unlock(0, &test_var)); + KCSAN_CHECK_READ_BARRIER(__clear_bit_unlock(0, &test_var)); + KCSAN_CHECK_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); + arch_spin_lock(&arch_spinlock); + KCSAN_CHECK_READ_BARRIER(arch_spin_unlock(&arch_spinlock)); + spin_lock(&spinlock); + KCSAN_CHECK_READ_BARRIER(spin_unlock(&spinlock)); + + KCSAN_CHECK_WRITE_BARRIER(mb()); + KCSAN_CHECK_WRITE_BARRIER(wmb()); + KCSAN_CHECK_WRITE_BARRIER(smp_mb()); + KCSAN_CHECK_WRITE_BARRIER(smp_wmb()); + KCSAN_CHECK_WRITE_BARRIER(dma_wmb()); + KCSAN_CHECK_WRITE_BARRIER(smp_mb__before_atomic()); + KCSAN_CHECK_WRITE_BARRIER(smp_mb__after_atomic()); + KCSAN_CHECK_WRITE_BARRIER(smp_mb__after_spinlock()); + KCSAN_CHECK_WRITE_BARRIER(smp_store_mb(test_var, 0)); + KCSAN_CHECK_WRITE_BARRIER(smp_store_release(&test_var, 0)); + KCSAN_CHECK_WRITE_BARRIER(xchg(&test_var, 0)); + KCSAN_CHECK_WRITE_BARRIER(xchg_release(&test_var, 0)); + KCSAN_CHECK_WRITE_BARRIER(cmpxchg(&test_var, 0, 0)); + KCSAN_CHECK_WRITE_BARRIER(cmpxchg_release(&test_var, 0, 0)); + KCSAN_CHECK_WRITE_BARRIER(atomic_set_release(&dummy, 0)); + KCSAN_CHECK_WRITE_BARRIER(atomic_add_return(1, &dummy)); + KCSAN_CHECK_WRITE_BARRIER(atomic_add_return_release(1, &dummy)); + KCSAN_CHECK_WRITE_BARRIER(atomic_fetch_add(1, &dummy)); + KCSAN_CHECK_WRITE_BARRIER(atomic_fetch_add_release(1, &dummy)); + KCSAN_CHECK_WRITE_BARRIER(test_and_set_bit(0, &test_var)); + KCSAN_CHECK_WRITE_BARRIER(test_and_clear_bit(0, &test_var)); + KCSAN_CHECK_WRITE_BARRIER(test_and_change_bit(0, &test_var)); + KCSAN_CHECK_WRITE_BARRIER(clear_bit_unlock(0, &test_var)); + KCSAN_CHECK_WRITE_BARRIER(__clear_bit_unlock(0, &test_var)); + KCSAN_CHECK_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); + arch_spin_lock(&arch_spinlock); + KCSAN_CHECK_WRITE_BARRIER(arch_spin_unlock(&arch_spinlock)); + spin_lock(&spinlock); + KCSAN_CHECK_WRITE_BARRIER(spin_unlock(&spinlock)); + + KCSAN_CHECK_RW_BARRIER(mb()); + KCSAN_CHECK_RW_BARRIER(wmb()); + KCSAN_CHECK_RW_BARRIER(rmb()); + KCSAN_CHECK_RW_BARRIER(smp_mb()); + KCSAN_CHECK_RW_BARRIER(smp_wmb()); + KCSAN_CHECK_RW_BARRIER(smp_rmb()); + KCSAN_CHECK_RW_BARRIER(dma_wmb()); + KCSAN_CHECK_RW_BARRIER(dma_rmb()); + KCSAN_CHECK_RW_BARRIER(smp_mb__before_atomic()); + KCSAN_CHECK_RW_BARRIER(smp_mb__after_atomic()); + KCSAN_CHECK_RW_BARRIER(smp_mb__after_spinlock()); + 
KCSAN_CHECK_RW_BARRIER(smp_store_mb(test_var, 0)); + KCSAN_CHECK_RW_BARRIER(smp_store_release(&test_var, 0)); + KCSAN_CHECK_RW_BARRIER(xchg(&test_var, 0)); + KCSAN_CHECK_RW_BARRIER(xchg_release(&test_var, 0)); + KCSAN_CHECK_RW_BARRIER(cmpxchg(&test_var, 0, 0)); + KCSAN_CHECK_RW_BARRIER(cmpxchg_release(&test_var, 0, 0)); + KCSAN_CHECK_RW_BARRIER(atomic_set_release(&dummy, 0)); + KCSAN_CHECK_RW_BARRIER(atomic_add_return(1, &dummy)); + KCSAN_CHECK_RW_BARRIER(atomic_add_return_release(1, &dummy)); + KCSAN_CHECK_RW_BARRIER(atomic_fetch_add(1, &dummy)); + KCSAN_CHECK_RW_BARRIER(atomic_fetch_add_release(1, &dummy)); + KCSAN_CHECK_RW_BARRIER(test_and_set_bit(0, &test_var)); + KCSAN_CHECK_RW_BARRIER(test_and_clear_bit(0, &test_var)); + KCSAN_CHECK_RW_BARRIER(test_and_change_bit(0, &test_var)); + KCSAN_CHECK_RW_BARRIER(clear_bit_unlock(0, &test_var)); + KCSAN_CHECK_RW_BARRIER(__clear_bit_unlock(0, &test_var)); + KCSAN_CHECK_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); + arch_spin_lock(&arch_spinlock); + KCSAN_CHECK_RW_BARRIER(arch_spin_unlock(&arch_spinlock)); + spin_lock(&spinlock); + KCSAN_CHECK_RW_BARRIER(spin_unlock(&spinlock)); + + kcsan_nestable_atomic_end(); + + return ret; +} + static int __init kcsan_selftest(void) { int passed = 0; @@ -120,6 +260,7 @@ static int __init kcsan_selftest(void) RUN_TEST(test_requires); RUN_TEST(test_encode_decode); RUN_TEST(test_matching_access); + RUN_TEST(test_barrier); pr_info("selftest: %d/%d tests passed\n", passed, total); if (passed != total) -- cgit From a70d36e6a0bd867ef42dce8ef46eb9b5a1515fb0 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Sat, 4 Dec 2021 00:38:17 +0100 Subject: kcsan: Make barrier tests compatible with lockdep The barrier tests in selftest and the kcsan_test module only need the spinlock and mutex to test correct barrier instrumentation. Therefore, these were initially placed on the stack. However, lockdep asserts that locks are in static storage, and will generate this warning: | INFO: trying to register non-static key. | The code is fine but needs lockdep annotation, or maybe | you didn't initialize this object before use? | turning off the locking correctness validator. | CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.16.0-rc1+ #3208 | Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.13.0-1ubuntu1.1 04/01/2014 | Call Trace: | | dump_stack_lvl+0x88/0xd8 | dump_stack+0x15/0x1b | register_lock_class+0x6b3/0x840 | ... | test_barrier+0x490/0x14c7 | kcsan_selftest+0x47/0xa0 | ... To fix, move the test locks into static storage. Fixing the above also revealed that lock operations are strengthened on first use with lockdep enabled, due to lockdep calling out into instrumented files (recall that kernel/locking/lockdep.c is not instrumented with KCSAN). Only kcsan_test checks for over-instrumentation of *_lock() operations, where we can simply "warm up" the test locks to avoid the test case failing with lockdep. Reported-by: Paul E. McKenney Signed-off-by: Marco Elver Signed-off-by: Paul E. 
McKenney --- kernel/kcsan/kcsan_test.c | 37 +++++++++++++++++++++++-------------- kernel/kcsan/selftest.c | 14 +++++++------- 2 files changed, 30 insertions(+), 21 deletions(-) (limited to 'kernel/kcsan') diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c index 5bf94550bcdf..2bad0820f73a 100644 --- a/kernel/kcsan/kcsan_test.c +++ b/kernel/kcsan/kcsan_test.c @@ -300,6 +300,8 @@ static struct { long val[8]; } test_struct; static DEFINE_SEQLOCK(test_seqlock); +static DEFINE_SPINLOCK(test_spinlock); +static DEFINE_MUTEX(test_mutex); /* * Helper to avoid compiler optimizing out reads, and to generate source values @@ -523,8 +525,6 @@ static void test_barrier_nothreads(struct kunit *test) struct kcsan_scoped_access *reorder_access = NULL; #endif arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED; - DEFINE_SPINLOCK(spinlock); - DEFINE_MUTEX(mutex); atomic_t dummy; KCSAN_TEST_REQUIRES(test, reorder_access != NULL); @@ -543,6 +543,15 @@ static void test_barrier_nothreads(struct kunit *test) #define KCSAN_EXPECT_WRITE_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_WRITE, b, o, #b) #define KCSAN_EXPECT_RW_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE, b, o, #b) + /* + * Lockdep initialization can strengthen certain locking operations due + * to calling into instrumented files; "warm up" our locks. + */ + spin_lock(&test_spinlock); + spin_unlock(&test_spinlock); + mutex_lock(&test_mutex); + mutex_unlock(&test_mutex); + /* Force creating a valid entry in reorder_access first. */ test_var = 0; while (test_var++ < 1000000 && reorder_access->size != sizeof(test_var)) @@ -592,10 +601,10 @@ static void test_barrier_nothreads(struct kunit *test) KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); KCSAN_EXPECT_READ_BARRIER(arch_spin_lock(&arch_spinlock), false); KCSAN_EXPECT_READ_BARRIER(arch_spin_unlock(&arch_spinlock), true); - KCSAN_EXPECT_READ_BARRIER(spin_lock(&spinlock), false); - KCSAN_EXPECT_READ_BARRIER(spin_unlock(&spinlock), true); - KCSAN_EXPECT_READ_BARRIER(mutex_lock(&mutex), false); - KCSAN_EXPECT_READ_BARRIER(mutex_unlock(&mutex), true); + KCSAN_EXPECT_READ_BARRIER(spin_lock(&test_spinlock), false); + KCSAN_EXPECT_READ_BARRIER(spin_unlock(&test_spinlock), true); + KCSAN_EXPECT_READ_BARRIER(mutex_lock(&test_mutex), false); + KCSAN_EXPECT_READ_BARRIER(mutex_unlock(&test_mutex), true); KCSAN_EXPECT_WRITE_BARRIER(mb(), true); KCSAN_EXPECT_WRITE_BARRIER(wmb(), true); @@ -638,10 +647,10 @@ static void test_barrier_nothreads(struct kunit *test) KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); KCSAN_EXPECT_WRITE_BARRIER(arch_spin_lock(&arch_spinlock), false); KCSAN_EXPECT_WRITE_BARRIER(arch_spin_unlock(&arch_spinlock), true); - KCSAN_EXPECT_WRITE_BARRIER(spin_lock(&spinlock), false); - KCSAN_EXPECT_WRITE_BARRIER(spin_unlock(&spinlock), true); - KCSAN_EXPECT_WRITE_BARRIER(mutex_lock(&mutex), false); - KCSAN_EXPECT_WRITE_BARRIER(mutex_unlock(&mutex), true); + KCSAN_EXPECT_WRITE_BARRIER(spin_lock(&test_spinlock), false); + KCSAN_EXPECT_WRITE_BARRIER(spin_unlock(&test_spinlock), true); + KCSAN_EXPECT_WRITE_BARRIER(mutex_lock(&test_mutex), false); + KCSAN_EXPECT_WRITE_BARRIER(mutex_unlock(&test_mutex), true); KCSAN_EXPECT_RW_BARRIER(mb(), true); KCSAN_EXPECT_RW_BARRIER(wmb(), true); @@ -684,10 +693,10 @@ static void test_barrier_nothreads(struct kunit *test) KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); 
KCSAN_EXPECT_RW_BARRIER(arch_spin_lock(&arch_spinlock), false); KCSAN_EXPECT_RW_BARRIER(arch_spin_unlock(&arch_spinlock), true); - KCSAN_EXPECT_RW_BARRIER(spin_lock(&spinlock), false); - KCSAN_EXPECT_RW_BARRIER(spin_unlock(&spinlock), true); - KCSAN_EXPECT_RW_BARRIER(mutex_lock(&mutex), false); - KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&mutex), true); + KCSAN_EXPECT_RW_BARRIER(spin_lock(&test_spinlock), false); + KCSAN_EXPECT_RW_BARRIER(spin_unlock(&test_spinlock), true); + KCSAN_EXPECT_RW_BARRIER(mutex_lock(&test_mutex), false); + KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&test_mutex), true); kcsan_nestable_atomic_end(); } diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c index 08c6b84b9ebe..b6d4da07d80a 100644 --- a/kernel/kcsan/selftest.c +++ b/kernel/kcsan/selftest.c @@ -113,6 +113,7 @@ static bool __init test_matching_access(void) * positives: simple test to check at boot certain barriers are always properly * instrumented. See kcsan_test for a more complete test. */ +static DEFINE_SPINLOCK(test_spinlock); static bool __init test_barrier(void) { #ifdef CONFIG_KCSAN_WEAK_MEMORY @@ -122,7 +123,6 @@ static bool __init test_barrier(void) #endif bool ret = true; arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED; - DEFINE_SPINLOCK(spinlock); atomic_t dummy; long test_var; @@ -172,8 +172,8 @@ static bool __init test_barrier(void) KCSAN_CHECK_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); arch_spin_lock(&arch_spinlock); KCSAN_CHECK_READ_BARRIER(arch_spin_unlock(&arch_spinlock)); - spin_lock(&spinlock); - KCSAN_CHECK_READ_BARRIER(spin_unlock(&spinlock)); + spin_lock(&test_spinlock); + KCSAN_CHECK_READ_BARRIER(spin_unlock(&test_spinlock)); KCSAN_CHECK_WRITE_BARRIER(mb()); KCSAN_CHECK_WRITE_BARRIER(wmb()); @@ -202,8 +202,8 @@ static bool __init test_barrier(void) KCSAN_CHECK_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); arch_spin_lock(&arch_spinlock); KCSAN_CHECK_WRITE_BARRIER(arch_spin_unlock(&arch_spinlock)); - spin_lock(&spinlock); - KCSAN_CHECK_WRITE_BARRIER(spin_unlock(&spinlock)); + spin_lock(&test_spinlock); + KCSAN_CHECK_WRITE_BARRIER(spin_unlock(&test_spinlock)); KCSAN_CHECK_RW_BARRIER(mb()); KCSAN_CHECK_RW_BARRIER(wmb()); @@ -235,8 +235,8 @@ static bool __init test_barrier(void) KCSAN_CHECK_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); arch_spin_lock(&arch_spinlock); KCSAN_CHECK_RW_BARRIER(arch_spin_unlock(&arch_spinlock)); - spin_lock(&spinlock); - KCSAN_CHECK_RW_BARRIER(spin_unlock(&spinlock)); + spin_lock(&test_spinlock); + KCSAN_CHECK_RW_BARRIER(spin_unlock(&test_spinlock)); kcsan_nestable_atomic_end(); -- cgit From e3d2b72bbf3c580b0c5c96385777c2f483a45ab5 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Mon, 6 Dec 2021 07:41:50 +0100 Subject: kcsan: Avoid nested contexts reading inconsistent reorder_access Nested contexts, such as nested interrupts or scheduler code, share the same kcsan_ctx. When such a nested context reads an inconsistent reorder_access due to an interrupt during set_reorder_access(), we can observe the following warning: | ------------[ cut here ]------------ | Cannot find frame for torture_random kernel/torture.c:456 in stack trace | WARNING: CPU: 13 PID: 147 at kernel/kcsan/report.c:343 replace_stack_entry kernel/kcsan/report.c:343 | ... 
| Call Trace: | | sanitize_stack_entries kernel/kcsan/report.c:351 [inline] | print_report kernel/kcsan/report.c:409 | kcsan_report_known_origin kernel/kcsan/report.c:693 | kcsan_setup_watchpoint kernel/kcsan/core.c:658 | rcutorture_one_extend kernel/rcu/rcutorture.c:1475 | rcutorture_loop_extend kernel/rcu/rcutorture.c:1558 [inline] | ... | | ---[ end trace ee5299cb933115f5 ]--- | ================================================================== | BUG: KCSAN: data-race in _raw_spin_lock_irqsave / rcutorture_one_extend | | write (reordered) to 0xffffffff8c93b300 of 8 bytes by task 154 on cpu 12: | queued_spin_lock include/asm-generic/qspinlock.h:80 [inline] | do_raw_spin_lock include/linux/spinlock.h:185 [inline] | __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:111 [inline] | _raw_spin_lock_irqsave kernel/locking/spinlock.c:162 | try_to_wake_up kernel/sched/core.c:4003 | sysvec_apic_timer_interrupt arch/x86/kernel/apic/apic.c:1097 | asm_sysvec_apic_timer_interrupt arch/x86/include/asm/idtentry.h:638 | set_reorder_access kernel/kcsan/core.c:416 [inline] <-- inconsistent reorder_access | kcsan_setup_watchpoint kernel/kcsan/core.c:693 | rcutorture_one_extend kernel/rcu/rcutorture.c:1475 | rcutorture_loop_extend kernel/rcu/rcutorture.c:1558 [inline] | rcu_torture_one_read kernel/rcu/rcutorture.c:1600 | rcu_torture_reader kernel/rcu/rcutorture.c:1692 | kthread kernel/kthread.c:327 | ret_from_fork arch/x86/entry/entry_64.S:295 | | read to 0xffffffff8c93b300 of 8 bytes by task 147 on cpu 13: | rcutorture_one_extend kernel/rcu/rcutorture.c:1475 | rcutorture_loop_extend kernel/rcu/rcutorture.c:1558 [inline] | ... The warning is telling us that there was a data race which KCSAN wants to report, but the function where the original access (that is now reordered) happened cannot be found in the stack trace, which prevents KCSAN from generating the right stack trace. The stack trace of "write (reordered)" now only shows where the access was reordered to, but should instead show the stack trace of the original write, with a final line saying "reordered to". At the point where set_reorder_access() is interrupted, it just set reorder_access->ptr and size, at which point size is non-zero. This is sufficient (if ctx->disable_scoped is zero) for further accesses from nested contexts to perform checking of this reorder_access. That then happened in _raw_spin_lock_irqsave(), which is called by scheduler code. However, since reorder_access->ip is still stale (ptr and size belong to a different ip not yet set), this finally leads to replace_stack_entry() not finding the frame in reorder_access->ip and generating the above warning. Fix it by ensuring that a nested context cannot access reorder_access while we update it in set_reorder_access(): set ctx->disable_scoped for the duration that reorder_access is updated, which effectively locks reorder_access and prevents concurrent use by nested contexts. Note, set_reorder_access() can do the update only if disable_scoped is zero on entry, and must therefore set disable_scoped back to non-zero after the initial check in set_reorder_access(). Signed-off-by: Marco Elver Signed-off-by: Paul E. 
McKenney --- kernel/kcsan/core.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'kernel/kcsan') diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 916060913966..fe12dfe254ec 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -412,11 +412,20 @@ set_reorder_access(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, if (!reorder_access || !kcsan_weak_memory) return; + /* + * To avoid nested interrupts or scheduler (which share kcsan_ctx) + * reading an inconsistent reorder_access, ensure that the below has + * exclusive access to reorder_access by disallowing concurrent use. + */ + ctx->disable_scoped++; + barrier(); reorder_access->ptr = ptr; reorder_access->size = size; reorder_access->type = type | KCSAN_ACCESS_SCOPED; reorder_access->ip = ip; reorder_access->stack_depth = get_kcsan_stack_depth(); + barrier(); + ctx->disable_scoped--; } /* -- cgit From b473a3891c46393e9c4ccb4e3197d7fb259c7100 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Mon, 6 Dec 2021 07:41:51 +0100 Subject: kcsan: Only test clear_bit_unlock_is_negative_byte if arch defines it Some architectures do not define clear_bit_unlock_is_negative_byte(). Only test it when it is actually defined (similar to other usage, such as in lib/test_kasan.c). Link: https://lkml.kernel.org/r/202112050757.x67rHnFU-lkp@intel.com Reported-by: kernel test robot Signed-off-by: Marco Elver Signed-off-by: Paul E. McKenney --- kernel/kcsan/kcsan_test.c | 8 +++++--- kernel/kcsan/selftest.c | 8 +++++--- 2 files changed, 10 insertions(+), 6 deletions(-) (limited to 'kernel/kcsan') diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c index 2bad0820f73a..a36fca063a73 100644 --- a/kernel/kcsan/kcsan_test.c +++ b/kernel/kcsan/kcsan_test.c @@ -598,7 +598,6 @@ static void test_barrier_nothreads(struct kunit *test) KCSAN_EXPECT_READ_BARRIER(test_and_change_bit(0, &test_var), true); KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock(0, &test_var), true); KCSAN_EXPECT_READ_BARRIER(__clear_bit_unlock(0, &test_var), true); - KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); KCSAN_EXPECT_READ_BARRIER(arch_spin_lock(&arch_spinlock), false); KCSAN_EXPECT_READ_BARRIER(arch_spin_unlock(&arch_spinlock), true); KCSAN_EXPECT_READ_BARRIER(spin_lock(&test_spinlock), false); @@ -644,7 +643,6 @@ static void test_barrier_nothreads(struct kunit *test) KCSAN_EXPECT_WRITE_BARRIER(test_and_change_bit(0, &test_var), true); KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock(0, &test_var), true); KCSAN_EXPECT_WRITE_BARRIER(__clear_bit_unlock(0, &test_var), true); - KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); KCSAN_EXPECT_WRITE_BARRIER(arch_spin_lock(&arch_spinlock), false); KCSAN_EXPECT_WRITE_BARRIER(arch_spin_unlock(&arch_spinlock), true); KCSAN_EXPECT_WRITE_BARRIER(spin_lock(&test_spinlock), false); @@ -690,7 +688,6 @@ static void test_barrier_nothreads(struct kunit *test) KCSAN_EXPECT_RW_BARRIER(test_and_change_bit(0, &test_var), true); KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock(0, &test_var), true); KCSAN_EXPECT_RW_BARRIER(__clear_bit_unlock(0, &test_var), true); - KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); KCSAN_EXPECT_RW_BARRIER(arch_spin_lock(&arch_spinlock), false); KCSAN_EXPECT_RW_BARRIER(arch_spin_unlock(&arch_spinlock), true); KCSAN_EXPECT_RW_BARRIER(spin_lock(&test_spinlock), false); @@ -698,6 +695,11 @@ static void test_barrier_nothreads(struct kunit *test) KCSAN_EXPECT_RW_BARRIER(mutex_lock(&test_mutex), false); 
KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&test_mutex), true); +#ifdef clear_bit_unlock_is_negative_byte + KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); + KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); + KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); +#endif kcsan_nestable_atomic_end(); } diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c index b6d4da07d80a..75712959c84e 100644 --- a/kernel/kcsan/selftest.c +++ b/kernel/kcsan/selftest.c @@ -169,7 +169,6 @@ static bool __init test_barrier(void) KCSAN_CHECK_READ_BARRIER(test_and_change_bit(0, &test_var)); KCSAN_CHECK_READ_BARRIER(clear_bit_unlock(0, &test_var)); KCSAN_CHECK_READ_BARRIER(__clear_bit_unlock(0, &test_var)); - KCSAN_CHECK_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); arch_spin_lock(&arch_spinlock); KCSAN_CHECK_READ_BARRIER(arch_spin_unlock(&arch_spinlock)); spin_lock(&test_spinlock); @@ -199,7 +198,6 @@ static bool __init test_barrier(void) KCSAN_CHECK_WRITE_BARRIER(test_and_change_bit(0, &test_var)); KCSAN_CHECK_WRITE_BARRIER(clear_bit_unlock(0, &test_var)); KCSAN_CHECK_WRITE_BARRIER(__clear_bit_unlock(0, &test_var)); - KCSAN_CHECK_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); arch_spin_lock(&arch_spinlock); KCSAN_CHECK_WRITE_BARRIER(arch_spin_unlock(&arch_spinlock)); spin_lock(&test_spinlock); @@ -232,12 +230,16 @@ static bool __init test_barrier(void) KCSAN_CHECK_RW_BARRIER(test_and_change_bit(0, &test_var)); KCSAN_CHECK_RW_BARRIER(clear_bit_unlock(0, &test_var)); KCSAN_CHECK_RW_BARRIER(__clear_bit_unlock(0, &test_var)); - KCSAN_CHECK_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); arch_spin_lock(&arch_spinlock); KCSAN_CHECK_RW_BARRIER(arch_spin_unlock(&arch_spinlock)); spin_lock(&test_spinlock); KCSAN_CHECK_RW_BARRIER(spin_unlock(&test_spinlock)); +#ifdef clear_bit_unlock_is_negative_byte + KCSAN_CHECK_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); + KCSAN_CHECK_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); + KCSAN_CHECK_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); +#endif kcsan_nestable_atomic_end(); return ret; -- cgit
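
Editor's note: the boot-time selftest and the kcsan_test barrier cases above rely on the same trick: with CONFIG_KCSAN_WEAK_MEMORY, every instrumented barrier clears the pending reorder_access entry of the current context, so a test can plant a dummy entry, execute the barrier under test, and then check whether the entry's size was reset to zero. The stand-alone program below is only an illustrative userspace model of that check pattern; the names model_reorder_access, model_smp_mb, model_uninstrumented_mb and CHECK_BARRIER are invented for this sketch and do not exist in the kernel.

#include <stdio.h>
#include <stddef.h>

/* Models kcsan_ctx::reorder_access: a pending access a barrier must flush. */
struct model_reorder_access {
	const volatile void *ptr;
	size_t size;
	int type;
};

static struct model_reorder_access reorder_access;

/* Models an instrumented barrier: KCSAN's barrier hook clears the entry. */
static void model_smp_mb(void)
{
	__sync_synchronize();		/* the barrier itself */
	reorder_access.size = 0;	/* the instrumentation side effect under test */
}

/* Models a barrier that lost its instrumentation (only the barrier remains). */
static void model_uninstrumented_mb(void)
{
	__sync_synchronize();
}

/* Mirrors the check pattern: plant an entry, run the barrier, check it was cleared. */
#define CHECK_BARRIER(barrier, name)						\
	do {									\
		reorder_access.size = 1;					\
		barrier;							\
		printf("%-28s %s\n", name,					\
		       reorder_access.size ? "NOT instrumented" : "instrumented"); \
	} while (0)

int main(void)
{
	CHECK_BARRIER(model_smp_mb(), "model_smp_mb()");
	CHECK_BARRIER(model_uninstrumented_mb(), "model_uninstrumented_mb()");
	return 0;
}

Running the model prints "instrumented" for the barrier whose stand-in clears the entry and "NOT instrumented" for the one that does not, which mirrors how the in-kernel check macros and the KCSAN_EXPECT_*_BARRIER() cases above decide pass or fail.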
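
Editor's note: the disable_scoped counter in the reorder_access fix above acts as a same-context exclusion flag: nested readers (interrupts sharing the kcsan_ctx) skip the entry while it is non-zero, which is why compiler barriers around the multi-field update suffice. The program below is a hedged userspace model of that publish pattern; model_ctx, model_set_reorder_access and model_nested_reader are invented names, and the empty inline asm statements merely stand in for the kernel's barrier().

#include <stdio.h>
#include <stddef.h>

/* Models the relevant parts of kcsan_ctx (names invented for this sketch). */
struct model_ctx {
	int disable_scoped;	/* non-zero: nested readers must skip the entry */
	const void *ptr;	/* multi-field state that must be read consistently */
	size_t size;
	unsigned long ip;
};

static struct model_ctx ctx;

/* What a nested interrupt would do: use the entry only if it is not being updated. */
static void model_nested_reader(void)
{
	if (ctx.disable_scoped || !ctx.size)
		return;
	printf("checking reordered access: ptr=%p size=%zu ip=%#lx\n",
	       (void *)ctx.ptr, ctx.size, ctx.ip);
}

/* Models the publish: exclude nested readers while the fields are inconsistent. */
static void model_set_reorder_access(const void *ptr, size_t size, unsigned long ip)
{
	ctx.disable_scoped++;
	__asm__ __volatile__("" ::: "memory");	/* stands in for barrier() */
	ctx.ptr = ptr;
	ctx.size = size;
	ctx.ip = ip;
	__asm__ __volatile__("" ::: "memory");	/* stands in for barrier() */
	ctx.disable_scoped--;
}

int main(void)
{
	static int obj;

	model_set_reorder_access(&obj, sizeof(obj), 0x1234);
	model_nested_reader();	/* in the kernel this would run from a nested interrupt */
	return 0;
}

In the kernel, the reader corresponds to a nested interrupt that may fire between the two barriers; the non-zero flag makes it bail out instead of consuming half-updated fields.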