author      Marco Elver <elver@google.com>          2020-02-21 23:02:09 +0100
committer   Paul E. McKenney <paulmck@kernel.org>   2020-03-25 09:55:59 -0700
commit      48b1fc190a180d971fb69217c88c7247f4f2ca19 (patch)
tree        6b3408d62d095a068afc089cfbfd3f98d73cfaa5
parent      f5d2313bd3c540be405c4977a63840cd6d0167b5 (diff)
kcsan: Add option to allow watcher interruptions
Add option to allow interrupts while a watchpoint is set up. This can be enabled either via CONFIG_KCSAN_INTERRUPT_WATCHER or via the boot parameter 'kcsan.interrupt_watcher=1'.

Note that currently not all safe per-CPU access primitives and patterns are accounted for, which could result in false positives. For example, asm-generic/percpu.h uses plain operations, which by default are instrumented. On interrupts and subsequent accesses to the same variable, KCSAN would currently report a data race with this option. Therefore, this option should currently remain disabled by default, but may be enabled for specific test scenarios.

To avoid new warnings, change all uses of smp_processor_id() to the raw version (as already done in kcsan_found_watchpoint()). The exact SMP processor id is for informational purposes in the report, and correctness is not affected.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
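A minimal sketch (not part of this patch; the variable and function names are illustrative) of the kind of per-CPU pattern the message above refers to. Both sides use the plain accessors from asm-generic/percpu.h on the same CPU-local variable, so with the watcher interruptible KCSAN could report this as a data race even though the access pattern is intentionally CPU-local:

    #include <linux/percpu.h>

    /* Hypothetical per-CPU statistic, for illustration only. */
    static DEFINE_PER_CPU(unsigned long, example_stat);

    static void example_task_path(void)
    {
    	/* Plain read-modify-write via raw_cpu_*(); instrumented by KCSAN. */
    	raw_cpu_write(example_stat, raw_cpu_read(example_stat) + 1);
    }

    static void example_irq_handler(void)
    {
    	/*
    	 * If this interrupt arrives while a watchpoint set up for
    	 * example_stat in task context is still armed, the plain access
    	 * below would currently be reported as a data race.
    	 */
    	raw_cpu_write(example_stat, raw_cpu_read(example_stat) + 1);
    }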
-rw-r--r--   kernel/kcsan/core.c   34
-rw-r--r--   lib/Kconfig.kcsan     11
2 files changed, 21 insertions, 24 deletions
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 589b1e7f0f25..e7387fec6679 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -21,6 +21,7 @@ static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
static unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
static unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
+static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
@@ -30,6 +31,7 @@ module_param_named(early_enable, kcsan_early_enable, bool, 0);
module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
+module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);
bool kcsan_enabled;
@@ -354,7 +356,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
unsigned long access_mask;
enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
unsigned long ua_flags = user_access_save();
- unsigned long irq_flags;
+ unsigned long irq_flags = 0;
/*
* Always reset kcsan_skip counter in slow-path to avoid underflow; see
@@ -370,26 +372,9 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
goto out;
}
- /*
- * Disable interrupts & preemptions to avoid another thread on the same
- * CPU accessing memory locations for the set up watchpoint; this is to
- * avoid reporting races to e.g. CPU-local data.
- *
- * An alternative would be adding the source CPU to the watchpoint
- * encoding, and checking that watchpoint-CPU != this-CPU. There are
- * several problems with this:
- * 1. we should avoid stealing more bits from the watchpoint encoding
- * as it would affect accuracy, as well as increase performance
- * overhead in the fast-path;
- * 2. if we are preempted, but there *is* a genuine data race, we
- * would *not* report it -- since this is the common case (vs.
- * CPU-local data accesses), it makes more sense (from a data race
- * detection point of view) to simply disable preemptions to ensure
- * as many tasks as possible run on other CPUs.
- *
- * Use raw versions, to avoid lockdep recursion via IRQ flags tracing.
- */
- raw_local_irq_save(irq_flags);
+ if (!kcsan_interrupt_watcher)
+ /* Use raw to avoid lockdep recursion via IRQ flags tracing. */
+ raw_local_irq_save(irq_flags);
watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
if (watchpoint == NULL) {
@@ -507,7 +492,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
- kcsan_report(ptr, size, type, value_change, smp_processor_id(),
+ kcsan_report(ptr, size, type, value_change, raw_smp_processor_id(),
KCSAN_REPORT_RACE_SIGNAL);
} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
/* Inferring a race, since the value should not have changed. */
@@ -518,13 +503,14 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
- smp_processor_id(),
+ raw_smp_processor_id(),
KCSAN_REPORT_RACE_UNKNOWN_ORIGIN);
}
kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS);
out_unlock:
- raw_local_irq_restore(irq_flags);
+ if (!kcsan_interrupt_watcher)
+ raw_local_irq_restore(irq_flags);
out:
user_access_restore(ua_flags);
}
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index f0b791143c6a..081ed2e1bf7b 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -88,6 +88,17 @@ config KCSAN_SKIP_WATCH_RANDOMIZE
KCSAN_WATCH_SKIP. If false, the chosen value is always
KCSAN_WATCH_SKIP.
+config KCSAN_INTERRUPT_WATCHER
+ bool "Interruptible watchers"
+ help
+ If enabled, a task that set up a watchpoint may be interrupted while
+ delayed. This option will allow KCSAN to detect races between
+ interrupted tasks and other threads of execution on the same CPU.
+
+ Currently disabled by default, because not all safe per-CPU access
+ primitives and patterns may be accounted for, and therefore could
+ result in false positives.
+
config KCSAN_REPORT_ONCE_IN_MS
int "Duration in milliseconds, in which any given race is only reported once"
default 3000
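For reference, the new mode can be enabled for a test scenario either at build time via the Kconfig option added above, or at boot via the module parameter (read-only at runtime, mode 0444), as described in the commit message:

    # Kconfig fragment for a KCSAN test build:
    CONFIG_KCSAN=y
    CONFIG_KCSAN_INTERRUPT_WATCHER=y

    # Alternatively, on the kernel command line of a KCSAN-enabled kernel:
    kcsan.interrupt_watcher=1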