author:    Peter Zijlstra <peterz@infradead.org>	2023-01-12 20:43:49 +0100
committer: Ingo Molnar <mingo@kernel.org>	2023-01-13 11:48:16 +0100
commit:    9aedeaed6fc6fe8452b9b8225e95cc2b8631ff91
tree:      f25b3cdfd0811971a800db9cf2836042e1d38d4c	/kernel/trace/trace_preemptirq.c
parent:    408b961146be4c1a776ce285c3c289afab15298a
tracing, hardirq: No moar _rcuidle() tracing
Robot reported that trace_hardirqs_{on,off}() tickle the forbidden _rcuidle()
tracepoint through local_irq_{en,dis}able().

For 'sane' configs, these calls will only happen with RCU enabled and as such
can use the regular tracepoint. This also means it's possible to trace them
from NMI context again.

Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20230112195541.477416709@infradead.org
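For orientation, a minimal hand-derived sketch (not part of the commit) of the
mechanism the message describes: the trace() helper introduced by the patch
picks the regular tracepoint when the architecture has noinstr tooling, and
falls back to the NMI-excluded _rcuidle variant otherwise. All names below
(CONFIG_ARCH_WANTS_NO_INSTR, in_nmi(), trace_irq_enable*, CALLER_ADDR*) are
taken from the diff further down.

#ifdef CONFIG_ARCH_WANTS_NO_INSTR
/* RCU is watching wherever these helpers run: the regular tracepoint is fine. */
#define trace(point)	trace_##point
#else
/* Older arches: the _rcuidle tracepoints are not NMI-safe, so skip NMI context. */
#define trace(point)	if (!in_nmi()) trace_##point##_rcuidle
#endif

/*
 * A call site such as:
 *	trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
 * therefore expands to either:
 *	trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
 * or:
 *	if (!in_nmi()) trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
 */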
Diffstat (limited to 'kernel/trace/trace_preemptirq.c')
-rw-r--r--	kernel/trace/trace_preemptirq.c	26 ++++++++++++++++++--------
1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index 629f2854e12b..f992444a0b1f 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -20,6 +20,20 @@
 static DEFINE_PER_CPU(int, tracing_irq_cpu);
 
 /*
+ * Use regular trace points on architectures that implement noinstr
+ * tooling: these calls will only happen with RCU enabled, which can
+ * use a regular tracepoint.
+ *
+ * On older architectures, use the rcuidle tracing methods (which
+ * aren't NMI-safe - so exclude NMI contexts):
+ */
+#ifdef CONFIG_ARCH_WANTS_NO_INSTR
+#define trace(point)	trace_##point
+#else
+#define trace(point)	if (!in_nmi()) trace_##point##_rcuidle
+#endif
+
+/*
  * Like trace_hardirqs_on() but without the lockdep invocation. This is
  * used in the low level entry code where the ordering vs. RCU is important
  * and lockdep uses a staged approach which splits the lockdep hardirq
@@ -28,8 +42,7 @@ static DEFINE_PER_CPU(int, tracing_irq_cpu);
 void trace_hardirqs_on_prepare(void)
 {
 	if (this_cpu_read(tracing_irq_cpu)) {
-		if (!in_nmi())
-			trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
+		trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
 		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
 		this_cpu_write(tracing_irq_cpu, 0);
 	}
@@ -40,8 +53,7 @@ NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
 void trace_hardirqs_on(void)
 {
 	if (this_cpu_read(tracing_irq_cpu)) {
-		if (!in_nmi())
-			trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+		trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
 		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
 		this_cpu_write(tracing_irq_cpu, 0);
 	}
@@ -63,8 +75,7 @@ void trace_hardirqs_off_finish(void)
 	if (!this_cpu_read(tracing_irq_cpu)) {
 		this_cpu_write(tracing_irq_cpu, 1);
 		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
-		if (!in_nmi())
-			trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
+		trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
 	}
 }
@@ -78,8 +89,7 @@ void trace_hardirqs_off(void)
 	if (!this_cpu_read(tracing_irq_cpu)) {
 		this_cpu_write(tracing_irq_cpu, 1);
 		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
-		if (!in_nmi())
-			trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+		trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
 	}
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
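For reference, a hand-expanded sketch (assuming CONFIG_ARCH_WANTS_NO_INSTR=y)
of what the last hunk leaves in trace_hardirqs_off(); this only restates the
diff above with the trace() macro expanded, and the parts of the function that
this patch does not touch are elided.

void trace_hardirqs_off(void)
{
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		/*
		 * trace(irq_disable) expands to the plain tracepoint here,
		 * so it is no longer skipped in NMI context.
		 */
		trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
	}
	/* remainder of the function is untouched by this patch and elided */
}
EXPORT_SYMBOL(trace_hardirqs_off);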