author     Steven Rostedt (VMware) <rostedt@goodmis.org>  2018-08-08 21:28:05 -0400
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>  2018-08-10 15:12:00 -0400
commit     3f1756dc210e5abb37121da3e7c10d65920f6ec0
tree       9adb16d03c5e6f1de6d1fc765da1d5aa0590e78e  /kernel/trace/trace_preemptirq.c
parent     f27107fa20ad531ace5fd580473ff8dd0c6b9ca9
tracing: More reverting of "tracing: Centralize preemptirq tracepoints and unify their usage"
Joel Fernandes created a nice patch that cleaned up the duplicate hooks used by lockdep and the irqsoff latency tracer. It made both use tracepoints. But the latency tracer triggers warnings when tracepoints are used to call into its routines, mainly because they can be called from NMI context. If that happens, SRCU may not work properly, because on some architectures SRCU is not safe to be called in both NMI and non-NMI context.

This is a partial revert of the cleanup patch c3bc8fd637a9 ("tracing: Centralize preemptirq tracepoints and unify their usage") that adds back the direct calls into the latency tracer. It also only calls the trace events when not in NMI.

Link: http://lkml.kernel.org/r/20180809210654.622445925@goodmis.org
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Fixes: c3bc8fd637a9 ("tracing: Centralize preemptirq tracepoints and unify their usage")
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
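For illustration, below is a minimal, self-contained userspace sketch of the pattern this patch reinstates: the latency tracer hook is always invoked directly, while the SRCU-backed tracepoint is skipped when running in NMI context. Every name here is a stand-in stub for the kernel symbol of the same shape; only the ordering and the !in_nmi() guard mirror the real code in the diff that follows.

	/*
	 * Hypothetical userspace sketch of the NMI guard pattern.
	 * in_nmi(), the tracepoint, and the tracer hook are stubs;
	 * this models the call pattern, not the kernel implementation.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	static bool nmi_context;	/* stand-in for the kernel's in_nmi() state */

	static bool in_nmi(void)
	{
		return nmi_context;
	}

	/* Stand-in for the SRCU-backed tracepoint: not safe from NMI. */
	static void trace_irq_enable_rcuidle(void)
	{
		puts("tracepoint: irq_enable");
	}

	/* Stand-in for the direct latency-tracer hook: safe from NMI. */
	static void tracer_hardirqs_on(void)
	{
		puts("latency tracer: hardirqs on");
	}

	static void trace_hardirqs_on(void)
	{
		if (!in_nmi())			/* skip the tracepoint inside an NMI */
			trace_irq_enable_rcuidle();
		tracer_hardirqs_on();		/* always call the tracer directly */
	}

	int main(void)
	{
		trace_hardirqs_on();		/* normal context: both paths fire */
		nmi_context = true;
		trace_hardirqs_on();		/* NMI context: tracepoint suppressed */
		return 0;
	}

Note also, in the hunks below, that the disable paths call the direct tracer hook before the tracepoint while the enable paths do the reverse, presumably so the tracepoint overhead falls inside the measured irqs-off/preempt-off window rather than shortening it.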
Diffstat (limited to 'kernel/trace/trace_preemptirq.c')
-rw-r--r--  kernel/trace/trace_preemptirq.c  25
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index fa656b25f427..71f553cceb3c 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -9,6 +9,7 @@
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/ftrace.h>
+#include "trace.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/preemptirq.h>
@@ -20,7 +21,9 @@ static DEFINE_PER_CPU(int, tracing_irq_cpu);
 void trace_hardirqs_on(void)
 {
 	if (this_cpu_read(tracing_irq_cpu)) {
-		trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+		if (!in_nmi())
+			trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
 		this_cpu_write(tracing_irq_cpu, 0);
 	}
 
@@ -32,7 +35,9 @@ void trace_hardirqs_off(void)
 {
 	if (!this_cpu_read(tracing_irq_cpu)) {
 		this_cpu_write(tracing_irq_cpu, 1);
-		trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
+		if (!in_nmi())
+			trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
 	}
 
 	lockdep_hardirqs_off(CALLER_ADDR0);
@@ -42,7 +47,9 @@ EXPORT_SYMBOL(trace_hardirqs_off);
 __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
 {
 	if (this_cpu_read(tracing_irq_cpu)) {
-		trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
+		if (!in_nmi())
+			trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
+		tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
 		this_cpu_write(tracing_irq_cpu, 0);
 	}
 
@@ -54,7 +61,9 @@ __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
 {
 	if (!this_cpu_read(tracing_irq_cpu)) {
 		this_cpu_write(tracing_irq_cpu, 1);
-		trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+		tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
+		if (!in_nmi())
+			trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
 	}
 
 	lockdep_hardirqs_off(CALLER_ADDR0);
@@ -66,11 +75,15 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
-	trace_preempt_enable_rcuidle(a0, a1);
+	if (!in_nmi())
+		trace_preempt_enable_rcuidle(a0, a1);
+	tracer_preempt_on(a0, a1);
 }
 
 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
-	trace_preempt_disable_rcuidle(a0, a1);
+	if (!in_nmi())
+		trace_preempt_disable_rcuidle(a0, a1);
+	tracer_preempt_off(a0, a1);
 }
 #endif