author    Frederic Weisbecker <fweisbec@gmail.com>    2009-11-22 05:26:55 +0100
committer Ingo Molnar <mingo@elte.hu>                 2009-11-22 09:03:42 +0100
commit    ce71b9df8893ec954e56c5979df6da274f20f65e (patch)
tree      76e8a5e33393c2f4fca4083628fc142dcbb55250 /kernel/trace
parent    e25613683bd5c46d3e8c8ae6416dccc9f357dcdc (diff)
tracing: Use the perf recursion protection from trace event
When we commit a trace to perf, we first check if we are recursing in the same buffer so that we don't mess up the buffer with a recursing trace. But later on, we do the same check from perf to avoid commit recursion. The recursion check is desired early, before we touch the buffer, but we want to do this check only once.

So export the recursion protection from perf and use it from the trace events before submitting a trace.

v2: Put appropriate Reported-by tag

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Jason Baron <jbaron@redhat.com>
LKML-Reference: <1258864015-10579-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
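For illustration, a minimal sketch of the submission path this patch converges on, pieced together from the hunks below; the function name my_event_profile_func and the elided fill/submit step are hypothetical placeholders, while the perf_swevent_get/put_recursion_context() calls and the label ordering mirror what the diff actually adds:

    /* Sketch only: perf's recursion protection is taken once, up front,
     * before the per-cpu buffer is touched at all. */
    static void my_event_profile_func(void)
    {
            unsigned long flags;
            char *trace_buf, *raw_data;
            int *recursion;

            /* Disable irqs: protects the per-cpu buffer and the rcu read side */
            local_irq_save(flags);

            /* A non-zero return means perf is already recursing in this context */
            if (perf_swevent_get_recursion_context(&recursion))
                    goto end_recursion;

            if (in_nmi())
                    trace_buf = rcu_dereference(perf_trace_buf_nmi);
            else
                    trace_buf = rcu_dereference(perf_trace_buf);
            if (!trace_buf)
                    goto end;

            /* The buffer is now a plain per-cpu char array, no recursion field */
            raw_data = per_cpu_ptr(trace_buf, smp_processor_id());

            /* ... build the trace entry in raw_data, then call perf_tp_event() ... */

    end:
            perf_swevent_put_recursion_context(recursion);
    end_recursion:
            local_irq_restore(flags);
    }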
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace_event_profile.c  14
-rw-r--r--  kernel/trace/trace_kprobe.c         48
-rw-r--r--  kernel/trace/trace_syscalls.c       47
3 files changed, 43 insertions(+), 66 deletions(-)
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index e0d351b01f5a..d9c60f80aa0d 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -9,31 +9,33 @@
#include "trace.h"
-struct perf_trace_buf *perf_trace_buf;
+char *perf_trace_buf;
EXPORT_SYMBOL_GPL(perf_trace_buf);
-struct perf_trace_buf *perf_trace_buf_nmi;
+char *perf_trace_buf_nmi;
EXPORT_SYMBOL_GPL(perf_trace_buf_nmi);
+typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t;
+
/* Count the events in use (per event id, not per instance) */
static int total_profile_count;
static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
- struct perf_trace_buf *buf;
+ char *buf;
int ret = -ENOMEM;
if (atomic_inc_return(&event->profile_count))
return 0;
if (!total_profile_count) {
- buf = alloc_percpu(struct perf_trace_buf);
+ buf = (char *)alloc_percpu(perf_trace_t);
if (!buf)
goto fail_buf;
rcu_assign_pointer(perf_trace_buf, buf);
- buf = alloc_percpu(struct perf_trace_buf);
+ buf = (char *)alloc_percpu(perf_trace_t);
if (!buf)
goto fail_buf_nmi;
@@ -79,7 +81,7 @@ int ftrace_profile_enable(int event_id)
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
- struct perf_trace_buf *buf, *nmi_buf;
+ char *buf, *nmi_buf;
if (!atomic_add_negative(-1, &event->profile_count))
return;
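A note on the perf_trace_t typedef above (my reading, not stated in the patch): alloc_percpu() takes a type name, so wrapping char[FTRACE_MAX_PROFILE_SIZE] in a typedef lets each per-cpu slot be a full profile-sized byte buffer, and the recursion counter that used to live in struct perf_trace_buf disappears because perf now tracks recursion itself. A sketch of the resulting allocation, with alloc_trace_buf as a hypothetical helper:

    /* perf_trace_t names a char[FTRACE_MAX_PROFILE_SIZE] so that
     * alloc_percpu() sizes each per-cpu slot to one full buffer. */
    typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t;

    static int alloc_trace_buf(void)
    {
            char *buf = (char *)alloc_percpu(perf_trace_t);

            if (!buf)
                    return -ENOMEM;
            /* Publish for the rcu read side used by the probe handlers */
            rcu_assign_pointer(perf_trace_buf, buf);
            return 0;
    }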
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 3696476f307d..22e6f68b05b3 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1208,11 +1208,12 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
struct ftrace_event_call *call = &tp->call;
struct kprobe_trace_entry *entry;
- struct perf_trace_buf *trace_buf;
struct trace_entry *ent;
int size, __size, i, pc, __cpu;
unsigned long irq_flags;
+ char *trace_buf;
char *raw_data;
+ int *recursion;
pc = preempt_count();
__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
@@ -1227,6 +1228,10 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
* This also protects the rcu read side
*/
local_irq_save(irq_flags);
+
+ if (perf_swevent_get_recursion_context(&recursion))
+ goto end_recursion;
+
__cpu = smp_processor_id();
if (in_nmi())
@@ -1237,18 +1242,7 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
if (!trace_buf)
goto end;
- trace_buf = per_cpu_ptr(trace_buf, __cpu);
-
- if (trace_buf->recursion++)
- goto end_recursion;
-
- /*
- * Make recursion update visible before entering perf_tp_event
- * so that we protect from perf recursions.
- */
- barrier();
-
- raw_data = trace_buf->buf;
+ raw_data = per_cpu_ptr(trace_buf, __cpu);
/* Zero dead bytes from alignment to avoid buffer leak to userspace */
*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -1263,9 +1257,9 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
perf_tp_event(call->id, entry->ip, 1, entry, size);
-end_recursion:
- trace_buf->recursion--;
end:
+ perf_swevent_put_recursion_context(recursion);
+end_recursion:
local_irq_restore(irq_flags);
return 0;
@@ -1278,10 +1272,11 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
struct ftrace_event_call *call = &tp->call;
struct kretprobe_trace_entry *entry;
- struct perf_trace_buf *trace_buf;
struct trace_entry *ent;
int size, __size, i, pc, __cpu;
unsigned long irq_flags;
+ char *trace_buf;
+ int *recursion;
char *raw_data;
pc = preempt_count();
@@ -1297,6 +1292,10 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
* This also protects the rcu read side
*/
local_irq_save(irq_flags);
+
+ if (perf_swevent_get_recursion_context(&recursion))
+ goto end_recursion;
+
__cpu = smp_processor_id();
if (in_nmi())
@@ -1307,18 +1306,7 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
if (!trace_buf)
goto end;
- trace_buf = per_cpu_ptr(trace_buf, __cpu);
-
- if (trace_buf->recursion++)
- goto end_recursion;
-
- /*
- * Make recursion update visible before entering perf_tp_event
- * so that we protect from perf recursions.
- */
- barrier();
-
- raw_data = trace_buf->buf;
+ raw_data = per_cpu_ptr(trace_buf, __cpu);
/* Zero dead bytes from alignment to avoid buffer leak to userspace */
*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -1334,9 +1322,9 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
-end_recursion:
- trace_buf->recursion--;
end:
+ perf_swevent_put_recursion_context(recursion);
+end_recursion:
local_irq_restore(irq_flags);
return 0;
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 51213b0aa81b..0bb934875263 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -477,10 +477,11 @@ static int sys_prof_refcount_exit;
static void prof_syscall_enter(struct pt_regs *regs, long id)
{
struct syscall_metadata *sys_data;
- struct perf_trace_buf *trace_buf;
struct syscall_trace_enter *rec;
unsigned long flags;
+ char *trace_buf;
char *raw_data;
+ int *recursion;
int syscall_nr;
int size;
int cpu;
@@ -505,6 +506,9 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
/* Protect the per cpu buffer, begin the rcu read side */
local_irq_save(flags);
+ if (perf_swevent_get_recursion_context(&recursion))
+ goto end_recursion;
+
cpu = smp_processor_id();
if (in_nmi())
@@ -515,18 +519,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
if (!trace_buf)
goto end;
- trace_buf = per_cpu_ptr(trace_buf, cpu);
-
- if (trace_buf->recursion++)
- goto end_recursion;
-
- /*
- * Make recursion update visible before entering perf_tp_event
- * so that we protect from perf recursions.
- */
- barrier();
-
- raw_data = trace_buf->buf;
+ raw_data = per_cpu_ptr(trace_buf, cpu);
/* zero the dead bytes from align to not leak stack to user */
*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -539,9 +532,9 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
(unsigned long *)&rec->args);
perf_tp_event(sys_data->enter_id, 0, 1, rec, size);
-end_recursion:
- trace_buf->recursion--;
end:
+ perf_swevent_put_recursion_context(recursion);
+end_recursion:
local_irq_restore(flags);
}
@@ -588,10 +581,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
{
struct syscall_metadata *sys_data;
struct syscall_trace_exit *rec;
- struct perf_trace_buf *trace_buf;
unsigned long flags;
int syscall_nr;
+ char *trace_buf;
char *raw_data;
+ int *recursion;
int size;
int cpu;
@@ -617,6 +611,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
/* Protect the per cpu buffer, begin the rcu read side */
local_irq_save(flags);
+
+ if (perf_swevent_get_recursion_context(&recursion))
+ goto end_recursion;
+
cpu = smp_processor_id();
if (in_nmi())
@@ -627,18 +625,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
if (!trace_buf)
goto end;
- trace_buf = per_cpu_ptr(trace_buf, cpu);
-
- if (trace_buf->recursion++)
- goto end_recursion;
-
- /*
- * Make recursion update visible before entering perf_tp_event
- * so that we protect from perf recursions.
- */
- barrier();
-
- raw_data = trace_buf->buf;
+ raw_data = per_cpu_ptr(trace_buf, cpu);
/* zero the dead bytes from align to not leak stack to user */
*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -652,9 +639,9 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
perf_tp_event(sys_data->exit_id, 0, 1, rec, size);
-end_recursion:
- trace_buf->recursion--;
end:
+ perf_swevent_put_recursion_context(recursion);
+end_recursion:
local_irq_restore(flags);
}