-rw-r--r--  Documentation/trace/events.txt                18
-rw-r--r--  Documentation/trace/ftrace.txt                23
-rw-r--r--  arch/x86/kernel/ftrace.c                       4
-rw-r--r--  fs/tracefs/inode.c                             6
-rw-r--r--  include/linux/ftrace.h                        11
-rw-r--r--  include/linux/ring_buffer.h                    4
-rw-r--r--  include/linux/trace_events.h                  20
-rw-r--r--  include/linux/tracepoint.h                    39
-rw-r--r--  include/trace/define_trace.h                   2
-rw-r--r--  include/trace/events/gpio.h                    4
-rw-r--r--  include/trace/perf.h                         258
-rw-r--r--  include/trace/trace_events.h                 258
-rw-r--r--  kernel/trace/Kconfig                           7
-rw-r--r--  kernel/trace/blktrace.c                       11
-rw-r--r--  kernel/trace/ftrace.c                        197
-rw-r--r--  kernel/trace/ring_buffer.c                    20
-rw-r--r--  kernel/trace/ring_buffer_benchmark.c          79
-rw-r--r--  kernel/trace/trace.c                         454
-rw-r--r--  kernel/trace/trace.h                         168
-rw-r--r--  kernel/trace/trace_benchmark.c                 2
-rw-r--r--  kernel/trace/trace_branch.c                   15
-rw-r--r--  kernel/trace/trace_events.c                  506
-rw-r--r--  kernel/trace/trace_events_filter.c             8
-rw-r--r--  kernel/trace/trace_export.c                    2
-rw-r--r--  kernel/trace/trace_functions_graph.c          63
-rw-r--r--  kernel/trace/trace_irqsoff.c                 106
-rw-r--r--  kernel/trace/trace_kdb.c                       8
-rw-r--r--  kernel/trace/trace_mmiotrace.c                 4
-rw-r--r--  kernel/trace/trace_output.c                   97
-rw-r--r--  kernel/trace/trace_output.h                    4
-rw-r--r--  kernel/trace/trace_printk.c                   14
-rw-r--r--  kernel/trace/trace_probe.h                     8
-rw-r--r--  kernel/trace/trace_sched_wakeup.c            120
-rw-r--r--  kernel/trace/trace_stack.c                    92
-rw-r--r--  kernel/trace/trace_syscalls.c                  3
-rw-r--r--  kernel/tracepoint.c                           61
-rw-r--r--  samples/trace_events/trace-events-sample.h     6
-rw-r--r--  scripts/recordmcount.c                        26
-rw-r--r--  scripts/recordmcount.h                         2
39 files changed, 1787 insertions, 943 deletions
diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
index 75d25a1d6e42..c010be8c85d7 100644
--- a/Documentation/trace/events.txt
+++ b/Documentation/trace/events.txt
@@ -288,6 +288,24 @@ prev_pid == 0
# cat sched_wakeup/filter
common_pid == 0
+5.4 PID filtering
+-----------------
+
+The set_event_pid file, located in the same directory as the top events
+directory, filters all events so that only tasks with a PID listed in the
+set_event_pid file are traced.
+
+# cd /sys/kernel/debug/tracing
+# echo $$ > set_event_pid
+# echo 1 > events/enable
+
+This will only trace events for the current task.
+
+To add more PIDs without losing the PIDs already included, use '>>'.
+
+# echo 123 244 1 >> set_event_pid
+
+
6. Event triggers
=================
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index ef621d34ba5b..f52f297cb406 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -204,6 +204,12 @@ of ftrace. Here is a list of some of the key files:
Have the function tracer only trace a single thread.
+ set_event_pid:
+
+ Have the events only trace a task with a PID listed in this file.
+ Note, sched_switch and sched_wakeup will also trace events
+ listed in this file.
+
set_graph_function:
Set a "trigger" function where tracing should start
@@ -2437,6 +2443,23 @@ The following commands are supported:
echo '!writeback*:mod:ext3' >> set_ftrace_filter
+ The mod command supports module globbing. Disable tracing for all
+ functions except those in a specific module:
+
+ echo '!*:mod:!ext3' >> set_ftrace_filter
+
+ Disable tracing for all modules, but still trace kernel:
+
+ echo '!*:mod:*' >> set_ftrace_filter
+
+ Enable filter only for kernel:
+
+ echo '*write*:mod:!*' >> set_ftrace_filter
+
+ Enable filter for module globbing:
+
+ echo '*write*:mod:*snd*' >> set_ftrace_filter
+
- traceon/traceoff
These commands turn tracing on and off when the specified
functions are hit. The parameter determines how many times the
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 8b7b0a51e742..311bcf338f07 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -556,6 +556,7 @@ void ftrace_replace_code(int enable)
run_sync();
report = "updating code";
+ count = 0;
for_ftrace_rec_iter(iter) {
rec = ftrace_rec_iter_record(iter);
@@ -563,11 +564,13 @@ void ftrace_replace_code(int enable)
ret = add_update(rec, enable);
if (ret)
goto remove_breakpoints;
+ count++;
}
run_sync();
report = "removing breakpoints";
+ count = 0;
for_ftrace_rec_iter(iter) {
rec = ftrace_rec_iter_record(iter);
@@ -575,6 +578,7 @@ void ftrace_replace_code(int enable)
ret = finish_update(rec, enable);
if (ret)
goto remove_breakpoints;
+ count++;
}
run_sync();
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index cbc8d5d2755a..c66f2423e1f5 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -340,8 +340,12 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
dput(dentry);
dentry = ERR_PTR(-EEXIST);
}
- if (IS_ERR(dentry))
+
+ if (IS_ERR(dentry)) {
mutex_unlock(&parent->d_inode->i_mutex);
+ simple_release_fs(&tracefs_mount, &tracefs_mount_count);
+ }
+
return dentry;
}
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 6cd8c0ee4b6f..eae6548efbf0 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -263,7 +263,18 @@ static inline void ftrace_kill(void) { }
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_STACK_TRACER
+
+#define STACK_TRACE_ENTRIES 500
+
+struct stack_trace;
+
+extern unsigned stack_trace_index[];
+extern struct stack_trace stack_trace_max;
+extern unsigned long stack_trace_max_size;
+extern arch_spinlock_t stack_trace_max_lock;
+
extern int stack_tracer_enabled;
+void stack_trace_print(void);
int
stack_trace_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index e2c13cd863bd..4acc552e9279 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -154,8 +154,8 @@ ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
}
#endif
-int ring_buffer_empty(struct ring_buffer *buffer);
-int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
+bool ring_buffer_empty(struct ring_buffer *buffer);
+bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_disable(struct ring_buffer *buffer);
void ring_buffer_record_enable(struct ring_buffer *buffer);
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index ed27917cabc9..429fdfc3baf5 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -168,13 +168,12 @@ struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
int type, unsigned long len,
unsigned long flags, int pc);
-void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc);
-void trace_buffer_unlock_commit(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit(struct trace_array *tr,
+ struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc);
-void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit_regs(struct trace_array *tr,
+ struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
struct pt_regs *regs);
@@ -329,6 +328,7 @@ enum {
EVENT_FILE_FL_SOFT_DISABLED_BIT,
EVENT_FILE_FL_TRIGGER_MODE_BIT,
EVENT_FILE_FL_TRIGGER_COND_BIT,
+ EVENT_FILE_FL_PID_FILTER_BIT,
};
/*
@@ -342,6 +342,7 @@ enum {
* tracepoint may be enabled)
* TRIGGER_MODE - When set, invoke the triggers associated with the event
* TRIGGER_COND - When set, one or more triggers has an associated filter
+ * PID_FILTER - When set, the event is filtered based on pid
*/
enum {
EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
@@ -352,6 +353,7 @@ enum {
EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
+ EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
};
struct trace_event_file {
@@ -430,6 +432,8 @@ extern enum event_trigger_type event_triggers_call(struct trace_event_file *file
extern void event_triggers_post_call(struct trace_event_file *file,
enum event_trigger_type tt);
+bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
+
/**
* trace_trigger_soft_disabled - do triggers and test if soft disabled
* @file: The file pointer of the event to test
@@ -449,6 +453,8 @@ trace_trigger_soft_disabled(struct trace_event_file *file)
event_triggers_call(file, NULL);
if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
return true;
+ if (eflags & EVENT_FILE_FL_PID_FILTER)
+ return trace_event_ignore_this_pid(file);
}
return false;
}
@@ -508,7 +514,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
enum event_trigger_type tt = ETT_NONE;
if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
- trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
if (tt)
event_triggers_post_call(file, tt);
@@ -540,7 +546,7 @@ event_trigger_unlock_commit_regs(struct trace_event_file *file,
enum event_trigger_type tt = ETT_NONE;
if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
- trace_buffer_unlock_commit_regs(buffer, event,
+ trace_buffer_unlock_commit_regs(file->tr, buffer, event,
irq_flags, pc, regs);
if (tt)
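As a sketch (not part of the patch): both commit helpers above now take the
struct trace_array that owns the buffer, so the per-instance trace_flags decide
whether a stack trace is recorded on commit. A caller following the usual
reserve/fill/commit pattern would look roughly like the following, where 'file'
is a struct trace_event_file and 'my_entry'/'event_type' are illustrative names:

	struct ring_buffer *buffer;
	struct ring_buffer_event *event;
	struct my_entry *entry;			/* illustrative entry type */
	unsigned long irq_flags;
	int pc;

	local_save_flags(irq_flags);
	pc = preempt_count();

	event = trace_event_buffer_lock_reserve(&buffer, file, event_type,
						sizeof(*entry), irq_flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	/* ... fill in *entry ... */

	/* file->tr picks whose trace_flags (e.g. TRACE_ITER_STACKTRACE) apply */
	trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);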
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index a5f7f3ecafa3..696a339c592c 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -26,6 +26,7 @@ struct notifier_block;
struct tracepoint_func {
void *func;
void *data;
+ int prio;
};
struct tracepoint {
@@ -42,9 +43,14 @@ struct trace_enum_map {
unsigned long enum_value;
};
+#define TRACEPOINT_DEFAULT_PRIO 10
+
extern int
tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
extern int
+tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data,
+ int prio);
+extern int
tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data);
extern void
for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
@@ -111,7 +117,18 @@ extern void syscall_unregfunc(void);
#define TP_ARGS(args...) args
#define TP_CONDITION(args...) args
-#ifdef CONFIG_TRACEPOINTS
+/*
+ * Individual subsystems may have a separate configuration to
+ * enable their tracepoints. By default, this file will create
+ * the tracepoints if CONFIG_TRACEPOINTS is defined. If a subsystem
+ * wants to be able to disable its tracepoints from being created
+ * it can define NOTRACE before including the tracepoint headers.
+ */
+#if defined(CONFIG_TRACEPOINTS) && !defined(NOTRACE)
+#define TRACEPOINTS_ENABLED
+#endif
+
+#ifdef TRACEPOINTS_ENABLED
/*
* it_func[0] is never NULL because there is at least one element in the array
@@ -167,10 +184,11 @@ extern void syscall_unregfunc(void);
* structure. Force alignment to the same alignment as the section start.
*
* When lockdep is enabled, we make sure to always do the RCU portions of
- * the tracepoint code, regardless of whether tracing is on or we match the
- * condition. This lets us find RCU issues triggered with tracepoints even
- * when this tracepoint is off. This code has no purpose other than poking
- * RCU a bit.
+ * the tracepoint code, regardless of whether tracing is on. However,
+ * don't check if the condition is false, due to interaction with idle
+ * instrumentation. This lets us find RCU issues triggered with tracepoints
+ * even when this tracepoint is off. This code has no purpose other than
+ * poking RCU a bit.
*/
#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
extern struct tracepoint __tracepoint_##name; \
@@ -196,6 +214,13 @@ extern void syscall_unregfunc(void);
(void *)probe, data); \
} \
static inline int \
+ register_trace_prio_##name(void (*probe)(data_proto), void *data,\
+ int prio) \
+ { \
+ return tracepoint_probe_register_prio(&__tracepoint_##name, \
+ (void *)probe, data, prio); \
+ } \
+ static inline int \
unregister_trace_##name(void (*probe)(data_proto), void *data) \
{ \
return tracepoint_probe_unregister(&__tracepoint_##name,\
@@ -234,7 +259,7 @@ extern void syscall_unregfunc(void);
#define EXPORT_TRACEPOINT_SYMBOL(name) \
EXPORT_SYMBOL(__tracepoint_##name)
-#else /* !CONFIG_TRACEPOINTS */
+#else /* !TRACEPOINTS_ENABLED */
#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
static inline void trace_##name(proto) \
{ } \
@@ -266,7 +291,7 @@ extern void syscall_unregfunc(void);
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
#define EXPORT_TRACEPOINT_SYMBOL(name)
-#endif /* CONFIG_TRACEPOINTS */
+#endif /* TRACEPOINTS_ENABLED */
#ifdef CONFIG_TRACING
/**
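A sketch of how the new priority interface might be used (not part of the
patch): for a hypothetical event "my_event" with an (int arg) prototype, the
generated register_trace_prio_my_event() helper passes the priority through to
tracepoint_probe_register_prio(). Based on the insertion logic in
kernel/tracepoint.c, probes with a larger prio value are placed ahead of
lower-priority ones in the callback array and therefore run first:

	/* illustrative probe; signature must match the event's data_proto */
	static void my_probe(void *data, int arg)
	{
		/* runs before probes registered at TRACEPOINT_DEFAULT_PRIO */
	}

	ret = register_trace_prio_my_event(my_probe, NULL,
					   TRACEPOINT_DEFAULT_PRIO + 1);
	if (ret)
		pr_warn("could not register my_event probe\n");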
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 09b3880105a9..2d8639ea64d5 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -86,7 +86,7 @@
#undef DECLARE_TRACE
#define DECLARE_TRACE(name, proto, args)
-#ifdef CONFIG_EVENT_TRACING
+#ifdef TRACEPOINTS_ENABLED
#include <trace/trace_events.h>
#include <trace/perf.h>
#endif
diff --git a/include/trace/events/gpio.h b/include/trace/events/gpio.h
index 927a8ad9e51b..2da73b92d47e 100644
--- a/include/trace/events/gpio.h
+++ b/include/trace/events/gpio.h
@@ -1,6 +1,10 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM gpio
+#ifndef CONFIG_TRACING_EVENTS_GPIO
+#define NOTRACE
+#endif
+
#if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GPIO_H
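The gpio header above illustrates the NOTRACE pattern that the tracepoint.h
change enables. As a sketch only, a hypothetical subsystem "foo" guarded by an
invented CONFIG_TRACING_EVENTS_FOO option would follow the same shape:

	/* include/trace/events/foo.h -- illustrative only */
	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM foo

	/* Compile the foo tracepoints out unless the (hypothetical)
	 * CONFIG_TRACING_EVENTS_FOO option is set; defining NOTRACE keeps
	 * TRACEPOINTS_ENABLED undefined in <linux/tracepoint.h>.
	 */
	#ifndef CONFIG_TRACING_EVENTS_FOO
	#define NOTRACE
	#endif

	#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_FOO_H

	#include <linux/tracepoint.h>

	TRACE_EVENT(foo_event,
		TP_PROTO(int value),
		TP_ARGS(value),
		TP_STRUCT__entry(__field(int, value)),
		TP_fast_assign(__entry->value = value;),
		TP_printk("value=%d", __entry->value)
	);

	#endif /* _TRACE_FOO_H */

	/* This part must be outside protection */
	#include <trace/define_trace.h>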
diff --git a/include/trace/perf.h b/include/trace/perf.h
index 1b5443cebedc..26486fcd74ce 100644
--- a/include/trace/perf.h
+++ b/include/trace/perf.h
@@ -1,261 +1,3 @@
-/*
- * Stage 4 of the trace events.
- *
- * Override the macros in <trace/trace_events.h> to include the following:
- *
- * For those macros defined with TRACE_EVENT:
- *
- * static struct trace_event_call event_<call>;
- *
- * static void trace_event_raw_event_<call>(void *__data, proto)
- * {
- * struct trace_event_file *trace_file = __data;
- * struct trace_event_call *event_call = trace_file->event_call;
- * struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
- * unsigned long eflags = trace_file->flags;
- * enum event_trigger_type __tt = ETT_NONE;
- * struct ring_buffer_event *event;
- * struct trace_event_raw_<call> *entry; <-- defined in stage 1
- * struct ring_buffer *buffer;
- * unsigned long irq_flags;
- * int __data_size;
- * int pc;
- *
- * if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
- * if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
- * event_triggers_call(trace_file, NULL);
- * if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
- * return;
- * }
- *
- * local_save_flags(irq_flags);
- * pc = preempt_count();
- *
- * __data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
- *
- * event = trace_event_buffer_lock_reserve(&buffer, trace_file,
- * event_<call>->event.type,
- * sizeof(*entry) + __data_size,
- * irq_flags, pc);
- * if (!event)
- * return;
- * entry = ring_buffer_event_data(event);
- *
- * { <assign>; } <-- Here we assign the entries by the __field and
- * __array macros.
- *
- * if (eflags & EVENT_FILE_FL_TRIGGER_COND)
- * __tt = event_triggers_call(trace_file, entry);
- *
- * if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
- * &trace_file->flags))
- * ring_buffer_discard_commit(buffer, event);
- * else if (!filter_check_discard(trace_file, entry, buffer, event))
- * trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
- *
- * if (__tt)
- * event_triggers_post_call(trace_file, __tt);
- * }
- *
- * static struct trace_event ftrace_event_type_<call> = {
- * .trace = trace_raw_output_<call>, <-- stage 2
- * };
- *
- * static char print_fmt_<call>[] = <TP_printk>;
- *
- * static struct trace_event_class __used event_class_<template> = {
- * .system = "<system>",
- * .define_fields = trace_event_define_fields_<call>,
- * .fields = LIST_HEAD_INIT(event_class_##call.fields),
- * .raw_init = trace_event_raw_init,
- * .probe = trace_event_raw_event_##call,
- * .reg = trace_event_reg,
- * };
- *
- * static struct trace_event_call event_<call> = {
- * .class = event_class_<template>,
- * {
- * .tp = &__tracepoint_<call>,
- * },
- * .event = &ftrace_event_type_<call>,
- * .print_fmt = print_fmt_<call>,
- * .flags = TRACE_EVENT_FL_TRACEPOINT,
- * };
- * // its only safe to use pointers when doing linker tricks to
- * // create an array.
- * static struct trace_event_call __used
- * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
- *
- */
-
-#ifdef CONFIG_PERF_EVENTS
-
-#define _TRACE_PERF_PROTO(call, proto) \
- static notrace void \
- perf_trace_##call(void *__data, proto);
-
-#define _TRACE_PERF_INIT(call) \
- .perf_probe = perf_trace_##call,
-
-#else
-#define _TRACE_PERF_PROTO(call, proto)
-#define _TRACE_PERF_INIT(call)
-#endif /* CONFIG_PERF_EVENTS */
-
-#undef __entry
-#define __entry entry
-
-#undef __field
-#define __field(type, item)
-
-#undef __field_struct
-#define __field_struct(type, item)
-
-#undef __array
-#define __array(type, item, len)
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) \
- __entry->__data_loc_##item = __data_offsets.item;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __assign_str
-#define __assign_str(dst, src) \
- strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __assign_bitmask
-#define __assign_bitmask(dst, src, nr_bits) \
- memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
-
-#undef TP_fast_assign
-#define TP_fast_assign(args...) args
-
-#undef __perf_addr
-#define __perf_addr(a) (a)
-
-#undef __perf_count
-#define __perf_count(c) (c)
-
-#undef __perf_task
-#define __perf_task(t) (t)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
- \
-static notrace void \
-trace_event_raw_event_##call(void *__data, proto) \
-{ \
- struct trace_event_file *trace_file = __data; \
- struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
- struct trace_event_buffer fbuffer; \
- struct trace_event_raw_##call *entry; \
- int __data_size; \
- \
- if (trace_trigger_soft_disabled(trace_file)) \
- return; \
- \
- __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
- \
- entry = trace_event_buffer_reserve(&fbuffer, trace_file, \
- sizeof(*entry) + __data_size); \
- \
- if (!entry) \
- return; \
- \
- tstruct \
- \
- { assign; } \
- \
- trace_event_buffer_commit(&fbuffer); \
-}
-/*
- * The ftrace_test_probe is compiled out, it is only here as a build time check
- * to make sure that if the tracepoint handling changes, the ftrace probe will
- * fail to compile unless it too is updated.
- */
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
-static inline void ftrace_test_probe_##call(void) \
-{ \
- check_trace_callback_type_##call(trace_event_raw_event_##template); \
-}
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#undef __entry
-#define __entry REC
-
-#undef __print_flags
-#undef __print_symbolic
-#undef __print_hex
-#undef __get_dynamic_array
-#undef __get_dynamic_array_len
-#undef __get_str
-#undef __get_bitmask
-#undef __print_array
-
-#undef TP_printk
-#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-_TRACE_PERF_PROTO(call, PARAMS(proto)); \
-static char print_fmt_##call[] = print; \
-static struct trace_event_class __used __refdata event_class_##call = { \
- .system = TRACE_SYSTEM_STRING, \
- .define_fields = trace_event_define_fields_##call, \
- .fields = LIST_HEAD_INIT(event_class_##call.fields),\
- .raw_init = trace_event_raw_init, \
- .probe = trace_event_raw_event_##call, \
- .reg = trace_event_reg, \
- _TRACE_PERF_INIT(call) \
-};
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
- \
-static struct trace_event_call __used event_##call = { \
- .class = &event_class_##template, \
- { \
- .tp = &__tracepoint_##call, \
- }, \
- .event.funcs = &trace_event_type_funcs_##template, \
- .print_fmt = print_fmt_##template, \
- .flags = TRACE_EVENT_FL_TRACEPOINT, \
-}; \
-static struct trace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
- \
-static char print_fmt_##call[] = print; \
- \
-static struct trace_event_call __used event_##call = { \
- .class = &event_class_##template, \
- { \
- .tp = &__tracepoint_##call, \
- }, \
- .event.funcs = &trace_event_type_funcs_##call, \
- .print_fmt = print_fmt_##call, \
- .flags = TRACE_EVENT_FL_TRACEPOINT, \
-}; \
-static struct trace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#undef TRACE_SYSTEM_VAR
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 43be3b0e44d3..de996cf61053 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -506,3 +506,261 @@ static inline notrace int trace_event_get_offsets_##call( \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+/*
+ * Stage 4 of the trace events.
+ *
+ * Override the macros in <trace/trace_events.h> to include the following:
+ *
+ * For those macros defined with TRACE_EVENT:
+ *
+ * static struct trace_event_call event_<call>;
+ *
+ * static void trace_event_raw_event_<call>(void *__data, proto)
+ * {
+ * struct trace_event_file *trace_file = __data;
+ * struct trace_event_call *event_call = trace_file->event_call;
+ * struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
+ * unsigned long eflags = trace_file->flags;
+ * enum event_trigger_type __tt = ETT_NONE;
+ * struct ring_buffer_event *event;
+ * struct trace_event_raw_<call> *entry; <-- defined in stage 1
+ * struct ring_buffer *buffer;
+ * unsigned long irq_flags;
+ * int __data_size;
+ * int pc;
+ *
+ * if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
+ * if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
+ * event_triggers_call(trace_file, NULL);
+ * if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
+ * return;
+ * }
+ *
+ * local_save_flags(irq_flags);
+ * pc = preempt_count();
+ *
+ * __data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
+ *
+ * event = trace_event_buffer_lock_reserve(&buffer, trace_file,
+ * event_<call>->event.type,
+ * sizeof(*entry) + __data_size,
+ * irq_flags, pc);
+ * if (!event)
+ * return;
+ * entry = ring_buffer_event_data(event);
+ *
+ * { <assign>; } <-- Here we assign the entries by the __field and
+ * __array macros.
+ *
+ * if (eflags & EVENT_FILE_FL_TRIGGER_COND)
+ * __tt = event_triggers_call(trace_file, entry);
+ *
+ * if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
+ * &trace_file->flags))
+ * ring_buffer_discard_commit(buffer, event);
+ * else if (!filter_check_discard(trace_file, entry, buffer, event))
+ * trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ *
+ * if (__tt)
+ * event_triggers_post_call(trace_file, __tt);
+ * }
+ *
+ * static struct trace_event ftrace_event_type_<call> = {
+ * .trace = trace_raw_output_<call>, <-- stage 2
+ * };
+ *
+ * static char print_fmt_<call>[] = <TP_printk>;
+ *
+ * static struct trace_event_class __used event_class_<template> = {
+ * .system = "<system>",
+ * .define_fields = trace_event_define_fields_<call>,
+ * .fields = LIST_HEAD_INIT(event_class_##call.fields),
+ * .raw_init = trace_event_raw_init,
+ * .probe = trace_event_raw_event_##call,
+ * .reg = trace_event_reg,
+ * };
+ *
+ * static struct trace_event_call event_<call> = {
+ * .class = event_class_<template>,
+ * {
+ * .tp = &__tracepoint_<call>,
+ * },
+ * .event = &ftrace_event_type_<call>,
+ * .print_fmt = print_fmt_<call>,
+ * .flags = TRACE_EVENT_FL_TRACEPOINT,
+ * };
+ * // it's only safe to use pointers when doing linker tricks to
+ * // create an array.
+ * static struct trace_event_call __used
+ * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
+ *
+ */
+
+#ifdef CONFIG_PERF_EVENTS
+
+#define _TRACE_PERF_PROTO(call, proto) \
+ static notrace void \
+ perf_trace_##call(void *__data, proto);
+
+#define _TRACE_PERF_INIT(call) \
+ .perf_probe = perf_trace_##call,
+
+#else
+#define _TRACE_PERF_PROTO(call, proto)
+#define _TRACE_PERF_INIT(call)
+#endif /* CONFIG_PERF_EVENTS */
+
+#undef __entry
+#define __entry entry
+
+#undef __field
+#define __field(type, item)
+
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+ __entry->__data_loc_##item = __data_offsets.item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __assign_str
+#define __assign_str(dst, src) \
+ strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __assign_bitmask
+#define __assign_bitmask(dst, src, nr_bits) \
+ memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef __perf_addr
+#define __perf_addr(a) (a)
+
+#undef __perf_count
+#define __perf_count(c) (c)
+
+#undef __perf_task
+#define __perf_task(t) (t)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+ \
+static notrace void \
+trace_event_raw_event_##call(void *__data, proto) \
+{ \
+ struct trace_event_file *trace_file = __data; \
+ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
+ struct trace_event_buffer fbuffer; \
+ struct trace_event_raw_##call *entry; \
+ int __data_size; \
+ \
+ if (trace_trigger_soft_disabled(trace_file)) \
+ return; \
+ \
+ __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
+ \
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file, \
+ sizeof(*entry) + __data_size); \
+ \
+ if (!entry) \
+ return; \
+ \
+ tstruct \
+ \
+ { assign; } \
+ \
+ trace_event_buffer_commit(&fbuffer); \
+}
+/*
+ * The ftrace_test_probe is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the ftrace probe will
+ * fail to compile unless it too is updated.
+ */
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+static inline void ftrace_test_probe_##call(void) \
+{ \
+ check_trace_callback_type_##call(trace_event_raw_event_##template); \
+}
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef __entry
+#define __entry REC
+
+#undef __print_flags
+#undef __print_symbolic
+#undef __print_hex
+#undef __get_dynamic_array
+#undef __get_dynamic_array_len
+#undef __get_str
+#undef __get_bitmask
+#undef __print_array
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+_TRACE_PERF_PROTO(call, PARAMS(proto)); \
+static char print_fmt_##call[] = print; \
+static struct trace_event_class __used __refdata event_class_##call = { \
+ .system = TRACE_SYSTEM_STRING, \
+ .define_fields = trace_event_define_fields_##call, \
+ .fields = LIST_HEAD_INIT(event_class_##call.fields),\
+ .raw_init = trace_event_raw_init, \
+ .probe = trace_event_raw_event_##call, \
+ .reg = trace_event_reg, \
+ _TRACE_PERF_INIT(call) \
+};
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+ \
+static struct trace_event_call __used event_##call = { \
+ .class = &event_class_##template, \
+ { \
+ .tp = &__tracepoint_##call, \
+ }, \
+ .event.funcs = &trace_event_type_funcs_##template, \
+ .print_fmt = print_fmt_##template, \
+ .flags = TRACE_EVENT_FL_TRACEPOINT, \
+}; \
+static struct trace_event_call __used \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
+ \
+static char print_fmt_##call[] = print; \
+ \
+static struct trace_event_call __used event_##call = { \
+ .class = &event_class_##template, \
+ { \
+ .tp = &__tracepoint_##call, \
+ }, \
+ .event.funcs = &trace_event_type_funcs_##call, \
+ .print_fmt = print_fmt_##call, \
+ .flags = TRACE_EVENT_FL_TRACEPOINT, \
+}; \
+static struct trace_event_call __used \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 1153c43428f3..8d6363f42169 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -635,6 +635,13 @@ config TRACE_ENUM_MAP_FILE
If unsure, say N
+config TRACING_EVENTS_GPIO
+ bool "Trace gpio events"
+ depends on GPIOLIB
+ default y
+ help
+ Enable tracing events for gpio subsystem
+
endif # FTRACE
endif # TRACING_SUPPORT
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index e3a26188b95e..a990824c8604 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -103,7 +103,7 @@ record_it:
memcpy((void *) t + sizeof(*t), data, len);
if (blk_tracer)
- trace_buffer_unlock_commit(buffer, event, 0, pc);
+ trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
}
}
@@ -278,7 +278,7 @@ record_it:
memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
if (blk_tracer) {
- trace_buffer_unlock_commit(buffer, event, 0, pc);
+ trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
return;
}
}
@@ -1340,6 +1340,7 @@ static const struct {
static enum print_line_t print_one_line(struct trace_iterator *iter,
bool classic)
{
+ struct trace_array *tr = iter->tr;
struct trace_seq *s = &iter->seq;
const struct blk_io_trace *t;
u16 what;
@@ -1348,7 +1349,7 @@ static enum print_line_t print_one_line(struct trace_iterator *iter,
t = te_blk_io_trace(iter->ent);
what = t->action & ((1 << BLK_TC_SHIFT) - 1);
- long_act = !!(trace_flags & TRACE_ITER_VERBOSE);
+ long_act = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
log_action = classic ? &blk_log_action_classic : &blk_log_action;
if (t->action == BLK_TN_MESSAGE) {
@@ -1410,9 +1411,9 @@ blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
/* don't output context-info for blk_classic output */
if (bit == TRACE_BLK_OPT_CLASSIC) {
if (set)
- trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
+ tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
else
- trace_flags |= TRACE_ITER_CONTEXT_INFO;
+ tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
}
return 0;
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 00611e95a8ee..3f743b147247 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -243,6 +243,11 @@ static void ftrace_sync_ipi(void *data)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);
+
+/* Both enabled by default (can be cleared by function_graph tracer flags) */
+static bool fgraph_sleep_time = true;
+static bool fgraph_graph_time = true;
+
#else
static inline void update_function_graph_func(void) { }
#endif
@@ -917,7 +922,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
calltime = trace->rettime - trace->calltime;
- if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
+ if (!fgraph_graph_time) {
int index;
index = trace->depth;
@@ -3420,27 +3425,35 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
inode, file);
}
-static int ftrace_match(char *str, char *regex, int len, int type)
+/* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
+struct ftrace_glob {
+ char *search;
+ unsigned len;
+ int type;
+};
+
+static int ftrace_match(char *str, struct ftrace_glob *g)
{
int matched = 0;
int slen;
- switch (type) {
+ switch (g->type) {
case MATCH_FULL:
- if (strcmp(str, regex) == 0)
+ if (strcmp(str, g->search) == 0)
matched = 1;
break;
case MATCH_FRONT_ONLY:
- if (strncmp(str, regex, len) == 0)
+ if (strncmp(str, g->search, g->len) == 0)
matched = 1;
break;
case MATCH_MIDDLE_ONLY:
- if (strstr(str, regex))
+ if (strstr(str, g->search))
matched = 1;
break;
case MATCH_END_ONLY:
slen = strlen(str);
- if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
+ if (slen >= g->len &&
+ memcmp(str + slen - g->len, g->search, g->len) == 0)
matched = 1;
break;
}
@@ -3449,13 +3462,13 @@ static int ftrace_match(char *str, char *regex, int len, int type)
}
static int
-enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
+enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
{
struct ftrace_func_entry *entry;
int ret = 0;
entry = ftrace_lookup_ip(hash, rec->ip);
- if (not) {
+ if (clear_filter) {
/* Do nothing if it doesn't exist */
if (!entry)
return 0;
@@ -3472,42 +3485,68 @@ enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
}
static int
-ftrace_match_record(struct dyn_ftrace *rec, char *mod,
- char *regex, int len, int type)
+ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
+ struct ftrace_glob *mod_g, int exclude_mod)
{
char str[KSYM_SYMBOL_LEN];
char *modname;
kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
- if (mod) {
- /* module lookup requires matching the module */
- if (!modname || strcmp(modname, mod))
+ if (mod_g) {
+ int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
+
+ /* blank module name to match all modules */
+ if (!mod_g->len) {
+ /* blank module globbing: modname xor exclude_mod */
+ if ((!exclude_mod) != (!modname))
+ goto func_match;
+ return 0;
+ }
+
+ /* not matching the module */
+ if (!modname || !mod_matches) {
+ if (exclude_mod)
+ goto func_match;
+ else
+ return 0;
+ }
+
+ if (mod_matches && exclude_mod)
return 0;
+func_match:
/* blank search means to match all funcs in the mod */
- if (!len)
+ if (!func_g->len)
return 1;
}
- return ftrace_match(str, regex, len, type);
+ return ftrace_match(str, func_g);
}
static int
-match_records(struct ftrace_hash *hash, char *buff,
- int len, char *mod, int not)
+match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
{
- unsigned search_len = 0;
struct ftrace_page *pg;
struct dyn_ftrace *rec;
- int type = MATCH_FULL;
- char *search = buff;
+ struct ftrace_glob func_g = { .type = MATCH_FULL };
+ struct ftrace_glob mod_g = { .type = MATCH_FULL };
+ struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
+ int exclude_mod = 0;
int found = 0;
int ret;
+ int clear_filter;
+
+ if (func) {
+ func_g.type = filter_parse_regex(func, len, &func_g.search,
+ &clear_filter);
+ func_g.len = strlen(func_g.search);
+ }
- if (len) {
- type = filter_parse_regex(buff, len, &search, &not);
- search_len = strlen(search);
+ if (mod) {
+ mod_g.type = filter_parse_regex(mod, strlen(mod),
+ &mod_g.search, &exclude_mod);
+ mod_g.len = strlen(mod_g.search);
}
mutex_lock(&ftrace_lock);
@@ -3516,8 +3555,8 @@ match_records(struct ftrace_hash *hash, char *buff,
goto out_unlock;
do_for_each_ftrace_rec(pg, rec) {
- if (ftrace_match_record(rec, mod, search, search_len, type)) {
- ret = enter_record(hash, rec, not);
+ if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
+ ret = enter_record(hash, rec, clear_filter);
if (ret < 0) {
found = ret;
goto out_unlock;
@@ -3534,26 +3573,9 @@ match_records(struct ftrace_hash *hash, char *buff,
static int
ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
{
- return match_records(hash, buff, len, NULL, 0);
+ return match_records(hash, buff, len, NULL);
}
-static int
-ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
-{
- int not = 0;
-
- /* blank or '*' mean the same */
- if (strcmp(buff, "*") == 0)
- buff[0] = 0;
-
- /* handle the case of 'dont filter this module' */
- if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
- buff[0] = 0;
- not = 1;
- }
-
- return match_records(hash, buff, strlen(buff), mod, not);
-}
/*
* We register the module command as a template to show others how
@@ -3562,10 +3584,9 @@ ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
static int
ftrace_mod_callback(struct ftrace_hash *hash,
- char *func, char *cmd, char *param, int enable)
+ char *func, char *cmd, char *module, int enable)
{
- char *mod;
- int ret = -EINVAL;
+ int ret;
/*
* cmd == 'mod' because we only registered this func
@@ -3574,21 +3595,11 @@ ftrace_mod_callback(struct ftrace_hash *hash,
* you can tell which command was used by the cmd
* parameter.
*/
-
- /* we must have a module name */
- if (!param)
- return ret;
-
- mod = strsep(&param, ":");
- if (!strlen(mod))
- return ret;
-
- ret = ftrace_match_module_records(hash, func, mod);
+ ret = match_records(hash, func, strlen(func), module);
if (!ret)
- ret = -EINVAL;
+ return -EINVAL;
if (ret < 0)
return ret;
-
return 0;
}
@@ -3699,19 +3710,20 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
{
struct ftrace_ops_hash old_hash_ops;
struct ftrace_func_probe *entry;
+ struct ftrace_glob func_g;
struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
struct ftrace_hash *old_hash = *orig_hash;
struct ftrace_hash *hash;
struct ftrace_page *pg;
struct dyn_ftrace *rec;
- int type, len, not;
+ int not;
unsigned long key;
int count = 0;
- char *search;
int ret;
- type = filter_parse_regex(glob, strlen(glob), &search, &not);
- len = strlen(search);
+ func_g.type = filter_parse_regex(glob, strlen(glob),
+ &func_g.search, &not);
+ func_g.len = strlen(func_g.search);
/* we do not support '!' for function probes */
if (WARN_ON(not))
@@ -3738,7 +3750,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
do_for_each_ftrace_rec(pg, rec) {
- if (!ftrace_match_record(rec, NULL, search, len, type))
+ if (!ftrace_match_record(rec, &func_g, NULL, 0))
continue;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
@@ -3811,24 +3823,24 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
struct ftrace_func_entry *rec_entry;
struct ftrace_func_probe *entry;
struct ftrace_func_probe *p;
+ struct ftrace_glob func_g;
struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
struct ftrace_hash *old_hash = *orig_hash;
struct list_head free_list;
struct ftrace_hash *hash;
struct hlist_node *tmp;
char str[KSYM_SYMBOL_LEN];
- int type = MATCH_FULL;
- int i, len = 0;
- char *search;
- int ret;
+ int i, ret;
if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
- glob = NULL;
+ func_g.search = NULL;
else if (glob) {
int not;
- type = filter_parse_regex(glob, strlen(glob), &search, &not);
- len = strlen(search);
+ func_g.type = filter_parse_regex(glob, strlen(glob),
+ &func_g.search, &not);
+ func_g.len = strlen(func_g.search);
+ func_g.search = glob;
/* we do not support '!' for function probes */
if (WARN_ON(not))
@@ -3857,10 +3869,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
continue;
/* do this last, since it is the most expensive */
- if (glob) {
+ if (func_g.search) {
kallsyms_lookup(entry->ip, NULL, NULL,
NULL, str);
- if (!ftrace_match(str, glob, len, type))
+ if (!ftrace_match(str, &func_g))
continue;
}
@@ -3889,7 +3901,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
ftrace_free_entry(entry);
}
mutex_unlock(&ftrace_lock);
-
+
out_unlock:
mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
free_ftrace_hash(hash);
@@ -4605,21 +4617,21 @@ ftrace_graph_release(struct inode *inode, struct file *file)
static int
ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
{
+ struct ftrace_glob func_g;
struct dyn_ftrace *rec;
struct ftrace_page *pg;
- int search_len;
int fail = 1;
- int type, not;
- char *search;
+ int not;
bool exists;
int i;
/* decode regex */
- type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
+ func_g.type = filter_parse_regex(buffer, strlen(buffer),
+ &func_g.search, &not);
if (!not && *idx >= size)
return -EBUSY;
- search_len = strlen(search);
+ func_g.len = strlen(func_g.search);
mutex_lock(&ftrace_lock);
@@ -4630,7 +4642,7 @@ ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
do_for_each_ftrace_rec(pg, rec) {
- if (ftrace_match_record(rec, NULL, search, search_len, type)) {
+ if (ftrace_match_record(rec, &func_g, NULL, 0)) {
/* if it is in the array */
exists = false;
for (i = 0; i < *idx; i++) {
@@ -4783,17 +4795,6 @@ static int ftrace_cmp_ips(const void *a, const void *b)
return 0;
}
-static void ftrace_swap_ips(void *a, void *b, int size)
-{
- unsigned long *ipa = a;
- unsigned long *ipb = b;
- unsigned long t;
-
- t = *ipa;
- *ipa = *ipb;
- *ipb = t;
-}
-
static int ftrace_process_locs(struct module *mod,
unsigned long *start,
unsigned long *end)
@@ -4813,7 +4814,7 @@ static int ftrace_process_locs(struct module *mod,
return 0;
sort(start, count, sizeof(*start),
- ftrace_cmp_ips, ftrace_swap_ips);
+ ftrace_cmp_ips, NULL);
start_pg = ftrace_allocate_pages(count);
if (!start_pg)
@@ -5639,6 +5640,16 @@ static struct ftrace_ops graph_ops = {
ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};
+void ftrace_graph_sleep_time_control(bool enable)
+{
+ fgraph_sleep_time = enable;
+}
+
+void ftrace_graph_graph_time_control(bool enable)
+{
+ fgraph_graph_time = enable;
+}
+
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
return 0;
@@ -5707,7 +5718,7 @@ ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
* Does the user want to count the time a function was asleep.
* If so, do not update the time stamps.
*/
- if (trace_flags & TRACE_ITER_SLEEP_TIME)
+ if (fgraph_sleep_time)
return;
timestamp = trace_clock_local();
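A sketch of the new glob plumbing (not part of the patch): filter_parse_regex()
fills in the search string and match type, ftrace_glob carries them together,
and ftrace_match() does the comparison. With an illustrative input of "foo*":

	char pattern[] = "foo*";	/* filter_parse_regex() modifies its input */
	char sym[] = "foo_bar";
	struct ftrace_glob func_g = { .type = MATCH_FULL };
	int clear_filter;

	func_g.type = filter_parse_regex(pattern, strlen(pattern),
					 &func_g.search, &clear_filter);
	func_g.len = strlen(func_g.search);	/* "foo", MATCH_FRONT_ONLY */

	if (ftrace_match(sym, &func_g))
		/* "foo_bar" starts with "foo", so this matches */;

The same matcher is now reused for the :mod: command, which is what allows the
module name itself to be globbed (and negated with '!').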
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index fc347f8b1bca..75f1d05ea82d 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -829,7 +829,7 @@ rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
* writer is ever on it, the previous pointer never points
* back to the reader page.
*/
-static int rb_is_reader_page(struct buffer_page *page)
+static bool rb_is_reader_page(struct buffer_page *page)
{
struct list_head *list = page->list.prev;
@@ -2270,7 +2270,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
return skip_time_extend(event);
}
-static inline int rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
+static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event);
/**
@@ -2498,7 +2498,7 @@ static inline void rb_event_discard(struct ring_buffer_event *event)
event->time_delta = 1;
}
-static inline int
+static inline bool
rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
{
@@ -3039,7 +3039,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
}
EXPORT_SYMBOL_GPL(ring_buffer_write);
-static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
+static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
struct buffer_page *reader = cpu_buffer->reader_page;
struct buffer_page *head = rb_set_head_page(cpu_buffer);
@@ -3047,7 +3047,7 @@ static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
/* In case of error, head will be NULL */
if (unlikely(!head))
- return 1;
+ return true;
return reader->read == rb_page_commit(reader) &&
(commit == reader ||
@@ -4267,7 +4267,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset);
* ring_buffer_empty - is the ring buffer empty?
* @buffer: The ring buffer to test
*/
-int ring_buffer_empty(struct ring_buffer *buffer)
+bool ring_buffer_empty(struct ring_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
@@ -4285,10 +4285,10 @@ int ring_buffer_empty(struct ring_buffer *buffer)
local_irq_restore(flags);
if (!ret)
- return 0;
+ return false;
}
- return 1;
+ return true;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);
@@ -4297,7 +4297,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
* @buffer: The ring buffer
* @cpu: The CPU buffer to test
*/
-int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
+bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
@@ -4305,7 +4305,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
int ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
- return 1;
+ return true;
cpu_buffer = buffer->buffers[cpu];
local_irq_save(flags);
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index a1503a027ee2..6df9a83e20d7 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -24,8 +24,8 @@ struct rb_page {
static int wakeup_interval = 100;
static int reader_finish;
-static struct completion read_start;
-static struct completion read_done;
+static DECLARE_COMPLETION(read_start);
+static DECLARE_COMPLETION(read_done);
static struct ring_buffer *buffer;
static struct task_struct *producer;
@@ -60,12 +60,12 @@ MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");
static int read_events;
-static int kill_test;
+static int test_error;
-#define KILL_TEST() \
+#define TEST_ERROR() \
do { \
- if (!kill_test) { \
- kill_test = 1; \
+ if (!test_error) { \
+ test_error = 1; \
WARN_ON(1); \
} \
} while (0)
@@ -75,6 +75,11 @@ enum event_status {
EVENT_DROPPED,
};
+static bool break_test(void)
+{
+ return test_error || kthread_should_stop();
+}
+
static enum event_status read_event(int cpu)
{
struct ring_buffer_event *event;
@@ -87,7 +92,7 @@ static enum event_status read_event(int cpu)
entry = ring_buffer_event_data(event);
if (*entry != cpu) {
- KILL_TEST();
+ TEST_ERROR();
return EVENT_DROPPED;
}
@@ -115,10 +120,10 @@ static enum event_status read_page(int cpu)
rpage = bpage;
/* The commit may have missed event flags set, clear them */
commit = local_read(&rpage->commit) & 0xfffff;
- for (i = 0; i < commit && !kill_test; i += inc) {
+ for (i = 0; i < commit && !test_error; i += inc) {
if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
- KILL_TEST();
+ TEST_ERROR();
break;
}
@@ -128,7 +133,7 @@ static enum event_status read_page(int cpu)
case RINGBUF_TYPE_PADDING:
/* failed writes may be discarded events */
if (!event->time_delta)
- KILL_TEST();
+ TEST_ERROR();
inc = event->array[0] + 4;
break;
case RINGBUF_TYPE_TIME_EXTEND:
@@ -137,12 +142,12 @@ static enum event_status read_page(int cpu)
case 0:
entry = ring_buffer_event_data(event);
if (*entry != cpu) {
- KILL_TEST();
+ TEST_ERROR();
break;
}
read++;
if (!event->array[0]) {
- KILL_TEST();
+ TEST_ERROR();
break;
}
inc = event->array[0] + 4;
@@ -150,17 +155,17 @@ static enum event_status read_page(int cpu)
default:
entry = ring_buffer_event_data(event);
if (*entry != cpu) {
- KILL_TEST();
+ TEST_ERROR();
break;
}
read++;
inc = ((event->type_len + 1) * 4);
}
- if (kill_test)
+ if (test_error)
break;
if (inc <= 0) {
- KILL_TEST();
+ TEST_ERROR();
break;
}
}
@@ -178,10 +183,14 @@ static void ring_buffer_consumer(void)
read_events ^= 1;
read = 0;
- while (!reader_finish && !kill_test) {
- int found;
+ /*
+ * Continue running until the producer specifically asks to stop
+ * and is ready for the completion.
+ */
+ while (!READ_ONCE(reader_finish)) {
+ int found = 1;
- do {
+ while (found && !test_error) {
int cpu;
found = 0;
@@ -193,19 +202,25 @@ static void ring_buffer_consumer(void)
else
stat = read_page(cpu);
- if (kill_test)
+ if (test_error)
break;
+
if (stat == EVENT_FOUND)
found = 1;
+
}
- } while (found && !kill_test);
+ }
+ /* Wait till the producer wakes us up when there is more data
+ * available or when the producer wants us to finish reading.
+ */
set_current_state(TASK_INTERRUPTIBLE);
if (reader_finish)
break;
schedule();
}
+ __set_current_state(TASK_RUNNING);
reader_finish = 0;
complete(&read_done);
}
@@ -263,10 +278,7 @@ static void ring_buffer_producer(void)
if (cnt % wakeup_interval)
cond_resched();
#endif
- if (kthread_should_stop())
- kill_test = 1;
-
- } while (ktime_before(end_time, timeout) && !kill_test);
+ } while (ktime_before(end_time, timeout) && !break_test());
trace_printk("End ring buffer hammer\n");
if (consumer) {
@@ -276,8 +288,6 @@ static void ring_buffer_producer(void)
/* the completions must be visible before the finish var */
smp_wmb();
reader_finish = 1;
- /* finish var visible before waking up the consumer */
- smp_wmb();
wake_up_process(consumer);
wait_for_completion(&read_done);
}
@@ -287,7 +297,7 @@ static void ring_buffer_producer(void)
entries = ring_buffer_entries(buffer);
overruns = ring_buffer_overruns(buffer);
- if (kill_test && !kthread_should_stop())
+ if (test_error)
trace_printk("ERROR!\n");
if (!disable_reader) {
@@ -368,15 +378,14 @@ static void wait_to_die(void)
static int ring_buffer_consumer_thread(void *arg)
{
- while (!kthread_should_stop() && !kill_test) {
+ while (!break_test()) {
complete(&read_start);
ring_buffer_consumer();
set_current_state(TASK_INTERRUPTIBLE);
- if (kthread_should_stop() || kill_test)
+ if (break_test())
break;
-
schedule();
}
__set_current_state(TASK_RUNNING);
@@ -389,27 +398,27 @@ static int ring_buffer_consumer_thread(void *arg)
static int ring_buffer_producer_thread(void *arg)
{
- init_completion(&read_start);
-
- while (!kthread_should_stop() && !kill_test) {
+ while (!break_test()) {
ring_buffer_reset(buffer);
if (consumer) {
- smp_wmb();
wake_up_process(consumer);
wait_for_completion(&read_start);
}
ring_buffer_producer();
- if (kill_test)
+ if (break_test())
goto out_kill;
trace_printk("Sleeping for 10 secs\n");
set_current_state(TASK_INTERRUPTIBLE);
+ if (break_test())
+ goto out_kill;
schedule_timeout(HZ * SLEEP_TIME);
}
out_kill:
+ __set_current_state(TASK_RUNNING);
if (!kthread_should_stop())
wait_to_die();
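As a sketch, the loop shape both benchmark threads now share: break_test()
folds the old kill_test check and kthread_should_stop() into one predicate, and
the task state is always reset before leaving the loop. do_one_iteration() is
an illustrative stand-in for the producer/consumer work:

	while (!break_test()) {
		do_one_iteration();		/* illustrative */
		set_current_state(TASK_INTERRUPTIBLE);
		if (break_test())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);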
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6e79408674aa..2198a630ef58 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -214,12 +214,10 @@ __setup("alloc_snapshot", boot_alloc_snapshot);
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
-static char *trace_boot_options __initdata;
static int __init set_trace_boot_options(char *str)
{
strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
- trace_boot_options = trace_boot_options_buf;
return 0;
}
__setup("trace_options=", set_trace_boot_options);
@@ -250,6 +248,19 @@ unsigned long long ns2usecs(cycle_t nsec)
return nsec;
}
+/* trace_flags holds trace_options default values */
+#define TRACE_DEFAULT_FLAGS \
+ (FUNCTION_DEFAULT_FLAGS | \
+ TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
+ TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
+ TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
+ TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
+
+/* trace_options that are only supported by global_trace */
+#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
+ TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
+
+
/*
* The global_trace is the descriptor that holds the tracing
* buffers for the live tracing. For each CPU, it contains
@@ -262,7 +273,9 @@ unsigned long long ns2usecs(cycle_t nsec)
* pages for the buffer for that CPU. Each CPU has the same number
* of pages allocated for its buffer.
*/
-static struct trace_array global_trace;
+static struct trace_array global_trace = {
+ .trace_flags = TRACE_DEFAULT_FLAGS,
+};
LIST_HEAD(ftrace_trace_arrays);
@@ -468,11 +481,29 @@ static inline void trace_access_lock_init(void)
#endif
-/* trace_flags holds trace_options default values */
-unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
- TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
- TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
- TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
+#ifdef CONFIG_STACKTRACE
+static void __ftrace_trace_stack(struct ring_buffer *buffer,
+ unsigned long flags,
+ int skip, int pc, struct pt_regs *regs);
+static inline void ftrace_trace_stack(struct trace_array *tr,
+ struct ring_buffer *buffer,
+ unsigned long flags,
+ int skip, int pc, struct pt_regs *regs);
+
+#else
+static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
+ unsigned long flags,
+ int skip, int pc, struct pt_regs *regs)
+{
+}
+static inline void ftrace_trace_stack(struct trace_array *tr,
+ struct ring_buffer *buffer,
+ unsigned long flags,
+ int skip, int pc, struct pt_regs *regs)
+{
+}
+
+#endif
static void tracer_tracing_on(struct trace_array *tr)
{
@@ -518,7 +549,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
int alloc;
int pc;
- if (!(trace_flags & TRACE_ITER_PRINTK))
+ if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
return 0;
pc = preempt_count();
@@ -548,7 +579,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
entry->buf[size] = '\0';
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(buffer, irq_flags, 4, pc);
+ ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
return size;
}
@@ -568,7 +599,7 @@ int __trace_bputs(unsigned long ip, const char *str)
int size = sizeof(struct bputs_entry);
int pc;
- if (!(trace_flags & TRACE_ITER_PRINTK))
+ if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
return 0;
pc = preempt_count();
@@ -588,7 +619,7 @@ int __trace_bputs(unsigned long ip, const char *str)
entry->str = str;
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(buffer, irq_flags, 4, pc);
+ ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
return 1;
}
@@ -834,34 +865,18 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
return nsecs / 1000;
}
+/*
+ * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
+ * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
+ * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
+ * of strings in the order that the enums were defined.
+ */
+#undef C
+#define C(a, b) b
+
/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
- "print-parent",
- "sym-offset",
- "sym-addr",
- "verbose",
- "raw",
- "hex",
- "bin",
- "block",
- "stacktrace",
- "trace_printk",
- "ftrace_preempt",
- "branch",
- "annotate",
- "userstacktrace",
- "sym-userobj",
- "printk-msg-only",
- "context-info",
- "latency-format",
- "sleep-time",
- "graph-time",
- "record-cmd",
- "overwrite",
- "disable_on_free",
- "irq-info",
- "markers",
- "function-trace",
+ TRACE_FLAGS
NULL
};
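As an aside, a self-contained illustration of the C(a, b) technique with
made-up flag names (the real TRACE_FLAGS list lives in kernel/trace/trace.h and
is not shown in this diff):

	#define DEMO_FLAGS		\
		C(FIRST,  "first"),	\
		C(SECOND, "second"),	\
		C(THIRD,  "third"),

	/* expand once to enum bit names... */
	#undef C
	#define C(a, b) DEMO_ITER_##a##_BIT
	enum { DEMO_FLAGS DEMO_ITER_LAST_BIT };

	/* ...and again to the matching strings, in the same order */
	#undef C
	#define C(a, b) b
	static const char *demo_options[] = { DEMO_FLAGS NULL };

Because both expansions come from the same list, the option strings can no
longer drift out of sync with the bit positions.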
@@ -1204,13 +1219,17 @@ static inline int run_tracer_selftest(struct tracer *type)
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
+static void add_tracer_options(struct trace_array *tr, struct tracer *t);
+
+static void __init apply_trace_boot_options(void);
+
/**
* register_tracer - register a tracer with the ftrace system.
* @type - the plugin for the tracer
*
* Register a new plugin tracer.
*/
-int register_tracer(struct tracer *type)
+int __init register_tracer(struct tracer *type)
{
struct tracer *t;
int ret = 0;
@@ -1253,6 +1272,7 @@ int register_tracer(struct tracer *type)
type->next = trace_types;
trace_types = type;
+ add_tracer_options(&global_trace, type);
out:
tracing_selftest_running = false;
@@ -1268,6 +1288,9 @@ int register_tracer(struct tracer *type)
/* Do we want this tracer to start on bootup? */
tracing_set_tracer(&global_trace, type->name);
default_bootup_tracer = NULL;
+
+ apply_trace_boot_options();
+
/* disable other selftests, since this will break it. */
tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -1671,23 +1694,16 @@ __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *eve
ring_buffer_unlock_commit(buffer, event);
}
-static inline void
-__trace_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc)
+void trace_buffer_unlock_commit(struct trace_array *tr,
+ struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc)
{
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(buffer, flags, 6, pc);
+ ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
ftrace_trace_userstack(buffer, flags, pc);
}
-
-void trace_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc)
-{
- __trace_buffer_unlock_commit(buffer, event, flags, pc);
-}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
static struct ring_buffer *temp_buffer;
@@ -1729,22 +1745,15 @@ trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
-void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc)
-{
- __trace_buffer_unlock_commit(buffer, event, flags, pc);
-}
-EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
-
-void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit_regs(struct trace_array *tr,
+ struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
struct pt_regs *regs)
{
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
+ ftrace_trace_stack(tr, buffer, flags, 6, pc, regs);
ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
@@ -1873,24 +1882,17 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
}
-void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
- int skip, int pc, struct pt_regs *regs)
+static inline void ftrace_trace_stack(struct trace_array *tr,
+ struct ring_buffer *buffer,
+ unsigned long flags,
+ int skip, int pc, struct pt_regs *regs)
{
- if (!(trace_flags & TRACE_ITER_STACKTRACE))
+ if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
return;
__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}
-void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
- int skip, int pc)
-{
- if (!(trace_flags & TRACE_ITER_STACKTRACE))
- return;
-
- __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
-}
-
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
int pc)
{
@@ -1929,7 +1931,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
struct userstack_entry *entry;
struct stack_trace trace;
- if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
+ if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
return;
/*
@@ -2173,7 +2175,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
memcpy(entry->buf, tbuffer, sizeof(u32) * len);
if (!call_filter_check_discard(call, entry, buffer, event)) {
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(buffer, flags, 6, pc);
+ ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
}
out:
@@ -2225,7 +2227,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
memcpy(&entry->buf, tbuffer, len + 1);
if (!call_filter_check_discard(call, entry, buffer, event)) {
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(buffer, flags, 6, pc);
+ ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
}
out:
preempt_enable_notrace();
@@ -2246,7 +2248,7 @@ int trace_array_printk(struct trace_array *tr,
int ret;
va_list ap;
- if (!(trace_flags & TRACE_ITER_PRINTK))
+ if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
return 0;
va_start(ap, fmt);
@@ -2261,7 +2263,7 @@ int trace_array_printk_buf(struct ring_buffer *buffer,
int ret;
va_list ap;
- if (!(trace_flags & TRACE_ITER_PRINTK))
+ if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
return 0;
va_start(ap, fmt);
@@ -2602,7 +2604,7 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
- unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
+ unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
struct trace_buffer *buf = iter->trace_buffer;
struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
struct tracer *type = iter->trace;
@@ -2664,20 +2666,22 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
static void test_cpu_buff_start(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
+ struct trace_array *tr = iter->tr;
- if (!(trace_flags & TRACE_ITER_ANNOTATE))
+ if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
return;
if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
return;
- if (cpumask_test_cpu(iter->cpu, iter->started))
+ if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
return;
if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
return;
- cpumask_set_cpu(iter->cpu, iter->started);
+ if (iter->started)
+ cpumask_set_cpu(iter->cpu, iter->started);
/* Don't print started cpu buffer for the first entry of the trace */
if (iter->idx > 1)
@@ -2687,8 +2691,9 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
+ struct trace_array *tr = iter->tr;
struct trace_seq *s = &iter->seq;
- unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
+ unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
struct trace_entry *entry;
struct trace_event *event;
@@ -2698,7 +2703,7 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
event = ftrace_find_event(entry->type);
- if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
+ if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
if (iter->iter_flags & TRACE_FILE_LAT_FMT)
trace_print_lat_context(iter);
else
@@ -2718,13 +2723,14 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
+ struct trace_array *tr = iter->tr;
struct trace_seq *s = &iter->seq;
struct trace_entry *entry;
struct trace_event *event;
entry = iter->ent;
- if (trace_flags & TRACE_ITER_CONTEXT_INFO)
+ if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
trace_seq_printf(s, "%d %d %llu ",
entry->pid, iter->cpu, iter->ts);
@@ -2742,6 +2748,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
+ struct trace_array *tr = iter->tr;
struct trace_seq *s = &iter->seq;
unsigned char newline = '\n';
struct trace_entry *entry;
@@ -2749,7 +2756,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
entry = iter->ent;
- if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
+ if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
SEQ_PUT_HEX_FIELD(s, entry->pid);
SEQ_PUT_HEX_FIELD(s, iter->cpu);
SEQ_PUT_HEX_FIELD(s, iter->ts);
@@ -2771,13 +2778,14 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
+ struct trace_array *tr = iter->tr;
struct trace_seq *s = &iter->seq;
struct trace_entry *entry;
struct trace_event *event;
entry = iter->ent;
- if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
+ if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
SEQ_PUT_FIELD(s, entry->pid);
SEQ_PUT_FIELD(s, iter->cpu);
SEQ_PUT_FIELD(s, iter->ts);
@@ -2826,6 +2834,8 @@ int trace_empty(struct trace_iterator *iter)
/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
+ struct trace_array *tr = iter->tr;
+ unsigned long trace_flags = tr->trace_flags;
enum print_line_t ret;
if (iter->lost_events) {
@@ -2871,6 +2881,7 @@ enum print_line_t print_trace_line(struct trace_iterator *iter)
void trace_latency_header(struct seq_file *m)
{
struct trace_iterator *iter = m->private;
+ struct trace_array *tr = iter->tr;
/* print nothing if the buffers are empty */
if (trace_empty(iter))
@@ -2879,13 +2890,15 @@ void trace_latency_header(struct seq_file *m)
if (iter->iter_flags & TRACE_FILE_LAT_FMT)
print_trace_header(m, iter);
- if (!(trace_flags & TRACE_ITER_VERBOSE))
+ if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
print_lat_help_header(m);
}
void trace_default_header(struct seq_file *m)
{
struct trace_iterator *iter = m->private;
+ struct trace_array *tr = iter->tr;
+ unsigned long trace_flags = tr->trace_flags;
if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
return;
@@ -3230,7 +3243,7 @@ static int tracing_open(struct inode *inode, struct file *file)
iter = __tracing_open(inode, file, false);
if (IS_ERR(iter))
ret = PTR_ERR(iter);
- else if (trace_flags & TRACE_ITER_LATENCY_FMT)
+ else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
iter->iter_flags |= TRACE_FILE_LAT_FMT;
}
@@ -3477,7 +3490,7 @@ static int tracing_trace_options_show(struct seq_file *m, void *v)
trace_opts = tr->current_trace->flags->opts;
for (i = 0; trace_options[i]; i++) {
- if (trace_flags & (1 << i))
+ if (tr->trace_flags & (1 << i))
seq_printf(m, "%s\n", trace_options[i]);
else
seq_printf(m, "no%s\n", trace_options[i]);
@@ -3542,7 +3555,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
/* do nothing if flag is already set */
- if (!!(trace_flags & mask) == !!enabled)
+ if (!!(tr->trace_flags & mask) == !!enabled)
return 0;
/* Give the tracer a chance to approve the change */
@@ -3551,9 +3564,9 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
return -EINVAL;
if (enabled)
- trace_flags |= mask;
+ tr->trace_flags |= mask;
else
- trace_flags &= ~mask;
+ tr->trace_flags &= ~mask;
if (mask == TRACE_ITER_RECORD_CMD)
trace_event_enable_cmd_record(enabled);
@@ -3565,8 +3578,10 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
#endif
}
- if (mask == TRACE_ITER_PRINTK)
+ if (mask == TRACE_ITER_PRINTK) {
trace_printk_start_stop_comm(enabled);
+ trace_printk_control(enabled);
+ }
return 0;
}
@@ -3577,6 +3592,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
int neg = 0;
int ret = -ENODEV;
int i;
+ size_t orig_len = strlen(option);
cmp = strstrip(option);
@@ -3600,9 +3616,36 @@ static int trace_set_options(struct trace_array *tr, char *option)
mutex_unlock(&trace_types_lock);
+ /*
+ * If the first trailing whitespace is replaced with '\0' by strstrip,
+ * turn it back into a space.
+ */
+ if (orig_len > strlen(option))
+ option[strlen(option)] = ' ';
+
return ret;
}
+static void __init apply_trace_boot_options(void)
+{
+ char *buf = trace_boot_options_buf;
+ char *option;
+
+ while (true) {
+ option = strsep(&buf, ",");
+
+ if (!option)
+ break;
+
+ if (*option)
+ trace_set_options(&global_trace, option);
+
+ /* Put back the comma to allow this to be called again */
+ if (buf)
+ *(buf - 1) = ',';
+ }
+}
+
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
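
For reference, apply_trace_boot_options() above walks the boot-time option string with strsep() and then restores each ',' so the same buffer can be parsed again on a later call. A standalone userspace sketch of that parse-and-restore pattern (illustrative only, not part of this patch; strsep() is a BSD/glibc extension):

/* Illustrative userspace sketch of the strsep()-and-restore loop above. */
#include <stdio.h>
#include <string.h>

static void walk_options(char *buf)
{
	char *option;

	while ((option = strsep(&buf, ",")) != NULL) {
		if (*option)
			printf("option: %s\n", option);
		/*
		 * strsep() replaced the ',' with '\0'; put it back so the
		 * same buffer can be walked again later.
		 */
		if (buf)
			*(buf - 1) = ',';
	}
}

int main(void)
{
	char opts[] = "trace_printk,,stacktrace";

	walk_options(opts);	/* prints trace_printk, then stacktrace */
	walk_options(opts);	/* still works: the commas were restored */
	return 0;
}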
@@ -4297,11 +4340,8 @@ int tracing_update_buffers(void)
struct trace_option_dentry;
-static struct trace_option_dentry *
-create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
-
static void
-destroy_trace_option_files(struct trace_option_dentry *topts);
+create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
/*
* Used to clear out the tracer before deletion of an instance.
@@ -4320,20 +4360,13 @@ static void tracing_set_nop(struct trace_array *tr)
tr->current_trace = &nop_trace;
}
-static void update_tracer_options(struct trace_array *tr, struct tracer *t)
+static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
- static struct trace_option_dentry *topts;
-
/* Only enable if the directory has been created already. */
if (!tr->dir)
return;
- /* Currently, only the top instance has options */
- if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL))
- return;
-
- destroy_trace_option_files(topts);
- topts = create_trace_option_files(tr, t);
+ create_trace_option_files(tr, t);
}
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
@@ -4402,7 +4435,6 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
free_snapshot(tr);
}
#endif
- update_tracer_options(tr, t);
#ifdef CONFIG_TRACER_MAX_TRACE
if (t->use_max_tr && !had_max_tr) {
@@ -4569,7 +4601,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
/* trace pipe does not show start of buffer */
cpumask_setall(iter->started);
- if (trace_flags & TRACE_ITER_LATENCY_FMT)
+ if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
iter->iter_flags |= TRACE_FILE_LAT_FMT;
/* Output in nanoseconds only if we are using a clock in nanoseconds. */
@@ -4626,11 +4658,13 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
+ struct trace_array *tr = iter->tr;
+
/* Iterators are static, they should be filled or empty */
if (trace_buffer_iter(iter, iter->cpu_file))
return POLLIN | POLLRDNORM;
- if (trace_flags & TRACE_ITER_BLOCK)
+ if (tr->trace_flags & TRACE_ITER_BLOCK)
/*
* Always select as readable when in blocking mode
*/
@@ -5047,7 +5081,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
struct trace_array *tr = inode->i_private;
/* disable tracing ? */
- if (trace_flags & TRACE_ITER_STOP_ON_FREE)
+ if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
tracer_tracing_off(tr);
/* resize the ring buffer to 0 */
tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
@@ -5080,7 +5114,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
if (tracing_disabled)
return -EINVAL;
- if (!(trace_flags & TRACE_ITER_MARKERS))
+ if (!(tr->trace_flags & TRACE_ITER_MARKERS))
return -EINVAL;
if (cnt > TRACE_BUF_SIZE)
@@ -6132,13 +6166,6 @@ tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
#include "trace_selftest.c"
#endif
-struct trace_option_dentry {
- struct tracer_opt *opt;
- struct tracer_flags *flags;
- struct trace_array *tr;
- struct dentry *entry;
-};
-
static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
@@ -6191,14 +6218,51 @@ static const struct file_operations trace_options_fops = {
.llseek = generic_file_llseek,
};
+/*
+ * In order to pass in both the trace_array descriptor as well as the index
+ * to the flag that the trace option file represents, the trace_array
+ * has a character array of trace_flags_index[], which holds the index
+ * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
+ * The address of this character array is passed to the flag option file
+ * read/write callbacks.
+ *
+ * In order to extract both the index and the trace_array descriptor,
+ * get_tr_index() uses the following algorithm.
+ *
+ * idx = *ptr;
+ *
+ * This works because the pointer points into the index array and each
+ * element holds its own position (remember, index[1] == 1).
+ *
+ * Then, to get the trace_array descriptor, subtracting that index
+ * from the pointer gets us back to the start of the index array.
+ *
+ * ptr - idx == &index[0]
+ *
+ * Then a simple container_of() from that pointer gets us to the
+ * trace_array descriptor.
+ */
+static void get_tr_index(void *data, struct trace_array **ptr,
+ unsigned int *pindex)
+{
+ *pindex = *(unsigned char *)data;
+
+ *ptr = container_of(data - *pindex, struct trace_array,
+ trace_flags_index);
+}
+
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- long index = (long)filp->private_data;
+ void *tr_index = filp->private_data;
+ struct trace_array *tr;
+ unsigned int index;
char *buf;
- if (trace_flags & (1 << index))
+ get_tr_index(tr_index, &tr, &index);
+
+ if (tr->trace_flags & (1 << index))
buf = "1\n";
else
buf = "0\n";
@@ -6210,11 +6274,14 @@ static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct trace_array *tr = &global_trace;
- long index = (long)filp->private_data;
+ void *tr_index = filp->private_data;
+ struct trace_array *tr;
+ unsigned int index;
unsigned long val;
int ret;
+ get_tr_index(tr_index, &tr, &index);
+
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
@@ -6298,21 +6365,39 @@ create_trace_option_file(struct trace_array *tr,
}
-static struct trace_option_dentry *
+static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
struct trace_option_dentry *topts;
+ struct trace_options *tr_topts;
struct tracer_flags *flags;
struct tracer_opt *opts;
int cnt;
+ int i;
if (!tracer)
- return NULL;
+ return;
flags = tracer->flags;
if (!flags || !flags->opts)
- return NULL;
+ return;
+
+ /*
+ * If this is an instance, only create flags for tracers
+ * the instance may have.
+ */
+ if (!trace_ok_for_array(tracer, tr))
+ return;
+
+ for (i = 0; i < tr->nr_topts; i++) {
+ /*
+ * Check if these flags have already been added.
+ * Some tracers share flags.
+ */
+ if (tr->topts[i].tracer->flags == tracer->flags)
+ return;
+ }
opts = flags->opts;
@@ -6321,27 +6406,27 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
if (!topts)
- return NULL;
-
- for (cnt = 0; opts[cnt].name; cnt++)
- create_trace_option_file(tr, &topts[cnt], flags,
- &opts[cnt]);
-
- return topts;
-}
-
-static void
-destroy_trace_option_files(struct trace_option_dentry *topts)
-{
- int cnt;
+ return;
- if (!topts)
+ tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
+ GFP_KERNEL);
+ if (!tr_topts) {
+ kfree(topts);
return;
+ }
- for (cnt = 0; topts[cnt].opt; cnt++)
- tracefs_remove(topts[cnt].entry);
+ tr->topts = tr_topts;
+ tr->topts[tr->nr_topts].tracer = tracer;
+ tr->topts[tr->nr_topts].topts = topts;
+ tr->nr_topts++;
- kfree(topts);
+ for (cnt = 0; opts[cnt].name; cnt++) {
+ create_trace_option_file(tr, &topts[cnt], flags,
+ &opts[cnt]);
+ WARN_ONCE(topts[cnt].entry == NULL,
+ "Failed to create trace option: %s",
+ opts[cnt].name);
+ }
}
static struct dentry *
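
create_trace_option_files() now appends each tracer's option block to tr->topts with krealloc(), freeing only the new block (and keeping the old array) if the grow fails. A userspace sketch of that grow-by-one pattern (illustrative; names are made up):

/* Userspace sketch of the grow-by-one (k)realloc pattern used above. */
#include <stdlib.h>

struct topt_demo {
	const char *name;
};

static int add_topt(struct topt_demo **arr, int *nr, struct topt_demo item)
{
	struct topt_demo *tmp;

	tmp = realloc(*arr, sizeof(*tmp) * (*nr + 1));
	if (!tmp)
		return -1;	/* the old array is still valid on failure */

	tmp[*nr] = item;
	*arr = tmp;
	(*nr)++;
	return 0;
}

int main(void)
{
	struct topt_demo *topts = NULL;
	int nr = 0;

	add_topt(&topts, &nr, (struct topt_demo){ .name = "sleep-time" });
	add_topt(&topts, &nr, (struct topt_demo){ .name = "graph-time" });
	free(topts);
	return 0;
}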
@@ -6354,21 +6439,26 @@ create_trace_option_core_file(struct trace_array *tr,
if (!t_options)
return NULL;
- return trace_create_file(option, 0644, t_options, (void *)index,
- &trace_options_core_fops);
+ return trace_create_file(option, 0644, t_options,
+ (void *)&tr->trace_flags_index[index],
+ &trace_options_core_fops);
}
-static __init void create_trace_options_dir(struct trace_array *tr)
+static void create_trace_options_dir(struct trace_array *tr)
{
struct dentry *t_options;
+ bool top_level = tr == &global_trace;
int i;
t_options = trace_options_init_dentry(tr);
if (!t_options)
return;
- for (i = 0; trace_options[i]; i++)
- create_trace_option_core_file(tr, trace_options[i], i);
+ for (i = 0; trace_options[i]; i++) {
+ if (top_level ||
+ !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
+ create_trace_option_core_file(tr, trace_options[i], i);
+ }
}
static ssize_t
@@ -6435,7 +6525,7 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
{
enum ring_buffer_flags rb_flags;
- rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
+ rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
buf->tr = tr;
@@ -6505,6 +6595,30 @@ static void free_trace_buffers(struct trace_array *tr)
#endif
}
+static void init_trace_flags_index(struct trace_array *tr)
+{
+ int i;
+
+ /* Used by the trace options files */
+ for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
+ tr->trace_flags_index[i] = i;
+}
+
+static void __update_tracer_options(struct trace_array *tr)
+{
+ struct tracer *t;
+
+ for (t = trace_types; t; t = t->next)
+ add_tracer_options(tr, t);
+}
+
+static void update_tracer_options(struct trace_array *tr)
+{
+ mutex_lock(&trace_types_lock);
+ __update_tracer_options(tr);
+ mutex_unlock(&trace_types_lock);
+}
+
static int instance_mkdir(const char *name)
{
struct trace_array *tr;
@@ -6530,6 +6644,8 @@ static int instance_mkdir(const char *name)
if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
goto out_free_tr;
+ tr->trace_flags = global_trace.trace_flags;
+
cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
raw_spin_lock_init(&tr->start_lock);
@@ -6555,6 +6671,8 @@ static int instance_mkdir(const char *name)
}
init_tracer_tracefs(tr, tr->dir);
+ init_trace_flags_index(tr);
+ __update_tracer_options(tr);
list_add(&tr->list, &ftrace_trace_arrays);
@@ -6580,6 +6698,7 @@ static int instance_rmdir(const char *name)
struct trace_array *tr;
int found = 0;
int ret;
+ int i;
mutex_lock(&trace_types_lock);
@@ -6602,9 +6721,14 @@ static int instance_rmdir(const char *name)
tracing_set_nop(tr);
event_trace_del_tracer(tr);
ftrace_destroy_function_files(tr);
- debugfs_remove_recursive(tr->dir);
+ tracefs_remove_recursive(tr->dir);
free_trace_buffers(tr);
+ for (i = 0; i < tr->nr_topts; i++) {
+ kfree(tr->topts[i].topts);
+ }
+ kfree(tr->topts);
+
kfree(tr->name);
kfree(tr);
@@ -6666,6 +6790,8 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
trace_create_file("tracing_on", 0644, d_tracer,
tr, &rb_simple_fops);
+ create_trace_options_dir(tr);
+
#ifdef CONFIG_TRACER_MAX_TRACE
trace_create_file("tracing_max_latency", 0644, d_tracer,
&tr->max_latency, &tracing_max_lat_fops);
@@ -6861,11 +6987,7 @@ static __init int tracer_init_tracefs(void)
create_trace_instances(d_tracer);
- create_trace_options_dir(&global_trace);
-
- /* If the tracer was started via cmdline, create options for it here */
- if (global_trace.current_trace != &nop_trace)
- update_tracer_options(&global_trace, global_trace.current_trace);
+ update_tracer_options(&global_trace);
return 0;
}
@@ -6964,6 +7086,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
static atomic_t dump_running;
+ struct trace_array *tr = &global_trace;
unsigned int old_userobj;
unsigned long flags;
int cnt = 0, cpu;
@@ -6993,10 +7116,10 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
}
- old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
+ old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
/* don't look at user memory in panic mode */
- trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
+ tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
switch (oops_dump_mode) {
case DUMP_ALL:
@@ -7059,7 +7182,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
printk(KERN_TRACE "---------------------------------\n");
out_enable:
- trace_flags |= old_userobj;
+ tr->trace_flags |= old_userobj;
for_each_tracing_cpu(cpu) {
atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
@@ -7074,6 +7197,12 @@ __init static int tracer_alloc_buffers(void)
int ring_buf_size;
int ret = -ENOMEM;
+ /*
+ * Make sure we don't accidently add more trace options
+ * than we have bits for.
+ */
+ BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
+
if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
goto out;
@@ -7132,6 +7261,8 @@ __init static int tracer_alloc_buffers(void)
ftrace_init_global_array_ops(&global_trace);
+ init_trace_flags_index(&global_trace);
+
register_tracer(&nop_trace);
/* All seems OK, enable tracing */
@@ -7148,12 +7279,7 @@ __init static int tracer_alloc_buffers(void)
INIT_LIST_HEAD(&global_trace.events);
list_add(&global_trace.list, &ftrace_trace_arrays);
- while (trace_boot_options) {
- char *option;
-
- option = strsep(&trace_boot_options, ",");
- trace_set_options(&global_trace, option);
- }
+ apply_trace_boot_options();
register_snapshot_cmd();
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 74bde81601a9..dd7620802e72 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -71,9 +71,6 @@ enum trace_type {
tstruct \
}
-#undef TP_ARGS
-#define TP_ARGS(args...) args
-
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
@@ -156,9 +153,12 @@ struct trace_array_cpu {
pid_t pid;
kuid_t uid;
char comm[TASK_COMM_LEN];
+
+ bool ignore_pid;
};
struct tracer;
+struct trace_option_dentry;
struct trace_buffer {
struct trace_array *tr;
@@ -168,6 +168,19 @@ struct trace_buffer {
int cpu;
};
+#define TRACE_FLAGS_MAX_SIZE 32
+
+struct trace_options {
+ struct tracer *tracer;
+ struct trace_option_dentry *topts;
+};
+
+struct trace_pid_list {
+ unsigned int nr_pids;
+ int order;
+ pid_t *pids;
+};
+
/*
* The trace array - an array of per-CPU trace arrays. This is the
* highest level data structure that individual tracers deal with.
@@ -193,6 +206,7 @@ struct trace_array {
bool allocated_snapshot;
unsigned long max_latency;
#endif
+ struct trace_pid_list __rcu *filtered_pids;
/*
* max_lock is used to protect the swapping of buffers
* when taking a max snapshot. The buffers themselves are
@@ -216,13 +230,17 @@ struct trace_array {
#endif
int stop_count;
int clock_id;
+ int nr_topts;
struct tracer *current_trace;
+ unsigned int trace_flags;
+ unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE];
unsigned int flags;
raw_spinlock_t start_lock;
struct dentry *dir;
struct dentry *options;
struct dentry *percpu_dir;
struct dentry *event_dir;
+ struct trace_options *topts;
struct list_head systems;
struct list_head events;
cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
@@ -333,6 +351,13 @@ struct tracer_flags {
#define TRACER_OPT(s, b) .name = #s, .bit = b
+struct trace_option_dentry {
+ struct tracer_opt *opt;
+ struct tracer_flags *flags;
+ struct trace_array *tr;
+ struct dentry *entry;
+};
+
/**
* struct tracer - a specific tracer and its callbacks to interact with tracefs
* @name: the name chosen to select it on the available_tracers file
@@ -611,29 +636,12 @@ void update_max_tr_single(struct trace_array *tr,
#endif /* CONFIG_TRACER_MAX_TRACE */
#ifdef CONFIG_STACKTRACE
-void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
- int skip, int pc);
-
-void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
- int skip, int pc, struct pt_regs *regs);
-
void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
int pc);
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
int pc);
#else
-static inline void ftrace_trace_stack(struct ring_buffer *buffer,
- unsigned long flags, int skip, int pc)
-{
-}
-
-static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
- unsigned long flags, int skip,
- int pc, struct pt_regs *regs)
-{
-}
-
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
unsigned long flags, int pc)
{
@@ -707,8 +715,6 @@ int trace_array_printk_buf(struct ring_buffer *buffer,
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);
-extern unsigned long trace_flags;
-
extern char trace_find_mark(unsigned long long duration);
/* Standard output formatting function used for function return traces */
@@ -723,9 +729,14 @@ extern char trace_find_mark(unsigned long long duration);
#define TRACE_GRAPH_PRINT_ABS_TIME 0x20
#define TRACE_GRAPH_PRINT_IRQS 0x40
#define TRACE_GRAPH_PRINT_TAIL 0x80
+#define TRACE_GRAPH_SLEEP_TIME 0x100
+#define TRACE_GRAPH_GRAPH_TIME 0x200
#define TRACE_GRAPH_PRINT_FILL_SHIFT 28
#define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
+extern void ftrace_graph_sleep_time_control(bool enable);
+extern void ftrace_graph_graph_time_control(bool enable);
+
extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
@@ -859,7 +870,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
-int ftrace_event_is_function(struct trace_event_call *call);
+bool ftrace_event_is_function(struct trace_event_call *call);
/*
 * struct trace_parser - serves for reading the user input separated by spaces
@@ -897,42 +908,94 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
size_t cnt, loff_t *ppos);
/*
+ * Only create function graph options if function graph is configured.
+ */
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+# define FGRAPH_FLAGS \
+ C(DISPLAY_GRAPH, "display-graph"),
+#else
+# define FGRAPH_FLAGS
+#endif
+
+#ifdef CONFIG_BRANCH_TRACER
+# define BRANCH_FLAGS \
+ C(BRANCH, "branch"),
+#else
+# define BRANCH_FLAGS
+#endif
+
+#ifdef CONFIG_FUNCTION_TRACER
+# define FUNCTION_FLAGS \
+ C(FUNCTION, "function-trace"),
+# define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
+#else
+# define FUNCTION_FLAGS
+# define FUNCTION_DEFAULT_FLAGS 0UL
+#endif
+
+#ifdef CONFIG_STACKTRACE
+# define STACK_FLAGS \
+ C(STACKTRACE, "stacktrace"),
+#else
+# define STACK_FLAGS
+#endif
+
+/*
* trace_iterator_flags is an enumeration that defines bit
* positions into trace_flags that controls the output.
*
* NOTE: These bits must match the trace_options array in
- * trace.c.
+ * trace.c (this macro guarantees it).
+ */
+#define TRACE_FLAGS \
+ C(PRINT_PARENT, "print-parent"), \
+ C(SYM_OFFSET, "sym-offset"), \
+ C(SYM_ADDR, "sym-addr"), \
+ C(VERBOSE, "verbose"), \
+ C(RAW, "raw"), \
+ C(HEX, "hex"), \
+ C(BIN, "bin"), \
+ C(BLOCK, "block"), \
+ C(PRINTK, "trace_printk"), \
+ C(ANNOTATE, "annotate"), \
+ C(USERSTACKTRACE, "userstacktrace"), \
+ C(SYM_USEROBJ, "sym-userobj"), \
+ C(PRINTK_MSGONLY, "printk-msg-only"), \
+ C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \
+ C(LATENCY_FMT, "latency-format"), \
+ C(RECORD_CMD, "record-cmd"), \
+ C(OVERWRITE, "overwrite"), \
+ C(STOP_ON_FREE, "disable_on_free"), \
+ C(IRQ_INFO, "irq-info"), \
+ C(MARKERS, "markers"), \
+ FUNCTION_FLAGS \
+ FGRAPH_FLAGS \
+ STACK_FLAGS \
+ BRANCH_FLAGS
+
+/*
+ * By defining C, we can make TRACE_FLAGS a list of bit names
+ * that will define the bits for the flag masks.
*/
-enum trace_iterator_flags {
- TRACE_ITER_PRINT_PARENT = 0x01,
- TRACE_ITER_SYM_OFFSET = 0x02,
- TRACE_ITER_SYM_ADDR = 0x04,
- TRACE_ITER_VERBOSE = 0x08,
- TRACE_ITER_RAW = 0x10,
- TRACE_ITER_HEX = 0x20,
- TRACE_ITER_BIN = 0x40,
- TRACE_ITER_BLOCK = 0x80,
- TRACE_ITER_STACKTRACE = 0x100,
- TRACE_ITER_PRINTK = 0x200,
- TRACE_ITER_PREEMPTONLY = 0x400,
- TRACE_ITER_BRANCH = 0x800,
- TRACE_ITER_ANNOTATE = 0x1000,
- TRACE_ITER_USERSTACKTRACE = 0x2000,
- TRACE_ITER_SYM_USEROBJ = 0x4000,
- TRACE_ITER_PRINTK_MSGONLY = 0x8000,
- TRACE_ITER_CONTEXT_INFO = 0x10000, /* Print pid/cpu/time */
- TRACE_ITER_LATENCY_FMT = 0x20000,
- TRACE_ITER_SLEEP_TIME = 0x40000,
- TRACE_ITER_GRAPH_TIME = 0x80000,
- TRACE_ITER_RECORD_CMD = 0x100000,
- TRACE_ITER_OVERWRITE = 0x200000,
- TRACE_ITER_STOP_ON_FREE = 0x400000,
- TRACE_ITER_IRQ_INFO = 0x800000,
- TRACE_ITER_MARKERS = 0x1000000,
- TRACE_ITER_FUNCTION = 0x2000000,
+#undef C
+#define C(a, b) TRACE_ITER_##a##_BIT
+
+enum trace_iterator_bits {
+ TRACE_FLAGS
+	/* Make sure we don't use more than we have bits for */
+ TRACE_ITER_LAST_BIT
};
/*
+ * By redefining C, we can make TRACE_FLAGS a list of masks that
+ * use the bits as defined above.
+ */
+#undef C
+#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
+
+enum trace_iterator_flags { TRACE_FLAGS };
+
+/*
* TRACE_ITER_SYM_MASK masks the options in trace_flags that
* control the output of kernel symbols.
*/
@@ -946,7 +1009,7 @@ extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
- if (trace_flags & TRACE_ITER_BRANCH)
+ if (tr->trace_flags & TRACE_ITER_BRANCH)
return enable_branch_tracing(tr);
return 0;
}
@@ -1269,6 +1332,7 @@ extern const char *__stop___trace_bprintk_fmt[];
extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];
+void trace_printk_control(bool enabled);
void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
diff --git a/kernel/trace/trace_benchmark.c b/kernel/trace/trace_benchmark.c
index 40a14cbcf8e0..0f109c4130d3 100644
--- a/kernel/trace/trace_benchmark.c
+++ b/kernel/trace/trace_benchmark.c
@@ -43,7 +43,7 @@ static void trace_do_benchmark(void)
unsigned int std = 0;
/* Only run if the tracepoint is actually active */
- if (!trace_benchmark_event_enabled())
+ if (!trace_benchmark_event_enabled() || !tracing_is_on())
return;
local_irq_disable();
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index e2e12ad3186f..3a2a73716a5b 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -125,25 +125,14 @@ void disable_branch_tracing(void)
mutex_unlock(&branch_tracing_mutex);
}
-static void start_branch_trace(struct trace_array *tr)
-{
- enable_branch_tracing(tr);
-}
-
-static void stop_branch_trace(struct trace_array *tr)
-{
- disable_branch_tracing();
-}
-
static int branch_trace_init(struct trace_array *tr)
{
- start_branch_trace(tr);
- return 0;
+ return enable_branch_tracing(tr);
}
static void branch_trace_reset(struct trace_array *tr)
{
- stop_branch_trace(tr);
+ disable_branch_tracing();
}
static enum print_line_t trace_branch_print(struct trace_iterator *iter,
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 7ca09cdc20c2..6bbc5f652355 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -15,11 +15,15 @@
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
+#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ctype.h>
+#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <trace/events/sched.h>
+
#include <asm/setup.h>
#include "trace_output.h"
@@ -38,21 +42,19 @@ static LIST_HEAD(ftrace_common_fields);
static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;
-#define SYSTEM_FL_FREE_NAME (1 << 31)
-
static inline int system_refcount(struct event_subsystem *system)
{
- return system->ref_count & ~SYSTEM_FL_FREE_NAME;
+ return system->ref_count;
}
static int system_refcount_inc(struct event_subsystem *system)
{
- return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
+ return system->ref_count++;
}
static int system_refcount_dec(struct event_subsystem *system)
{
- return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
+ return --system->ref_count;
}
/* Double loops, do not use break, only goto's work */
@@ -212,12 +214,32 @@ int trace_event_raw_init(struct trace_event_call *call)
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);
+bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
+{
+ struct trace_array *tr = trace_file->tr;
+ struct trace_array_cpu *data;
+ struct trace_pid_list *pid_list;
+
+ pid_list = rcu_dereference_sched(tr->filtered_pids);
+ if (!pid_list)
+ return false;
+
+ data = this_cpu_ptr(tr->trace_buffer.data);
+
+ return data->ignore_pid;
+}
+EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);
+
void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
struct trace_event_file *trace_file,
unsigned long len)
{
struct trace_event_call *event_call = trace_file->event_call;
+ if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
+ trace_event_ignore_this_pid(trace_file))
+ return NULL;
+
local_save_flags(fbuffer->flags);
fbuffer->pc = preempt_count();
fbuffer->trace_file = trace_file;
@@ -338,6 +360,7 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
int enable, int soft_disable)
{
struct trace_event_call *call = file->event_call;
+ struct trace_array *tr = file->tr;
int ret = 0;
int disable;
@@ -401,7 +424,7 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
if (soft_disable)
set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
- if (trace_flags & TRACE_ITER_RECORD_CMD) {
+ if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
tracing_start_cmdline_record();
set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
}
@@ -446,6 +469,142 @@ static void ftrace_clear_events(struct trace_array *tr)
mutex_unlock(&event_mutex);
}
+static int cmp_pid(const void *key, const void *elt)
+{
+ const pid_t *search_pid = key;
+ const pid_t *pid = elt;
+
+ if (*search_pid == *pid)
+ return 0;
+ if (*search_pid < *pid)
+ return -1;
+ return 1;
+}
+
+static bool
+check_ignore_pid(struct trace_pid_list *filtered_pids, struct task_struct *task)
+{
+ pid_t search_pid;
+ pid_t *pid;
+
+ /*
+ * Return false, because if filtered_pids does not exist,
+ * all pids are good to trace.
+ */
+ if (!filtered_pids)
+ return false;
+
+ search_pid = task->pid;
+
+ pid = bsearch(&search_pid, filtered_pids->pids,
+ filtered_pids->nr_pids, sizeof(pid_t),
+ cmp_pid);
+ if (!pid)
+ return true;
+
+ return false;
+}
+
+static void
+event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
+ struct task_struct *prev, struct task_struct *next)
+{
+ struct trace_array *tr = data;
+ struct trace_pid_list *pid_list;
+
+ pid_list = rcu_dereference_sched(tr->filtered_pids);
+
+ this_cpu_write(tr->trace_buffer.data->ignore_pid,
+ check_ignore_pid(pid_list, prev) &&
+ check_ignore_pid(pid_list, next));
+}
+
+static void
+event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
+ struct task_struct *prev, struct task_struct *next)
+{
+ struct trace_array *tr = data;
+ struct trace_pid_list *pid_list;
+
+ pid_list = rcu_dereference_sched(tr->filtered_pids);
+
+ this_cpu_write(tr->trace_buffer.data->ignore_pid,
+ check_ignore_pid(pid_list, next));
+}
+
+static void
+event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
+{
+ struct trace_array *tr = data;
+ struct trace_pid_list *pid_list;
+
+ /* Nothing to do if we are already tracing */
+ if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
+ return;
+
+ pid_list = rcu_dereference_sched(tr->filtered_pids);
+
+ this_cpu_write(tr->trace_buffer.data->ignore_pid,
+ check_ignore_pid(pid_list, task));
+}
+
+static void
+event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
+{
+ struct trace_array *tr = data;
+ struct trace_pid_list *pid_list;
+
+ /* Nothing to do if we are not tracing */
+ if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
+ return;
+
+ pid_list = rcu_dereference_sched(tr->filtered_pids);
+
+ /* Set tracing if current is enabled */
+ this_cpu_write(tr->trace_buffer.data->ignore_pid,
+ check_ignore_pid(pid_list, current));
+}
+
+static void __ftrace_clear_event_pids(struct trace_array *tr)
+{
+ struct trace_pid_list *pid_list;
+ struct trace_event_file *file;
+ int cpu;
+
+ pid_list = rcu_dereference_protected(tr->filtered_pids,
+ lockdep_is_held(&event_mutex));
+ if (!pid_list)
+ return;
+
+ unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
+ unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);
+
+ unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
+ unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);
+
+ list_for_each_entry(file, &tr->events, list) {
+ clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
+ }
+
+ for_each_possible_cpu(cpu)
+ per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;
+
+ rcu_assign_pointer(tr->filtered_pids, NULL);
+
+ /* Wait till all users are no longer using pid filtering */
+ synchronize_sched();
+
+ free_pages((unsigned long)pid_list->pids, pid_list->order);
+ kfree(pid_list);
+}
+
+static void ftrace_clear_event_pids(struct trace_array *tr)
+{
+ mutex_lock(&event_mutex);
+ __ftrace_clear_event_pids(tr);
+ mutex_unlock(&event_mutex);
+}
+
static void __put_system(struct event_subsystem *system)
{
struct event_filter *filter = system->filter;
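
check_ignore_pid() relies on the filtered pid list being kept sorted so that bsearch() can decide in O(log n) whether a task is in the filter; a miss means the task's events are ignored. A userspace sketch of that lookup using the same comparison function (illustrative only):

/* Userspace sketch of the sorted-array PID lookup used by check_ignore_pid(). */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

static int cmp_pid(const void *key, const void *elt)
{
	const pid_t *search_pid = key;
	const pid_t *pid = elt;

	if (*search_pid == *pid)
		return 0;
	return *search_pid < *pid ? -1 : 1;
}

int main(void)
{
	pid_t pids[] = { 244, 1, 123 };
	size_t nr_pids = sizeof(pids) / sizeof(pids[0]);
	pid_t task = 123;

	/* The kernel keeps the list sorted at write time; sort here too. */
	qsort(pids, nr_pids, sizeof(pid_t), cmp_pid);

	/* A miss means the task is not in the filter, so its events are ignored. */
	printf("%s\n", bsearch(&task, pids, nr_pids, sizeof(pid_t), cmp_pid)
		       ? "trace" : "ignore");
	return 0;
}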
@@ -460,8 +619,7 @@ static void __put_system(struct event_subsystem *system)
kfree(filter->filter_string);
kfree(filter);
}
- if (system->ref_count & SYSTEM_FL_FREE_NAME)
- kfree(system->name);
+ kfree_const(system->name);
kfree(system);
}
@@ -779,6 +937,58 @@ static void t_stop(struct seq_file *m, void *p)
mutex_unlock(&event_mutex);
}
+static void *p_start(struct seq_file *m, loff_t *pos)
+ __acquires(RCU)
+{
+ struct trace_pid_list *pid_list;
+ struct trace_array *tr = m->private;
+
+ /*
+	 * Grab the mutex so that calls to p_next() see the same
+	 * tr->filtered_pids that p_start() saw.
+	 * If we just passed tr->filtered_pids around, RCU alone would
+	 * have been enough, but doing that makes things more complex.
+ */
+ mutex_lock(&event_mutex);
+ rcu_read_lock_sched();
+
+ pid_list = rcu_dereference_sched(tr->filtered_pids);
+
+ if (!pid_list || *pos >= pid_list->nr_pids)
+ return NULL;
+
+ return (void *)&pid_list->pids[*pos];
+}
+
+static void p_stop(struct seq_file *m, void *p)
+ __releases(RCU)
+{
+ rcu_read_unlock_sched();
+ mutex_unlock(&event_mutex);
+}
+
+static void *
+p_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct trace_array *tr = m->private;
+ struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);
+
+ (*pos)++;
+
+ if (*pos >= pid_list->nr_pids)
+ return NULL;
+
+ return (void *)&pid_list->pids[*pos];
+}
+
+static int p_show(struct seq_file *m, void *v)
+{
+ pid_t *pid = v;
+
+ seq_printf(m, "%d\n", *pid);
+ return 0;
+}
+
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
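
The p_start()/p_next()/p_show()/p_stop() callbacks above let seq_file walk the pid array one entry at a time, which is what makes reading the pid file print one pid per line. A rough userspace sketch of that start/next iteration contract (illustrative; this is not the seq_file API itself):

/* Rough userspace sketch of the start/next iteration contract used above. */
#include <stdio.h>

static int pids[] = { 1, 123, 244 };
static const int nr_pids = 3;

static int *demo_start(long *pos)
{
	return (*pos >= nr_pids) ? NULL : &pids[*pos];
}

static int *demo_next(long *pos)
{
	(*pos)++;
	return (*pos >= nr_pids) ? NULL : &pids[*pos];
}

int main(void)
{
	long pos = 0;
	int *p;

	for (p = demo_start(&pos); p; p = demo_next(&pos))
		printf("%d\n", *p);	/* one pid per line */
	return 0;
}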
@@ -1336,8 +1546,209 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
return r;
}
+static int max_pids(struct trace_pid_list *pid_list)
+{
+ return (PAGE_SIZE << pid_list->order) / sizeof(pid_t);
+}
+
+static void ignore_task_cpu(void *data)
+{
+ struct trace_array *tr = data;
+ struct trace_pid_list *pid_list;
+
+ /*
+ * This function is called by on_each_cpu() while the
+ * event_mutex is held.
+ */
+ pid_list = rcu_dereference_protected(tr->filtered_pids,
+ mutex_is_locked(&event_mutex));
+
+ this_cpu_write(tr->trace_buffer.data->ignore_pid,
+ check_ignore_pid(pid_list, current));
+}
+
+static ssize_t
+ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct trace_array *tr = m->private;
+ struct trace_pid_list *filtered_pids = NULL;
+ struct trace_pid_list *pid_list = NULL;
+ struct trace_event_file *file;
+ struct trace_parser parser;
+ unsigned long val;
+ loff_t this_pos;
+ ssize_t read = 0;
+ ssize_t ret = 0;
+ pid_t pid;
+ int i;
+
+ if (!cnt)
+ return 0;
+
+ ret = tracing_update_buffers();
+ if (ret < 0)
+ return ret;
+
+ if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
+ return -ENOMEM;
+
+ mutex_lock(&event_mutex);
+ /*
+	 * Load all the pids into the array before swapping
+	 * tr->filtered_pids over to the new list.
+ */
+ while (cnt > 0) {
+
+ this_pos = 0;
+
+ ret = trace_get_user(&parser, ubuf, cnt, &this_pos);
+ if (ret < 0 || !trace_parser_loaded(&parser))
+ break;
+
+ read += ret;
+ ubuf += ret;
+ cnt -= ret;
+
+ parser.buffer[parser.idx] = 0;
+
+ ret = -EINVAL;
+ if (kstrtoul(parser.buffer, 0, &val))
+ break;
+ if (val > INT_MAX)
+ break;
+
+ pid = (pid_t)val;
+
+ ret = -ENOMEM;
+ if (!pid_list) {
+ pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
+ if (!pid_list)
+ break;
+
+ filtered_pids = rcu_dereference_protected(tr->filtered_pids,
+ lockdep_is_held(&event_mutex));
+ if (filtered_pids)
+ pid_list->order = filtered_pids->order;
+ else
+ pid_list->order = 0;
+
+ pid_list->pids = (void *)__get_free_pages(GFP_KERNEL,
+ pid_list->order);
+ if (!pid_list->pids)
+ break;
+
+ if (filtered_pids) {
+ pid_list->nr_pids = filtered_pids->nr_pids;
+ memcpy(pid_list->pids, filtered_pids->pids,
+ pid_list->nr_pids * sizeof(pid_t));
+ } else
+ pid_list->nr_pids = 0;
+ }
+
+ if (pid_list->nr_pids >= max_pids(pid_list)) {
+ pid_t *pid_page;
+
+ pid_page = (void *)__get_free_pages(GFP_KERNEL,
+ pid_list->order + 1);
+ if (!pid_page)
+ break;
+ memcpy(pid_page, pid_list->pids,
+ pid_list->nr_pids * sizeof(pid_t));
+ free_pages((unsigned long)pid_list->pids, pid_list->order);
+
+ pid_list->order++;
+ pid_list->pids = pid_page;
+ }
+
+ pid_list->pids[pid_list->nr_pids++] = pid;
+ trace_parser_clear(&parser);
+ ret = 0;
+ }
+ trace_parser_put(&parser);
+
+ if (ret < 0) {
+ if (pid_list)
+ free_pages((unsigned long)pid_list->pids, pid_list->order);
+ kfree(pid_list);
+ mutex_unlock(&event_mutex);
+ return ret;
+ }
+
+ if (!pid_list) {
+ mutex_unlock(&event_mutex);
+ return ret;
+ }
+
+ sort(pid_list->pids, pid_list->nr_pids, sizeof(pid_t), cmp_pid, NULL);
+
+ /* Remove duplicates */
+ for (i = 1; i < pid_list->nr_pids; i++) {
+ int start = i;
+
+ while (i < pid_list->nr_pids &&
+ pid_list->pids[i - 1] == pid_list->pids[i])
+ i++;
+
+ if (start != i) {
+ if (i < pid_list->nr_pids) {
+ memmove(&pid_list->pids[start], &pid_list->pids[i],
+ (pid_list->nr_pids - i) * sizeof(pid_t));
+ pid_list->nr_pids -= i - start;
+ i = start;
+ } else
+ pid_list->nr_pids = start;
+ }
+ }
+
+ rcu_assign_pointer(tr->filtered_pids, pid_list);
+
+ list_for_each_entry(file, &tr->events, list) {
+ set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
+ }
+
+ if (filtered_pids) {
+ synchronize_sched();
+
+ free_pages((unsigned long)filtered_pids->pids, filtered_pids->order);
+ kfree(filtered_pids);
+ } else {
+ /*
+ * Register a probe that is called before all other probes
+ * to set ignore_pid if next or prev do not match.
+		 * Register a probe that is called after all other probes
+ * to only keep ignore_pid set if next pid matches.
+ */
+ register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
+ tr, INT_MAX);
+ register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
+ tr, 0);
+
+ register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
+ tr, INT_MAX);
+ register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
+ tr, 0);
+ }
+
+ /*
+ * Ignoring of pids is done at task switch. But we have to
+ * check for those tasks that are currently running.
+ * Always do this in case a pid was appended or removed.
+ */
+ on_each_cpu(ignore_task_cpu, tr, 1);
+
+ mutex_unlock(&event_mutex);
+
+ ret = read;
+ *ppos += read;
+
+ return ret;
+}
+
static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
+static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
static int ftrace_event_release(struct inode *inode, struct file *file);
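
After all the pids are read in, ftrace_event_pid_write() sorts the array and then removes duplicates in place with memmove() before publishing it via rcu_assign_pointer(). A userspace sketch of that in-place de-duplication loop (illustrative only):

/* Userspace sketch of the in-place de-duplication over a sorted array. */
#include <stdio.h>
#include <string.h>

static int dedup(int *v, int n)
{
	int i;

	for (i = 1; i < n; i++) {
		int start = i;

		while (i < n && v[i - 1] == v[i])
			i++;

		if (start != i) {
			if (i < n) {
				memmove(&v[start], &v[i],
					(n - i) * sizeof(int));
				n -= i - start;
				i = start;
			} else {
				n = start;
			}
		}
	}
	return n;
}

int main(void)
{
	int v[] = { 1, 1, 2, 2, 2, 5, 7, 7 };
	int n = dedup(v, 8);
	int i;

	for (i = 0; i < n; i++)
		printf("%d ", v[i]);	/* prints: 1 2 5 7 */
	printf("\n");
	return 0;
}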
static const struct seq_operations show_event_seq_ops = {
@@ -1354,6 +1765,13 @@ static const struct seq_operations show_set_event_seq_ops = {
.stop = t_stop,
};
+static const struct seq_operations show_set_pid_seq_ops = {
+ .start = p_start,
+ .next = p_next,
+ .show = p_show,
+ .stop = p_stop,
+};
+
static const struct file_operations ftrace_avail_fops = {
.open = ftrace_event_avail_open,
.read = seq_read,
@@ -1369,6 +1787,14 @@ static const struct file_operations ftrace_set_event_fops = {
.release = ftrace_event_release,
};
+static const struct file_operations ftrace_set_event_pid_fops = {
+ .open = ftrace_event_set_pid_open,
+ .read = seq_read,
+ .write = ftrace_event_pid_write,
+ .llseek = seq_lseek,
+ .release = ftrace_event_release,
+};
+
static const struct file_operations ftrace_enable_fops = {
.open = tracing_open_generic,
.read = event_enable_read,
@@ -1479,6 +1905,26 @@ ftrace_event_set_open(struct inode *inode, struct file *file)
return ret;
}
+static int
+ftrace_event_set_pid_open(struct inode *inode, struct file *file)
+{
+ const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
+ struct trace_array *tr = inode->i_private;
+ int ret;
+
+ if (trace_array_get(tr) < 0)
+ return -ENODEV;
+
+ if ((file->f_mode & FMODE_WRITE) &&
+ (file->f_flags & O_TRUNC))
+ ftrace_clear_event_pids(tr);
+
+ ret = ftrace_event_open(inode, file, seq_ops);
+ if (ret < 0)
+ trace_array_put(tr);
+ return ret;
+}
+
static struct event_subsystem *
create_new_subsystem(const char *name)
{
@@ -1492,13 +1938,9 @@ create_new_subsystem(const char *name)
system->ref_count = 1;
/* Only allocate if dynamic (kprobes and modules) */
- if (!core_kernel_data((unsigned long)name)) {
- system->ref_count |= SYSTEM_FL_FREE_NAME;
- system->name = kstrdup(name, GFP_KERNEL);
- if (!system->name)
- goto out_free;
- } else
- system->name = name;
+ system->name = kstrdup_const(name, GFP_KERNEL);
+ if (!system->name)
+ goto out_free;
system->filter = NULL;
@@ -1511,8 +1953,7 @@ create_new_subsystem(const char *name)
return system;
out_free:
- if (system->ref_count & SYSTEM_FL_FREE_NAME)
- kfree(system->name);
+ kfree_const(system->name);
kfree(system);
return NULL;
}
@@ -2478,6 +2919,9 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
return -ENOMEM;
}
+ entry = tracefs_create_file("set_event_pid", 0644, parent,
+ tr, &ftrace_set_event_pid_fops);
+
/* ring buffer internal formats */
trace_create_file("header_page", 0444, d_events,
ring_buffer_print_page_header,
@@ -2558,6 +3002,9 @@ int event_trace_del_tracer(struct trace_array *tr)
/* Disable any event triggers and associated soft-disabled events */
clear_event_triggers(tr);
+ /* Clear the pid list */
+ __ftrace_clear_event_pids(tr);
+
/* Disable any running events */
__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
@@ -2595,16 +3042,16 @@ early_enable_events(struct trace_array *tr, bool disable_first)
if (!token)
break;
- if (!*token)
- continue;
- /* Restarting syscalls requires that we stop them first */
- if (disable_first)
- ftrace_set_clr_event(tr, token, 0);
+ if (*token) {
+ /* Restarting syscalls requires that we stop them first */
+ if (disable_first)
+ ftrace_set_clr_event(tr, token, 0);
- ret = ftrace_set_clr_event(tr, token, 1);
- if (ret)
- pr_warn("Failed to enable trace event: %s\n", token);
+ ret = ftrace_set_clr_event(tr, token, 1);
+ if (ret)
+ pr_warn("Failed to enable trace event: %s\n", token);
+ }
/* Put back the comma to allow this to be called again */
if (buf)
@@ -2891,7 +3338,9 @@ static __init void event_trace_self_tests(void)
static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
-static void
+static struct trace_array *event_tr;
+
+static void __init
function_test_events_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{
@@ -2922,7 +3371,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
entry->ip = ip;
entry->parent_ip = parent_ip;
- trace_buffer_unlock_commit(buffer, event, flags, pc);
+ trace_buffer_unlock_commit(event_tr, buffer, event, flags, pc);
out:
atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
@@ -2938,6 +3387,9 @@ static struct ftrace_ops trace_ops __initdata =
static __init void event_trace_self_test_with_function(void)
{
int ret;
+ event_tr = top_trace_array();
+ if (WARN_ON(!event_tr))
+ return;
ret = register_ftrace_function(&trace_ops);
if (WARN_ON(ret < 0)) {
pr_info("Failed to enable function tracer for event tests\n");
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index bd1bf184c5c9..f93a219b18da 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -973,15 +973,15 @@ static bool is_string_field(struct ftrace_event_field *field)
field->filter_type == FILTER_PTR_STRING;
}
-static int is_legal_op(struct ftrace_event_field *field, int op)
+static bool is_legal_op(struct ftrace_event_field *field, int op)
{
if (is_string_field(field) &&
(op != OP_EQ && op != OP_NE && op != OP_GLOB))
- return 0;
+ return false;
if (!is_string_field(field) && op == OP_GLOB)
- return 0;
+ return false;
- return 1;
+ return true;
}
static filter_pred_fn_t select_comparison_fn(int op, int field_size,
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index adabf7da9113..39aa7aa66468 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -187,7 +187,7 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
FTRACE_ENTRY_REG(call, struct_name, etype, \
PARAMS(tstruct), PARAMS(print), filter, NULL)
-int ftrace_event_is_function(struct trace_event_call *call)
+bool ftrace_event_is_function(struct trace_event_call *call)
{
return call == &event_function;
}
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index ca98445782ac..92382af7a213 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -83,13 +83,18 @@ static struct tracer_opt trace_opts[] = {
{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
/* Display function name after trailing } */
{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
+ /* Include sleep time (scheduled out) between entry and return */
+ { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
+ /* Include time within nested functions */
+ { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
{ } /* Empty entry */
};
static struct tracer_flags tracer_flags = {
/* Don't display overruns, proc, or tail by default */
.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
- TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
+ TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
+ TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
.opts = trace_opts
};
@@ -107,8 +112,8 @@ enum {
};
static void
-print_graph_duration(unsigned long long duration, struct trace_seq *s,
- u32 flags);
+print_graph_duration(struct trace_array *tr, unsigned long long duration,
+ struct trace_seq *s, u32 flags);
/* Add a function return address to the trace stack on thread info.*/
int
@@ -653,6 +658,7 @@ static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
enum trace_type type, int cpu, pid_t pid, u32 flags)
{
+ struct trace_array *tr = iter->tr;
struct trace_seq *s = &iter->seq;
struct trace_entry *ent = iter->ent;
@@ -660,7 +666,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
addr >= (unsigned long)__irqentry_text_end)
return;
- if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
+ if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
/* Absolute time */
if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
print_graph_abs_time(iter->ts, s);
@@ -676,19 +682,19 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
}
/* Latency format */
- if (trace_flags & TRACE_ITER_LATENCY_FMT)
+ if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
print_graph_lat_fmt(s, ent);
}
/* No overhead */
- print_graph_duration(0, s, flags | FLAGS_FILL_START);
+ print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);
if (type == TRACE_GRAPH_ENT)
trace_seq_puts(s, "==========>");
else
trace_seq_puts(s, "<==========");
- print_graph_duration(0, s, flags | FLAGS_FILL_END);
+ print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
trace_seq_putc(s, '\n');
}
@@ -726,11 +732,11 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
}
static void
-print_graph_duration(unsigned long long duration, struct trace_seq *s,
- u32 flags)
+print_graph_duration(struct trace_array *tr, unsigned long long duration,
+ struct trace_seq *s, u32 flags)
{
if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
- !(trace_flags & TRACE_ITER_CONTEXT_INFO))
+ !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
return;
 /* No real data, just filling the column with spaces */
@@ -764,6 +770,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
struct trace_seq *s, u32 flags)
{
struct fgraph_data *data = iter->private;
+ struct trace_array *tr = iter->tr;
struct ftrace_graph_ret *graph_ret;
struct ftrace_graph_ent *call;
unsigned long long duration;
@@ -792,7 +799,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
}
/* Overhead and duration */
- print_graph_duration(duration, s, flags);
+ print_graph_duration(tr, duration, s, flags);
/* Function */
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
@@ -810,6 +817,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
{
struct ftrace_graph_ent *call = &entry->graph_ent;
struct fgraph_data *data = iter->private;
+ struct trace_array *tr = iter->tr;
int i;
if (data) {
@@ -825,7 +833,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
}
/* No time */
- print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
+ print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
/* Function */
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
@@ -849,6 +857,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
{
struct fgraph_data *data = iter->private;
struct trace_entry *ent = iter->ent;
+ struct trace_array *tr = iter->tr;
int cpu = iter->cpu;
/* Pid */
@@ -858,7 +867,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
/* Interrupt */
print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
- if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
+ if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
return;
/* Absolute time */
@@ -876,7 +885,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
}
/* Latency format */
- if (trace_flags & TRACE_ITER_LATENCY_FMT)
+ if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
print_graph_lat_fmt(s, ent);
return;
@@ -1027,6 +1036,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
{
unsigned long long duration = trace->rettime - trace->calltime;
struct fgraph_data *data = iter->private;
+ struct trace_array *tr = iter->tr;
pid_t pid = ent->pid;
int cpu = iter->cpu;
int func_match = 1;
@@ -1058,7 +1068,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
print_graph_prologue(iter, s, 0, 0, flags);
/* Overhead and duration */
- print_graph_duration(duration, s, flags);
+ print_graph_duration(tr, duration, s, flags);
/* Closing brace */
for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
@@ -1091,7 +1101,8 @@ static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
struct trace_iterator *iter, u32 flags)
{
- unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
+ struct trace_array *tr = iter->tr;
+ unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
struct fgraph_data *data = iter->private;
struct trace_event *event;
int depth = 0;
@@ -1104,7 +1115,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
print_graph_prologue(iter, s, 0, 0, flags);
/* No time */
- print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
+ print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
/* Indentation */
if (depth > 0)
@@ -1245,9 +1256,10 @@ static void print_lat_header(struct seq_file *s, u32 flags)
seq_printf(s, "#%.*s||| / \n", size, spaces);
}
-static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
+static void __print_graph_headers_flags(struct trace_array *tr,
+ struct seq_file *s, u32 flags)
{
- int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
+ int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;
if (lat)
print_lat_header(s, flags);
@@ -1289,11 +1301,12 @@ static void print_graph_headers(struct seq_file *s)
void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
struct trace_iterator *iter = s->private;
+ struct trace_array *tr = iter->tr;
- if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
+ if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
return;
- if (trace_flags & TRACE_ITER_LATENCY_FMT) {
+ if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
/* print nothing if the buffers are empty */
if (trace_empty(iter))
return;
@@ -1301,7 +1314,7 @@ void print_graph_headers_flags(struct seq_file *s, u32 flags)
print_trace_header(s, iter);
}
- __print_graph_headers_flags(s, flags);
+ __print_graph_headers_flags(tr, s, flags);
}
void graph_trace_open(struct trace_iterator *iter)
@@ -1362,6 +1375,12 @@ func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
if (bit == TRACE_GRAPH_PRINT_IRQS)
ftrace_graph_skip_irqs = !set;
+ if (bit == TRACE_GRAPH_SLEEP_TIME)
+ ftrace_graph_sleep_time_control(set);
+
+ if (bit == TRACE_GRAPH_GRAPH_TIME)
+ ftrace_graph_graph_time_control(set);
+
return 0;
}
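
(Not part of the patch: the hunks above thread a struct trace_array pointer into the graph-output helpers so flag tests read the per-instance tr->trace_flags instead of the old global. A minimal userspace sketch of that pattern follows; the flag names and the struct are illustrative stand-ins, and the two flags are folded into one word for brevity.)

#include <stdio.h>

#define ITER_CONTEXT_INFO	(1 << 0)
#define GRAPH_PRINT_DURATION	(1 << 1)

struct instance {
	const char *name;
	unsigned long flags;		/* stands in for tr->trace_flags */
};

static void print_duration(struct instance *inst, unsigned long long ns)
{
	/* mirrors the guard in print_graph_duration(): do nothing unless
	 * both the duration option and this instance's context-info flag
	 * are set */
	if (!(inst->flags & GRAPH_PRINT_DURATION) ||
	    !(inst->flags & ITER_CONTEXT_INFO))
		return;
	printf("%s: %llu ns\n", inst->name, ns);
}

int main(void)
{
	struct instance a = { "inst-a", ITER_CONTEXT_INFO | GRAPH_PRINT_DURATION };
	struct instance b = { "inst-b", GRAPH_PRINT_DURATION };	/* context info off */

	print_duration(&a, 1500);	/* prints */
	print_duration(&b, 1500);	/* suppressed for this instance only */
	return 0;
}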
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 8523ea345f2b..e4e56589ec1d 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -31,7 +31,6 @@ enum {
static int trace_type __read_mostly;
static int save_flags;
-static bool function_enabled;
static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);
@@ -57,22 +56,16 @@ irq_trace(void)
# define irq_trace() (0)
#endif
-#define TRACE_DISPLAY_GRAPH 1
-
-static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- /* display latency trace as call graph */
- { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
+static int irqsoff_display_graph(struct trace_array *tr, int set);
+# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
+#else
+static inline int irqsoff_display_graph(struct trace_array *tr, int set)
+{
+ return -EINVAL;
+}
+# define is_graph(tr) false
#endif
- { } /* Empty entry */
-};
-
-static struct tracer_flags tracer_flags = {
- .val = 0,
- .opts = trace_opts,
-};
-
-#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
/*
* Sequence count - we record it when starting a measurement and
@@ -152,15 +145,11 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int
-irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
+static int irqsoff_display_graph(struct trace_array *tr, int set)
{
int cpu;
- if (!(bit & TRACE_DISPLAY_GRAPH))
- return -EINVAL;
-
- if (!(is_graph() ^ set))
+ if (!(is_graph(tr) ^ set))
return 0;
stop_irqsoff_tracer(irqsoff_trace, !set);
@@ -209,7 +198,7 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
static void irqsoff_trace_open(struct trace_iterator *iter)
{
- if (is_graph())
+ if (is_graph(iter->tr))
graph_trace_open(iter);
}
@@ -231,7 +220,7 @@ static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
* In graph mode call the graph tracer output function,
* otherwise go with the TRACE_FN event handler
*/
- if (is_graph())
+ if (is_graph(iter->tr))
return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
return TRACE_TYPE_UNHANDLED;
@@ -239,7 +228,9 @@ static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
static void irqsoff_print_header(struct seq_file *s)
{
- if (is_graph())
+ struct trace_array *tr = irqsoff_trace;
+
+ if (is_graph(tr))
print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
else
trace_default_header(s);
@@ -250,7 +241,7 @@ __trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip,
unsigned long flags, int pc)
{
- if (is_graph())
+ if (is_graph(tr))
trace_graph_function(tr, ip, parent_ip, flags, pc);
else
trace_function(tr, ip, parent_ip, flags, pc);
@@ -259,27 +250,23 @@ __trace_function(struct trace_array *tr,
#else
#define __trace_function trace_function
-static int
-irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
-{
- return -EINVAL;
-}
-
+#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
return -1;
}
+#endif
static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
return TRACE_TYPE_UNHANDLED;
}
-static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }
#ifdef CONFIG_FUNCTION_TRACER
+static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
trace_default_header(s);
@@ -295,16 +282,16 @@ static void irqsoff_print_header(struct seq_file *s)
/*
* Should this new latency be reported/recorded?
*/
-static int report_latency(struct trace_array *tr, cycle_t delta)
+static bool report_latency(struct trace_array *tr, cycle_t delta)
{
if (tracing_thresh) {
if (delta < tracing_thresh)
- return 0;
+ return false;
} else {
if (delta <= tr->max_latency)
- return 0;
+ return false;
}
- return 1;
+ return true;
}
static void
@@ -523,12 +510,15 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
}
#endif /* CONFIG_PREEMPT_TRACER */
+#ifdef CONFIG_FUNCTION_TRACER
+static bool function_enabled;
+
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
int ret;
/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
- if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
+ if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
return 0;
if (graph)
@@ -556,20 +546,40 @@ static void unregister_irqsoff_function(struct trace_array *tr, int graph)
function_enabled = false;
}
-static void irqsoff_function_set(struct trace_array *tr, int set)
+static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
+ if (!(mask & TRACE_ITER_FUNCTION))
+ return 0;
+
if (set)
- register_irqsoff_function(tr, is_graph(), 1);
+ register_irqsoff_function(tr, is_graph(tr), 1);
else
- unregister_irqsoff_function(tr, is_graph());
+ unregister_irqsoff_function(tr, is_graph(tr));
+ return 1;
+}
+#else
+static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
+{
+ return 0;
+}
+static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
+static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
+{
+ return 0;
}
+#endif /* CONFIG_FUNCTION_TRACER */
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
struct tracer *tracer = tr->current_trace;
- if (mask & TRACE_ITER_FUNCTION)
- irqsoff_function_set(tr, set);
+ if (irqsoff_function_set(tr, mask, set))
+ return 0;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if (mask & TRACE_ITER_DISPLAY_GRAPH)
+ return irqsoff_display_graph(tr, set);
+#endif
return trace_keep_overwrite(tracer, mask, set);
}
@@ -602,7 +612,7 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
if (irqsoff_busy)
return -EBUSY;
- save_flags = trace_flags;
+ save_flags = tr->trace_flags;
/* non overwrite screws up the latency tracers */
set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
@@ -618,7 +628,7 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
/* Only toplevel instance supports graph tracing */
if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
- is_graph())))
+ is_graph(tr))))
printk(KERN_ERR "failed to start irqsoff tracer\n");
irqsoff_busy = true;
@@ -630,7 +640,7 @@ static void irqsoff_tracer_reset(struct trace_array *tr)
int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
- stop_irqsoff_tracer(tr, is_graph());
+ stop_irqsoff_tracer(tr, is_graph(tr));
set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
@@ -666,8 +676,6 @@ static struct tracer irqsoff_tracer __read_mostly =
.print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
- .flags = &tracer_flags,
- .set_flag = irqsoff_set_flag,
.flag_changed = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_irqsoff,
@@ -700,8 +708,6 @@ static struct tracer preemptoff_tracer __read_mostly =
.print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
- .flags = &tracer_flags,
- .set_flag = irqsoff_set_flag,
.flag_changed = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_preemptoff,
@@ -736,8 +742,6 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
.print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
- .flags = &tracer_flags,
- .set_flag = irqsoff_set_flag,
.flag_changed = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_preemptirqsoff,
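
(Not part of the patch: report_latency() above only changes its return type from int to bool, but the decision it encodes is easy to miss in diff form. A standalone sketch of the same rule follows; cycle_t and the variables are stand-ins.)

#include <stdio.h>
#include <stdbool.h>

typedef unsigned long long cycle_t;	/* stand-in for the kernel type */

static cycle_t tracing_thresh;		/* 0 means "no threshold set" */
static cycle_t max_latency;

static bool report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;	/* below the user threshold */
	} else {
		if (delta <= max_latency)
			return false;	/* not a new maximum */
	}
	return true;
}

int main(void)
{
	max_latency = 100;
	printf("%d\n", report_latency(80));	/* 0: not a new max */
	printf("%d\n", report_latency(150));	/* 1: new max, record it */
	tracing_thresh = 50;
	printf("%d\n", report_latency(80));	/* 1: above the threshold */
	return 0;
}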
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index 3ccf5c2c1320..57149bce6aad 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -21,20 +21,22 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];
+ struct trace_array *tr;
unsigned int old_userobj;
int cnt = 0, cpu;
trace_init_global_iter(&iter);
iter.buffer_iter = buffer_iter;
+ tr = iter.tr;
for_each_tracing_cpu(cpu) {
atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
}
- old_userobj = trace_flags;
+ old_userobj = tr->trace_flags;
/* don't look at user memory in panic mode */
- trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
+ tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
kdb_printf("Dumping ftrace buffer:\n");
@@ -82,7 +84,7 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
kdb_printf("---------------------------------\n");
out:
- trace_flags = old_userobj;
+ tr->trace_flags = old_userobj;
for_each_tracing_cpu(cpu) {
atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 638e110c5bfd..2be8c4f2403d 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -314,7 +314,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
entry->rw = *rw;
if (!call_filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(buffer, event, 0, pc);
+ trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
}
void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -344,7 +344,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
entry->map = *map;
if (!call_filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(buffer, event, 0, pc);
+ trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
}
void mmio_trace_mapping(struct mmiotrace_map *map)
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 8e481a84aeea..282982195e09 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -322,8 +322,8 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt,
# define IP_FMT "%016lx"
#endif
-int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
- unsigned long ip, unsigned long sym_flags)
+static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
+ unsigned long ip, unsigned long sym_flags)
{
struct file *file = NULL;
unsigned long vmstart = 0;
@@ -355,50 +355,6 @@ int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
}
int
-seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
- unsigned long sym_flags)
-{
- struct mm_struct *mm = NULL;
- unsigned int i;
-
- if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
- struct task_struct *task;
- /*
- * we do the lookup on the thread group leader,
- * since individual threads might have already quit!
- */
- rcu_read_lock();
- task = find_task_by_vpid(entry->tgid);
- if (task)
- mm = get_task_mm(task);
- rcu_read_unlock();
- }
-
- for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
- unsigned long ip = entry->caller[i];
-
- if (ip == ULONG_MAX || trace_seq_has_overflowed(s))
- break;
-
- trace_seq_puts(s, " => ");
-
- if (!ip) {
- trace_seq_puts(s, "??");
- trace_seq_putc(s, '\n');
- continue;
- }
-
- seq_print_user_ip(s, mm, ip, sym_flags);
- trace_seq_putc(s, '\n');
- }
-
- if (mm)
- mmput(mm);
-
- return !trace_seq_has_overflowed(s);
-}
-
-int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
if (!ip) {
@@ -520,7 +476,8 @@ char trace_find_mark(unsigned long long d)
static int
lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
- unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
+ struct trace_array *tr = iter->tr;
+ unsigned long verbose = tr->trace_flags & TRACE_ITER_VERBOSE;
unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
unsigned long long rel_ts = next_ts - iter->ts;
@@ -563,6 +520,7 @@ lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
int trace_print_context(struct trace_iterator *iter)
{
+ struct trace_array *tr = iter->tr;
struct trace_seq *s = &iter->seq;
struct trace_entry *entry = iter->ent;
unsigned long long t;
@@ -574,7 +532,7 @@ int trace_print_context(struct trace_iterator *iter)
trace_seq_printf(s, "%16s-%-5d [%03d] ",
comm, entry->pid, iter->cpu);
- if (trace_flags & TRACE_ITER_IRQ_INFO)
+ if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
trace_print_lat_fmt(s, entry);
if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
@@ -590,14 +548,15 @@ int trace_print_context(struct trace_iterator *iter)
int trace_print_lat_context(struct trace_iterator *iter)
{
- u64 next_ts;
+ struct trace_array *tr = iter->tr;
/* trace_find_next_entry will reset ent_size */
int ent_size = iter->ent_size;
struct trace_seq *s = &iter->seq;
+ u64 next_ts;
struct trace_entry *entry = iter->ent,
*next_entry = trace_find_next_entry(iter, NULL,
&next_ts);
- unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
+ unsigned long verbose = (tr->trace_flags & TRACE_ITER_VERBOSE);
/* Restore the original ent_size */
iter->ent_size = ent_size;
@@ -1079,13 +1038,49 @@ static struct trace_event trace_stack_event = {
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
int flags, struct trace_event *event)
{
+ struct trace_array *tr = iter->tr;
struct userstack_entry *field;
struct trace_seq *s = &iter->seq;
+ struct mm_struct *mm = NULL;
+ unsigned int i;
trace_assign_type(field, iter->ent);
trace_seq_puts(s, "<user stack trace>\n");
- seq_print_userip_objs(field, s, flags);
+
+ if (tr->trace_flags & TRACE_ITER_SYM_USEROBJ) {
+ struct task_struct *task;
+ /*
+ * we do the lookup on the thread group leader,
+ * since individual threads might have already quit!
+ */
+ rcu_read_lock();
+ task = find_task_by_vpid(field->tgid);
+ if (task)
+ mm = get_task_mm(task);
+ rcu_read_unlock();
+ }
+
+ for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+ unsigned long ip = field->caller[i];
+
+ if (ip == ULONG_MAX || trace_seq_has_overflowed(s))
+ break;
+
+ trace_seq_puts(s, " => ");
+
+ if (!ip) {
+ trace_seq_puts(s, "??");
+ trace_seq_putc(s, '\n');
+ continue;
+ }
+
+ seq_print_user_ip(s, mm, ip, flags);
+ trace_seq_putc(s, '\n');
+ }
+
+ if (mm)
+ mmput(mm);
return trace_handle_return(s);
}
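
(Not part of the patch: trace_user_stack_print() now walks the caller array inline instead of calling the removed seq_print_userip_objs(). A minimal userspace model of that loop follows; symbol resolution through the task's mm is omitted, and the array size and addresses are illustrative.)

#include <stdio.h>
#include <limits.h>

#define STACK_ENTRIES 8

static void print_user_stack(const unsigned long *caller)
{
	unsigned int i;

	printf("<user stack trace>\n");
	for (i = 0; i < STACK_ENTRIES; i++) {
		unsigned long ip = caller[i];

		if (ip == ULONG_MAX)	/* sentinel: end of recorded stack */
			break;
		printf(" => ");
		if (!ip) {		/* unresolved slot */
			printf("??\n");
			continue;
		}
		printf("0x%lx\n", ip);
	}
}

int main(void)
{
	unsigned long stack[STACK_ENTRIES] = {
		0x400567, 0, 0x7f00deadbeef, ULONG_MAX
	};

	print_user_stack(stack);
	return 0;
}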
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index 4cbfe85b99c8..fabc49bcd493 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -14,10 +14,6 @@ trace_print_printk_msg_only(struct trace_iterator *iter);
extern int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
unsigned long sym_flags);
-extern int seq_print_userip_objs(const struct userstack_entry *entry,
- struct trace_seq *s, unsigned long sym_flags);
-extern int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
- unsigned long ip, unsigned long sym_flags);
extern int trace_print_context(struct trace_iterator *iter);
extern int trace_print_lat_context(struct trace_iterator *iter);
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 36c1455b7567..1c2b28536feb 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -178,6 +178,12 @@ static inline void format_mod_start(void) { }
static inline void format_mod_stop(void) { }
#endif /* CONFIG_MODULES */
+static bool __read_mostly trace_printk_enabled = true;
+
+void trace_printk_control(bool enabled)
+{
+ trace_printk_enabled = enabled;
+}
__initdata_or_module static
struct notifier_block module_trace_bprintk_format_nb = {
@@ -192,7 +198,7 @@ int __trace_bprintk(unsigned long ip, const char *fmt, ...)
if (unlikely(!fmt))
return 0;
- if (!(trace_flags & TRACE_ITER_PRINTK))
+ if (!trace_printk_enabled)
return 0;
va_start(ap, fmt);
@@ -207,7 +213,7 @@ int __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap)
if (unlikely(!fmt))
return 0;
- if (!(trace_flags & TRACE_ITER_PRINTK))
+ if (!trace_printk_enabled)
return 0;
return trace_vbprintk(ip, fmt, ap);
@@ -219,7 +225,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...)
int ret;
va_list ap;
- if (!(trace_flags & TRACE_ITER_PRINTK))
+ if (!trace_printk_enabled)
return 0;
va_start(ap, fmt);
@@ -231,7 +237,7 @@ EXPORT_SYMBOL_GPL(__trace_printk);
int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
{
- if (!(trace_flags & TRACE_ITER_PRINTK))
+ if (!trace_printk_enabled)
return 0;
return trace_vprintk(ip, fmt, ap);
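
(Not part of the patch: the trace_printk.c hunks replace per-call tests of the global flags word with a single boolean toggled by trace_printk_control(). A sketch of that shape follows; the function and variable names here are illustrative, not the kernel API.)

#include <stdio.h>
#include <stdarg.h>
#include <stdbool.h>

static bool printk_enabled = true;

static void printk_control(bool enabled)
{
	printk_enabled = enabled;
}

static int my_trace_printk(const char *fmt, ...)
{
	va_list ap;
	int ret;

	if (!printk_enabled)		/* cheap gate, no flags word lookup */
		return 0;

	va_start(ap, fmt);
	ret = vprintf(fmt, ap);
	va_end(ap);
	return ret;
}

int main(void)
{
	my_trace_printk("logged: %d\n", 1);
	printk_control(false);
	my_trace_printk("dropped: %d\n", 2);	/* returns 0, prints nothing */
	return 0;
}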
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index b98dee914542..f6398db09114 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -302,15 +302,15 @@ static nokprobe_inline void call_fetch(struct fetch_param *fprm,
}
/* Check the name is good for event/group/fields */
-static inline int is_good_name(const char *name)
+static inline bool is_good_name(const char *name)
{
if (!isalpha(*name) && *name != '_')
- return 0;
+ return false;
while (*++name != '\0') {
if (!isalpha(*name) && !isdigit(*name) && *name != '_')
- return 0;
+ return false;
}
- return 1;
+ return true;
}
static inline struct event_file_link *
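
(Not part of the patch: a small userspace harness for the validation rule that is_good_name() now returns as a bool, shown above. The rule: first character must be a letter or underscore, the rest letters, digits or underscores. The unsigned-char casts are an addition for ctype portability.)

#include <stdio.h>
#include <ctype.h>
#include <stdbool.h>

static bool is_good_name(const char *name)
{
	if (!isalpha((unsigned char)*name) && *name != '_')
		return false;
	while (*++name != '\0') {
		if (!isalpha((unsigned char)*name) &&
		    !isdigit((unsigned char)*name) && *name != '_')
			return false;
	}
	return true;
}

int main(void)
{
	printf("%d\n", is_good_name("my_event"));	/* 1 */
	printf("%d\n", is_good_name("2fast"));		/* 0: starts with a digit */
	printf("%d\n", is_good_name("bad-name"));	/* 0: '-' not allowed */
	return 0;
}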
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 4bcfbac289ff..9d4399b553a3 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -34,31 +34,28 @@ static arch_spinlock_t wakeup_lock =
static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
-static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
-static void wakeup_graph_return(struct ftrace_graph_ret *trace);
static int save_flags;
-static bool function_enabled;
-
-#define TRACE_DISPLAY_GRAPH 1
-static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- /* display latency trace as call graph */
- { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
+static int wakeup_display_graph(struct trace_array *tr, int set);
+# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
+#else
+static inline int wakeup_display_graph(struct trace_array *tr, int set)
+{
+ return 0;
+}
+# define is_graph(tr) false
#endif
- { } /* Empty entry */
-};
-
-static struct tracer_flags tracer_flags = {
- .val = 0,
- .opts = trace_opts,
-};
-#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
#ifdef CONFIG_FUNCTION_TRACER
+static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
+static void wakeup_graph_return(struct ftrace_graph_ret *trace);
+
+static bool function_enabled;
+
/*
* Prologue for the wakeup function tracers.
*
@@ -128,14 +125,13 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
atomic_dec(&data->disabled);
preempt_enable_notrace();
}
-#endif /* CONFIG_FUNCTION_TRACER */
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
int ret;
/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
- if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
+ if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
return 0;
if (graph)
@@ -163,20 +159,40 @@ static void unregister_wakeup_function(struct trace_array *tr, int graph)
function_enabled = false;
}
-static void wakeup_function_set(struct trace_array *tr, int set)
+static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
+ if (!(mask & TRACE_ITER_FUNCTION))
+ return 0;
+
if (set)
- register_wakeup_function(tr, is_graph(), 1);
+ register_wakeup_function(tr, is_graph(tr), 1);
else
- unregister_wakeup_function(tr, is_graph());
+ unregister_wakeup_function(tr, is_graph(tr));
+ return 1;
+}
+#else
+static int register_wakeup_function(struct trace_array *tr, int graph, int set)
+{
+ return 0;
+}
+static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
+static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
+{
+ return 0;
}
+#endif /* CONFIG_FUNCTION_TRACER */
static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
struct tracer *tracer = tr->current_trace;
- if (mask & TRACE_ITER_FUNCTION)
- wakeup_function_set(tr, set);
+ if (wakeup_function_set(tr, mask, set))
+ return 0;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if (mask & TRACE_ITER_DISPLAY_GRAPH)
+ return wakeup_display_graph(tr, set);
+#endif
return trace_keep_overwrite(tracer, mask, set);
}
@@ -203,14 +219,9 @@ static void stop_func_tracer(struct trace_array *tr, int graph)
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int
-wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
+static int wakeup_display_graph(struct trace_array *tr, int set)
{
-
- if (!(bit & TRACE_DISPLAY_GRAPH))
- return -EINVAL;
-
- if (!(is_graph() ^ set))
+ if (!(is_graph(tr) ^ set))
return 0;
stop_func_tracer(tr, !set);
@@ -259,7 +270,7 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace)
static void wakeup_trace_open(struct trace_iterator *iter)
{
- if (is_graph())
+ if (is_graph(iter->tr))
graph_trace_open(iter);
}
@@ -279,7 +290,7 @@ static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
* In graph mode call the graph tracer output function,
* otherwise go with the TRACE_FN event handler
*/
- if (is_graph())
+ if (is_graph(iter->tr))
return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
return TRACE_TYPE_UNHANDLED;
@@ -287,7 +298,7 @@ static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
static void wakeup_print_header(struct seq_file *s)
{
- if (is_graph())
+ if (is_graph(wakeup_trace))
print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
else
trace_default_header(s);
@@ -298,7 +309,7 @@ __trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip,
unsigned long flags, int pc)
{
- if (is_graph())
+ if (is_graph(tr))
trace_graph_function(tr, ip, parent_ip, flags, pc);
else
trace_function(tr, ip, parent_ip, flags, pc);
@@ -306,27 +317,20 @@ __trace_function(struct trace_array *tr,
#else
#define __trace_function trace_function
-static int
-wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
-{
- return -EINVAL;
-}
-
-static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
-{
- return -1;
-}
-
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
return TRACE_TYPE_UNHANDLED;
}
-static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }
#ifdef CONFIG_FUNCTION_TRACER
+static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
+{
+ return -1;
+}
+static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_print_header(struct seq_file *s)
{
trace_default_header(s);
@@ -342,16 +346,16 @@ static void wakeup_print_header(struct seq_file *s)
/*
* Should this new latency be reported/recorded?
*/
-static int report_latency(struct trace_array *tr, cycle_t delta)
+static bool report_latency(struct trace_array *tr, cycle_t delta)
{
if (tracing_thresh) {
if (delta < tracing_thresh)
- return 0;
+ return false;
} else {
if (delta <= tr->max_latency)
- return 0;
+ return false;
}
- return 1;
+ return true;
}
static void
@@ -388,7 +392,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
entry->next_cpu = task_cpu(next);
if (!call_filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(buffer, event, flags, pc);
+ trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}
static void
@@ -416,7 +420,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
entry->next_cpu = task_cpu(wakee);
if (!call_filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(buffer, event, flags, pc);
+ trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}
static void notrace
@@ -635,7 +639,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
*/
smp_wmb();
- if (start_func_tracer(tr, is_graph()))
+ if (start_func_tracer(tr, is_graph(tr)))
printk(KERN_ERR "failed to start wakeup tracer\n");
return;
@@ -648,7 +652,7 @@ fail_deprobe:
static void stop_wakeup_tracer(struct trace_array *tr)
{
tracer_enabled = 0;
- stop_func_tracer(tr, is_graph());
+ stop_func_tracer(tr, is_graph(tr));
unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
unregister_trace_sched_wakeup(probe_wakeup, NULL);
@@ -659,7 +663,7 @@ static bool wakeup_busy;
static int __wakeup_tracer_init(struct trace_array *tr)
{
- save_flags = trace_flags;
+ save_flags = tr->trace_flags;
/* non overwrite screws up the latency tracers */
set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
@@ -740,8 +744,6 @@ static struct tracer wakeup_tracer __read_mostly =
.print_max = true,
.print_header = wakeup_print_header,
.print_line = wakeup_print_line,
- .flags = &tracer_flags,
- .set_flag = wakeup_set_flag,
.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_wakeup,
@@ -762,8 +764,6 @@ static struct tracer wakeup_rt_tracer __read_mostly =
.print_max = true,
.print_header = wakeup_print_header,
.print_line = wakeup_print_line,
- .flags = &tracer_flags,
- .set_flag = wakeup_set_flag,
.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_wakeup,
@@ -784,8 +784,6 @@ static struct tracer wakeup_dl_tracer __read_mostly =
.print_max = true,
.print_header = wakeup_print_header,
.print_line = wakeup_print_line,
- .flags = &tracer_flags,
- .set_flag = wakeup_set_flag,
.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_wakeup,
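
(Not part of the patch: wakeup_flag_changed() and irqsoff_flag_changed() now dispatch the same way: a helper consumes the FUNCTION bit and reports whether it handled the change, DISPLAY_GRAPH is handled next, and everything else falls through to the default handler. A minimal model of that chain follows; the bit names and handlers are illustrative.)

#include <stdio.h>

#define ITER_FUNCTION		(1u << 0)
#define ITER_DISPLAY_GRAPH	(1u << 1)
#define ITER_OVERWRITE		(1u << 2)

static int function_set(unsigned int mask, int set)
{
	if (!(mask & ITER_FUNCTION))
		return 0;		/* not ours, let the caller continue */
	printf("function tracing %s\n", set ? "on" : "off");
	return 1;			/* consumed the change */
}

static int display_graph(int set)
{
	printf("graph output %s\n", set ? "on" : "off");
	return 0;
}

static int flag_changed(unsigned int mask, int set)
{
	if (function_set(mask, set))
		return 0;
	if (mask & ITER_DISPLAY_GRAPH)
		return display_graph(set);
	printf("default handling of bit 0x%x\n", mask);
	return 0;
}

int main(void)
{
	flag_changed(ITER_FUNCTION, 1);
	flag_changed(ITER_DISPLAY_GRAPH, 1);
	flag_changed(ITER_OVERWRITE, 0);
	return 0;
}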
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 8abf1ba18085..dda9e6742950 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -16,24 +16,22 @@
#include "trace.h"
-#define STACK_TRACE_ENTRIES 500
-
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
{ [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
-static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
+unsigned stack_trace_index[STACK_TRACE_ENTRIES];
/*
* Reserve one entry for the passed in ip. This will allow
* us to remove most or all of the stack size overhead
* added by the stack tracer itself.
*/
-static struct stack_trace max_stack_trace = {
+struct stack_trace stack_trace_max = {
.max_entries = STACK_TRACE_ENTRIES - 1,
.entries = &stack_dump_trace[0],
};
-static unsigned long max_stack_size;
-static arch_spinlock_t max_stack_lock =
+unsigned long stack_trace_max_size;
+arch_spinlock_t stack_trace_max_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static DEFINE_PER_CPU(int, trace_active);
@@ -42,30 +40,38 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
int stack_tracer_enabled;
static int last_stack_tracer_enabled;
-static inline void print_max_stack(void)
+void stack_trace_print(void)
{
long i;
int size;
pr_emerg(" Depth Size Location (%d entries)\n"
" ----- ---- --------\n",
- max_stack_trace.nr_entries);
+ stack_trace_max.nr_entries);
- for (i = 0; i < max_stack_trace.nr_entries; i++) {
+ for (i = 0; i < stack_trace_max.nr_entries; i++) {
if (stack_dump_trace[i] == ULONG_MAX)
break;
- if (i+1 == max_stack_trace.nr_entries ||
+ if (i+1 == stack_trace_max.nr_entries ||
stack_dump_trace[i+1] == ULONG_MAX)
- size = stack_dump_index[i];
+ size = stack_trace_index[i];
else
- size = stack_dump_index[i] - stack_dump_index[i+1];
+ size = stack_trace_index[i] - stack_trace_index[i+1];
- pr_emerg("%3ld) %8d %5d %pS\n", i, stack_dump_index[i],
+ pr_emerg("%3ld) %8d %5d %pS\n", i, stack_trace_index[i],
size, (void *)stack_dump_trace[i]);
}
}
-static inline void
+/*
+ * When arch-specific code overides this function, the following
+ * data should be filled up, assuming stack_trace_max_lock is held to
+ * prevent concurrent updates.
+ * stack_trace_index[]
+ * stack_trace_max
+ * stack_trace_max_size
+ */
+void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
unsigned long this_size, flags; unsigned long *p, *top, *start;
@@ -78,7 +84,7 @@ check_stack(unsigned long ip, unsigned long *stack)
/* Remove the frame of the tracer */
this_size -= frame_size;
- if (this_size <= max_stack_size)
+ if (this_size <= stack_trace_max_size)
return;
/* we do not handle interrupt stacks yet */
@@ -90,7 +96,7 @@ check_stack(unsigned long ip, unsigned long *stack)
return;
local_irq_save(flags);
- arch_spin_lock(&max_stack_lock);
+ arch_spin_lock(&stack_trace_max_lock);
/*
* RCU may not be watching, make it see us.
@@ -103,18 +109,18 @@ check_stack(unsigned long ip, unsigned long *stack)
this_size -= tracer_frame;
/* a race could have already updated it */
- if (this_size <= max_stack_size)
+ if (this_size <= stack_trace_max_size)
goto out;
- max_stack_size = this_size;
+ stack_trace_max_size = this_size;
- max_stack_trace.nr_entries = 0;
- max_stack_trace.skip = 3;
+ stack_trace_max.nr_entries = 0;
+ stack_trace_max.skip = 3;
- save_stack_trace(&max_stack_trace);
+ save_stack_trace(&stack_trace_max);
/* Skip over the overhead of the stack tracer itself */
- for (i = 0; i < max_stack_trace.nr_entries; i++) {
+ for (i = 0; i < stack_trace_max.nr_entries; i++) {
if (stack_dump_trace[i] == ip)
break;
}
@@ -134,18 +140,18 @@ check_stack(unsigned long ip, unsigned long *stack)
* loop will only happen once. This code only takes place
* on a new max, so it is far from a fast path.
*/
- while (i < max_stack_trace.nr_entries) {
+ while (i < stack_trace_max.nr_entries) {
int found = 0;
- stack_dump_index[x] = this_size;
+ stack_trace_index[x] = this_size;
p = start;
- for (; p < top && i < max_stack_trace.nr_entries; p++) {
+ for (; p < top && i < stack_trace_max.nr_entries; p++) {
if (stack_dump_trace[i] == ULONG_MAX)
break;
if (*p == stack_dump_trace[i]) {
stack_dump_trace[x] = stack_dump_trace[i++];
- this_size = stack_dump_index[x++] =
+ this_size = stack_trace_index[x++] =
(top - p) * sizeof(unsigned long);
found = 1;
/* Start the search from here */
@@ -160,7 +166,7 @@ check_stack(unsigned long ip, unsigned long *stack)
if (unlikely(!tracer_frame)) {
tracer_frame = (p - stack) *
sizeof(unsigned long);
- max_stack_size -= tracer_frame;
+ stack_trace_max_size -= tracer_frame;
}
}
}
@@ -169,18 +175,18 @@ check_stack(unsigned long ip, unsigned long *stack)
i++;
}
- max_stack_trace.nr_entries = x;
+ stack_trace_max.nr_entries = x;
for (; x < i; x++)
stack_dump_trace[x] = ULONG_MAX;
if (task_stack_end_corrupted(current)) {
- print_max_stack();
+ stack_trace_print();
BUG();
}
out:
rcu_irq_exit();
- arch_spin_unlock(&max_stack_lock);
+ arch_spin_unlock(&stack_trace_max_lock);
local_irq_restore(flags);
}
@@ -251,9 +257,9 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
cpu = smp_processor_id();
per_cpu(trace_active, cpu)++;
- arch_spin_lock(&max_stack_lock);
+ arch_spin_lock(&stack_trace_max_lock);
*ptr = val;
- arch_spin_unlock(&max_stack_lock);
+ arch_spin_unlock(&stack_trace_max_lock);
per_cpu(trace_active, cpu)--;
local_irq_restore(flags);
@@ -273,7 +279,7 @@ __next(struct seq_file *m, loff_t *pos)
{
long n = *pos - 1;
- if (n > max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
+ if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
return NULL;
m->private = (void *)n;
@@ -296,7 +302,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
cpu = smp_processor_id();
per_cpu(trace_active, cpu)++;
- arch_spin_lock(&max_stack_lock);
+ arch_spin_lock(&stack_trace_max_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
@@ -308,7 +314,7 @@ static void t_stop(struct seq_file *m, void *p)
{
int cpu;
- arch_spin_unlock(&max_stack_lock);
+ arch_spin_unlock(&stack_trace_max_lock);
cpu = smp_processor_id();
per_cpu(trace_active, cpu)--;
@@ -343,9 +349,9 @@ static int t_show(struct seq_file *m, void *v)
seq_printf(m, " Depth Size Location"
" (%d entries)\n"
" ----- ---- --------\n",
- max_stack_trace.nr_entries);
+ stack_trace_max.nr_entries);
- if (!stack_tracer_enabled && !max_stack_size)
+ if (!stack_tracer_enabled && !stack_trace_max_size)
print_disabled(m);
return 0;
@@ -353,17 +359,17 @@ static int t_show(struct seq_file *m, void *v)
i = *(long *)v;
- if (i >= max_stack_trace.nr_entries ||
+ if (i >= stack_trace_max.nr_entries ||
stack_dump_trace[i] == ULONG_MAX)
return 0;
- if (i+1 == max_stack_trace.nr_entries ||
+ if (i+1 == stack_trace_max.nr_entries ||
stack_dump_trace[i+1] == ULONG_MAX)
- size = stack_dump_index[i];
+ size = stack_trace_index[i];
else
- size = stack_dump_index[i] - stack_dump_index[i+1];
+ size = stack_trace_index[i] - stack_trace_index[i+1];
- seq_printf(m, "%3ld) %8d %5d ", i, stack_dump_index[i], size);
+ seq_printf(m, "%3ld) %8d %5d ", i, stack_trace_index[i], size);
trace_lookup_stack(m, i);
@@ -453,7 +459,7 @@ static __init int stack_trace_init(void)
return 0;
trace_create_file("stack_max_size", 0644, d_tracer,
- &max_stack_size, &stack_max_size_fops);
+ &stack_trace_max_size, &stack_max_size_fops);
trace_create_file("stack_trace", 0444, d_tracer,
NULL, &stack_trace_fops);
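
(Not part of the patch: stack_trace_print() and t_show() above both derive a per-function stack size from the renamed stack_trace_index[] array, which records the remaining stack depth at each entry, so one function's own usage is the difference between its depth and the next entry's. A standalone sketch of that computation follows; the sample values are illustrative.)

#include <stdio.h>
#include <limits.h>

#define ENTRIES 5

static unsigned long dump_trace[ENTRIES] = {
	0x1000, 0x2000, 0x3000, ULONG_MAX
};
static unsigned int trace_index[ENTRIES] = { 496, 352, 128 };
static unsigned int nr_entries = 3;

int main(void)
{
	unsigned int i, size;

	printf("   Depth     Size  Location\n");
	for (i = 0; i < nr_entries; i++) {
		if (dump_trace[i] == ULONG_MAX)
			break;
		/* last entry owns whatever depth remains */
		if (i + 1 == nr_entries || dump_trace[i + 1] == ULONG_MAX)
			size = trace_index[i];
		else
			size = trace_index[i] - trace_index[i + 1];
		printf("%8u %8u  %#lx\n", trace_index[i], size, dump_trace[i]);
	}
	return 0;
}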
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 7d567a4b9fa7..0655afbea83f 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -110,6 +110,7 @@ static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
struct trace_event *event)
{
+ struct trace_array *tr = iter->tr;
struct trace_seq *s = &iter->seq;
struct trace_entry *ent = iter->ent;
struct syscall_trace_enter *trace;
@@ -136,7 +137,7 @@ print_syscall_enter(struct trace_iterator *iter, int flags,
goto end;
/* parameter types */
- if (trace_flags & TRACE_ITER_VERBOSE)
+ if (tr->trace_flags & TRACE_ITER_VERBOSE)
trace_seq_printf(s, "%s ", entry->types[i]);
/* parameter values */
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 3490407dc7b7..ecd536de603a 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -91,11 +91,13 @@ static void debug_print_probes(struct tracepoint_func *funcs)
printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
}
-static struct tracepoint_func *func_add(struct tracepoint_func **funcs,
- struct tracepoint_func *tp_func)
+static struct tracepoint_func *
+func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
+ int prio)
{
- int nr_probes = 0;
struct tracepoint_func *old, *new;
+ int nr_probes = 0;
+ int pos = -1;
if (WARN_ON(!tp_func->func))
return ERR_PTR(-EINVAL);
@@ -104,18 +106,33 @@ static struct tracepoint_func *func_add(struct tracepoint_func **funcs,
old = *funcs;
if (old) {
/* (N -> N+1), (N != 0, 1) probes */
- for (nr_probes = 0; old[nr_probes].func; nr_probes++)
+ for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
+ /* Insert before probes of lower priority */
+ if (pos < 0 && old[nr_probes].prio < prio)
+ pos = nr_probes;
if (old[nr_probes].func == tp_func->func &&
old[nr_probes].data == tp_func->data)
return ERR_PTR(-EEXIST);
+ }
}
/* + 2 : one for new probe, one for NULL func */
new = allocate_probes(nr_probes + 2);
if (new == NULL)
return ERR_PTR(-ENOMEM);
- if (old)
- memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
- new[nr_probes] = *tp_func;
+ if (old) {
+ if (pos < 0) {
+ pos = nr_probes;
+ memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
+ } else {
+ /* Copy higher priority probes ahead of the new probe */
+ memcpy(new, old, pos * sizeof(struct tracepoint_func));
+ /* Copy the rest after it. */
+ memcpy(new + pos + 1, old + pos,
+ (nr_probes - pos) * sizeof(struct tracepoint_func));
+ }
+ } else
+ pos = 0;
+ new[pos] = *tp_func;
new[nr_probes + 1].func = NULL;
*funcs = new;
debug_print_probes(*funcs);
@@ -174,7 +191,7 @@ static void *func_remove(struct tracepoint_func **funcs,
* Add the probe function to a tracepoint.
*/
static int tracepoint_add_func(struct tracepoint *tp,
- struct tracepoint_func *func)
+ struct tracepoint_func *func, int prio)
{
struct tracepoint_func *old, *tp_funcs;
@@ -183,7 +200,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
tp_funcs = rcu_dereference_protected(tp->funcs,
lockdep_is_held(&tracepoints_mutex));
- old = func_add(&tp_funcs, func);
+ old = func_add(&tp_funcs, func, prio);
if (IS_ERR(old)) {
WARN_ON_ONCE(1);
return PTR_ERR(old);
@@ -240,6 +257,7 @@ static int tracepoint_remove_func(struct tracepoint *tp,
* @tp: tracepoint
* @probe: probe handler
* @data: tracepoint data
+ * @prio: priority of this function over other registered functions
*
* Returns 0 if ok, error value on error.
* Note: if @tp is within a module, the caller is responsible for
@@ -247,7 +265,8 @@ static int tracepoint_remove_func(struct tracepoint *tp,
* performed either with a tracepoint module going notifier, or from
* within module exit functions.
*/
-int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
+int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
+ void *data, int prio)
{
struct tracepoint_func tp_func;
int ret;
@@ -255,10 +274,30 @@ int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
mutex_lock(&tracepoints_mutex);
tp_func.func = probe;
tp_func.data = data;
- ret = tracepoint_add_func(tp, &tp_func);
+ tp_func.prio = prio;
+ ret = tracepoint_add_func(tp, &tp_func, prio);
mutex_unlock(&tracepoints_mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);
+
+/**
+ * tracepoint_probe_register - Connect a probe to a tracepoint
+ * @tp: tracepoint
+ * @probe: probe handler
+ * @data: tracepoint data
+ * @prio: priority of this function over other registered functions
+ *
+ * Returns 0 if ok, error value on error.
+ * Note: if @tp is within a module, the caller is responsible for
+ * unregistering the probe before the module is gone. This can be
+ * performed either with a tracepoint module going notifier, or from
+ * within module exit functions.
+ */
+int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
+{
+ return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO);
+}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
/**
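
(Not part of the patch: the heart of the func_add() change above is inserting a probe into a NULL-terminated array so that higher-priority probes stay in front, with equal priorities kept in registration order. A self-contained userspace model follows; struct probe and probe_add() are illustrative stand-ins, and the allocator/NULL-terminator details are simplified.)

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct probe {
	const char *name;	/* stands in for func/data */
	int prio;
};

/* Returns a newly allocated array; *nr is updated. */
static struct probe *probe_add(struct probe *old, int *nr, struct probe add)
{
	struct probe *new;
	int pos = -1, i;

	for (i = 0; i < *nr; i++) {
		/* insert before the first probe of lower priority */
		if (pos < 0 && old[i].prio < add.prio)
			pos = i;
	}
	if (pos < 0)
		pos = *nr;	/* nothing of lower priority: append */

	new = malloc((*nr + 1) * sizeof(*new));
	if (pos)
		memcpy(new, old, pos * sizeof(*new));
	new[pos] = add;
	if (*nr - pos)
		memcpy(new + pos + 1, old + pos, (*nr - pos) * sizeof(*new));
	(*nr)++;
	free(old);
	return new;
}

int main(void)
{
	struct probe *probes = NULL;
	int nr = 0, i;

	probes = probe_add(probes, &nr, (struct probe){ "default-a", 10 });
	probes = probe_add(probes, &nr, (struct probe){ "high",      20 });
	probes = probe_add(probes, &nr, (struct probe){ "default-b", 10 });

	for (i = 0; i < nr; i++)
		printf("%d: %s (prio %d)\n", i, probes[i].name, probes[i].prio);
	free(probes);
	return 0;
}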
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
index 125d6402f64f..d6b75bb495b3 100644
--- a/samples/trace_events/trace-events-sample.h
+++ b/samples/trace_events/trace-events-sample.h
@@ -4,14 +4,14 @@
*
* The define_trace.h below will also look for a file name of
* TRACE_SYSTEM.h where TRACE_SYSTEM is what is defined here.
- * In this case, it would look for sample.h
+ * In this case, it would look for sample-trace.h
*
* If the header name will be different than the system name
* (as in this case), then you can override the header name that
* define_trace.h will look up by defining TRACE_INCLUDE_FILE
*
* This file is called trace-events-sample.h but we want the system
- * to be called "sample". Therefore we must define the name of this
+ * to be called "sample-trace". Therefore we must define the name of this
* file:
*
* #define TRACE_INCLUDE_FILE trace-events-sample
@@ -106,7 +106,7 @@
*
* memcpy(__entry->foo, bar, 10);
*
- * __dynamic_array: This is similar to array, but can vary is size from
+ * __dynamic_array: This is similar to array, but can vary its size from
* instance to instance of the tracepoint being called.
* Like __array, this too has three elements (type, name, size);
* type is the type of the element, name is the name of the array.
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 3d1984e59a30..698768bdc581 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -42,6 +42,7 @@
#ifndef EM_AARCH64
#define EM_AARCH64 183
+#define R_AARCH64_NONE 0
#define R_AARCH64_ABS64 257
#endif
@@ -160,6 +161,22 @@ static int make_nop_x86(void *map, size_t const offset)
return 0;
}
+static unsigned char ideal_nop4_arm64[4] = {0x1f, 0x20, 0x03, 0xd5};
+static int make_nop_arm64(void *map, size_t const offset)
+{
+ uint32_t *ptr;
+
+ ptr = map + offset;
+ /* bl <_mcount> is 0x94000000 before relocation */
+ if (*ptr != 0x94000000)
+ return -1;
+
+ /* Convert to nop */
+ ulseek(fd_map, offset, SEEK_SET);
+ uwrite(fd_map, ideal_nop, 4);
+ return 0;
+}
+
/*
* Get the whole file as a programming convenience in order to avoid
* malloc+lseek+read+free of many pieces. If successful, then mmap
@@ -345,6 +362,7 @@ do_file(char const *const fname)
break;
case EM_386:
reltype = R_386_32;
+ rel_type_nop = R_386_NONE;
make_nop = make_nop_x86;
ideal_nop = ideal_nop5_x86_32;
mcount_adjust_32 = -1;
@@ -353,7 +371,12 @@ do_file(char const *const fname)
altmcount = "__gnu_mcount_nc";
break;
case EM_AARCH64:
- reltype = R_AARCH64_ABS64; gpfx = '_'; break;
+ reltype = R_AARCH64_ABS64;
+ make_nop = make_nop_arm64;
+ rel_type_nop = R_AARCH64_NONE;
+ ideal_nop = ideal_nop4_arm64;
+ gpfx = '_';
+ break;
case EM_IA_64: reltype = R_IA64_IMM64; gpfx = '_'; break;
case EM_METAG: reltype = R_METAG_ADDR32;
altmcount = "_mcount_wrapper";
@@ -371,6 +394,7 @@ do_file(char const *const fname)
make_nop = make_nop_x86;
ideal_nop = ideal_nop5_x86_64;
reltype = R_X86_64_64;
+ rel_type_nop = R_X86_64_NONE;
mcount_adjust_64 = -1;
break;
} /* end switch */
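
(Not part of the patch: make_nop_arm64() above patches the mapped object file by recognising the unrelocated "bl <_mcount>" instruction word 0x94000000 and rewriting it with the AArch64 NOP bytes it registers as ideal_nop. A minimal in-memory model of that check-and-replace follows; it assumes a little-endian host and skips the file I/O that recordmcount does through ulseek/uwrite.)

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

static const unsigned char nop4_arm64[4] = { 0x1f, 0x20, 0x03, 0xd5 };

static int make_nop_arm64(unsigned char *map, size_t offset)
{
	uint32_t insn;

	memcpy(&insn, map + offset, sizeof(insn));
	if (insn != 0x94000000u)	/* not an unrelocated bl <_mcount> */
		return -1;
	memcpy(map + offset, nop4_arm64, sizeof(nop4_arm64));
	return 0;
}

int main(void)
{
	/* 0x94000000 as little-endian bytes, followed by padding */
	unsigned char text[8] = { 0x00, 0x00, 0x00, 0x94, 0, 0, 0, 0 };

	if (make_nop_arm64(text, 0) == 0)
		printf("patched to: %02x %02x %02x %02x\n",
		       text[0], text[1], text[2], text[3]);
	return 0;
}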
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
index 49b582a225b0..b9897e2be404 100644
--- a/scripts/recordmcount.h
+++ b/scripts/recordmcount.h
@@ -377,7 +377,7 @@ static void nop_mcount(Elf_Shdr const *const relhdr,
if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) {
if (make_nop)
- ret = make_nop((void *)ehdr, shdr->sh_offset + relp->r_offset);
+ ret = make_nop((void *)ehdr, _w(shdr->sh_offset) + _w(relp->r_offset));
if (warn_on_notrace_sect && !once) {
printf("Section %s has mcount callers being ignored\n",
txtname);