summary refs log tree commit diff
path: root/kernel/trace/trace_functions.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/trace/trace_functions.c')
-rw-r--r--  kernel/trace/trace_functions.c  143
1 file changed, 41 insertions, 102 deletions
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 5b781d2be383..38fe1483c508 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,106 +13,32 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
-#include <linux/slab.h>
#include <linux/fs.h>
#include "trace.h"
-static void tracing_start_function_trace(struct trace_array *tr);
-static void tracing_stop_function_trace(struct trace_array *tr);
-static void
-function_trace_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs);
-static void
-function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs);
-static struct ftrace_ops trace_ops;
-static struct ftrace_ops trace_stack_ops;
-static struct tracer_flags func_flags;
-
-/* Our option */
-enum {
- TRACE_FUNC_OPT_STACK = 0x1,
-};
-
-static int allocate_ftrace_ops(struct trace_array *tr)
-{
- struct ftrace_ops *ops;
-
- ops = kzalloc(sizeof(*ops), GFP_KERNEL);
- if (!ops)
- return -ENOMEM;
+/* function tracing enabled */
+static int ftrace_function_enabled;
- /* Currently only the non stack verision is supported */
- ops->func = function_trace_call;
- ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;
-
- tr->ops = ops;
- ops->private = tr;
- return 0;
-}
-
-
-int ftrace_create_function_files(struct trace_array *tr,
- struct dentry *parent)
-{
- int ret;
-
- /* The top level array uses the "global_ops". */
- if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) {
- ret = allocate_ftrace_ops(tr);
- if (ret)
- return ret;
- }
-
- ftrace_create_filter_files(tr->ops, parent);
-
- return 0;
-}
+static struct trace_array *func_trace;
-void ftrace_destroy_function_files(struct trace_array *tr)
-{
- ftrace_destroy_filter_files(tr->ops);
- kfree(tr->ops);
- tr->ops = NULL;
-}
+static void tracing_start_function_trace(void);
+static void tracing_stop_function_trace(void);
static int function_trace_init(struct trace_array *tr)
{
- struct ftrace_ops *ops;
-
- if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
- /* There's only one global tr */
- if (!trace_ops.private) {
- trace_ops.private = tr;
- trace_stack_ops.private = tr;
- }
-
- if (func_flags.val & TRACE_FUNC_OPT_STACK)
- ops = &trace_stack_ops;
- else
- ops = &trace_ops;
- tr->ops = ops;
- } else if (!tr->ops) {
- /*
- * Instance trace_arrays get their ops allocated
- * at instance creation. Unless it failed
- * the allocation.
- */
- return -ENOMEM;
- }
-
+ func_trace = tr;
tr->trace_buffer.cpu = get_cpu();
put_cpu();
tracing_start_cmdline_record();
- tracing_start_function_trace(tr);
+ tracing_start_function_trace();
return 0;
}
static void function_trace_reset(struct trace_array *tr)
{
- tracing_stop_function_trace(tr);
+ tracing_stop_function_trace();
tracing_stop_cmdline_record();
}
@@ -121,18 +47,25 @@ static void function_trace_start(struct trace_array *tr)
tracing_reset_online_cpus(&tr->trace_buffer);
}
+/* Our option */
+enum {
+ TRACE_FUNC_OPT_STACK = 0x1,
+};
+
+static struct tracer_flags func_flags;
+
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{
- struct trace_array *tr = op->private;
+ struct trace_array *tr = func_trace;
struct trace_array_cpu *data;
unsigned long flags;
int bit;
int cpu;
int pc;
- if (unlikely(!tr->function_enabled))
+ if (unlikely(!ftrace_function_enabled))
return;
pc = preempt_count();
@@ -158,14 +91,14 @@ static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{
- struct trace_array *tr = op->private;
+ struct trace_array *tr = func_trace;
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
int cpu;
int pc;
- if (unlikely(!tr->function_enabled))
+ if (unlikely(!ftrace_function_enabled))
return;
/*
@@ -195,6 +128,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
local_irq_restore(flags);
}
+
static struct ftrace_ops trace_ops __read_mostly =
{
.func = function_trace_call,
@@ -219,21 +153,29 @@ static struct tracer_flags func_flags = {
.opts = func_opts
};
-static void tracing_start_function_trace(struct trace_array *tr)
+static void tracing_start_function_trace(void)
{
- tr->function_enabled = 0;
- register_ftrace_function(tr->ops);
- tr->function_enabled = 1;
+ ftrace_function_enabled = 0;
+
+ if (func_flags.val & TRACE_FUNC_OPT_STACK)
+ register_ftrace_function(&trace_stack_ops);
+ else
+ register_ftrace_function(&trace_ops);
+
+ ftrace_function_enabled = 1;
}
-static void tracing_stop_function_trace(struct trace_array *tr)
+static void tracing_stop_function_trace(void)
{
- tr->function_enabled = 0;
- unregister_ftrace_function(tr->ops);
+ ftrace_function_enabled = 0;
+
+ if (func_flags.val & TRACE_FUNC_OPT_STACK)
+ unregister_ftrace_function(&trace_stack_ops);
+ else
+ unregister_ftrace_function(&trace_ops);
}
-static int
-func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
+static int func_set_flag(u32 old_flags, u32 bit, int set)
{
switch (bit) {
case TRACE_FUNC_OPT_STACK:
@@ -241,14 +183,12 @@ func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
break;
- unregister_ftrace_function(tr->ops);
-
if (set) {
- tr->ops = &trace_stack_ops;
- register_ftrace_function(tr->ops);
+ unregister_ftrace_function(&trace_ops);
+ register_ftrace_function(&trace_stack_ops);
} else {
- tr->ops = &trace_ops;
- register_ftrace_function(tr->ops);
+ unregister_ftrace_function(&trace_stack_ops);
+ register_ftrace_function(&trace_ops);
}
break;
@@ -268,7 +208,6 @@ static struct tracer function_trace __tracer_data =
.wait_pipe = poll_wait_pipe,
.flags = &func_flags,
.set_flag = func_set_flag,
- .allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_function,
#endif