From 4104d326b670c2b66f575d2004daa28b2d1b4c8d Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (Red Hat)"
Date: Fri, 10 Jan 2014 17:01:58 -0500
Subject: ftrace: Remove global function list and call function directly

Instead of having a list of global functions that are called, as only one
global function is allowed to be enabled at a time, there's no reason to have
a list. Instead, simply have all the users of the global ops use the global
ops directly, instead of registering their own ftrace_ops. Just switch what
function is used before enabling the function tracer. This removes a lot of
code as well as the complexity involved with it.

Signed-off-by: Steven Rostedt
---
 include/linux/ftrace.h | 20 ++++++++------------
 1 file changed, 8 insertions(+), 12 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 9212b017bc72..f0ff2c2453e7 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -62,9 +62,6 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  * set in the flags member.
  *
  * ENABLED - set/unset when ftrace_ops is registered/unregistered
- * GLOBAL - set manualy by ftrace_ops user to denote the ftrace_ops
- *           is part of the global tracers sharing the same filter
- *           via set_ftrace_* debugfs files.
  * DYNAMIC - set when ftrace_ops is registered to denote dynamically
  *           allocated ftrace_ops which need special care
  * CONTROL - set manualy by ftrace_ops user to denote the ftrace_ops
@@ -96,15 +93,14 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  */
 enum {
 	FTRACE_OPS_FL_ENABLED			= 1 << 0,
-	FTRACE_OPS_FL_GLOBAL			= 1 << 1,
-	FTRACE_OPS_FL_DYNAMIC			= 1 << 2,
-	FTRACE_OPS_FL_CONTROL			= 1 << 3,
-	FTRACE_OPS_FL_SAVE_REGS			= 1 << 4,
-	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 5,
-	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 6,
-	FTRACE_OPS_FL_STUB			= 1 << 7,
-	FTRACE_OPS_FL_INITIALIZED		= 1 << 8,
-	FTRACE_OPS_FL_DELETED			= 1 << 9,
+	FTRACE_OPS_FL_DYNAMIC			= 1 << 1,
+	FTRACE_OPS_FL_CONTROL			= 1 << 2,
+	FTRACE_OPS_FL_SAVE_REGS			= 1 << 3,
+	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 4,
+	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 5,
+	FTRACE_OPS_FL_STUB			= 1 << 6,
+	FTRACE_OPS_FL_INITIALIZED		= 1 << 7,
+	FTRACE_OPS_FL_DELETED			= 1 << 8,
 };

 /*
--
cgit

From bdffd893a0e9c431304142d12d9a0a21d365c502 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Tue, 29 Apr 2014 14:17:40 -0500
Subject: tracing: Replace __get_cpu_var uses with this_cpu_ptr

Replace uses of &__get_cpu_var for address calculation with this_cpu_ptr.

Link: http://lkml.kernel.org/p/alpine.DEB.2.10.1404291415560.18364@gentwo.org

Acked-by: Masami Hiramatsu
Signed-off-by: Christoph Lameter
Signed-off-by: Steven Rostedt
---
 include/linux/kprobes.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 925eaf28fca9..7bd2ad01e39c 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -355,7 +355,7 @@ static inline void reset_current_kprobe(void)

 static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
 {
-	return (&__get_cpu_var(kprobe_ctlblk));
+	return this_cpu_ptr(&kprobe_ctlblk);
 }

 int register_kprobe(struct kprobe *p);
--
cgit
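
The same conversion applies anywhere the address of a per-cpu variable is
taken. A minimal sketch with a hypothetical per-cpu object (the struct,
variable, and function names below are made up for illustration; only
DEFINE_PER_CPU() and this_cpu_ptr() are existing kernel APIs):

    #include <linux/percpu.h>

    /* Hypothetical per-cpu object, used only for this example. */
    struct my_stats {
            unsigned long hits;
    };
    static DEFINE_PER_CPU(struct my_stats, my_stats);

    /* Caller is assumed to run with preemption disabled, as with
     * get_kprobe_ctlblk() above. */
    static struct my_stats *get_my_stats(void)
    {
            /* Old style: return &__get_cpu_var(my_stats); */
            /* New style: this_cpu_ptr() computes the per-cpu address directly. */
            return this_cpu_ptr(&my_stats);
    }
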
From 7c65bbc7dcface00b295bbd18bce82fe1db3d633 Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (Red Hat)"
Date: Tue, 6 May 2014 09:26:30 -0400
Subject: tracing: Add trace_<tracepoint>_enabled() function

There are some code paths in the kernel that need to do some preparations
before they call a tracepoint. As that code is worthless overhead when the
tracepoint is not enabled, it would be prudent to have that code only run
when the tracepoint is active. To accomplish this, all tracepoints now get a
static inline function called "trace_<tracepoint>_enabled()" which returns
true when the tracepoint is enabled and false otherwise. As an added bonus,
that function uses the static_key of the tracepoint such that no branch is
needed.

  if (trace_mytracepoint_enabled()) {
	arg = process_tp_arg();
	trace_mytracepoint(arg);
  }

This will keep the "process_tp_arg()" (which may be expensive to run) from
being executed when the tracepoint isn't enabled.

It's best to encapsulate the tracepoint itself in the if statement just to
avoid races. For example, if you had:

  if (trace_mytracepoint_enabled())
	arg = process_tp_arg();

  trace_mytracepoint(arg);

There's a chance that the tracepoint could be enabled just after the if
statement, and arg will be undefined when calling the tracepoint.

Link: http://lkml.kernel.org/r/20140506094407.507b6435@gandalf.local.home

Acked-by: Mathieu Desnoyers
Signed-off-by: Steven Rostedt
---
 include/linux/tracepoint.h | 10 ++++++++++
 1 file changed, 10 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 9d30ee469c2a..2e2a5f7717e5 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -185,6 +185,11 @@ extern void syscall_unregfunc(void);
 	static inline void						\
 	check_trace_callback_type_##name(void (*cb)(data_proto))	\
 	{								\
+	}								\
+	static inline bool						\
+	trace_##name##_enabled(void)					\
+	{								\
+		return static_key_false(&__tracepoint_##name.key);	\
 	}

 /*
@@ -230,6 +235,11 @@ extern void syscall_unregfunc(void);
 	}								\
 	static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \
 	{								\
+	}								\
+	static inline bool						\
+	trace_##name##_enabled(void)					\
+	{								\
+		return false;						\
 	}

 #define DEFINE_TRACE_FN(name, reg, unreg)
--
cgit
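
For a tracepoint declared in the usual way, the guarded pattern above looks
roughly like the sketch below. The tracepoint name "foo" and the expensive
helper format_state() are hypothetical; trace_foo_enabled() is what the new
macro would generate for that name:

    /* Hypothetical tracepoint taking one string argument. */
    DECLARE_TRACE(foo,
            TP_PROTO(const char *msg),
            TP_ARGS(msg));

    static void do_work(void *obj)
    {
            /*
             * format_state() is assumed to be expensive, so only run it when
             * the tracepoint is enabled.  The trace_foo() call stays inside
             * the same if block so that msg is always defined when used.
             */
            if (trace_foo_enabled()) {
                    const char *msg = format_state(obj);

                    trace_foo(msg);
            }
    }
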
From 7413af1fb70e7efa6dbc7f27663e7a5126b3aa33 Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (Red Hat)"
Date: Tue, 6 May 2014 21:34:14 -0400
Subject: ftrace: Make get_ftrace_addr() and get_ftrace_addr_old() global

Move and rename get_ftrace_addr() and get_ftrace_addr_old() to
ftrace_get_addr_new() and ftrace_get_addr_curr() respectively.

This moves these two helper functions out of the arch-specific code and into
the generic code, and renames them to have a better generic name. This will
allow other archs to use them, and makes it a bit easier to work on getting
separate trampolines for different functions.

ftrace_get_addr_new() returns the trampoline address that the mcount call
address will be converted to.

ftrace_get_addr_curr() returns the trampoline address of what the mcount
call address currently jumps to.

Signed-off-by: Steven Rostedt
---
 include/linux/ftrace.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f0ff2c2453e7..2f8cbffecd3d 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -400,6 +400,8 @@ int ftrace_update_record(struct dyn_ftrace *rec, int enable);
 int ftrace_test_record(struct dyn_ftrace *rec, int enable);
 void ftrace_run_stop_machine(int command);
 unsigned long ftrace_location(unsigned long ip);
+unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
+unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

 extern ftrace_func_t ftrace_trace_function;
--
cgit

From f1b2f2bd5821c6ab7feed2e133343dd54b212ed9 Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (Red Hat)"
Date: Wed, 7 May 2014 16:09:49 -0400
Subject: ftrace: Remove FTRACE_UPDATE_MODIFY_CALL_REGS flag

As the decision of what needs to be done (converting a call to ftrace_caller
into a call to ftrace_caller_regs, or converting from ftrace_caller_regs back
to ftrace_caller) can easily be determined from the rec->flags of
FTRACE_FL_REGS and FTRACE_FL_REGS_EN, there's no need to have
ftrace_check_record() return either a UPDATE_MODIFY_CALL_REGS or a
UPDATE_MODIFY_CALL. Just the latter is enough. This added flag causes more
complexity than is required. Remove it.

Signed-off-by: Steven Rostedt
---
 include/linux/ftrace.h | 2 --
 1 file changed, 2 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 2f8cbffecd3d..3e6dfb31f8e6 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -362,14 +362,12 @@ enum {
  * IGNORE           - The function is already what we want it to be
  * MAKE_CALL        - Start tracing the function
  * MODIFY_CALL      - Stop saving regs for the function
- * MODIFY_CALL_REGS - Start saving regs for the function
  * MAKE_NOP         - Stop tracing the function
  */
 enum {
 	FTRACE_UPDATE_IGNORE,
 	FTRACE_UPDATE_MAKE_CALL,
 	FTRACE_UPDATE_MODIFY_CALL,
-	FTRACE_UPDATE_MODIFY_CALL_REGS,
 	FTRACE_UPDATE_MAKE_NOP,
 };
--
cgit
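
Taken together with the previous patch, both new helpers can be derived from
rec->flags alone. A rough sketch of the idea, using the existing
FTRACE_FL_REGS/FTRACE_FL_REGS_EN flags and the FTRACE_ADDR/FTRACE_REGS_ADDR
trampoline addresses, but not necessarily the exact kernel implementation:

    unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
    {
            /* What the mcount call site *should* jump to. */
            if (rec->flags & FTRACE_FL_REGS)
                    return (unsigned long)FTRACE_REGS_ADDR; /* ftrace_regs_caller */
            return (unsigned long)FTRACE_ADDR;              /* ftrace_caller */
    }

    unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
    {
            /* What the call site *currently* jumps to; REGS_EN tracks state. */
            if (rec->flags & FTRACE_FL_REGS_EN)
                    return (unsigned long)FTRACE_REGS_ADDR;
            return (unsigned long)FTRACE_ADDR;
    }
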
From 4449bf927b61bdb4389393c6fea6837214d1ace7 Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (Red Hat)"
Date: Tue, 6 May 2014 13:10:24 -0400
Subject: tracing: Add __bitmask() macro to trace events to record cpumasks and other bitmasks

Being able to show a cpumask of events can be useful as some events may affect
only some CPUs. There is no standard way to record the cpumask and converting
it to a string is rather expensive during the trace as traces happen in hot
paths. It would be better to record the raw event mask and be able to parse it
at print time.

The following macros were added for use with the TRACE_EVENT() macro:

  __bitmask()
  __assign_bitmask()
  __get_bitmask()

To test this, I added it to the sched_migrate_task event, which then looked
like this:

TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu, const struct cpumask *cpus),

	TP_ARGS(p, dest_cpu, cpus),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
		__bitmask(	cpumask, num_possible_cpus()	)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
		__assign_bitmask(cpumask, cpumask_bits(cpus), num_possible_cpus());
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d cpumask=%s",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu,
		  __get_bitmask(cpumask))
);

With the output of:

 ksmtuned-3613  [003] d..2   485.220508: sched_migrate_task: comm=ksmtuned pid=3615 prio=120 orig_cpu=3 dest_cpu=2 cpumask=00000000,0000000f
 migration/1-13 [001] d..5   485.221202: sched_migrate_task: comm=ksmtuned pid=3614 prio=120 orig_cpu=1 dest_cpu=0 cpumask=00000000,0000000f
 awk-3615       [002] d.H5   485.221747: sched_migrate_task: comm=rcu_preempt pid=7 prio=120 orig_cpu=0 dest_cpu=1 cpumask=00000000,000000ff
 migration/2-18 [002] d..5   485.222062: sched_migrate_task: comm=ksmtuned pid=3615 prio=120 orig_cpu=2 dest_cpu=3 cpumask=00000000,0000000f

Link: http://lkml.kernel.org/r/1399377998-14870-6-git-send-email-javi.merino@arm.com
Link: http://lkml.kernel.org/r/20140506132238.22e136d1@gandalf.local.home

Suggested-by: Javi Merino
Tested-by: Javi Merino
Signed-off-by: Steven Rostedt
---
 include/linux/ftrace_event.h |  3 +++
 include/linux/trace_seq.h    | 10 ++++++++++
 2 files changed, 13 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index d16da3e53bc7..cff3106ffe2c 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -38,6 +38,9 @@ const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
 					 *symbol_array);
 #endif

+const char *ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
+				     unsigned int bitmask_size);
+
 const char *ftrace_print_hex_seq(struct trace_seq *p,
 				 const unsigned char *buf, int len);

diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index a32d86ec8bf2..136116924d8d 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -46,6 +46,9 @@ extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
 extern void *trace_seq_reserve(struct trace_seq *s, size_t len);
 extern int trace_seq_path(struct trace_seq *s, const struct path *path);

+extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
+			     int nmaskbits);
+
 #else /* CONFIG_TRACING */
 static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
 {
@@ -57,6 +60,13 @@ trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
 	return 0;
 }

+static inline int
+trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
+		  int nmaskbits)
+{
+	return 0;
+}
+
 static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s)
 {
 	return 0;
--
cgit
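
One plausible implementation of the new trace_seq_bitmask() helper, sketched
on top of bitmap_scnprintf() and the trace_seq buffer/len/full fields; this
illustrates the approach, not necessarily the exact code behind the
declaration above:

    int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
                          int nmaskbits)
    {
            /* Room left in the page-sized buffer, keeping space for '\0'. */
            unsigned int room = PAGE_SIZE - 1 - s->len;
            int ret;

            if (s->full || !room)
                    return 0;

            /* Renders the mask in the "00000000,0000000f" style seen above. */
            ret = bitmap_scnprintf((char *)(s->buffer + s->len), room,
                                   maskp, nmaskbits);
            s->len += ret;

            return 1;
    }

At print time, __get_bitmask() then only has to hand the recorded mask words
to this helper (via ftrace_print_bitmask_seq()) instead of paying for the
string conversion inside the traced hot path.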