Diffstat (limited to 'kernel/trace/trace_sched_wakeup.c')
 kernel/trace/trace_sched_wakeup.c | 451 +++++++++++++++++++++----------------
 1 file changed, 227 insertions(+), 224 deletions(-)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index ddec53b67646..8faa73d3bba1 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* trace task wakeup timings
*
@@ -25,35 +26,28 @@ static struct task_struct *wakeup_task;
static int wakeup_cpu;
static int wakeup_current_cpu;
static unsigned wakeup_prio = -1;
-static int wakeup_rt;
-static int wakeup_dl;
-static int tracing_dl = 0;
+static bool wakeup_rt;
+static bool wakeup_dl;
+static bool tracing_dl;
static arch_spinlock_t wakeup_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
+static int start_func_tracer(struct trace_array *tr, int graph);
+static void stop_func_tracer(struct trace_array *tr, int graph);
static int save_flags;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int wakeup_display_graph(struct trace_array *tr, int set);
-# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
+# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER(DISPLAY_GRAPH))
#else
-static inline int wakeup_display_graph(struct trace_array *tr, int set)
-{
- return 0;
-}
# define is_graph(tr) false
#endif
-
#ifdef CONFIG_FUNCTION_TRACER
-static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
-static void wakeup_graph_return(struct ftrace_graph_ret *trace);
-
static bool function_enabled;
/*
@@ -73,7 +67,7 @@ static bool function_enabled;
static int
func_prolog_preempt_disable(struct trace_array *tr,
struct trace_array_cpu **data,
- int *pc)
+ unsigned int *trace_ctx)
{
long disabled;
int cpu;
@@ -81,144 +75,30 @@ func_prolog_preempt_disable(struct trace_array *tr,
if (likely(!wakeup_task))
return 0;
- *pc = preempt_count();
+ *trace_ctx = tracing_gen_ctx();
preempt_disable_notrace();
cpu = raw_smp_processor_id();
if (cpu != wakeup_current_cpu)
goto out_enable;
- *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
- disabled = atomic_inc_return(&(*data)->disabled);
+ *data = per_cpu_ptr(tr->array_buffer.data, cpu);
+ disabled = local_inc_return(&(*data)->disabled);
if (unlikely(disabled != 1))
goto out;
return 1;
out:
- atomic_dec(&(*data)->disabled);
+ local_dec(&(*data)->disabled);
out_enable:
preempt_enable_notrace();
return 0;
}
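
For reference, the nesting guard that func_prolog_preempt_disable() implements reduces to the sketch below (a userspace analogue only, not kernel code; the real version uses a per-CPU local_t, which is exactly why the atomic_inc_return() above could become the cheaper local_inc_return()):

#include <stdio.h>

static long disabled;			/* stands in for data->disabled */

static int prolog(void)
{
	if (++disabled != 1) {		/* only the first nesting level may trace */
		--disabled;
		return 0;
	}
	return 1;
}

static void epilog(void)
{
	--disabled;
}

int main(void)
{
	if (prolog()) {
		if (!prolog())		/* a re-entrant call is rejected */
			printf("re-entry blocked\n");
		epilog();
	}
	return 0;
}
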
-/*
- * wakeup uses its own tracer function to keep the overhead down:
- */
-static void
-wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *pt_regs)
-{
- struct trace_array *tr = wakeup_trace;
- struct trace_array_cpu *data;
- unsigned long flags;
- int pc;
-
- if (!func_prolog_preempt_disable(tr, &data, &pc))
- return;
-
- local_irq_save(flags);
- trace_function(tr, ip, parent_ip, flags, pc);
- local_irq_restore(flags);
-
- atomic_dec(&data->disabled);
- preempt_enable_notrace();
-}
-
-static int register_wakeup_function(struct trace_array *tr, int graph, int set)
-{
- int ret;
-
- /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
- if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
- return 0;
-
- if (graph)
- ret = register_ftrace_graph(&wakeup_graph_return,
- &wakeup_graph_entry);
- else
- ret = register_ftrace_function(tr->ops);
-
- if (!ret)
- function_enabled = true;
-
- return ret;
-}
-
-static void unregister_wakeup_function(struct trace_array *tr, int graph)
-{
- if (!function_enabled)
- return;
-
- if (graph)
- unregister_ftrace_graph();
- else
- unregister_ftrace_function(tr->ops);
-
- function_enabled = false;
-}
-
-static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
-{
- if (!(mask & TRACE_ITER_FUNCTION))
- return 0;
-
- if (set)
- register_wakeup_function(tr, is_graph(tr), 1);
- else
- unregister_wakeup_function(tr, is_graph(tr));
- return 1;
-}
-#else
-static int register_wakeup_function(struct trace_array *tr, int graph, int set)
-{
- return 0;
-}
-static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
-static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
-{
- return 0;
-}
-#endif /* CONFIG_FUNCTION_TRACER */
-
-static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
-{
- struct tracer *tracer = tr->current_trace;
-
- if (wakeup_function_set(tr, mask, set))
- return 0;
-
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- if (mask & TRACE_ITER_DISPLAY_GRAPH)
- return wakeup_display_graph(tr, set);
-#endif
-
- return trace_keep_overwrite(tracer, mask, set);
-}
-
-static int start_func_tracer(struct trace_array *tr, int graph)
-{
- int ret;
- ret = register_wakeup_function(tr, graph, 0);
-
- if (!ret && tracing_is_enabled())
- tracer_enabled = 1;
- else
- tracer_enabled = 0;
-
- return ret;
-}
-
-static void stop_func_tracer(struct trace_array *tr, int graph)
-{
- tracer_enabled = 0;
-
- unregister_wakeup_function(tr, graph);
-}
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set)
{
if (!(is_graph(tr) ^ set))
@@ -232,14 +112,17 @@ static int wakeup_display_graph(struct trace_array *tr, int set)
return start_func_tracer(tr, set);
}
-static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
+static int wakeup_graph_entry(struct ftrace_graph_ent *trace,
+ struct fgraph_ops *gops,
+ struct ftrace_regs *fregs)
{
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
- unsigned long flags;
- int pc, ret = 0;
+ unsigned int trace_ctx;
+ u64 *calltime;
+ int ret = 0;
- if (ftrace_graph_ignore_func(trace))
+ if (ftrace_graph_ignore_func(gops, trace))
return 0;
/*
* Do not trace a function if it's filtered by set_graph_notrace.
@@ -251,35 +134,52 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
if (ftrace_graph_notrace_addr(trace->func))
return 1;
- if (!func_prolog_preempt_disable(tr, &data, &pc))
+ if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return 0;
- local_save_flags(flags);
- ret = __trace_graph_entry(tr, trace, flags, pc);
- atomic_dec(&data->disabled);
+ calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
+ if (calltime) {
+ *calltime = trace_clock_local();
+ ret = __trace_graph_entry(tr, trace, trace_ctx);
+ }
+ local_dec(&data->disabled);
preempt_enable_notrace();
return ret;
}
-static void wakeup_graph_return(struct ftrace_graph_ret *trace)
+static void wakeup_graph_return(struct ftrace_graph_ret *trace,
+ struct fgraph_ops *gops,
+ struct ftrace_regs *fregs)
{
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
- unsigned long flags;
- int pc;
+ unsigned int trace_ctx;
+ u64 *calltime;
+ u64 rettime;
+ int size;
- if (!func_prolog_preempt_disable(tr, &data, &pc))
+ ftrace_graph_addr_finish(gops, trace);
+
+ if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return;
- local_save_flags(flags);
- __trace_graph_return(tr, trace, flags, pc);
- atomic_dec(&data->disabled);
+ rettime = trace_clock_local();
+
+ calltime = fgraph_retrieve_data(gops->idx, &size);
+ if (calltime)
+ __trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
+ local_dec(&data->disabled);
preempt_enable_notrace();
return;
}
+static struct fgraph_ops fgraph_wakeup_ops = {
+ .entryfunc = &wakeup_graph_entry,
+ .retfunc = &wakeup_graph_return,
+};
+
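
The entry/return pair bundled into fgraph_wakeup_ops relies on fgraph's per-call data area: wakeup_graph_entry() reserves a slot for the call timestamp and wakeup_graph_return() retrieves it to compute the duration. A single-process sketch of that pairing (a stand-in shadow stack; fgraph_reserve_data()/fgraph_retrieve_data() do this on the real function-graph shadow stack):

#include <stdio.h>
#include <time.h>

static unsigned long long shadow[16];	/* per-call data slots */
static int depth;

static unsigned long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void graph_entry(void)
{
	shadow[depth++] = now_ns();	/* like *calltime = trace_clock_local() */
}

static void graph_return(const char *func)
{
	unsigned long long calltime = shadow[--depth];

	printf("%s: %llu ns\n", func, now_ns() - calltime);
}

int main(void)
{
	graph_entry();
	graph_return("demo_func");
	return 0;
}
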
static void wakeup_trace_open(struct trace_iterator *iter)
{
if (is_graph(iter->tr))
@@ -293,8 +193,11 @@ static void wakeup_trace_close(struct trace_iterator *iter)
}
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
- TRACE_GRAPH_PRINT_ABS_TIME | \
- TRACE_GRAPH_PRINT_DURATION)
+ TRACE_GRAPH_PRINT_CPU | \
+ TRACE_GRAPH_PRINT_REL_TIME | \
+ TRACE_GRAPH_PRINT_DURATION | \
+ TRACE_GRAPH_PRINT_OVERHEAD | \
+ TRACE_GRAPH_PRINT_IRQS)
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
@@ -315,20 +218,87 @@ static void wakeup_print_header(struct seq_file *s)
else
trace_default_header(s);
}
+#endif /* else CONFIG_FUNCTION_GRAPH_TRACER */
+/*
+ * wakeup uses its own tracer function to keep the overhead down:
+ */
static void
-__trace_function(struct trace_array *tr,
- unsigned long ip, unsigned long parent_ip,
- unsigned long flags, int pc)
+wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
{
- if (is_graph(tr))
- trace_graph_function(tr, ip, parent_ip, flags, pc);
+ struct trace_array *tr = wakeup_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ unsigned int trace_ctx;
+
+ if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
+ return;
+
+ local_irq_save(flags);
+ trace_function(tr, ip, parent_ip, trace_ctx, fregs);
+ local_irq_restore(flags);
+
+ local_dec(&data->disabled);
+ preempt_enable_notrace();
+}
+
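
The (flags, pc) pair that wakeup_tracer_call() used to thread through is now a single trace_ctx word produced by tracing_gen_ctx(). A toy illustration of that packing (the bit layout here is assumed for illustration; the kernel uses its own TRACE_FLAG_* encoding):

#include <stdio.h>

#define CTX_IRQS_OFF		(1u << 8)
#define CTX_PREEMPT_MASK	0xffu

static unsigned int gen_ctx(unsigned int preempt_count, int irqs_off)
{
	unsigned int ctx = preempt_count & CTX_PREEMPT_MASK;

	if (irqs_off)
		ctx |= CTX_IRQS_OFF;
	return ctx;		/* one word instead of two arguments */
}

int main(void)
{
	unsigned int ctx = gen_ctx(2, 1);

	printf("preempt=%u irqs_off=%u\n",
	       ctx & CTX_PREEMPT_MASK, !!(ctx & CTX_IRQS_OFF));
	return 0;
}
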
+static int register_wakeup_function(struct trace_array *tr, int graph, int set)
+{
+ int ret;
+
+ /* 'set' is set if TRACE_ITER(FUNCTION) is about to be set */
+ if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER(FUNCTION))))
+ return 0;
+
+ if (graph)
+ ret = register_ftrace_graph(&fgraph_wakeup_ops);
else
- trace_function(tr, ip, parent_ip, flags, pc);
+ ret = register_ftrace_function(tr->ops);
+
+ if (!ret)
+ function_enabled = true;
+
+ return ret;
}
-#else
-#define __trace_function trace_function
+static void unregister_wakeup_function(struct trace_array *tr, int graph)
+{
+ if (!function_enabled)
+ return;
+
+ if (graph)
+ unregister_ftrace_graph(&fgraph_wakeup_ops);
+ else
+ unregister_ftrace_function(tr->ops);
+
+ function_enabled = false;
+}
+
+static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
+{
+ if (!(mask & TRACE_ITER(FUNCTION)))
+ return 0;
+
+ if (set)
+ register_wakeup_function(tr, is_graph(tr), 1);
+ else
+ unregister_wakeup_function(tr, is_graph(tr));
+ return 1;
+}
+#else /* CONFIG_FUNCTION_TRACER */
+static int register_wakeup_function(struct trace_array *tr, int graph, int set)
+{
+ return 0;
+}
+static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
+static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
+{
+ return 0;
+}
+#endif /* else CONFIG_FUNCTION_TRACER */
+
+#ifndef CONFIG_FUNCTION_GRAPH_TRACER
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
return TRACE_TYPE_UNHANDLED;
@@ -337,23 +307,58 @@ static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }
-#ifdef CONFIG_FUNCTION_TRACER
-static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
-{
- return -1;
-}
-static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_print_header(struct seq_file *s)
{
trace_default_header(s);
}
-#else
-static void wakeup_print_header(struct seq_file *s)
+#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */
+
+static void
+__trace_function(struct trace_array *tr,
+ unsigned long ip, unsigned long parent_ip,
+ unsigned int trace_ctx)
{
- trace_latency_header(s);
+ if (is_graph(tr))
+ trace_graph_function(tr, ip, parent_ip, trace_ctx);
+ else
+ trace_function(tr, ip, parent_ip, trace_ctx, NULL);
+}
+
+static int wakeup_flag_changed(struct trace_array *tr, u64 mask, int set)
+{
+ struct tracer *tracer = tr->current_trace;
+
+ if (wakeup_function_set(tr, mask, set))
+ return 0;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if (mask & TRACE_ITER(DISPLAY_GRAPH))
+ return wakeup_display_graph(tr, set);
+#endif
+
+ return trace_keep_overwrite(tracer, mask, set);
+}
+
+static int start_func_tracer(struct trace_array *tr, int graph)
+{
+ int ret;
+
+ ret = register_wakeup_function(tr, graph, 0);
+
+ if (!ret && tracing_is_enabled())
+ tracer_enabled = 1;
+ else
+ tracer_enabled = 0;
+
+ return ret;
+}
+
+static void stop_func_tracer(struct trace_array *tr, int graph)
+{
+ tracer_enabled = 0;
+
+ unregister_wakeup_function(tr, graph);
}
-#endif /* CONFIG_FUNCTION_TRACER */
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
* Should this new latency be reported/recorded?
@@ -383,68 +388,65 @@ static void
tracing_sched_switch_trace(struct trace_array *tr,
struct task_struct *prev,
struct task_struct *next,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
- struct trace_event_call *call = &event_context_switch;
- struct ring_buffer *buffer = tr->trace_buffer.buffer;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
- sizeof(*entry), flags, pc);
+ sizeof(*entry), trace_ctx);
if (!event)
return;
entry = ring_buffer_event_data(event);
entry->prev_pid = prev->pid;
entry->prev_prio = prev->prio;
- entry->prev_state = prev->state;
+ entry->prev_state = task_state_index(prev);
entry->next_pid = next->pid;
entry->next_prio = next->prio;
- entry->next_state = next->state;
+ entry->next_state = task_state_index(next);
entry->next_cpu = task_cpu(next);
- if (!call_filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
+ trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}
static void
tracing_sched_wakeup_trace(struct trace_array *tr,
struct task_struct *wakee,
struct task_struct *curr,
- unsigned long flags, int pc)
+ unsigned int trace_ctx)
{
- struct trace_event_call *call = &event_wakeup;
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
- struct ring_buffer *buffer = tr->trace_buffer.buffer;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
- sizeof(*entry), flags, pc);
+ sizeof(*entry), trace_ctx);
if (!event)
return;
entry = ring_buffer_event_data(event);
entry->prev_pid = curr->pid;
entry->prev_prio = curr->prio;
- entry->prev_state = curr->state;
+ entry->prev_state = task_state_index(curr);
entry->next_pid = wakee->pid;
entry->next_prio = wakee->prio;
- entry->next_state = wakee->state;
+ entry->next_state = task_state_index(wakee);
entry->next_cpu = task_cpu(wakee);
- if (!call_filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
+ trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}
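
Both helpers above follow the same ring-buffer discipline: reserve an event, fill the entry in place, commit. In outline (a toy linear buffer, not the kernel's lockless ring buffer):

#include <stdio.h>

struct ctx_switch_entry { int prev_pid, next_pid; };

static unsigned char buf[4096];
static size_t head, committed;

static void *event_reserve(size_t len)
{
	void *ev;

	if (head + len > sizeof(buf))
		return NULL;		/* reservation failed: caller just returns */
	ev = buf + head;
	head += len;
	return ev;
}

static void event_commit(void)
{
	committed = head;		/* readers only ever see committed data */
}

int main(void)
{
	struct ctx_switch_entry *entry = event_reserve(sizeof(*entry));

	if (!entry)
		return 1;
	entry->prev_pid = 1;		/* fill the entry in place ... */
	entry->next_pid = 2;
	event_commit();			/* ... then publish it */
	printf("committed %zu bytes\n", committed);
	return 0;
}
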
static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
- struct task_struct *prev, struct task_struct *next)
+ struct task_struct *prev, struct task_struct *next,
+ unsigned int prev_state)
{
struct trace_array_cpu *data;
u64 T0, T1, delta;
unsigned long flags;
long disabled;
int cpu;
- int pc;
+ unsigned int trace_ctx;
tracing_record_cmdline(prev);
@@ -463,15 +465,15 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
if (next != wakeup_task)
return;
- pc = preempt_count();
-
/* disable local data, not wakeup_cpu data */
cpu = raw_smp_processor_id();
- disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+ disabled = local_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
if (likely(disabled != 1))
goto out;
local_irq_save(flags);
+ trace_ctx = tracing_gen_ctx_flags(flags);
+
arch_spin_lock(&wakeup_lock);
/* We could race with grabbing wakeup_lock */
@@ -479,10 +481,11 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
goto out_unlock;
/* The task we are waiting for is waking up */
- data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
+ data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
- __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
- tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
+ __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, trace_ctx);
+ tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx);
+ __trace_stack(wakeup_trace, trace_ctx, 0);
T0 = data->preempt_timestamp;
T1 = ftrace_now(cpu);
@@ -493,7 +496,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
if (likely(!is_tracing_stopped())) {
wakeup_trace->max_latency = delta;
- update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
+ update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
}
out_unlock:
@@ -501,14 +504,14 @@ out_unlock:
arch_spin_unlock(&wakeup_lock);
local_irq_restore(flags);
out:
- atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+ local_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}
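
The latency reported here is simply T1 - T0: the delta between the per-CPU preempt_timestamp taken when the task was woken and ftrace_now() at the moment it is finally scheduled in, with update_max_tr() snapshotting the trace only on a new maximum. In miniature:

#include <stdio.h>

static unsigned long long max_latency;

static void report_latency(unsigned long long T0, unsigned long long T1)
{
	unsigned long long delta = T1 - T0;	/* wakeup -> schedule-in */

	if (delta <= max_latency)
		return;				/* not a new worst case */
	max_latency = delta;			/* like update_max_tr() */
	printf("new max wakeup latency: %llu\n", delta);
}

int main(void)
{
	report_latency(100, 250);
	report_latency(300, 310);		/* smaller delta, ignored */
	return 0;
}
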
static void __wakeup_reset(struct trace_array *tr)
{
wakeup_cpu = -1;
wakeup_prio = -1;
- tracing_dl = 0;
+ tracing_dl = false;
if (wakeup_task)
put_task_struct(wakeup_task);
@@ -520,7 +523,7 @@ static void wakeup_reset(struct trace_array *tr)
{
unsigned long flags;
- tracing_reset_online_cpus(&tr->trace_buffer);
+ tracing_reset_online_cpus(&tr->array_buffer);
local_irq_save(flags);
arch_spin_lock(&wakeup_lock);
@@ -534,9 +537,8 @@ probe_wakeup(void *ignore, struct task_struct *p)
{
struct trace_array_cpu *data;
int cpu = smp_processor_id();
- unsigned long flags;
long disabled;
- int pc;
+ unsigned int trace_ctx;
if (likely(!tracer_enabled))
return;
@@ -553,15 +555,16 @@ probe_wakeup(void *ignore, struct task_struct *p)
* - wakeup_dl handles tasks belonging to sched_dl class only.
*/
if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
- (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
+ (wakeup_rt && !rt_or_dl_task(p)) ||
(!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
return;
- pc = preempt_count();
- disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+ disabled = local_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
if (unlikely(disabled != 1))
goto out;
+ trace_ctx = tracing_gen_ctx();
+
/* interrupts should be off from try_to_wake_up */
arch_spin_lock(&wakeup_lock);
@@ -582,30 +585,28 @@ probe_wakeup(void *ignore, struct task_struct *p)
* another task until the first one wakes up.
*/
if (dl_task(p))
- tracing_dl = 1;
+ tracing_dl = true;
else
- tracing_dl = 0;
+ tracing_dl = false;
- wakeup_task = p;
- get_task_struct(wakeup_task);
+ wakeup_task = get_task_struct(p);
- local_save_flags(flags);
-
- data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
+ data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
data->preempt_timestamp = ftrace_now(cpu);
- tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
+ tracing_sched_wakeup_trace(wakeup_trace, p, current, trace_ctx);
+ __trace_stack(wakeup_trace, trace_ctx, 0);
/*
* We must be careful in using CALLER_ADDR2. But since wake_up
 * is not called by an assembly function (whereas schedule is)
* it should be safe to use it here.
*/
- __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+ __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, trace_ctx);
out_locked:
arch_spin_unlock(&wakeup_lock);
out:
- atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
+ local_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}
static void start_wakeup_tracer(struct trace_array *tr)
@@ -637,7 +638,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
if (ret) {
pr_info("wakeup trace: Couldn't activate tracepoint"
" probe to kernel_sched_migrate_task\n");
- return;
+ goto fail_deprobe_sched_switch;
}
wakeup_reset(tr);
@@ -655,6 +656,8 @@ static void start_wakeup_tracer(struct trace_array *tr)
printk(KERN_ERR "failed to start wakeup tracer\n");
return;
+fail_deprobe_sched_switch:
+ unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
fail_deprobe_wake_new:
unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
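
The fail_deprobe_sched_switch label fixes an unwind bug: when the migrate_task probe failed to register, the function used to return directly and leak the already-registered sched_switch probe. The shape of the fix (step1/step2/undo1 are hypothetical stand-ins, not kernel helpers):

#include <stdio.h>

static int step1(void) { return 0; }	/* registers fine */
static int step2(void) { return -1; }	/* fails, forcing unwind */
static void undo1(void) { printf("undo step1\n"); }

static int start(void)
{
	if (step1())
		goto fail;
	if (step2())
		goto fail_undo1;	/* new label: don't leak step1 */
	return 0;

fail_undo1:
	undo1();
fail:
	return -1;
}

int main(void)
{
	return start() ? 1 : 0;
}
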
@@ -678,8 +681,8 @@ static int __wakeup_tracer_init(struct trace_array *tr)
save_flags = tr->trace_flags;
/* non overwrite screws up the latency tracers */
- set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
- set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
+ set_tracer_flag(tr, TRACE_ITER(OVERWRITE), 1);
+ set_tracer_flag(tr, TRACE_ITER(LATENCY_FMT), 1);
tr->max_latency = 0;
wakeup_trace = tr;
@@ -695,8 +698,8 @@ static int wakeup_tracer_init(struct trace_array *tr)
if (wakeup_busy)
return -EBUSY;
- wakeup_dl = 0;
- wakeup_rt = 0;
+ wakeup_dl = false;
+ wakeup_rt = false;
return __wakeup_tracer_init(tr);
}
@@ -705,8 +708,8 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)
if (wakeup_busy)
return -EBUSY;
- wakeup_dl = 0;
- wakeup_rt = 1;
+ wakeup_dl = false;
+ wakeup_rt = true;
return __wakeup_tracer_init(tr);
}
@@ -715,22 +718,22 @@ static int wakeup_dl_tracer_init(struct trace_array *tr)
if (wakeup_busy)
return -EBUSY;
- wakeup_dl = 1;
- wakeup_rt = 0;
+ wakeup_dl = true;
+ wakeup_rt = false;
return __wakeup_tracer_init(tr);
}
static void wakeup_tracer_reset(struct trace_array *tr)
{
- int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
- int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+ int lat_flag = save_flags & TRACE_ITER(LATENCY_FMT);
+ int overwrite_flag = save_flags & TRACE_ITER(OVERWRITE);
stop_wakeup_tracer(tr);
/* make sure we put back any tasks we are tracing */
wakeup_reset(tr);
- set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
- set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+ set_tracer_flag(tr, TRACE_ITER(LATENCY_FMT), lat_flag);
+ set_tracer_flag(tr, TRACE_ITER(OVERWRITE), overwrite_flag);
ftrace_reset_array_ops(tr);
wakeup_busy = false;
}