Diffstat (limited to 'kernel/trace/trace_hwlat.c')
-rw-r--r--  kernel/trace/trace_hwlat.c | 24 +++++++++++-------------
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index d440ddd5fd8b..2f7b94e98317 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -130,7 +130,6 @@ static bool hwlat_busy;
static void trace_hwlat_sample(struct hwlat_sample *sample)
{
struct trace_array *tr = hwlat_trace;
- struct trace_event_call *call = &event_hwlat;
struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event;
struct hwlat_entry *entry;
@@ -148,8 +147,7 @@ static void trace_hwlat_sample(struct hwlat_sample *sample)
entry->nmi_count = sample->nmi_count;
entry->count = sample->count;
- if (!call_filter_check_discard(call, entry, buffer, event))
- trace_buffer_unlock_commit_nostack(buffer, event);
+ trace_buffer_unlock_commit_nostack(buffer, event);
}
/* Macros to encapsulate the time capturing infrastructure */
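
Note: the two hunks above drop the per-event filter check, so a hwlat
sample is now committed unconditionally once its fields are filled in.
A minimal sketch of the resulting reserve/fill/commit path inside
trace_hwlat_sample(), assuming the trace_buffer_lock_reserve() call
that precedes the lines shown in the hunk (error handling elided):

    event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT,
                                      sizeof(*entry), tracing_gen_ctx());
    if (!event)
            return;
    entry = ring_buffer_event_data(event);
    entry->seqnum = sample->seqnum;     /* copy sample fields */
    /* ... remaining fields as in the hunk above ... */
    trace_buffer_unlock_commit_nostack(buffer, event);
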
@@ -327,19 +325,16 @@ static void move_to_next_cpu(void)
cpus_read_lock();
cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
- next_cpu = cpumask_next(raw_smp_processor_id(), current_mask);
+ next_cpu = cpumask_next_wrap(raw_smp_processor_id(), current_mask);
cpus_read_unlock();
- if (next_cpu >= nr_cpu_ids)
- next_cpu = cpumask_first(current_mask);
-
if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
goto change_mode;
cpumask_clear(current_mask);
cpumask_set_cpu(next_cpu, current_mask);
- sched_setaffinity(0, current_mask);
+ set_cpus_allowed_ptr(current, current_mask);
return;
change_mode:
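
Note: the open-coded wrap-around above (cpumask_next() followed by a
cpumask_first() fallback) collapses into a single cpumask_next_wrap()
call with the same semantics: find the next set CPU after the current
one, wrapping around once, and return >= nr_cpu_ids only when the mask
is empty. An illustrative userspace sketch of that search, using a
hypothetical helper rather than the kernel API:

    /* Find the next set bit after 'start', wrapping around once. */
    static int next_cpu_wrap(unsigned long mask, int nbits, int start)
    {
            for (int i = 1; i <= nbits; i++) {
                    int cpu = (start + i) % nbits;
                    if (mask & (1UL << cpu))
                            return cpu;
            }
            return nbits;   /* empty mask; caller treats >= nbits as failure */
    }
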
@@ -446,7 +441,7 @@ static int start_single_kthread(struct trace_array *tr)
}
- sched_setaffinity(kthread->pid, current_mask);
+ set_cpus_allowed_ptr(kthread, current_mask);
kdata->kthread = kthread;
wake_up_process(kthread);
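
Note: both affinity call sites switch from sched_setaffinity(), the
pid-based helper behind the syscall, to set_cpus_allowed_ptr(), which
operates on a task_struct directly and is the usual in-kernel interface
for pinning kthreads the caller created itself. A sketch of the common
pattern, with illustrative names fn and cpu (error handling elided):

    struct task_struct *t = kthread_create(fn, NULL, "hwlatd");
    if (!IS_ERR(t)) {
            set_cpus_allowed_ptr(t, cpumask_of(cpu)); /* bind before first wakeup */
            wake_up_process(t);
    }
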
@@ -492,6 +487,10 @@ static int start_cpu_kthread(unsigned int cpu)
{
struct task_struct *kthread;
+ /* Do not start a new hwlatd thread if it is already running */
+ if (per_cpu(hwlat_per_cpu_data, cpu).kthread)
+ return 0;
+
kthread = kthread_run_on_cpu(kthread_fn, NULL, cpu, "hwlatd/%u");
if (IS_ERR(kthread)) {
pr_err(BANNER "could not start sampling thread\n");
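
Note: the early return makes start_cpu_kthread() idempotent: if a
hwlatd thread is already tracked for this CPU, a second start request
becomes a no-op instead of spawning a duplicate and losing the old task
pointer. Illustrative caller-side view, assuming the two start paths
visible elsewhere in this diff can race for the same CPU:

    start_cpu_kthread(cpu);   /* from start_per_cpu_kthreads() */
    start_cpu_kthread(cpu);   /* from the hotplug-online callback:
                                 sees .kthread set, returns 0 immediately */
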
@@ -516,6 +515,8 @@ static void hwlat_hotplug_workfn(struct work_struct *dummy)
if (!hwlat_busy || hwlat_data.thread_mode != MODE_PER_CPU)
goto out_unlock;
+ if (!cpu_online(cpu))
+ goto out_unlock;
if (!cpumask_test_cpu(cpu, tr->tracing_cpumask))
goto out_unlock;
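
Note: hwlat_hotplug_workfn() runs from deferred work, so the CPU that
queued it can already be offline again by the time the work executes;
the added cpu_online() check bails out in that case before the cpumask
test. The guard chain, as assembled from the lines in this hunk:

    if (!hwlat_busy || hwlat_data.thread_mode != MODE_PER_CPU)
            goto out_unlock;        /* tracer idle or not in per-cpu mode */
    if (!cpu_online(cpu))
            goto out_unlock;        /* CPU went away before the work ran */
    if (!cpumask_test_cpu(cpu, tr->tracing_cpumask))
            goto out_unlock;        /* CPU is not being traced */
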
@@ -584,9 +585,6 @@ static int start_per_cpu_kthreads(struct trace_array *tr)
*/
cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
- for_each_online_cpu(cpu)
- per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL;
-
for_each_cpu(cpu, current_mask) {
retval = start_cpu_kthread(cpu);
if (retval)
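
Note: the loop that pre-cleared every per-CPU .kthread pointer is gone;
now that start_cpu_kthread() checks the pointer itself, clearing the
pointers of threads that are still running would defeat the new
already-running guard. Start-up reduces to the following, reconstructed
from the hunk above:

    cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
    for_each_cpu(cpu, current_mask) {
            retval = start_cpu_kthread(cpu);  /* no-op when already running */
            if (retval)
                    break;  /* assumed error path; the hunk ends here */
    }
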
@@ -634,7 +632,7 @@ static int s_mode_show(struct seq_file *s, void *v)
else
seq_printf(s, "%s", thread_mode_str[mode]);
- if (mode != MODE_MAX)
+ if (mode < MODE_MAX - 1) /* if mode is any but last */
seq_puts(s, " ");
return 0;
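
Note: s_mode_show() is handed one mode at a time by seq_file iteration,
and the iterator never passes it MODE_MAX, so the old condition was
always true and a trailing space followed the last mode string; the new
condition emits the separator only between entries. Flattened into a
single loop for illustration, eliding the bracket highlighting of the
currently selected mode:

    for (mode = 0; mode < MODE_MAX; mode++) {
            seq_printf(s, "%s", thread_mode_str[mode]);
            if (mode < MODE_MAX - 1)        /* space between entries, none trailing */
                    seq_puts(s, " ");
    }
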