path: root/kernel/events/hw_breakpoint.c
author		Oleg Nesterov <oleg@redhat.com>	2013-06-20 17:50:15 +0200
committer	Ingo Molnar <mingo@kernel.org>	2013-06-20 17:58:56 +0200
commit		1c10adbb929936316f71df089ace699fce037e24 (patch)
tree		31cb135dfa9f016724fe97e096153abe507ea073 /kernel/events/hw_breakpoint.c
parent		7ab71f3244e9f970c29566c5a67e13d1fa38c387 (diff)
hw_breakpoint: Introduce cpumask_of_bp()
Add a trivial helper which simply returns cpumask_of() or
cpu_possible_mask, depending on bp->cpu.

Change fetch_bp_busy_slots() and toggle_bp_slot() to always do
for_each_cpu(cpumask_of_bp) to simplify the code and avoid
duplication.

Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/20130620155015.GA6340@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
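The shape of the refactoring is easy to see outside the kernel. Below is a
hedged userspace C sketch of the same pattern, not the kernel code: struct
breakpoint, MAX_CPUS, the plain-integer bitmask standing in for
cpu_possible_mask, and visit_slots() are all invented for illustration (the
real kernel uses struct cpumask and the for_each_cpu() iterator). The point
is that cpumask_of_bp() picks the iteration set once, so callers no longer
branch on bp->cpu.

	/*
	 * Userspace sketch only: struct breakpoint, MAX_CPUS and the
	 * bitmask helpers are invented for illustration; the kernel
	 * uses struct cpumask and for_each_cpu() instead.
	 */
	#include <stdio.h>

	#define MAX_CPUS 8

	struct breakpoint {
		int cpu;	/* >= 0: bound to one CPU; -1: all possible CPUs */
	};

	static const unsigned int cpu_possible_mask = (1u << MAX_CPUS) - 1;

	/* Analogue of cpumask_of_bp(): pick the iteration set once. */
	static unsigned int cpumask_of_bp(const struct breakpoint *bp)
	{
		if (bp->cpu >= 0)
			return 1u << bp->cpu;
		return cpu_possible_mask;
	}

	/* Callers loop over the mask instead of branching on bp->cpu. */
	static void visit_slots(const struct breakpoint *bp)
	{
		unsigned int mask = cpumask_of_bp(bp);
		int cpu;

		for (cpu = 0; cpu < MAX_CPUS; cpu++)
			if (mask & (1u << cpu))
				printf("account slot on cpu %d\n", cpu);
	}

	int main(void)
	{
		struct breakpoint pinned = { .cpu = 2 };	/* per-CPU event */
		struct breakpoint global = { .cpu = -1 };	/* system-wide event */

		visit_slots(&pinned);	/* visits cpu 2 only */
		visit_slots(&global);	/* visits cpus 0..7 */
		return 0;
	}

Collapsing the two code paths into one mask-driven loop is what lets
fetch_bp_busy_slots() and toggle_bp_slot() in the diff below drop their
duplicated branches.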
Diffstat (limited to 'kernel/events/hw_breakpoint.c')
-rw-r--r--	kernel/events/hw_breakpoint.c	43
1 file changed, 17 insertions(+), 26 deletions(-)
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 5cd4f6d9652c..9c71445328af 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -127,6 +127,13 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 	return count;
 }
 
+static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
+{
+	if (bp->cpu >= 0)
+		return cpumask_of(bp->cpu);
+	return cpu_possible_mask;
+}
+
 /*
  * Report the number of pinned/un-pinned breakpoints we have in
  * a given cpu (cpu > -1) or in all of them (cpu = -1).
@@ -135,25 +142,13 @@ static void
 fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		    enum bp_type_idx type)
 {
-	int cpu = bp->cpu;
-	struct task_struct *tsk = bp->hw.bp_target;
-
-	if (cpu >= 0) {
-		slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
-		if (!tsk)
-			slots->pinned += max_task_bp_pinned(cpu, type);
-		else
-			slots->pinned += task_bp_pinned(cpu, bp, type);
-		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
-
-		return;
-	}
+	const struct cpumask *cpumask = cpumask_of_bp(bp);
+	int cpu;
 
-	for_each_possible_cpu(cpu) {
-		unsigned int nr;
+	for_each_cpu(cpu, cpumask) {
+		unsigned int nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
 
-		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
-		if (!tsk)
+		if (!bp->hw.bp_target)
 			nr += max_task_bp_pinned(cpu, type);
 		else
 			nr += task_bp_pinned(cpu, bp, type);
@@ -205,25 +200,21 @@ static void
 toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 	       int weight)
 {
-	int cpu = bp->cpu;
-	struct task_struct *tsk = bp->hw.bp_target;
+	const struct cpumask *cpumask = cpumask_of_bp(bp);
+	int cpu;
 
 	if (!enable)
 		weight = -weight;
 
 	/* Pinned counter cpu profiling */
-	if (!tsk) {
-		per_cpu(nr_cpu_bp_pinned[type], cpu) += weight;
+	if (!bp->hw.bp_target) {
+		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
 		return;
 	}
 
 	/* Pinned counter task profiling */
-	if (cpu >= 0) {
+	for_each_cpu(cpu, cpumask)
 		toggle_bp_task_slot(bp, cpu, type, weight);
-	} else {
-		for_each_possible_cpu(cpu)
-			toggle_bp_task_slot(bp, cpu, type, weight);
-	}
 
 	if (enable)
 		list_add_tail(&bp->hw.bp_list, &bp_task_head);