author	leilei.lin <leilei.lin@alibaba-inc.com>	2018-03-06 17:36:37 +0800
committer	Ingo Molnar <mingo@kernel.org>	2018-03-12 15:28:51 +0100
commit	33801b94741d6c3be9713c10aa627477216c21e2 (patch)
tree	b241236cf8773658ce499c85a483199675fb4f82	/kernel/events
parent	8d5bce0c37fa10f21dbdd6a6d8fcba85202fe24e (diff)
perf/core: Fix installing cgroup events on CPU
There are two problems when installing cgroup events on CPUs: firstly,
list_update_cgroup_event() only tries to set cpuctx->cgrp for the first
event; if that one mismatches on @cgrp, we'll not try again for later
additions.

Secondly, when we install a cgroup event into an active context, only
issue an event reprogram when the event matches the current cgroup
context. This avoids pointless event reprogramming.

Signed-off-by: leilei.lin <leilei.lin@alibaba-inc.com>
[ Improved the changelog and comments. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: brendan.d.gregg@gmail.com
Cc: eranian@gmail.com
Cc: linux-kernel@vger.kernel.org
Cc: yang_oliver@hotmail.com
Link: http://lkml.kernel.org/r/20180306093637.28247-1-linxiulei@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
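To make the first problem concrete, below is a small user-space model, not kernel code: struct cpuctx and the two add_event_*() helpers are simplified stand-ins for perf_cpu_context and the add path of list_update_cgroup_event(), and a plain pointer comparison stands in for cgroup_is_descendant(). With the old early return on ctx->nr_cgroups++, a second event whose cgroup does match the running task never gets a chance to set cpuctx->cgrp:

/*
 * Standalone sketch of the first problem described above (NOT kernel
 * code; names are simplified stand-ins for the perf cgroup machinery).
 */
#include <stdio.h>

struct cgroup { int id; };

struct cpuctx {
	struct cgroup *cgrp;	/* cgroup of the current task, once an event matches */
	int nr_cgroups;		/* number of cgroup events in the context */
};

/* old behaviour: bail out as soon as this is not the first cgroup event */
static void add_event_old(struct cpuctx *c, struct cgroup *task_cgrp,
			  struct cgroup *event_cgrp)
{
	if (c->nr_cgroups++)
		return;			/* later events never re-check the cgroup */
	if (task_cgrp == event_cgrp)
		c->cgrp = task_cgrp;
}

/* patched behaviour: try to set cpuctx->cgrp for every newly added event */
static void add_event_new(struct cpuctx *c, struct cgroup *task_cgrp,
			  struct cgroup *event_cgrp)
{
	if (!c->cgrp && task_cgrp == event_cgrp)
		c->cgrp = task_cgrp;
	if (c->nr_cgroups++)
		return;			/* list handling (omitted) only for the first event */
}

int main(void)
{
	struct cgroup other = { 1 }, mine = { 2 };
	struct cpuctx oldc = { 0 }, newc = { 0 };

	/* first event targets a cgroup the current task is NOT in ... */
	add_event_old(&oldc, &mine, &other);
	add_event_new(&newc, &mine, &other);
	/* ... the second event targets the task's own cgroup */
	add_event_old(&oldc, &mine, &mine);
	add_event_new(&newc, &mine, &mine);

	printf("old code: cpuctx->cgrp %s\n", oldc.cgrp ? "set" : "left NULL (the bug)");
	printf("patched : cpuctx->cgrp %s\n", newc.cgrp ? "set" : "left NULL (the bug)");
	return 0;
}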
Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/core.c | 46
1 file changed, 35 insertions(+), 11 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f98c0f88cc94..969f865f9f1c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -937,27 +937,39 @@ list_update_cgroup_event(struct perf_event *event,
 	if (!is_cgroup_event(event))
 		return;
-	if (add && ctx->nr_cgroups++)
-		return;
-	else if (!add && --ctx->nr_cgroups)
-		return;
 	/*
 	 * Because cgroup events are always per-cpu events,
 	 * this will always be called from the right CPU.
 	 */
 	cpuctx = __get_cpu_context(ctx);
-	cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
-	/* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU .*/
-	if (add) {
+
+	/*
+	 * Since setting cpuctx->cgrp is conditional on the current @cgrp
+	 * matching the event's cgroup, we must do this for every new event,
+	 * because if the first would mismatch, the second would not try again
+	 * and we would leave cpuctx->cgrp unset.
+	 */
+	if (add && !cpuctx->cgrp) {
 		struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
-		list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
 		if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
 			cpuctx->cgrp = cgrp;
-	} else {
-		list_del(cpuctx_entry);
-		cpuctx->cgrp = NULL;
 	}
+
+	if (add && ctx->nr_cgroups++)
+		return;
+	else if (!add && --ctx->nr_cgroups)
+		return;
+
+	/* no cgroup running */
+	if (!add)
+		cpuctx->cgrp = NULL;
+
+	cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
+	if (add)
+		list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
+	else
+		list_del(cpuctx_entry);
 }
 #else /* !CONFIG_CGROUP_PERF */
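The hunk above keeps the existing first-add/last-remove bookkeeping but moves it after the cpuctx->cgrp update: cgrp_cpuctx_list is still only touched on the 0 -> 1 and 1 -> 0 transitions of ctx->nr_cgroups. A minimal standalone model of that transition pattern (illustrative names only; an on_list flag stands in for the list_add()/list_del() calls):

/* Standalone sketch of the "first add / last remove" pattern, not kernel code. */
#include <assert.h>
#include <stdio.h>

struct toy_ctx {
	int nr_cgroups;		/* cgroup events currently in the context */
	int on_list;		/* 1 while the context sits on the per-CPU list */
};

static void update(struct toy_ctx *ctx, int add)
{
	if (add && ctx->nr_cgroups++)
		return;			/* not the first event: list untouched */
	else if (!add && --ctx->nr_cgroups)
		return;			/* not the last event: list untouched */

	if (add)
		ctx->on_list = 1;	/* 0 -> 1: would list_add() here */
	else
		ctx->on_list = 0;	/* 1 -> 0: would list_del() here */
}

int main(void)
{
	struct toy_ctx ctx = { 0, 0 };

	update(&ctx, 1);		/* first add: goes on the list */
	update(&ctx, 1);		/* second add: list untouched */
	assert(ctx.nr_cgroups == 2 && ctx.on_list);

	update(&ctx, 0);		/* 2 -> 1: still listed */
	update(&ctx, 0);		/* last remove: off the list */
	assert(ctx.nr_cgroups == 0 && !ctx.on_list);

	printf("list touched only on the 0<->1 transitions\n");
	return 0;
}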
@@ -2489,6 +2501,18 @@ static int __perf_install_in_context(void *info)
 		raw_spin_lock(&task_ctx->lock);
 	}
+#ifdef CONFIG_CGROUP_PERF
+	if (is_cgroup_event(event)) {
+		/*
+		 * If the current cgroup doesn't match the event's
+		 * cgroup, we should not try to schedule it.
+		 */
+		struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
+		reprogram = cgroup_is_descendant(cgrp->css.cgroup,
+					event->cgrp->css.cgroup);
+	}
+#endif
+
 	if (reprogram) {
 		ctx_sched_out(ctx, cpuctx, EVENT_TIME);
 		add_event_to_ctx(event, ctx);
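The second hunk makes __perf_install_in_context() skip the ctx_sched_out()/ctx_resched() round trip when the running task's cgroup is neither the event's cgroup nor a descendant of it. A standalone illustration of that gating (not kernel code; the parent-pointer walk below only mimics what cgroup_is_descendant() answers, and the cgroup names are made up):

/* Standalone sketch of the "only reprogram on a matching cgroup" check. */
#include <stdbool.h>
#include <stdio.h>

struct cgroup {
	const char *name;
	struct cgroup *parent;
};

/* true if @self is @ancestor or lies somewhere below it in the hierarchy */
static bool is_descendant(struct cgroup *self, struct cgroup *ancestor)
{
	for (; self; self = self->parent)
		if (self == ancestor)
			return true;
	return false;
}

int main(void)
{
	struct cgroup root  = { "/",      NULL  };
	struct cgroup web   = { "/web",   &root };
	struct cgroup batch = { "/batch", &root };
	struct cgroup web_a = { "/web/a", &web  };

	/* event is bound to /web; the current task runs in /web/a */
	bool reprogram = is_descendant(&web_a, &web);
	printf("task in /web/a, event on /web  -> reprogram: %d\n", reprogram);

	/* current task runs in /batch: no need to reschedule anything now */
	reprogram = is_descendant(&batch, &web);
	printf("task in /batch, event on /web -> reprogram: %d\n", reprogram);
	return 0;
}

Only the first case pays for a reschedule; in the second, the event is simply added to the context and gets programmed once a matching cgroup actually runs on that CPU.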