author		Peter Zijlstra <peterz@infradead.org>	2019-08-07 11:17:00 +0200
committer	Ingo Molnar <mingo@kernel.org>		2020-03-06 11:56:55 +0100
commit		ab6f824cfdf7363b5e529621cbc72ae6519c78d1
tree		348beb74ac92776735d20a07e3de814e12718ba3 /kernel/events
parent		1941011a8bd26f01f36f827d5ebc6469749485bb
perf/core: Unify {pinned,flexible}_sched_in()
Less is more; unify the two very nearly identical functions.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
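For reference, the unified path as it reads with this patch applied, reassembled from the hunks below; the event_filter_match() check lives in context the diff elides and is inferred from the removed flexible_sched_in():

static inline struct list_head *get_event_list(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active;
}

static int merge_sched_in(struct perf_event *event, void *data)
{
	struct sched_in_data *sid = data;

	WARN_ON_ONCE(event->ctx != sid->ctx);

	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	if (!event_filter_match(event))		/* inferred from elided context */
		return 0;

	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
		if (!group_sched_in(event, sid->cpuctx, sid->ctx))
			list_add_tail(&event->active_list, get_event_list(event));
	}

	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		/* Only pinned groups are demoted when they fail to schedule. */
		if (event->attr.pinned)
			perf_event_set_state(event, PERF_EVENT_STATE_ERROR);

		sid->can_add_hw = 0;
		sid->ctx->rotate_necessary = 1;
	}

	return 0;
}

Both ctx_pinned_sched_in() and ctx_flexible_sched_in() now pass this single callback to visit_groups_merge(); the pinned/flexible distinction is carried entirely by event->attr.pinned, via get_event_list() and the ERROR demotion above.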
Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/core.c	58
1 file changed, 21 insertions(+), 37 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3f1f77de7247..b713080eedcc 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1986,6 +1986,12 @@ static int perf_get_aux_event(struct perf_event *event,
 	return 1;
 }
 
+static inline struct list_head *get_event_list(struct perf_event *event)
+{
+	struct perf_event_context *ctx = event->ctx;
+	return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active;
+}
+
 static void perf_group_detach(struct perf_event *event)
 {
 	struct perf_event *sibling, *tmp;
@@ -2028,12 +2034,8 @@ static void perf_group_detach(struct perf_event *event)
 		if (!RB_EMPTY_NODE(&event->group_node)) {
 			add_event_to_groups(sibling, event->ctx);
 
-			if (sibling->state == PERF_EVENT_STATE_ACTIVE) {
-				struct list_head *list = sibling->attr.pinned ?
-					&ctx->pinned_active : &ctx->flexible_active;
-
-				list_add_tail(&sibling->active_list, list);
-			}
+			if (sibling->state == PERF_EVENT_STATE_ACTIVE)
+				list_add_tail(&sibling->active_list, get_event_list(sibling));
 		}
 
 		WARN_ON_ONCE(sibling->ctx != event->ctx);
@@ -2350,6 +2352,8 @@ event_sched_in(struct perf_event *event,
 {
 	int ret = 0;
 
+	WARN_ON_ONCE(event->ctx != ctx);
+
 	lockdep_assert_held(&ctx->lock);
 
 	if (event->state <= PERF_EVENT_STATE_OFF)
@@ -3425,10 +3429,12 @@ struct sched_in_data {
 	int can_add_hw;
 };
 
-static int pinned_sched_in(struct perf_event *event, void *data)
+static int merge_sched_in(struct perf_event *event, void *data)
 {
 	struct sched_in_data *sid = data;
 
+	WARN_ON_ONCE(event->ctx != sid->ctx);
+
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
@@ -3437,37 +3443,15 @@ static int pinned_sched_in(struct perf_event *event, void *data)
 
 	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
 		if (!group_sched_in(event, sid->cpuctx, sid->ctx))
-			list_add_tail(&event->active_list, &sid->ctx->pinned_active);
+			list_add_tail(&event->active_list, get_event_list(event));
 	}
 
-	/*
-	 * If this pinned group hasn't been scheduled,
-	 * put it in error state.
-	 */
-	if (event->state == PERF_EVENT_STATE_INACTIVE)
-		perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
-
-	return 0;
-}
-
-static int flexible_sched_in(struct perf_event *event, void *data)
-{
-	struct sched_in_data *sid = data;
-
-	if (event->state <= PERF_EVENT_STATE_OFF)
-		return 0;
-
-	if (!event_filter_match(event))
-		return 0;
+	if (event->state == PERF_EVENT_STATE_INACTIVE) {
+		if (event->attr.pinned)
+			perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
 
-	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
-		int ret = group_sched_in(event, sid->cpuctx, sid->ctx);
-		if (ret) {
-			sid->can_add_hw = 0;
-			sid->ctx->rotate_necessary = 1;
-			return 0;
-		}
-		list_add_tail(&event->active_list, &sid->ctx->flexible_active);
+		sid->can_add_hw = 0;
+		sid->ctx->rotate_necessary = 1;
 	}
 
 	return 0;
@@ -3485,7 +3469,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
 
 	visit_groups_merge(&ctx->pinned_groups,
 			   smp_processor_id(),
-			   pinned_sched_in, &sid);
+			   merge_sched_in, &sid);
 }
 
 static void
@@ -3500,7 +3484,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 
 	visit_groups_merge(&ctx->flexible_groups,
 			   smp_processor_id(),
-			   flexible_sched_in, &sid);
+			   merge_sched_in, &sid);
 }
 
 static void