author		Peter Zijlstra <peterz@infradead.org>	2024-08-07 13:29:27 +0200
committer	Peter Zijlstra <peterz@infradead.org>	2024-08-08 12:27:31 +0200
commit		558abc7e3f895049faa46b08656be4c60dc6e9fd (patch)
tree		94ff7547e1f2b417d7004b4060ffd5f5bc014486 /kernel
parent		9a32bd9901fe5b1dcf544389dbf04f3b0a2fbab4 (diff)
perf: Fix event_function_call() locking
All the event_function()/@func call contexts already use perf_ctx_lock(),
except for the !ctx->is_active case. Make it all consistent.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20240807115550.138301094@infradead.org
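[Editorial context, not part of the commit: perf_ctx_lock()/perf_ctx_unlock() are
static helpers in kernel/events/core.c that nest the per-CPU context lock around
the task context lock. A minimal sketch of their shape, reproduced from memory
rather than verbatim from this tree, with explanatory comments added:]

	static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
				  struct perf_event_context *ctx)
	{
		/* Lock ordering: per-CPU context lock first, then the task context lock. */
		raw_spin_lock(&cpuctx->ctx.lock);
		if (ctx)
			raw_spin_lock(&ctx->lock);
	}

	static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
				    struct perf_event_context *ctx)
	{
		/* Release in the reverse order of acquisition. */
		if (ctx)
			raw_spin_unlock(&ctx->lock);
		raw_spin_unlock(&cpuctx->ctx.lock);
	}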
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/core.c	9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index eb03c9ab1670..ab49deae6e6f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -263,6 +263,7 @@ unlock:
 static void event_function_call(struct perf_event *event, event_f func, void *data)
 {
 	struct perf_event_context *ctx = event->ctx;
+	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
 	struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
 	struct event_function_struct efs = {
 		.event = event,
@@ -291,22 +292,22 @@ again:
 	if (!task_function_call(task, event_function, &efs))
 		return;
 
-	raw_spin_lock_irq(&ctx->lock);
+	perf_ctx_lock(cpuctx, ctx);
 	/*
 	 * Reload the task pointer, it might have been changed by
 	 * a concurrent perf_event_context_sched_out().
 	 */
 	task = ctx->task;
 	if (task == TASK_TOMBSTONE) {
-		raw_spin_unlock_irq(&ctx->lock);
+		perf_ctx_unlock(cpuctx, ctx);
 		return;
 	}
 	if (ctx->is_active) {
-		raw_spin_unlock_irq(&ctx->lock);
+		perf_ctx_unlock(cpuctx, ctx);
 		goto again;
 	}
 	func(event, NULL, ctx, data);
-	raw_spin_unlock_irq(&ctx->lock);
+	perf_ctx_unlock(cpuctx, ctx);
 }
 
 /*
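[Editorial note on the design: after this change, every path that invokes @func
holds the same lock pair. event_function(), run on the target CPU via
task_function_call()/cpu_function_call(), already takes perf_ctx_lock(); the
local fallback for an inactive context now does the same instead of taking only
raw_spin_lock_irq(&ctx->lock).]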