6.10-stable review patch. If anyone has any objections, please let me know.

------------------

From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>

[ Upstream commit 558abc7e3f895049faa46b08656be4c60dc6e9fd ]

All the event_function/@func call context already uses perf_ctx_lock()
except for the !ctx->is_active case.

Make it all consistent.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Reviewed-by: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
Reviewed-by: Namhyung Kim <namhyung@xxxxxxxxxx>
Link: https://lore.kernel.org/r/20240807115550.138301094@xxxxxxxxxxxxx
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
 kernel/events/core.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 081d9692ce747..e18a07de9920a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -263,6 +263,7 @@ static int event_function(void *info)
 static void event_function_call(struct perf_event *event, event_f func, void *data)
 {
 	struct perf_event_context *ctx = event->ctx;
+	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
 	struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
 	struct event_function_struct efs = {
 		.event = event,
@@ -291,22 +292,22 @@ static void event_function_call(struct perf_event *event, event_f func, void *da
 	if (!task_function_call(task, event_function, &efs))
 		return;
 
-	raw_spin_lock_irq(&ctx->lock);
+	perf_ctx_lock(cpuctx, ctx);
 	/*
 	 * Reload the task pointer, it might have been changed by
 	 * a concurrent perf_event_context_sched_out().
 	 */
 	task = ctx->task;
 	if (task == TASK_TOMBSTONE) {
-		raw_spin_unlock_irq(&ctx->lock);
+		perf_ctx_unlock(cpuctx, ctx);
 		return;
 	}
 	if (ctx->is_active) {
-		raw_spin_unlock_irq(&ctx->lock);
+		perf_ctx_unlock(cpuctx, ctx);
 		goto again;
 	}
 	func(event, NULL, ctx, data);
-	raw_spin_unlock_irq(&ctx->lock);
+	perf_ctx_unlock(cpuctx, ctx);
 }
 
 /*
-- 
2.43.0
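
For anyone reviewing the backport without the surrounding code: the change makes the
!ctx->is_active fallback in event_function_call() take the same
perf_ctx_lock(cpuctx, ctx)/perf_ctx_unlock(cpuctx, ctx) pair that the other paths into
the @func callback use, rather than taking only ctx->lock via raw_spin_lock_irq().
A minimal user-space sketch of that "every call path goes through one paired lock
helper" pattern follows; it uses pthread mutexes, and ctx_pair_lock(), ctx_pair_unlock()
and the structs are hypothetical stand-ins for the kernel helpers, not kernel API.

/*
 * Illustrative sketch only, not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

struct cpu_ctx  { pthread_mutex_t lock; };
struct task_ctx { pthread_mutex_t lock; int is_active; };

/*
 * Always take the CPU-context lock first, then the task-context lock,
 * so every caller observes the same lock ordering.
 */
static void ctx_pair_lock(struct cpu_ctx *cpuctx, struct task_ctx *ctx)
{
	pthread_mutex_lock(&cpuctx->lock);
	if (ctx)
		pthread_mutex_lock(&ctx->lock);
}

static void ctx_pair_unlock(struct cpu_ctx *cpuctx, struct task_ctx *ctx)
{
	if (ctx)
		pthread_mutex_unlock(&ctx->lock);
	pthread_mutex_unlock(&cpuctx->lock);
}

/*
 * Caller-side shape after the patch: the !ctx->is_active path uses the
 * same paired helper as every other path that invokes the callback.
 */
static void run_callback_locked(struct cpu_ctx *cpuctx, struct task_ctx *ctx,
				void (*func)(struct task_ctx *))
{
	ctx_pair_lock(cpuctx, ctx);
	if (!ctx->is_active)
		func(ctx);		/* callback runs under both locks */
	ctx_pair_unlock(cpuctx, ctx);
}

static void example_func(struct task_ctx *ctx)
{
	printf("callback ran, is_active=%d\n", ctx->is_active);
}

int main(void)
{
	struct cpu_ctx cpuctx = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct task_ctx ctx = { .lock = PTHREAD_MUTEX_INITIALIZER, .is_active = 0 };

	run_callback_locked(&cpuctx, &ctx, example_func);
	return 0;
}

Routing every call site through one helper that takes both locks in a fixed order keeps
the locking identical across paths, which is the consistency the commit message refers to.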