This is a note to let you know that I've just added the patch titled

    perf: Fix event_function_call() locking

to the 6.11-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     perf-fix-event_function_call-locking.patch
and it can be found in the queue-6.11 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


commit 71722d7c6ec4ec760c25af184f34e41cb66a1a84
Author: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Date:   Wed Aug 7 13:29:27 2024 +0200

    perf: Fix event_function_call() locking

    [ Upstream commit 558abc7e3f895049faa46b08656be4c60dc6e9fd ]

    All the event_function/@func call context already uses perf_ctx_lock()
    except for the !ctx->is_active case.

    Make it all consistent.

    Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
    Reviewed-by: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
    Reviewed-by: Namhyung Kim <namhyung@xxxxxxxxxx>
    Link: https://lore.kernel.org/r/20240807115550.138301094@xxxxxxxxxxxxx
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/kernel/events/core.c b/kernel/events/core.c
index b21c8f24a9876..4339df585d42d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -263,6 +263,7 @@ static int event_function(void *info)
 static void event_function_call(struct perf_event *event, event_f func, void *data)
 {
 	struct perf_event_context *ctx = event->ctx;
+	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
 	struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
 	struct event_function_struct efs = {
 		.event = event,
@@ -291,22 +292,22 @@ static void event_function_call(struct perf_event *event, event_f func, void *da
 	if (!task_function_call(task, event_function, &efs))
 		return;

-	raw_spin_lock_irq(&ctx->lock);
+	perf_ctx_lock(cpuctx, ctx);
 	/*
 	 * Reload the task pointer, it might have been changed by
 	 * a concurrent perf_event_context_sched_out().
 	 */
 	task = ctx->task;
 	if (task == TASK_TOMBSTONE) {
-		raw_spin_unlock_irq(&ctx->lock);
+		perf_ctx_unlock(cpuctx, ctx);
 		return;
 	}
 	if (ctx->is_active) {
-		raw_spin_unlock_irq(&ctx->lock);
+		perf_ctx_unlock(cpuctx, ctx);
 		goto again;
 	}
 	func(event, NULL, ctx, data);
-	raw_spin_unlock_irq(&ctx->lock);
+	perf_ctx_unlock(cpuctx, ctx);
 }

 /*
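
For reference, perf_ctx_lock()/perf_ctx_unlock() nest the per-CPU context lock
around the task context lock. The following is a simplified sketch of their
shape in kernel/events/core.c (paraphrased for illustration, not part of this
patch):

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	/* The per-CPU context lock is taken first ... */
	raw_spin_lock(&cpuctx->ctx.lock);
	/* ... and the task context lock, if any, nests inside it. */
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	/* Release in the reverse order of acquisition. */
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}

With this change, the !ctx->is_active fallback in event_function_call() takes
both locks in that order, matching the locking that event_function() already
uses on the IPI path, rather than taking only ctx->lock.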