The following commit has been merged into the perf/core branch of tip:

Commit-ID:     1fbc6c8e5289c252867c33bf12d54c11c8cfeac4
Gitweb:        https://git.kernel.org/tip/1fbc6c8e5289c252867c33bf12d54c11c8cfeac4
Author:        Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
AuthorDate:    Fri, 14 Mar 2025 10:26:59 -07:00
Committer:     Peter Zijlstra <peterz@xxxxxxxxxxxxx>
CommitterDate: Mon, 17 Mar 2025 11:23:37 +01:00

perf/x86: Remove swap_task_ctx()

The PMU-specific data is now saved in task_struct, so it no longer
needs to be swapped between contexts.

Remove the swap_task_ctx() support.

Signed-off-by: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Link: https://lore.kernel.org/r/20250314172700.438923-6-kan.liang@xxxxxxxxxxxxxxx
---
 arch/x86/events/core.c       |  9 ---------
 arch/x86/events/intel/core.c |  7 -------
 arch/x86/events/intel/lbr.c  | 23 -----------------------
 arch/x86/events/perf_event.h | 11 -----------
 4 files changed, 50 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index ae8c90a..833478f 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -87,7 +87,6 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_commit_scheduling, *x86_pmu.commit_scheduling);
 DEFINE_STATIC_CALL_NULL(x86_pmu_stop_scheduling,  *x86_pmu.stop_scheduling);
 
 DEFINE_STATIC_CALL_NULL(x86_pmu_sched_task,    *x86_pmu.sched_task);
-DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);
 
 DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs,   *x86_pmu.drain_pebs);
 DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);
@@ -2039,7 +2038,6 @@ static void x86_pmu_static_call_update(void)
 	static_call_update(x86_pmu_stop_scheduling, x86_pmu.stop_scheduling);
 
 	static_call_update(x86_pmu_sched_task, x86_pmu.sched_task);
-	static_call_update(x86_pmu_swap_task_ctx, x86_pmu.swap_task_ctx);
 
 	static_call_update(x86_pmu_drain_pebs, x86_pmu.drain_pebs);
 	static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases);
@@ -2644,12 +2642,6 @@ static void x86_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
 	static_call_cond(x86_pmu_sched_task)(pmu_ctx, task, sched_in);
 }
 
-static void x86_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
-				  struct perf_event_pmu_context *next_epc)
-{
-	static_call_cond(x86_pmu_swap_task_ctx)(prev_epc, next_epc);
-}
-
 void perf_check_microcode(void)
 {
 	if (x86_pmu.check_microcode)
@@ -2714,7 +2706,6 @@ static struct pmu pmu = {
 	.event_idx		= x86_pmu_event_idx,
 
 	.sched_task		= x86_pmu_sched_task,
-	.swap_task_ctx		= x86_pmu_swap_task_ctx,
 	.check_period		= x86_pmu_check_period,
 
 	.aux_output_match	= x86_pmu_aux_output_match,
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 3efbb03..dc38dec 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -5300,12 +5300,6 @@ static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
 	intel_pmu_lbr_sched_task(pmu_ctx, task, sched_in);
 }
 
-static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
-				    struct perf_event_pmu_context *next_epc)
-{
-	intel_pmu_lbr_swap_task_ctx(prev_epc, next_epc);
-}
-
 static int intel_pmu_check_period(struct perf_event *event, u64 value)
 {
 	return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
@@ -5474,7 +5468,6 @@ static __initconst const struct x86_pmu intel_pmu = {
 	.guest_get_msrs		= intel_guest_get_msrs,
 
 	.sched_task		= intel_pmu_sched_task,
-	.swap_task_ctx		= intel_pmu_swap_task_ctx,
 
 	.check_period		= intel_pmu_check_period,
 
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 24719ad..f44c3d8 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -522,29 +522,6 @@ static void __intel_pmu_lbr_save(void *ctx)
 	cpuc->last_log_id = ++task_context_opt(ctx)->log_id;
 }
 
-void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
-				 struct perf_event_pmu_context *next_epc)
-{
-	void *prev_ctx_data, *next_ctx_data;
-
-	swap(prev_epc->task_ctx_data, next_epc->task_ctx_data);
-
-	/*
-	 * Architecture specific synchronization makes sense in case
-	 * both prev_epc->task_ctx_data and next_epc->task_ctx_data
-	 * pointers are allocated.
-	 */
-
-	prev_ctx_data = next_epc->task_ctx_data;
-	next_ctx_data = prev_epc->task_ctx_data;
-
-	if (!prev_ctx_data || !next_ctx_data)
-		return;
-
-	swap(task_context_opt(prev_ctx_data)->lbr_callstack_users,
-	     task_context_opt(next_ctx_data)->lbr_callstack_users);
-}
-
 void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
 			      struct task_struct *task, bool sched_in)
 {
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 67d2d25..8e5a4c3 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -959,14 +959,6 @@ struct x86_pmu {
 	int		num_topdown_events;
 
 	/*
-	 * perf task context (i.e. struct perf_event_pmu_context::task_ctx_data)
-	 * switch helper to bridge calls from perf/core to perf/x86.
-	 * See struct pmu::swap_task_ctx() usage for examples;
-	 */
-	void		(*swap_task_ctx)(struct perf_event_pmu_context *prev_epc,
-					 struct perf_event_pmu_context *next_epc);
-
-	/*
 	 * AMD bits
 	 */
 	unsigned int	amd_nb_constraints : 1;
@@ -1671,9 +1663,6 @@ void intel_pmu_lbr_save_brstack(struct perf_sample_data *data,
 				struct cpu_hw_events *cpuc,
 				struct perf_event *event);
 
-void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
-				 struct perf_event_pmu_context *next_epc);
-
 void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
 			      struct task_struct *task, bool sched_in);
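
For context, a minimal userspace sketch (not kernel code; the struct and
field names below are illustrative stand-ins, not the kernel's actual
definitions) of why the swap is no longer needed: once the PMU-specific
state hangs off the task itself, scheduling a task in just means
following that task's own pointer, so there is nothing to exchange
between the outgoing and incoming perf contexts.

/*
 * Illustrative stand-ins only.  The point: with the PMU data reachable
 * from the task, a "context switch" needs no swap_task_ctx() step.
 */
#include <stdio.h>

struct lbr_ctx {
	int lbr_callstack_users;	/* stand-in for the field that used to be swapped */
};

struct task {
	const char *comm;
	struct lbr_ctx *perf_ctx_data;	/* PMU-specific data lives with the task */
};

/* "sched in": no swap, just use the incoming task's own context */
static void sched_in(const struct task *next)
{
	printf("sched in %s: lbr_callstack_users=%d\n",
	       next->comm, next->perf_ctx_data->lbr_callstack_users);
}

int main(void)
{
	struct lbr_ctx a = { .lbr_callstack_users = 1 };
	struct lbr_ctx b = { .lbr_callstack_users = 2 };
	struct task prev = { "prev", &a };
	struct task next = { "next", &b };

	sched_in(&next);	/* context switch: prev -> next */
	sched_in(&prev);	/* and back: next -> prev */
	return 0;
}

In the old scheme the per-task LBR state sat in
perf_event_pmu_context::task_ctx_data, so each switch had to exchange
the prev/next pointers (and their lbr_callstack_users counts); with the
state kept in task_struct that exchange, and hence the whole
swap_task_ctx() plumbing removed above, becomes dead code.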