On Mon, Aug 30, 2021 at 02:41:04PM -0700, Song Liu wrote:

> diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
> index ac6fd2dabf6a2..d28d0e12c112c 100644
> --- a/arch/x86/events/intel/core.c
> +++ b/arch/x86/events/intel/core.c
> @@ -2155,9 +2155,9 @@ static void __intel_pmu_disable_all(void)
>
>  static void intel_pmu_disable_all(void)
>  {
> +	intel_pmu_lbr_disable_all();
>  	__intel_pmu_disable_all();
>  	intel_pmu_pebs_disable_all();
> -	intel_pmu_lbr_disable_all();
>  }

Hurmph... I'm not sure about that; I'd rather you sprinkle a few
__always_inline to ensure no actual function is called while you
disable things in the correct order.

You now still have a hole vs PMI.

> +static int
> +intel_pmu_snapshot_branch_stack(struct perf_branch_snapshot *br_snapshot)
> +{
> +	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

Note that this requires that preemption be disabled; now look at the
call-sites in your next patch and spot the problem...

> +
> +	intel_pmu_disable_all();
> +	intel_pmu_lbr_read();
> +	memcpy(br_snapshot->entries, cpuc->lbr_entries,
> +	       sizeof(struct perf_branch_entry) * x86_pmu.lbr_nr);
> +	br_snapshot->nr = x86_pmu.lbr_nr;
> +	intel_pmu_enable_all(0);
> +	return 0;
> +}
> +
>  /*
>   * Workaround for:
>   *	Intel Errata AAK100 (model 26)
> @@ -6283,9 +6297,15 @@ __init int intel_pmu_init(void)
>  			x86_pmu.lbr_nr = 0;
>  	}
>
> -	if (x86_pmu.lbr_nr)
> +	if (x86_pmu.lbr_nr) {
>  		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
>
> +		/* only support branch_stack snapshot for perfmon >= v2 */
> +		if (x86_pmu.disable_all == intel_pmu_disable_all) {
> +			static_call_update(perf_snapshot_branch_stack,
> +					   intel_pmu_snapshot_branch_stack);
> +		}
> +	}
> +
>  	intel_pmu_check_extra_regs(x86_pmu.extra_regs);
>
>  	/* Support full width counters using alternative MSR range */
> diff --git a/kernel/events/core.c b/kernel/events/core.c
> index 011cc5069b7ba..22807864e913b 100644
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -13437,3 +13437,6 @@ struct cgroup_subsys perf_event_cgrp_subsys = {
>  	.threaded = true,
>  };
>  #endif /* CONFIG_CGROUP_PERF */
> +
> +DEFINE_STATIC_CALL_RET0(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);

I'll squint and accept 82 characters :-)
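
For the preemption point, something like the below at the call-site
would do. This is only a sketch (snapshot_branches() is a name I made
up; the static call is the one your patch defines), but it shows the
constraint: this_cpu_ptr(&cpu_hw_events) in the callee is only stable
while preemption is off, so the caller has to hold it off across the
whole snapshot.

#include <linux/preempt.h>
#include <linux/static_call.h>
#include <linux/perf_event.h>

/* Hypothetical call-site wrapper, not from the patch. */
static int snapshot_branches(struct perf_branch_snapshot *snap)
{
	int ret;

	preempt_disable();	/* keep this_cpu_ptr() in the callee valid */
	ret = static_call(perf_snapshot_branch_stack)(snap);
	preempt_enable();

	return ret;
}

Anything that can migrate you between reading cpuc and copying out the
LBR entries hands you another CPU's state, which is exactly the problem
with the call-sites in your next patch.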