From: Like Xu <likexu@xxxxxxxxxxx>

Track the global PMU MSRs in pmu_caps so that tests don't need to
manually differentiate between AMD and Intel. Although AMD and Intel
PMUs have the same semantics in terms of global control features
(including ctl and status), their MSR indexes are not the same.

Signed-off-by: Like Xu <likexu@xxxxxxxxxxx>
[sean: drop most getters/setters]
Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 lib/x86/pmu.c |  3 +++
 lib/x86/pmu.h |  9 +++++++++
 x86/pmu.c     | 31 +++++++++++++------------------
 3 files changed, 25 insertions(+), 18 deletions(-)

diff --git a/lib/x86/pmu.c b/lib/x86/pmu.c
index fb9a121e..0a69a3c6 100644
--- a/lib/x86/pmu.c
+++ b/lib/x86/pmu.c
@@ -24,6 +24,9 @@ void pmu_init(void)
 		pmu.perf_cap = rdmsr(MSR_IA32_PERF_CAPABILITIES);
 	pmu.msr_gp_counter_base = MSR_IA32_PERFCTR0;
 	pmu.msr_gp_event_select_base = MSR_P6_EVNTSEL0;
+	pmu.msr_global_status = MSR_CORE_PERF_GLOBAL_STATUS;
+	pmu.msr_global_ctl = MSR_CORE_PERF_GLOBAL_CTRL;
+	pmu.msr_global_status_clr = MSR_CORE_PERF_GLOBAL_OVF_CTRL;
 
 	pmu_reset_all_counters();
 }
diff --git a/lib/x86/pmu.h b/lib/x86/pmu.h
index cd81f557..cc643a7f 100644
--- a/lib/x86/pmu.h
+++ b/lib/x86/pmu.h
@@ -44,6 +44,10 @@ struct pmu_caps {
 	u32 msr_gp_counter_base;
 	u32 msr_gp_event_select_base;
 
+	u32 msr_global_status;
+	u32 msr_global_ctl;
+	u32 msr_global_status_clr;
+
 	u64 perf_cap;
 };
 
@@ -124,4 +128,9 @@ static inline void pmu_reset_all_counters(void)
 	pmu_reset_all_fixed_counters();
 }
 
+static inline void pmu_clear_global_status(void)
+{
+	wrmsr(pmu.msr_global_status_clr, rdmsr(pmu.msr_global_status));
+}
+
 #endif /* _X86_PMU_H_ */
diff --git a/x86/pmu.c b/x86/pmu.c
index eb83c407..3cca5b9c 100644
--- a/x86/pmu.c
+++ b/x86/pmu.c
@@ -103,15 +103,12 @@ static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
 static void global_enable(pmu_counter_t *cnt)
 {
 	cnt->idx = event_to_global_idx(cnt);
-
-	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) |
-			(1ull << cnt->idx));
+	wrmsr(pmu.msr_global_ctl, rdmsr(pmu.msr_global_ctl) | BIT_ULL(cnt->idx));
 }
 
 static void global_disable(pmu_counter_t *cnt)
 {
-	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) &
-			~(1ull << cnt->idx));
+	wrmsr(pmu.msr_global_ctl, rdmsr(pmu.msr_global_ctl) & ~BIT_ULL(cnt->idx));
 }
 
 static void __start_event(pmu_counter_t *evt, uint64_t count)
@@ -286,7 +283,7 @@ static void check_counter_overflow(void)
 	overflow_preset = measure_for_overflow(&cnt);
 
 	/* clear status before test */
-	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));
+	pmu_clear_global_status();
 
 	report_prefix_push("overflow");
 
@@ -313,10 +310,10 @@ static void check_counter_overflow(void)
 		idx = event_to_global_idx(&cnt);
 		__measure(&cnt, cnt.count);
 		report(cnt.count == 1, "cntr-%d", i);
-		status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
+		status = rdmsr(pmu.msr_global_status);
 		report(status & (1ull << idx), "status-%d", i);
-		wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, status);
-		status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
+		wrmsr(pmu.msr_global_status_clr, status);
+		status = rdmsr(pmu.msr_global_status);
 		report(!(status & (1ull << idx)), "status clear-%d", i);
 		report(check_irq() == (i % 2), "irq-%d", i);
 	}
@@ -421,8 +418,7 @@ static void check_running_counter_wrmsr(void)
 	report(evt.count < gp_events[1].min, "cntr");
 
 	/* clear status before overflow test */
-	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL,
-	      rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));
+	pmu_clear_global_status();
 
 	start_event(&evt);
 
@@ -434,8 +430,8 @@
 	loop();
 	stop_event(&evt);
-	status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
-	report(status & 1, "status");
+	status = rdmsr(pmu.msr_global_status);
+	report(status & 1, "status msr bit");
 
 	report_prefix_pop();
 }
@@ -455,8 +451,7 @@ static void check_emulated_instr(void)
 	};
 	report_prefix_push("emulated instruction");
 
-	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL,
-	      rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));
+	pmu_clear_global_status();
 
 	start_event(&brnch_cnt);
 	start_event(&instr_cnt);
@@ -490,7 +485,7 @@ static void check_emulated_instr(void)
 	    :
 	    : "eax", "ebx", "ecx", "edx");
 
-	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+	wrmsr(pmu.msr_global_ctl, 0);
 
 	stop_event(&brnch_cnt);
 	stop_event(&instr_cnt);
@@ -502,7 +497,7 @@ static void check_emulated_instr(void)
 	report(brnch_cnt.count - brnch_start >= EXPECTED_BRNCH,
 	       "branch count");
 	// Additionally check that those counters overflowed properly.
-	status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
+	status = rdmsr(pmu.msr_global_status);
 	report(status & 1, "branch counter overflow");
 	report(status & 2, "instruction counter overflow");
 
@@ -590,7 +585,7 @@ static void set_ref_cycle_expectations(void)
 	if (!pmu.nr_gp_counters || !pmu_gp_counter_is_available(2))
 		return;
 
-	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+	wrmsr(pmu.msr_global_ctl, 0);
 
 	t0 = fenced_rdtsc();
 	start_event(&cnt);
-- 
2.38.1.431.g37b22c650d-goog
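
P.S. For reference, a minimal sketch (not part of this patch) of the
AMD leg that could populate the new fields in pmu_init(), assuming the
PerfMonV2 global control MSRs defined in Linux's msr-index.h
(MSR_AMD64_PERF_CNTR_GLOBAL_*); the feature check below is a
placeholder, not this series' actual detection logic:

	/*
	 * Hypothetical AMD side of pmu_init(); the Intel MSRs assigned
	 * above remain the defaults.  cpu_has_amd_perfmon_v2() stands
	 * in for whatever CPUID-based check a follow-up patch defines.
	 */
	if (cpu_has_amd_perfmon_v2()) {
		pmu.msr_global_status = MSR_AMD64_PERF_CNTR_GLOBAL_STATUS;
		pmu.msr_global_ctl = MSR_AMD64_PERF_CNTR_GLOBAL_CTL;
		pmu.msr_global_status_clr = MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR;
	}

Once the indexes are stashed in pmu_caps, helpers like
pmu_clear_global_status() and the reworked tests in x86/pmu.c work
unchanged on either vendor, which is the point of routing all accesses
through pmu.msr_global_*.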