[kvm-unit-tests PATCH v5 23/27] x86/pmu: Add global helpers to cover Intel Arch PMU Version 1

From: Like Xu <likexu@xxxxxxxxxxx>

To test Intel arch PMU version 1, most of the basic framework and the
use cases that exercise arbitrary PMU counters require no changes; the
only restriction is that registers introduced in PMU version 2 must not
be accessed.

Adding a few guard checks seamlessly supports version 1, while also
opening the door for tests on standard AMD PMUs.
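
For context, the guard boils down to the architectural PMU version ID
reported in CPUID.0AH:EAX[7:0]: the global ctrl/status/ovf-ctrl MSRs
only exist from version 2 onward. A minimal, standalone sketch of that
check (the helper names below are illustrative only; the kvm-unit-tests
helpers instead reuse the already-parsed pmu.version) could look like:

#include <stdbool.h>
#include <cpuid.h>	/* __get_cpuid(), provided by GCC/clang */

/* The architectural PMU version ID is reported in CPUID.0AH:EAX[7:0]. */
static unsigned int arch_pmu_version(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx))
		return 0;
	return eax & 0xff;
}

/*
 * IA32_PERF_GLOBAL_STATUS (0x38e), IA32_PERF_GLOBAL_CTRL (0x38f) and
 * IA32_PERF_GLOBAL_OVF_CTRL (0x390) were introduced with version 2,
 * so any access to them must be gated on the reported version.
 */
static bool has_perf_global_msrs(void)
{
	return arch_pmu_version() > 1;
}

The new this_cpu_has_perf_global_status() helper below implements
exactly this "version > 1" check on top of the cached pmu.version.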

Signed-off-by: Like Xu <likexu@xxxxxxxxxxx>
Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 lib/x86/pmu.c |  9 ++++++---
 lib/x86/pmu.h |  5 +++++
 x86/pmu.c     | 47 +++++++++++++++++++++++++++++++----------------
 3 files changed, 42 insertions(+), 19 deletions(-)

diff --git a/lib/x86/pmu.c b/lib/x86/pmu.c
index 0a69a3c6..ea4859df 100644
--- a/lib/x86/pmu.c
+++ b/lib/x86/pmu.c
@@ -24,9 +24,12 @@ void pmu_init(void)
 		pmu.perf_cap = rdmsr(MSR_IA32_PERF_CAPABILITIES);
 	pmu.msr_gp_counter_base = MSR_IA32_PERFCTR0;
 	pmu.msr_gp_event_select_base = MSR_P6_EVNTSEL0;
-	pmu.msr_global_status = MSR_CORE_PERF_GLOBAL_STATUS;
-	pmu.msr_global_ctl = MSR_CORE_PERF_GLOBAL_CTRL;
-	pmu.msr_global_status_clr = MSR_CORE_PERF_GLOBAL_OVF_CTRL;
+
+	if (this_cpu_has_perf_global_status()) {
+		pmu.msr_global_status = MSR_CORE_PERF_GLOBAL_STATUS;
+		pmu.msr_global_ctl = MSR_CORE_PERF_GLOBAL_CTRL;
+		pmu.msr_global_status_clr = MSR_CORE_PERF_GLOBAL_OVF_CTRL;
+	}
 
 	pmu_reset_all_counters();
 }
diff --git a/lib/x86/pmu.h b/lib/x86/pmu.h
index 885b53f1..e2c0bdf4 100644
--- a/lib/x86/pmu.h
+++ b/lib/x86/pmu.h
@@ -89,6 +89,11 @@ static inline bool this_cpu_has_perf_global_ctrl(void)
 	return pmu.version > 1;
 }
 
+static inline bool this_cpu_has_perf_global_status(void)
+{
+	return pmu.version > 1;
+}
+
 static inline bool pmu_gp_counter_is_available(int i)
 {
 	return pmu.gp_counter_available & BIT(i);
diff --git a/x86/pmu.c b/x86/pmu.c
index 3cca5b9c..7f200658 100644
--- a/x86/pmu.c
+++ b/x86/pmu.c
@@ -102,12 +102,18 @@ static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
 
 static void global_enable(pmu_counter_t *cnt)
 {
+	if (!this_cpu_has_perf_global_ctrl())
+		return;
+
 	cnt->idx = event_to_global_idx(cnt);
 	wrmsr(pmu.msr_global_ctl, rdmsr(pmu.msr_global_ctl) | BIT_ULL(cnt->idx));
 }
 
 static void global_disable(pmu_counter_t *cnt)
 {
+	if (!this_cpu_has_perf_global_ctrl())
+		return;
+
 	wrmsr(pmu.msr_global_ctl, rdmsr(pmu.msr_global_ctl) & ~BIT_ULL(cnt->idx));
 }
 
@@ -283,7 +289,8 @@ static void check_counter_overflow(void)
 	overflow_preset = measure_for_overflow(&cnt);
 
 	/* clear status before test */
-	pmu_clear_global_status();
+	if (this_cpu_has_perf_global_status())
+		pmu_clear_global_status();
 
 	report_prefix_push("overflow");
 
@@ -310,6 +317,10 @@ static void check_counter_overflow(void)
 		idx = event_to_global_idx(&cnt);
 		__measure(&cnt, cnt.count);
 		report(cnt.count == 1, "cntr-%d", i);
+
+		if (!this_cpu_has_perf_global_status())
+			continue;
+
 		status = rdmsr(pmu.msr_global_status);
 		report(status & (1ull << idx), "status-%d", i);
 		wrmsr(pmu.msr_global_status_clr, status);
@@ -418,7 +429,8 @@ static void check_running_counter_wrmsr(void)
 	report(evt.count < gp_events[1].min, "cntr");
 
 	/* clear status before overflow test */
-	pmu_clear_global_status();
+	if (this_cpu_has_perf_global_status())
+		pmu_clear_global_status();
 
 	start_event(&evt);
 
@@ -430,8 +442,11 @@ static void check_running_counter_wrmsr(void)
 
 	loop();
 	stop_event(&evt);
-	status = rdmsr(pmu.msr_global_status);
-	report(status & 1, "status msr bit");
+
+	if (this_cpu_has_perf_global_status()) {
+		status = rdmsr(pmu.msr_global_status);
+		report(status & 1, "status msr bit");
+	}
 
 	report_prefix_pop();
 }
@@ -451,7 +466,8 @@ static void check_emulated_instr(void)
 	};
 	report_prefix_push("emulated instruction");
 
-	pmu_clear_global_status();
+	if (this_cpu_has_perf_global_status())
+		pmu_clear_global_status();
 
 	start_event(&brnch_cnt);
 	start_event(&instr_cnt);
@@ -485,7 +501,8 @@ static void check_emulated_instr(void)
 		:
 		: "eax", "ebx", "ecx", "edx");
 
-	wrmsr(pmu.msr_global_ctl, 0);
+	if (this_cpu_has_perf_global_ctrl())
+		wrmsr(pmu.msr_global_ctl, 0);
 
 	stop_event(&brnch_cnt);
 	stop_event(&instr_cnt);
@@ -496,10 +513,12 @@ static void check_emulated_instr(void)
 	       "instruction count");
 	report(brnch_cnt.count - brnch_start >= EXPECTED_BRNCH,
 	       "branch count");
-	// Additionally check that those counters overflowed properly.
-	status = rdmsr(pmu.msr_global_status);
-	report(status & 1, "branch counter overflow");
-	report(status & 2, "instruction counter overflow");
+	if (this_cpu_has_perf_global_status()) {
+		// Additionally check that those counters overflowed properly.
+		status = rdmsr(pmu.msr_global_status);
+		report(status & 1, "branch counter overflow");
+		report(status & 2, "instruction counter overflow");
+	}
 
 	report_prefix_pop();
 }
@@ -585,7 +604,8 @@ static void set_ref_cycle_expectations(void)
 	if (!pmu.nr_gp_counters || !pmu_gp_counter_is_available(2))
 		return;
 
-	wrmsr(pmu.msr_global_ctl, 0);
+	if (this_cpu_has_perf_global_ctrl())
+		wrmsr(pmu.msr_global_ctl, 0);
 
 	t0 = fenced_rdtsc();
 	start_event(&cnt);
@@ -636,11 +656,6 @@ int main(int ac, char **av)
 		return report_summary();
 	}
 
-	if (pmu.version == 1) {
-		report_skip("PMU version 1 is not supported.");
-		return report_summary();
-	}
-
 	set_ref_cycle_expectations();
 
 	printf("PMU version:         %d\n", pmu.version);
-- 
2.38.1.431.g37b22c650d-goog



