[kvm-unit-tests PATCH v4 18/24] x86/pmu: Add a set of helpers related to global registers

From: Like Xu <likexu@xxxxxxxxxxx>

AMD and Intel PMUs have the same semantics for their global control
features (the global enable control and the overflow status), but the
MSR indexes differ between the two vendors. Add a set of helpers that
go through new fields in struct pmu_caps so the global-register tests
can be fully reused on either vendor.

Signed-off-by: Like Xu <likexu@xxxxxxxxxxx>
---
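Note (illustration only, not part of this patch): on AMD, a later
change could point the same struct pmu_caps fields at AMD's
global-control MSRs so the helpers below work unchanged. The macro
names here are assumptions borrowed from Linux's msr-index.h and may
not match what kvm-unit-tests ends up using:

	/* Hypothetical AMD (PerfMonV2) hook, mirroring pmu_init(). */
	static void pmu_amd_global_init(void)
	{
		pmu.msr_global_status     = MSR_AMD64_PERF_CNTR_GLOBAL_STATUS;
		pmu.msr_global_ctl        = MSR_AMD64_PERF_CNTR_GLOBAL_CTL;
		pmu.msr_global_status_clr = MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR;
	}

With the fields set either way, callers such as global_enable() and
check_counter_overflow() never touch a vendor-specific MSR directly.
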
 lib/x86/pmu.c |  3 +++
 lib/x86/pmu.h | 33 +++++++++++++++++++++++++++++++++
 x86/pmu.c     | 31 +++++++++++++------------------
 3 files changed, 49 insertions(+), 18 deletions(-)

diff --git a/lib/x86/pmu.c b/lib/x86/pmu.c
index 0ce1691..3b6be37 100644
--- a/lib/x86/pmu.c
+++ b/lib/x86/pmu.c
@@ -10,5 +10,8 @@ void pmu_init(void)
         pmu.perf_cap = rdmsr(MSR_IA32_PERF_CAPABILITIES);
     pmu.msr_gp_counter_base = MSR_IA32_PERFCTR0;
     pmu.msr_gp_event_select_base = MSR_P6_EVNTSEL0;
+    pmu.msr_global_status = MSR_CORE_PERF_GLOBAL_STATUS;
+    pmu.msr_global_ctl = MSR_CORE_PERF_GLOBAL_CTRL;
+    pmu.msr_global_status_clr = MSR_CORE_PERF_GLOBAL_OVF_CTRL;
     reset_all_counters();
 }
\ No newline at end of file
diff --git a/lib/x86/pmu.h b/lib/x86/pmu.h
index 564b672..ef83934 100644
--- a/lib/x86/pmu.h
+++ b/lib/x86/pmu.h
@@ -37,6 +37,9 @@ struct pmu_caps {
     u64 perf_cap;
     u32 msr_gp_counter_base;
     u32 msr_gp_event_select_base;
+    u32 msr_global_status;
+    u32 msr_global_ctl;
+    u32 msr_global_status_clr;
 };
 
 extern struct cpuid cpuid_10;
@@ -194,4 +197,34 @@ static inline void reset_all_counters(void)
     reset_all_fixed_counters();
 }
 
+static inline void pmu_clear_global_status(void)
+{
+	wrmsr(pmu.msr_global_status_clr, rdmsr(pmu.msr_global_status));
+}
+
+static inline u64 pmu_get_global_status(void)
+{
+	return rdmsr(pmu.msr_global_status);
+}
+
+static inline u64 pmu_get_global_enable(void)
+{
+	return rdmsr(pmu.msr_global_ctl);
+}
+
+static inline void pmu_set_global_enable(u64 bitmask)
+{
+	wrmsr(pmu.msr_global_ctl, bitmask);
+}
+
+static inline void pmu_reset_global_enable(void)
+{
+	wrmsr(pmu.msr_global_ctl, 0);
+}
+
+static inline void pmu_ack_global_status(u64 value)
+{
+	wrmsr(pmu.msr_global_status_clr, value);
+}
+
 #endif /* _X86_PMU_H_ */
diff --git a/x86/pmu.c b/x86/pmu.c
index 7786b49..015591f 100644
--- a/x86/pmu.c
+++ b/x86/pmu.c
@@ -103,15 +103,12 @@ static struct pmu_event* get_counter_event(pmu_counter_t *cnt)
 static void global_enable(pmu_counter_t *cnt)
 {
 	cnt->idx = event_to_global_idx(cnt);
-
-	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) |
-			(1ull << cnt->idx));
+	pmu_set_global_enable(pmu_get_global_enable() | BIT_ULL(cnt->idx));
 }
 
 static void global_disable(pmu_counter_t *cnt)
 {
-	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) &
-			~(1ull << cnt->idx));
+	pmu_set_global_enable(pmu_get_global_enable() & ~BIT_ULL(cnt->idx));
 }
 
 static void __start_event(pmu_counter_t *evt, uint64_t count)
@@ -289,7 +286,7 @@ static void check_counter_overflow(void)
 	overflow_preset = measure_for_overflow(&cnt);
 
 	/* clear status before test */
-	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));
+	pmu_clear_global_status();
 
 	report_prefix_push("overflow");
 
@@ -316,10 +313,10 @@ static void check_counter_overflow(void)
 		idx = event_to_global_idx(&cnt);
 		__measure(&cnt, cnt.count);
 		report(cnt.count == 1, "cntr-%d", i);
-		status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
+		status = pmu_get_global_status();
 		report(status & (1ull << idx), "status-%d", i);
-		wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, status);
-		status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
+		pmu_ack_global_status(status);
+		status = pmu_get_global_status();
 		report(!(status & (1ull << idx)), "status clear-%d", i);
 		report(check_irq() == (i % 2), "irq-%d", i);
 	}
@@ -428,8 +425,7 @@ static void check_running_counter_wrmsr(void)
 	report(evt.count < gp_events[1].min, "cntr");
 
 	/* clear status before overflow test */
-	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL,
-	      rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));
+	pmu_clear_global_status();
 
 	start_event(&evt);
 
@@ -441,8 +437,8 @@ static void check_running_counter_wrmsr(void)
 
 	loop();
 	stop_event(&evt);
-	status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
-	report(status & 1, "status");
+	status = pmu_get_global_status();
+	report(status & 1, "status msr bit");
 
 	report_prefix_pop();
 }
@@ -462,8 +458,7 @@ static void check_emulated_instr(void)
 	};
 	report_prefix_push("emulated instruction");
 
-	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL,
-	      rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));
+	pmu_clear_global_status();
 
 	start_event(&brnch_cnt);
 	start_event(&instr_cnt);
@@ -497,7 +492,7 @@ static void check_emulated_instr(void)
 		:
 		: "eax", "ebx", "ecx", "edx");
 
-	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+	pmu_reset_global_enable();
 
 	stop_event(&brnch_cnt);
 	stop_event(&instr_cnt);
@@ -509,7 +504,7 @@ static void check_emulated_instr(void)
 	report(brnch_cnt.count - brnch_start >= EXPECTED_BRNCH,
 	       "branch count");
 	// Additionally check that those counters overflowed properly.
-	status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
+	status = pmu_get_global_status();
 	report(status & 1, "branch counter overflow");
 	report(status & 2, "instruction counter overflow");
 
@@ -598,7 +593,7 @@ static void set_ref_cycle_expectations(void)
 	if (!pmu_nr_gp_counters() || !pmu_gp_counter_is_available(2))
 		return;
 
-	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+	pmu_reset_global_enable();
 
 	t0 = fenced_rdtsc();
 	start_event(&cnt);
-- 
2.38.1



