[PATCH v3 02/11] KVM: selftests: Add pmu.h for PMU events and common masks

From: Jinrong Liang <cloudliang@xxxxxxxxxxx>

Add pmu.h to define the x86 PMU performance events and masks in one
common header. Centralizing these definitions makes them easier to
reference and minimizes the potential for errors in code that handles
these values.
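
For example, a test could then program a general-purpose counter along
these lines (an illustrative sketch only; wrmsr() and MSR_P6_EVNTSEL0
are assumed to be available from the existing selftests headers and are
not part of this patch):

	uint64_t eventsel;

	/* Count retired instructions in both user and kernel mode. */
	eventsel = intel_arch_events[INTEL_ARCH_INSTRUCTIONS_RETIRED] |
		   ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_OS |
		   ARCH_PERFMON_EVENTSEL_ENABLE;
	wrmsr(MSR_P6_EVNTSEL0, eventsel);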

Suggested-by: Sean Christopherson <seanjc@xxxxxxxxxx>
Signed-off-by: Jinrong Liang <cloudliang@xxxxxxxxxxx>
---
 .../selftests/kvm/include/x86_64/pmu.h        | 138 ++++++++++++++++++
 1 file changed, 138 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/include/x86_64/pmu.h

diff --git a/tools/testing/selftests/kvm/include/x86_64/pmu.h b/tools/testing/selftests/kvm/include/x86_64/pmu.h
new file mode 100644
index 000000000000..eb60b2065fac
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/x86_64/pmu.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * tools/testing/selftests/kvm/include/x86_64/pmu.h
+ *
+ * Copyright (C) 2023, Tencent, Inc.
+ */
+#ifndef SELFTEST_KVM_PMU_H
+#define SELFTEST_KVM_PMU_H
+
+#include "processor.h"
+
+#define GP_COUNTER_NR_OFS_BIT			8
+#define EVENT_LENGTH_OFS_BIT			24
+#define INTEL_PMC_IDX_FIXED			32
+
+#define AMD64_NR_COUNTERS			4
+#define AMD64_NR_COUNTERS_CORE			6
+
+#define PMU_CAP_FW_WRITES			BIT_ULL(13)
+#define RDPMC_FIXED_BASE			BIT_ULL(30)
+
+#define ARCH_PERFMON_EVENTSEL_EVENT		GENMASK_ULL(7, 0)
+#define ARCH_PERFMON_EVENTSEL_UMASK		GENMASK_ULL(15, 8)
+#define ARCH_PERFMON_EVENTSEL_USR		BIT_ULL(16)
+#define ARCH_PERFMON_EVENTSEL_OS		BIT_ULL(17)
+
+#define ARCH_PERFMON_EVENTSEL_EDGE		BIT_ULL(18)
+#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL	BIT_ULL(19)
+#define ARCH_PERFMON_EVENTSEL_INT		BIT_ULL(20)
+#define ARCH_PERFMON_EVENTSEL_ANY		BIT_ULL(21)
+#define ARCH_PERFMON_EVENTSEL_ENABLE		BIT_ULL(22)
+#define ARCH_PERFMON_EVENTSEL_INV		BIT_ULL(23)
+#define ARCH_PERFMON_EVENTSEL_CMASK		GENMASK_ULL(31, 24)
+
+#define PMU_VERSION_MASK			GENMASK_ULL(7, 0)
+#define EVENT_LENGTH_MASK			GENMASK_ULL(31, EVENT_LENGTH_OFS_BIT)
+#define GP_COUNTER_NR_MASK			GENMASK_ULL(15, GP_COUNTER_NR_OFS_BIT)
+#define FIXED_COUNTER_NR_MASK			GENMASK_ULL(4, 0)
+
+/* Definitions for Architectural Performance Events */
+#define ARCH_EVENT(select, umask) (((select) & 0xff) | ((umask) & 0xff) << 8)
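+/*
+ * Example: ARCH_EVENT(0x3c, 0x0) yields 0x003c, i.e. event select 0x3c
+ * (unhalted core cycles) in bits 7:0 with a zero unit mask in bits 15:8.
+ */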
+
+enum intel_pmu_architectural_events {
+	/*
+	 * The order of the architectural events matters as support for each
+	 * event is enumerated via CPUID using the index of the event.
+	 */
+	INTEL_ARCH_CPU_CYCLES,
+	INTEL_ARCH_INSTRUCTIONS_RETIRED,
+	INTEL_ARCH_REFERENCE_CYCLES,
+	INTEL_ARCH_LLC_REFERENCES,
+	INTEL_ARCH_LLC_MISSES,
+	INTEL_ARCH_BRANCHES_RETIRED,
+	INTEL_ARCH_BRANCHES_MISPREDICTED,
+
+	NR_REAL_INTEL_ARCH_EVENTS,
+
+	/*
+	 * Pseudo-architectural event used to implement IA32_FIXED_CTR2, a.k.a.
+	 * TSC reference cycles. The architectural reference cycles event may
+	 * or may not actually use the TSC as the reference, e.g. might use the
+	 * core crystal clock or the bus clock (yeah, "architectural").
+	 */
+	PSEUDO_ARCH_REFERENCE_CYCLES = NR_REAL_INTEL_ARCH_EVENTS,
+	NR_INTEL_ARCH_EVENTS,
+};
+
+static const uint64_t intel_arch_events[] = {
+	[INTEL_ARCH_CPU_CYCLES]			= ARCH_EVENT(0x3c, 0x0),
+	[INTEL_ARCH_INSTRUCTIONS_RETIRED]	= ARCH_EVENT(0xc0, 0x0),
+	[INTEL_ARCH_REFERENCE_CYCLES]		= ARCH_EVENT(0x3c, 0x1),
+	[INTEL_ARCH_LLC_REFERENCES]		= ARCH_EVENT(0x2e, 0x4f),
+	[INTEL_ARCH_LLC_MISSES]			= ARCH_EVENT(0x2e, 0x41),
+	[INTEL_ARCH_BRANCHES_RETIRED]		= ARCH_EVENT(0xc4, 0x0),
+	[INTEL_ARCH_BRANCHES_MISPREDICTED]	= ARCH_EVENT(0xc5, 0x0),
+	[PSEUDO_ARCH_REFERENCE_CYCLES]		= ARCH_EVENT(0xa4, 0x1),
+};
+
+/* Mapping from fixed PMC index to the corresponding intel_arch_events entry. */
+static const int fixed_pmc_events[] = {
+	[0] = INTEL_ARCH_INSTRUCTIONS_RETIRED,
+	[1] = INTEL_ARCH_CPU_CYCLES,
+	[2] = PSEUDO_ARCH_REFERENCE_CYCLES,
+};
+
+enum amd_pmu_k7_events {
+	AMD_ZEN_CORE_CYCLES,
+	AMD_ZEN_INSTRUCTIONS,
+	AMD_ZEN_BRANCHES,
+	AMD_ZEN_BRANCH_MISSES,
+};
+
+static const uint64_t amd_arch_events[] = {
+	[AMD_ZEN_CORE_CYCLES]			= ARCH_EVENT(0x76, 0x00),
+	[AMD_ZEN_INSTRUCTIONS]			= ARCH_EVENT(0xc0, 0x00),
+	[AMD_ZEN_BRANCHES]			= ARCH_EVENT(0xc2, 0x00),
+	[AMD_ZEN_BRANCH_MISSES]			= ARCH_EVENT(0xc3, 0x00),
+};
+
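+/*
+ * CPUID.0xA.EBX enumerates event *unavailability*: a set bit means the
+ * architectural event is not supported; only bits below the enumerated
+ * bit-vector length are valid.
+ */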
+static inline bool arch_event_is_supported(struct kvm_vcpu *vcpu,
+					   uint8_t arch_event)
+{
+	struct kvm_cpuid_entry2 *entry;
+
+	entry = vcpu_get_cpuid_entry(vcpu, 0xa);
+
+	return !(entry->ebx & BIT_ULL(arch_event)) &&
+		(kvm_cpuid_property(vcpu->cpuid,
+		 X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH) > arch_event);
+}
+
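+/*
+ * A fixed counter is supported if it is enumerated in the CPUID.0xA.ECX
+ * bitmask, or if its index is below the enumerated number of fixed
+ * counters.
+ */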
+static inline bool fixed_counter_is_supported(struct kvm_vcpu *vcpu,
+					      uint8_t fixed_counter_idx)
+{
+	struct kvm_cpuid_entry2 *entry;
+
+	entry = vcpu_get_cpuid_entry(vcpu, 0xa);
+
+	return (entry->ecx & BIT_ULL(fixed_counter_idx) ||
+		(kvm_cpuid_property(vcpu->cpuid, X86_PROPERTY_PMU_NR_FIXED_COUNTERS) >
+		 fixed_counter_idx));
+}
+
+#endif /* SELFTEST_KVM_PMU_H */
-- 
2.39.3



