From: Jinrong Liang <cloudliang@xxxxxxxxxxx>

Add a new pmu.h header under tools/testing/selftests/kvm/include/x86_64 to
better organize the PMU performance event constants and common masks,
improving the maintainability and readability of the KVM selftests code.

The new header defines the PMU performance events and masks relevant to
x86_64 so that tests can reference them directly, minimizing the potential
for errors in code that handles these values.

Signed-off-by: Jinrong Liang <cloudliang@xxxxxxxxxxx>
---
 .../selftests/kvm/include/x86_64/pmu.h        | 56 +++++++++++++++++++
 1 file changed, 56 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/include/x86_64/pmu.h

diff --git a/tools/testing/selftests/kvm/include/x86_64/pmu.h b/tools/testing/selftests/kvm/include/x86_64/pmu.h
new file mode 100644
index 000000000000..0e0111b11024
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/x86_64/pmu.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * tools/testing/selftests/kvm/include/x86_64/pmu.h
+ *
+ * Copyright (C) 2023, Tencent, Inc.
+ */
+#ifndef _PMU_H_
+#define _PMU_H_
+
+#include "processor.h"
+
+#define GP_CTR_NUM_OFS_BIT		8
+#define EVT_LEN_OFS_BIT			24
+#define INTEL_PMC_IDX_FIXED		32
+
+#define PMU_CAP_FW_WRITES		BIT_ULL(13)
+#define EVENTSEL_OS			BIT_ULL(17)
+#define EVENTSEL_ANY			BIT_ULL(21)
+#define EVENTSEL_EN			BIT_ULL(22)
+#define RDPMC_FIXED_BASE		BIT_ULL(30)
+
+#define PMU_VERSION_MASK		GENMASK_ULL(7, 0)
+#define EVENTS_MASK			GENMASK_ULL(7, 0)
+#define EVT_LEN_MASK			GENMASK_ULL(31, EVT_LEN_OFS_BIT)
+#define GP_CTR_NUM_MASK			GENMASK_ULL(15, GP_CTR_NUM_OFS_BIT)
+#define FIXED_CTR_NUM_MASK		GENMASK_ULL(4, 0)
+
+#define X86_INTEL_PMU_VERSION		kvm_cpu_property(X86_PROPERTY_PMU_VERSION)
+#define X86_INTEL_MAX_GP_CTR_NUM	kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS)
+#define X86_INTEL_MAX_FIXED_CTR_NUM	kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS)
+#define X86_INTEL_FIXED_CTRS_BITMASK	kvm_cpu_property(X86_PROPERTY_PMU_FIXED_CTRS_BITMASK)
+
+/* Definitions for Architectural Performance Events */
+#define ARCH_EVENT(select, umask)	(((select) & 0xff) | ((umask) & 0xff) << 8)
+
+/* Intel Pre-defined Architectural Performance Events */
+static const uint64_t arch_events[] = {
+	[0] = ARCH_EVENT(0x3c, 0x0),
+	[1] = ARCH_EVENT(0xc0, 0x0),
+	[2] = ARCH_EVENT(0x3c, 0x1),
+	[3] = ARCH_EVENT(0x2e, 0x4f),
+	[4] = ARCH_EVENT(0x2e, 0x41),
+	[5] = ARCH_EVENT(0xc4, 0x0),
+	[6] = ARCH_EVENT(0xc5, 0x0),
+	[7] = ARCH_EVENT(0xa4, 0x1),
+};
+
+/* Association of Fixed Counters with Architectural Performance Events */
+static int fixed_events[] = {1, 0, 7};
+
+static inline uint64_t evt_code_for_fixed_ctr(uint8_t idx)
+{
+	return arch_events[fixed_events[idx]];
+}
+
+#endif /* _PMU_H_ */
--
2.31.1
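
Illustrative note, not part of the patch: the arch_events[] indices follow
the order in which the architectural events are enumerated in CPUID.0AH:EBX
(core cycles, instructions retired, reference cycles, LLC references, LLC
misses, branches retired, branch mispredicts, topdown slots), and
fixed_events[] maps each fixed counter to the architectural event it counts,
so evt_code_for_fixed_ctr(i) returns the event code a test can use to
cross-check fixed counter i. A guest-side test could consume these
definitions roughly as sketched below; this is a hypothetical example that
assumes the usual selftests helpers wrmsr()/rdmsr() from processor.h and the
MSR_P6_EVNTSEL0, MSR_IA32_PERFCTR0 and MSR_CORE_PERF_GLOBAL_CTRL constants
the selftests already pull in from msr-index.h.

	#include "pmu.h"
	#include "processor.h"

	static uint64_t guest_count_instructions(void)
	{
		uint64_t count;

		/* arch_events[1] is the "Instructions Retired" arch event. */
		wrmsr(MSR_P6_EVNTSEL0,
		      EVENTSEL_OS | EVENTSEL_EN | arch_events[1]);

		/* Enable GP counter 0, run a little code, stop counting. */
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, BIT_ULL(0));
		__asm__ __volatile__("nop; nop; nop");
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);

		count = rdmsr(MSR_IA32_PERFCTR0);

		return count;
	}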