From: Like Xu <likexu@xxxxxxxxxxx>

Add test cases to check whether different architectural events are
available after they are marked as unavailable via CPUID. This covers
the vPMU event filtering logic based on Intel CPUID and complements the
pmu_event_filter test.

According to the Intel SDM, the number of architectural events is
reported through CPUID.0AH:EAX[31:24], and architectural event x is
supported if EBX[x]=0 && EAX[31:24]>x.

Co-developed-by: Jinrong Liang <cloudliang@xxxxxxxxxxx>
Signed-off-by: Jinrong Liang <cloudliang@xxxxxxxxxxx>
Signed-off-by: Like Xu <likexu@xxxxxxxxxxx>
---
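Note for readers: the availability rule quoted from the SDM above can be
read as a small predicate. The helper below is only an illustrative
sketch (its name and standalone form are not part of the patch, and it
assumes the usual <stdint.h>/<stdbool.h> types); the patch derives the
same check from the vCPU's CPUID entry in test_arch_events_cpuid():

	/* Illustrative only: arch event x is available iff EBX[x]=0 && EAX[31:24]>x. */
	static inline bool arch_event_is_available(uint32_t cpuid_0a_eax,
						   uint32_t cpuid_0a_ebx, uint8_t x)
	{
		uint8_t evt_vec_len = (cpuid_0a_eax >> 24) & 0xff;	/* EAX[31:24] */

		return !(cpuid_0a_ebx & (1u << x)) && x < evt_vec_len;
	}

The test forces both sides of this predicate by overriding EAX[31:24]
and EBX via vcpu_set_cpuid() and then asserting that the guest counter
only counts when the event is reported as available.
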
 tools/testing/selftests/kvm/Makefile          |   1 +
 .../kvm/x86_64/pmu_basic_functionality_test.c | 168 ++++++++++++++++++
 2 files changed, 169 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 18cadc669798..f636968709c4 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -78,6 +78,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
 TEST_GEN_PROGS_x86_64 += x86_64/monitor_mwait_test
 TEST_GEN_PROGS_x86_64 += x86_64/nested_exceptions_test
 TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
+TEST_GEN_PROGS_x86_64 += x86_64/pmu_basic_functionality_test
 TEST_GEN_PROGS_x86_64 += x86_64/pmu_event_filter_test
 TEST_GEN_PROGS_x86_64 += x86_64/set_boot_cpu_id
 TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
new file mode 100644
index 000000000000..1f100fd94d67
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test the consistency of the PMU's CPUID and its features
+ *
+ * Copyright (C) 2023, Tencent, Inc.
+ *
+ * Check that the VM's PMU behaviour is consistent with the
+ * VM CPUID definition.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <x86intrin.h>
+
+#include "pmu.h"
+
+/* Guest payload for any performance counter counting */
+#define NUM_BRANCHES 10
+
+static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
+						  void *guest_code)
+{
+	struct kvm_vm *vm;
+
+	vm = vm_create_with_one_vcpu(vcpu, guest_code);
+	vm_init_descriptor_tables(vm);
+	vcpu_init_descriptor_tables(*vcpu);
+
+	return vm;
+}
+
+static uint64_t run_vcpu(struct kvm_vcpu *vcpu, uint64_t *ucall_arg)
+{
+	struct ucall uc;
+
+	vcpu_run(vcpu);
+	switch (get_ucall(vcpu, &uc)) {
+	case UCALL_SYNC:
+		*ucall_arg = uc.args[1];
+		break;
+	case UCALL_DONE:
+		break;
+	default:
+		TEST_ASSERT(false, "Unexpected exit: %s",
+			    exit_reason_str(vcpu->run->exit_reason));
+	}
+	return uc.cmd;
+}
+
+static void intel_guest_run_arch_event(uint8_t version, uint8_t max_gp_num,
+				       uint32_t ctr_base_msr, uint64_t evt_code)
+{
+	uint32_t global_msr = MSR_CORE_PERF_GLOBAL_CTRL;
+	unsigned int i;
+
+	for (i = 0; i < max_gp_num; i++) {
+		wrmsr(ctr_base_msr + i, 0);
+		wrmsr(MSR_P6_EVNTSEL0 + i, EVENTSEL_OS | EVENTSEL_EN | evt_code);
+		if (version > 1)
+			wrmsr(global_msr, BIT_ULL(i));
+
+		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+
+		if (version > 1)
+			wrmsr(global_msr, 0);
+
+		GUEST_SYNC(_rdpmc(i));
+	}
+
+	GUEST_DONE();
+}
+
+static void test_arch_events_cpuid(struct kvm_vcpu *vcpu, uint8_t evt_vector,
+				   uint8_t unavl_mask, uint8_t idx)
+{
+	struct kvm_cpuid_entry2 *entry;
+	uint32_t ctr_msr = MSR_IA32_PERFCTR0;
+	bool is_supported;
+	uint64_t counter_val = 0;
+
+	entry = vcpu_get_cpuid_entry(vcpu, 0xa);
+	entry->eax = (entry->eax & ~EVT_LEN_MASK) |
+		(evt_vector << EVT_LEN_OFS_BIT);
+	entry->ebx = (entry->ebx & ~EVENTS_MASK) | unavl_mask;
+	vcpu_set_cpuid(vcpu);
+
+	if (vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
+		ctr_msr = MSR_IA32_PMC0;
+
+	/* Arch event x is supported if EBX[x]=0 && EAX[31:24]>x */
+	is_supported = !(entry->ebx & BIT_ULL(idx)) &&
+		(((entry->eax & EVT_LEN_MASK) >> EVT_LEN_OFS_BIT) > idx);
+
+	vcpu_args_set(vcpu, 4, X86_INTEL_PMU_VERSION, X86_INTEL_MAX_GP_CTR_NUM,
+		      ctr_msr, arch_events[idx]);
+
+	while (run_vcpu(vcpu, &counter_val) != UCALL_DONE)
+		TEST_ASSERT(is_supported == !!counter_val,
+			    "Unavailable arch event is counting.");
+}
+
+static void intel_check_arch_event_is_unavl(uint8_t idx)
+{
+	uint8_t eax_evt_vec, ebx_unavl_mask, i, j;
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+
+	/*
+	 * A brute force iteration of all combinations of values is likely to
+	 * exhaust the limit of the single-threaded thread fd nums, so it's
+	 * tested here by iterating through all valid values on a single bit.
+	 */
+	for (i = 0; i < ARRAY_SIZE(arch_events); i++) {
+		eax_evt_vec = BIT_ULL(i);
+		for (j = 0; j < ARRAY_SIZE(arch_events); j++) {
+			ebx_unavl_mask = BIT_ULL(j);
+			vm = pmu_vm_create_with_one_vcpu(&vcpu,
+							 intel_guest_run_arch_event);
+			test_arch_events_cpuid(vcpu, eax_evt_vec,
+					       ebx_unavl_mask, idx);
+
+			kvm_vm_free(vm);
+		}
+	}
+}
+
+static void intel_test_arch_events(void)
+{
+	uint8_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(arch_events); idx++) {
+		/*
+		 * Given the stability of performance event recurrence,
+		 * only these arch events are currently being tested:
+		 *
+		 * - Core cycle event (idx = 0)
+		 * - Instruction retired event (idx = 1)
+		 * - Reference cycles event (idx = 2)
+		 * - Branch instruction retired event (idx = 5)
+		 *
+		 * Note that reference cycles is one event that actually cannot
+		 * be successfully virtualized.
+		 */
+		if (idx > 2 && idx != 5)
+			continue;
+
+		intel_check_arch_event_is_unavl(idx);
+	}
+}
+
+static void intel_test_pmu_cpuid(void)
+{
+	intel_test_arch_events();
+}
+
+int main(int argc, char *argv[])
+{
+	TEST_REQUIRE(get_kvm_param_bool("enable_pmu"));
+
+	if (host_cpu_is_intel) {
+		TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION));
+		TEST_REQUIRE(X86_INTEL_PMU_VERSION > 0);
+		TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));
+
+		intel_test_pmu_cpuid();
+	}
+
+	return 0;
+}
-- 
2.31.1