Branch in main() depending on whether the CPU is Intel or AMD. The two
vendors are subject to vastly different requirements: the AMD PMU lacks
many of the properties the Intel PMU defines, including the entire
CPUID 0xa leaf where Intel advertises all of its PMU capabilities, and
AMD has no consistent notion of PMU versions the way Intel does.
Instead, every AMD PMU feature is a separate CPUID flag, and the
features are not the same as Intel's.

Set up a VM for testing core AMD counters and ensure the proper CPUID
features are set.

Signed-off-by: Colton Lewis <coltonlewis@xxxxxxxxxx>
---
 .../selftests/kvm/x86_64/pmu_counters_test.c  | 104 ++++++++++++++----
 1 file changed, 83 insertions(+), 21 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
index 0e305e43a93b..5b240585edc5 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
@@ -30,10 +30,21 @@
 
 #define NUM_INSNS_RETIRED	(NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS)
 
+/*
+ * Limit testing to MSRs that are actually defined by Intel (in the SDM). MSRs
+ * that aren't defined counter MSRs *probably* don't exist, but there's no
+ * guarantee that currently undefined MSR indices won't be used for something
+ * other than PMCs in the future.
+ */
+#define MAX_NR_GP_COUNTERS 8
+#define MAX_NR_FIXED_COUNTERS 3
+#define AMD_NR_CORE_COUNTERS 4
+#define AMD_NR_CORE_EXT_COUNTERS 6
+
 static uint8_t kvm_pmu_version;
 static bool kvm_has_perf_caps;
 
-static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
+static struct kvm_vm *intel_pmu_vm_create(struct kvm_vcpu **vcpu,
 						  void *guest_code,
 						  uint8_t pmu_version,
 						  uint64_t perf_capabilities)
@@ -303,7 +314,7 @@ static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities,
 	if (!pmu_version)
 		return;
 
-	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_arch_events,
+	vm = intel_pmu_vm_create(&vcpu, guest_test_arch_events,
 					 pmu_version, perf_capabilities);
 
 	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH,
@@ -316,15 +327,6 @@ static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities,
 	kvm_vm_free(vm);
 }
 
-/*
- * Limit testing to MSRs that are actually defined by Intel (in the SDM). MSRs
- * that aren't defined counter MSRs *probably* don't exist, but there's no
- * guarantee that currently undefined MSR indices won't be used for something
- * other than PMCs in the future.
- */
-#define MAX_NR_GP_COUNTERS 8
-#define MAX_NR_FIXED_COUNTERS 3
-
 #define GUEST_ASSERT_PMC_MSR_ACCESS(insn, msr, expect_gp, vector)	\
 __GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector,		\
 	       "Expected %s on " #insn "(0x%x), got vector %u",		\
@@ -463,7 +465,7 @@ static void test_gp_counters(uint8_t pmu_version, uint64_t perf_capabilities,
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 
-	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_gp_counters,
+	vm = intel_pmu_vm_create(&vcpu, guest_test_gp_counters,
 					 pmu_version, perf_capabilities);
 
 	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_NR_GP_COUNTERS,
@@ -530,7 +532,7 @@ static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities,
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 
-	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_fixed_counters,
+	vm = intel_pmu_vm_create(&vcpu, guest_test_fixed_counters,
 					 pmu_version, perf_capabilities);
 
 	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK,
@@ -627,18 +629,78 @@ static void test_intel_counters(void)
 	}
 }
 
-int main(int argc, char *argv[])
+static uint8_t nr_core_counters(void)
 {
-	TEST_REQUIRE(kvm_is_pmu_enabled());
+	uint8_t nr_counters = kvm_cpu_property(X86_PROPERTY_NUM_PERF_CTR_CORE);
+	bool core_ext = kvm_cpu_has(X86_FEATURE_PERF_CTR_EXT_CORE);
+
+	if (nr_counters != 0)
+		return nr_counters;
+
+	if (core_ext)
+		return AMD_NR_CORE_EXT_COUNTERS;
 
-	TEST_REQUIRE(host_cpu_is_intel);
-	TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION));
-	TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0);
+	return AMD_NR_CORE_COUNTERS;
 
-	kvm_pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
-	kvm_has_perf_caps = kvm_cpu_has(X86_FEATURE_PDCM);
+}
+
+static void guest_test_core_counters(void)
+{
+	GUEST_DONE();
+}
+
+static void test_core_counters(void)
+{
+	uint8_t nr_counters = nr_core_counters();
+	bool core_ext = kvm_cpu_has(X86_FEATURE_PERF_CTR_EXT_CORE);
+	bool perfmon_v2 = kvm_cpu_has(X86_FEATURE_PERFMON_V2);
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+
+	for (uint8_t ce = 0; ce <= core_ext; ce++) {
+		for (uint8_t pm = 0; pm <= perfmon_v2; pm++) {
+			for (uint8_t nc = 0; nc <= nr_counters; nc++) {
+				vm = vm_create_with_one_vcpu(&vcpu, guest_test_core_counters);
+
+				if (nc)
+					vcpu_set_cpuid_property(
+						vcpu, X86_PROPERTY_NUM_PERF_CTR_CORE, nc);
+				if (ce)
+					vcpu_set_cpuid_feature(
+						vcpu, X86_FEATURE_PERF_CTR_EXT_CORE);
+				if (pm)
+					vcpu_set_cpuid_feature(
+						vcpu, X86_FEATURE_PERFMON_V2);
+
+				pr_info("Testing core counters: CoreExt = %u, PerfMonV2 = %u, NumCounters = %u\n",
+					ce, pm, nc);
+				run_vcpu(vcpu);
+
+				kvm_vm_free(vm);
+			}
+		}
+	}
+}
+
+static void test_amd_counters(void)
+{
+	test_core_counters();
+}
 
-	test_intel_counters();
+int main(int argc, char *argv[])
+{
+	TEST_REQUIRE(kvm_is_pmu_enabled());
+
+	if (host_cpu_is_intel) {
+		TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION));
+		TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0);
+		kvm_pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
+		kvm_has_perf_caps = kvm_cpu_has(X86_FEATURE_PDCM);
+		test_intel_counters();
+	} else if (host_cpu_is_amd) {
+		/* AMD CPUs don't have the same properties to look at. */
+		test_amd_counters();
+	}
 
 	return 0;
 }
-- 
2.46.0.662.g92d0881bb0-goog
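
Aside for reviewers (not part of the patch): the fallback logic in
nr_core_counters() mirrors how bare-metal software discovers the AMD
core counter count from CPUID. Below is a minimal host-side sketch of
the same discovery using GCC's <cpuid.h>; host_nr_core_counters() is a
hypothetical stand-in for the selftest helper, and the leaf layout
(Fn8000_0022 EBX[3:0] = number of core counters when PerfMonV2 is
present, Fn8000_0001 ECX[23] = PerfCtrExtCore) follows the AMD APM.

/*
 * Illustrative only: probe the AMD core counter count directly from
 * CPUID, falling back the same way nr_core_counters() does above.
 * Build with gcc on an x86 host.
 */
#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>

#define AMD_NR_CORE_COUNTERS		4
#define AMD_NR_CORE_EXT_COUNTERS	6

static uint8_t host_nr_core_counters(void)
{
	uint32_t eax, ebx, ecx, edx;

	/* PerfMonV2 parts report the count directly in Fn8000_0022 EBX[3:0]. */
	if (__get_cpuid_max(0x80000000, NULL) >= 0x80000022) {
		__cpuid_count(0x80000022, 0, eax, ebx, ecx, edx);
		if (ebx & 0xf)
			return ebx & 0xf;
	}

	/* Fn8000_0001 ECX[23] (PerfCtrExtCore): six counters instead of four. */
	if (__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx) &&
	    (ecx & (1u << 23)))
		return AMD_NR_CORE_EXT_COUNTERS;

	/* Neither flag set: assume the four legacy core counters. */
	return AMD_NR_CORE_COUNTERS;
}

int main(void)
{
	printf("AMD core counters: %u\n", host_nr_core_counters());
	return 0;
}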