From: Jinrong Liang <cloudliang@xxxxxxxxxxx>

KVM user space may control the Intel guest PMU version number via
CPUID.0AH:EAX[07:00]. A test is added to check if a typical PMU register
that is not available at the current version number is leaking.

Co-developed-by: Like Xu <likexu@xxxxxxxxxxx>
Signed-off-by: Like Xu <likexu@xxxxxxxxxxx>
Signed-off-by: Jinrong Liang <cloudliang@xxxxxxxxxxx>
---
 .../selftests/kvm/x86_64/pmu_cpuid_test.c     | 57 +++++++++++++++++++
 1 file changed, 57 insertions(+)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
index 79f2e144c6c6..caf0d98079c7 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_cpuid_test.c
@@ -17,6 +17,7 @@
 
 #define NUM_BRANCHES 10
 #define EVENTSEL_OS BIT_ULL(17)
+#define EVENTSEL_ANY BIT_ULL(21)
 #define EVENTSEL_EN BIT_ULL(22)
 #define PMU_CAP_FW_WRITES BIT_ULL(13)
 #define EVENTS_MASK GENMASK_ULL(7, 0)
@@ -90,6 +91,14 @@ static uint32_t kvm_fixed_ctrs_bitmask(void)
 	return kvm_entry->ecx;
 }
 
+static uint32_t kvm_max_pmu_version(void)
+{
+	const struct kvm_cpuid_entry2 *kvm_entry;
+
+	kvm_entry = get_cpuid_entry(kvm_get_supported_cpuid(), 0xa, 0);
+	return kvm_entry->eax & PMU_VERSION_MASK;
+}
+
 static struct kvm_vcpu *new_vcpu(void *guest_code)
 {
 	struct kvm_vm *vm;
@@ -220,6 +229,25 @@ static void intel_guest_run_fixed_counters(uint64_t supported_bitmask,
 	GUEST_DONE();
 }
 
+static void intel_guest_check_pmu_version(uint8_t version)
+{
+	switch (version) {
+	case 0:
+		wrmsr(MSR_INTEL_ARCH_PMU_GPCTR, 0xffffull);
+	case 1:
+		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x1ull);
+	case 2:
+		/* AnyThread Bit is only supported in version 3 */
+		wrmsr(MSR_P6_EVNTSEL0, EVENTSEL_ANY);
+		break;
+	default:
+		/* KVM currently supports up to pmu version 2 */
+		GUEST_SYNC(GP_VECTOR);
+	}
+
+	GUEST_DONE();
+}
+
 static void test_arch_events_setup(struct kvm_vcpu *vcpu, uint8_t evt_vector,
 				   uint8_t unavl_mask, uint8_t idx)
 {
@@ -341,6 +369,18 @@ static void intel_test_fixed_counters(void)
 	}
 }
 
+static void test_pmu_version_setup(struct kvm_vcpu *vcpu, uint8_t version)
+{
+	struct kvm_cpuid_entry2 *entry;
+
+	entry = vcpu_get_cpuid_entry(vcpu, 0xa);
+	entry->eax = (entry->eax & ~PMU_VERSION_MASK) | version;
+	vcpu_set_cpuid(vcpu);
+
+	vcpu_args_set(vcpu, 1, version);
+	vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
+}
+
 static void intel_check_arch_event_is_unavl(uint8_t idx)
 {
 	const char *msg = "Unavailable arch event is counting.";
@@ -441,11 +481,28 @@ static void intel_test_arch_events(void)
 	}
 }
 
+static void intel_test_pmu_version(void)
+{
+	const char *msg = "Something beyond this PMU version is leaked.";
+	uint8_t version, unsupported_version = kvm_max_pmu_version() + 1;
+	struct kvm_vcpu *vcpu;
+
+	TEST_REQUIRE(kvm_gp_ctrs_num() > 2);
+
+	for (version = 0; version <= unsupported_version; version++) {
+		vcpu = new_vcpu(intel_guest_check_pmu_version);
+		test_pmu_version_setup(vcpu, version);
+		run_vcpu(vcpu, msg, first_uc_arg_equals, (void *)GP_VECTOR);
+		free_vcpu(vcpu);
+	}
+}
+
 static void intel_test_pmu_cpuid(void)
 {
 	intel_test_arch_events();
 	intel_test_counters_num();
 	intel_test_fixed_counters();
+	intel_test_pmu_version();
 }
 
 int main(int argc, char *argv[])
-- 
2.40.0