Clear RDPMC_EXITING in the vmcs when all counters on the host side are exposed to the guest VM. This improves performance for the passthrough PMU. However, when the guest does not get all counters, intercept RDPMC to prevent access to unexposed counters. Make the decision in vmx_vcpu_after_set_cpuid() when the guest enables the PMU and the passthrough PMU is enabled. Co-developed-by: Xiong Zhang <xiong.y.zhang@xxxxxxxxxxxxxxx> Signed-off-by: Xiong Zhang <xiong.y.zhang@xxxxxxxxxxxxxxx> Signed-off-by: Mingwei Zhang <mizhang@xxxxxxxxxx> Signed-off-by: Dapeng Mi <dapeng1.mi@xxxxxxxxxxxxxxx> Tested-by: Yongwei Ma <yongwei.ma@xxxxxxxxx> --- arch/x86/kvm/pmu.c | 16 ++++++++++++++++ arch/x86/kvm/pmu.h | 1 + arch/x86/kvm/vmx/vmx.c | 5 +++++ 3 files changed, 22 insertions(+) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index e656f72fdace..19104e16a986 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -96,6 +96,22 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops) #undef __KVM_X86_PMU_OP } +bool kvm_pmu_check_rdpmc_passthrough(struct kvm_vcpu *vcpu) +{ + struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); + + if (is_passthrough_pmu_enabled(vcpu) && + !enable_vmware_backdoor && + pmu->nr_arch_gp_counters == kvm_pmu_cap.num_counters_gp && + pmu->nr_arch_fixed_counters == kvm_pmu_cap.num_counters_fixed && + pmu->counter_bitmask[KVM_PMC_GP] == (((u64)1 << kvm_pmu_cap.bit_width_gp) - 1) && + pmu->counter_bitmask[KVM_PMC_FIXED] == (((u64)1 << kvm_pmu_cap.bit_width_fixed) - 1)) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(kvm_pmu_check_rdpmc_passthrough); + static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi) { struct kvm_pmu *pmu = pmc_to_pmu(pmc); diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index e041c8a23e2f..91941a0f6e47 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -290,6 +290,7 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu); void kvm_pmu_destroy(struct kvm_vcpu *vcpu); int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp); 
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel); +bool kvm_pmu_check_rdpmc_passthrough(struct kvm_vcpu *vcpu); bool is_vmware_backdoor_pmc(u32 pmc_idx); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 4d60a8cf2dd1..339742350b7a 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7911,6 +7911,11 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) vmx->msr_ia32_feature_control_valid_bits &= ~FEAT_CTL_SGX_LC_ENABLED; + if (kvm_pmu_check_rdpmc_passthrough(&vmx->vcpu)) + exec_controls_clearbit(vmx, CPU_BASED_RDPMC_EXITING); + else + exec_controls_setbit(vmx, CPU_BASED_RDPMC_EXITING); + /* Refresh #PF interception to account for MAXPHYADDR changes. */ vmx_update_exception_bitmap(vcpu); } -- 2.46.0.rc1.232.g9752f9e123-goog