Exclude the existing vLBR logic from the passthrough PMU, because the
passthrough PMU does not support the LBR-related MSRs. To avoid any side
effects, do not call the vLBR-related code from either vcpu_enter_guest()
or the PMI injection function.

Signed-off-by: Mingwei Zhang <mizhang@xxxxxxxxxx>
Signed-off-by: Dapeng Mi <dapeng1.mi@xxxxxxxxxxxxxxx>
Tested-by: Yongwei Ma <yongwei.ma@xxxxxxxxx>
---
 arch/x86/kvm/vmx/pmu_intel.c | 13 ++++++++-----
 arch/x86/kvm/vmx/vmx.c       |  2 +-
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index c61936266cbd..40c503cd263b 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -660,13 +660,16 @@ static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu)
 
 static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 {
-	u8 version = vcpu_to_pmu(vcpu)->version;
+	u8 version;
 
-	if (!intel_pmu_lbr_is_enabled(vcpu))
-		return;
+	if (!is_passthrough_pmu_enabled(vcpu)) {
+		if (!intel_pmu_lbr_is_enabled(vcpu))
+			return;
 
-	if (version > 1 && version < 4)
-		intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu);
+		version = vcpu_to_pmu(vcpu)->version;
+		if (version > 1 && version < 4)
+			intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu);
+	}
 }
 
 static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index b126de6569c8..a4b2b0b69a68 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7561,7 +7561,7 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
 	pt_guest_enter(vmx);
 
 	atomic_switch_perf_msrs(vmx);
-	if (intel_pmu_lbr_is_enabled(vcpu))
+	if (!is_passthrough_pmu_enabled(&vmx->vcpu) && intel_pmu_lbr_is_enabled(vcpu))
 		vmx_passthrough_lbr_msrs(vcpu);
 
 	if (enable_preemption_timer)
-- 
2.46.0.rc1.232.g9752f9e123-goog
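
[Editorial note for readers outside the series: both hunks gate the vLBR
paths on is_passthrough_pmu_enabled(), a helper introduced by an earlier
patch in this series. As a rough sketch of the shape such a predicate
would take (hypothetical; the field and module-parameter names below are
assumptions, not the series' actual definitions):

	/*
	 * Hypothetical sketch only -- not the series' real helper.
	 * Assumes a global enable_passthrough_pmu module parameter and a
	 * per-vCPU "passthrough" flag on struct kvm_pmu.
	 */
	static inline bool is_passthrough_pmu_enabled(struct kvm_vcpu *vcpu)
	{
		return enable_passthrough_pmu && vcpu_to_pmu(vcpu)->passthrough;
	}

Because both call sites check the predicate before touching any LBR
state, a guest running on the passthrough PMU never reaches the vLBR MSR
handling, which is the side effect the commit message is guarding
against.]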