If the host is using MSR_ARCH_LBR_CTL, save it before vm-entry and
reload it after vm-exit. This mirrors the existing MSR_IA32_DEBUGCTLMSR
handling: the host value is captured in vmx_vcpu_load() and written
back after vm-exit if it is non-zero.

Co-developed-by: Like Xu <like.xu@xxxxxxxxxxxxxxx>
Signed-off-by: Like Xu <like.xu@xxxxxxxxxxxxxxx>
Signed-off-by: Yang Weijiang <weijiang.yang@xxxxxxxxx>
---
 arch/x86/kvm/vmx/vmx.c | 23 +++++++++++++++++++++++
 arch/x86/kvm/vmx/vmx.h |  1 +
 2 files changed, 24 insertions(+)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 1a79ac1757af..0d714e76e2d5 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1397,6 +1397,26 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
 	decache_tsc_multiplier(vmx);
 }

+static inline unsigned long get_lbrctlmsr(void)
+{
+	unsigned long lbrctlmsr = 0;
+
+	if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
+		return 0;
+
+	rdmsrl(MSR_ARCH_LBR_CTL, lbrctlmsr);
+
+	return lbrctlmsr;
+}
+
+static inline void update_lbrctlmsr(unsigned long lbrctlmsr)
+{
+	if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
+		return;
+
+	wrmsrl(MSR_ARCH_LBR_CTL, lbrctlmsr);
+}
+
 /*
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
@@ -1410,6 +1430,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vmx_vcpu_pi_load(vcpu, cpu);

 	vmx->host_debugctlmsr = get_debugctlmsr();
+	vmx->host_lbrctlmsr = get_lbrctlmsr();
 }

 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
@@ -6797,6 +6818,8 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
 	if (vmx->host_debugctlmsr)
 		update_debugctlmsr(vmx->host_debugctlmsr);
+	if (vmx->host_lbrctlmsr)
+		update_lbrctlmsr(vmx->host_lbrctlmsr);

 #ifndef CONFIG_X86_64
 	/*
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index cc362e2d3eaa..69e243fea23d 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -328,6 +328,7 @@ struct vcpu_vmx {
 	u64 current_tsc_ratio;

 	unsigned long host_debugctlmsr;
+	unsigned long host_lbrctlmsr;

 	/*
 	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
--
2.21.1
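
For anyone reviewing the flow: the two helpers follow the same pattern as
the existing MSR_IA32_DEBUGCTLMSR handling, i.e. snapshot the host value in
vmx_vcpu_load() and write it back after vm-exit only when it is non-zero.
The standalone user-space sketch below illustrates that pattern; fake_msr,
rdmsrl_stub() and wrmsrl_stub() are made up for the illustration (the real
helpers use the kernel's rdmsrl()/wrmsrl() on the hardware MSR and run in
vmx_vcpu_load()/vmx_vcpu_run()):

#include <stdio.h>

#define MSR_ARCH_LBR_CTL	0x14ce	/* see arch/x86/include/asm/msr-index.h */

static unsigned long fake_msr;		/* stands in for the hardware MSR */

static void rdmsrl_stub(unsigned int msr, unsigned long *val)
{
	(void)msr;
	*val = fake_msr;
}

static void wrmsrl_stub(unsigned int msr, unsigned long val)
{
	(void)msr;
	fake_msr = val;
}

int main(void)
{
	unsigned long host_lbrctlmsr;

	fake_msr = 0x1;	/* pretend the host enabled Arch LBR (LBREn set) */

	/* vmx_vcpu_load(): snapshot the host value */
	rdmsrl_stub(MSR_ARCH_LBR_CTL, &host_lbrctlmsr);

	fake_msr = 0;	/* simulate the MSR coming back cleared after vm-exit */

	/* vmx_vcpu_run() tail: write back only a non-zero host value */
	if (host_lbrctlmsr)
		wrmsrl_stub(MSR_ARCH_LBR_CTL, host_lbrctlmsr);

	printf("restored MSR_ARCH_LBR_CTL = %#lx\n", fake_msr);
	return 0;
}

The non-zero check matches the debugctl restore just above it in
vmx_vcpu_run(): if the saved host value was zero there is nothing to write
back, so the MSR write is skipped on that hot path.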