This was tested with a kvm-unit-test that was developed for this purpose.

Signed-off-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
---
 arch/x86/kvm/svm/nested.c | 21 +++++++++++++++++++--
 arch/x86/kvm/svm/svm.c    |  8 ++++++++
 arch/x86/kvm/svm/svm.h    |  1 +
 3 files changed, 28 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index d026f89ee94e6..12e856bfce408 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -565,8 +565,19 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
 		vmcb_mark_dirty(svm->vmcb, VMCB_DR);
 	}
 
-	if (unlikely(svm->vmcb01.ptr->control.virt_ext & LBR_CTL_ENABLE_MASK))
+	if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
+
+		/* Copy LBR-related registers from vmcb12,
+		 * but make sure that we only pick the LBR enable bit from the guest.
+		 */
+
+		svm_copy_lbrs(vmcb12, svm->vmcb);
+		svm->vmcb->save.dbgctl &= LBR_CTL_ENABLE_MASK;
+		svm_update_lbrv(&svm->vcpu);
+
+	} else if (unlikely(svm->vmcb01.ptr->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
 		svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
+	}
 }
 
 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
@@ -626,6 +637,9 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
 
 	svm->vmcb->control.virt_ext            = svm->vmcb01.ptr->control.virt_ext &
 						 LBR_CTL_ENABLE_MASK;
+	if (svm->lbrv_enabled)
+		svm->vmcb->control.virt_ext  |=
+			(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
 
 	nested_svm_transition_tlb_flush(vcpu);
 
@@ -889,7 +903,10 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 
 	svm_switch_vmcb(svm, &svm->vmcb01);
 
-	if (unlikely(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
+	if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
+		svm_copy_lbrs(svm->nested.vmcb02.ptr, vmcb12);
+		svm_update_lbrv(vcpu);
+	} else if (unlikely(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
 		svm_copy_lbrs(svm->nested.vmcb02.ptr, svm->vmcb);
 		svm_update_lbrv(vcpu);
 	}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index aa6c04d15d2a7..8d87f84f93f30 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -888,6 +888,10 @@ void svm_update_lbrv(struct kvm_vcpu *vcpu)
 	bool current_enable_lbrv = !!(svm->vmcb->control.virt_ext &
 				      LBR_CTL_ENABLE_MASK);
 
+	if (unlikely(is_guest_mode(vcpu) && svm->lbrv_enabled))
+		if (unlikely(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))
+			enable_lbrv = true;
+
 	if (enable_lbrv == current_enable_lbrv)
 		return;
 
@@ -3998,6 +4002,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 			     guest_cpuid_has(vcpu, X86_FEATURE_NRIPS);
 
 	svm->tsc_scaling_enabled = tsc_scaling && guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR);
+	svm->lbrv_enabled = lbrv && guest_cpuid_has(vcpu, X86_FEATURE_LBRV);
 
 	svm_recalc_instruction_intercepts(vcpu, svm);
 
@@ -4748,6 +4753,9 @@ static __init void svm_set_cpu_caps(void)
 		if (tsc_scaling)
 			kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR);
 
+		if (lbrv)
+			kvm_cpu_cap_set(X86_FEATURE_LBRV);
+
 		/* Nested VM can receive #VMEXIT instead of triggering #GP */
 		kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
 	}
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 2d0487968cba4..86dc1f997e01d 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -232,6 +232,7 @@ struct vcpu_svm {
 	/* cached guest cpuid flags for faster access */
 	bool nrips_enabled	: 1;
 	bool tsc_scaling_enabled	: 1;
+	bool lbrv_enabled	: 1;
 
 	u32 ldr_reg;
 	u32 dfr_reg;
-- 
2.26.3
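
For readers skimming the diff, here is a minimal standalone sketch of the
decision this patch adds to svm_update_lbrv(): while L2 is running and L1's
CPUID exposes LBRV, the LBR_CTL_ENABLE bit from the cached vmcb12 controls
(svm->nested.ctl.virt_ext) forces LBR virtualization on, regardless of how
enable_lbrv was computed earlier in the function. This is not the kernel
code; the struct and function names below are hypothetical, simplified
stand-ins for the kernel types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit 0 of the VMCB virt_ext field enables LBR virtualization. */
#define LBR_CTL_ENABLE_MASK (1ULL << 0)

/* Simplified stand-in for the relevant vcpu_svm state (hypothetical). */
struct fake_vcpu {
	uint64_t nested_virt_ext; /* cached vmcb12 virt_ext (svm->nested.ctl) */
	bool     is_guest_mode;   /* currently running L2? */
	bool     lbrv_enabled;    /* L1's CPUID exposes X86_FEATURE_LBRV */
};

/*
 * Mirrors only the lines this patch adds to svm_update_lbrv():
 * enable_lbrv is computed earlier in that function (not shown in the
 * diff) and is treated here as an input.
 */
static bool apply_nested_lbr_override(bool enable_lbrv,
				      const struct fake_vcpu *v)
{
	if (v->is_guest_mode && v->lbrv_enabled &&
	    (v->nested_virt_ext & LBR_CTL_ENABLE_MASK))
		enable_lbrv = true;

	return enable_lbrv;
}

int main(void)
{
	struct fake_vcpu v = {
		.nested_virt_ext = LBR_CTL_ENABLE_MASK,
		.is_guest_mode = true,
		.lbrv_enabled = true,
	};

	/* L2 enabled LBR virtualization even though L1's vmcb did not. */
	printf("LBR virtualization wanted: %s\n",
	       apply_nested_lbr_override(false, &v) ? "yes" : "no");
	return 0;
}

The same combined condition appears on both transitions: at vmentry
(nested_vmcb02_prepare_save / nested_vmcb02_prepare_control) and at vmexit
(nested_svm_vmexit), so vmcb02's virt_ext always reflects both L1's and
L2's enable bits.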