This is needed so that FILL_RETURN_BUFFER has access to the
percpu area via the GS segment base.

Cc: stable@xxxxxxxxxxxxxxx
Fixes: f14eec0a3203 ("KVM: SVM: move more vmentry code to assembly")
Reported-by: Nathan Chancellor <nathan@xxxxxxxxxx>
Analyzed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
 arch/x86/kvm/svm/svm.c     |  3 +--
 arch/x86/kvm/svm/svm.h     |  2 +-
 arch/x86/kvm/svm/svm_ops.h |  5 -----
 arch/x86/kvm/svm/vmenter.S | 13 +++++++++++++
 4 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0ba4feb19cb6..e15f6ea9e5cc 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3922,8 +3922,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 	} else {
 		struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
 
-		__svm_vcpu_run(svm);
-		vmload(__sme_page_pa(sd->save_area));
+		__svm_vcpu_run(svm, __sme_page_pa(sd->save_area));
 	}
 
 	guest_state_exit_irqoff();
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 7ff1879e73c5..932f26be5675 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -684,6 +684,6 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm);
 /* vmenter.S */
 
 void __svm_sev_es_vcpu_run(struct vcpu_svm *svm);
-void __svm_vcpu_run(struct vcpu_svm *svm);
+void __svm_vcpu_run(struct vcpu_svm *svm, unsigned long hsave_pa);
 
 #endif
diff --git a/arch/x86/kvm/svm/svm_ops.h b/arch/x86/kvm/svm/svm_ops.h
index 9430d6437c9f..36c8af87a707 100644
--- a/arch/x86/kvm/svm/svm_ops.h
+++ b/arch/x86/kvm/svm/svm_ops.h
@@ -61,9 +61,4 @@ static __always_inline void vmsave(unsigned long pa)
 	svm_asm1(vmsave, "a" (pa), "memory");
 }
 
-static __always_inline void vmload(unsigned long pa)
-{
-	svm_asm1(vmload, "a" (pa), "memory");
-}
-
 #endif /* __KVM_X86_SVM_OPS_H */
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 5bc2ed7d79c0..0a4272faf80f 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -35,6 +35,7 @@
 /**
  * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
  * @svm:	struct vcpu_svm *
+ * @hsave_pa:	unsigned long
  */
 SYM_FUNC_START(__svm_vcpu_run)
 	push %_ASM_BP
@@ -49,6 +50,9 @@ SYM_FUNC_START(__svm_vcpu_run)
 #endif
 	push %_ASM_BX
 
+	/* @hsave_pa is needed last after vmexit, save it first. */
+	push %_ASM_ARG2
+
 	/* Save @svm. */
 	push %_ASM_ARG1
 
@@ -124,6 +128,11 @@ SYM_FUNC_START(__svm_vcpu_run)
 5:	vmsave %_ASM_AX
 6:
 
+	/* Pop @hsave_pa and restore GSBASE, allowing access to percpu data. */
+	pop %_ASM_AX
+7:	vmload %_ASM_AX
+8:
+
 #ifdef CONFIG_RETPOLINE
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
 	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
@@ -187,10 +196,14 @@ SYM_FUNC_START(__svm_vcpu_run)
 50:	cmpb $0, kvm_rebooting
 	jne 6b
 	ud2
+70:	cmpb $0, kvm_rebooting
+	jne 8b
+	ud2
 
 	_ASM_EXTABLE(1b, 10b)
 	_ASM_EXTABLE(3b, 30b)
 	_ASM_EXTABLE(5b, 50b)
+	_ASM_EXTABLE(7b, 70b)
 
 SYM_FUNC_END(__svm_vcpu_run)
 
-- 
2.31.1
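
As background for the commit message (an illustration, not part of the
patch): on x86-64 the kernel reaches per-CPU data through the GS segment
base, and the hardware does not restore the host's GSBASE at #VMEXIT;
the vmload of @hsave_pa added above is what brings it back. A minimal
sketch of the kind of access that must therefore wait until after that
vmload — example_var and read_example() are hypothetical names invented
for the example, while DEFINE_PER_CPU() and this_cpu_read() are the
real kernel accessors:

	#include <linux/percpu.h>

	/* Hypothetical per-CPU variable, for illustration only. */
	static DEFINE_PER_CPU(unsigned long, example_var);

	static unsigned long read_example(void)
	{
		/*
		 * this_cpu_read() compiles to a GS-relative load,
		 * roughly "mov %gs:example_var, %rax".  Between
		 * #VMEXIT and the vmload of the host save area,
		 * GSBASE still holds the guest's value, so a load
		 * like this would touch a guest-controlled address.
		 * That is why the vmload now sits in the assembly
		 * before FILL_RETURN_BUFFER, rather than in the C
		 * caller, which only runs after the RSB is stuffed.
		 */
		return this_cpu_read(example_var);
	}

This ordering constraint is also why @hsave_pa is pushed first and
popped last in __svm_vcpu_run: it must survive on the stack across the
guest run, which clobbers every GPR, until the very point after vmexit
where it is consumed by vmload.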