From: Paolo Bonzini <pbonzini@xxxxxxxxxx>

commit f6d58266d731fd7e63163790aad21e0dbb1d5264 upstream.

Continue moving accesses to struct vcpu_svm to vmenter.S.  Reducing the
number of arguments limits the chance of mistakes due to different
registers used for argument passing in 32- and 64-bit ABIs; pushing the
VMCB argument and almost immediately popping it into a different
register looks pretty weird.

32-bit ABI is not a concern for __svm_sev_es_vcpu_run() which is 64-bit
only; however, it will soon need @svm to save/restore SPEC_CTRL so stay
consistent with __svm_vcpu_run() and let them share the same prototype.

No functional change intended.

Cc: stable@xxxxxxxxxxxxxxx
Fixes: a149180fbcf3 ("x86: Add magic AMD return-thunk")
Reviewed-by: Sean Christopherson <seanjc@xxxxxxxxxx>
Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/x86/kvm/kvm-asm-offsets.c |    2 ++
 arch/x86/kvm/svm/svm.c         |    5 ++---
 arch/x86/kvm/svm/svm.h         |    4 ++--
 arch/x86/kvm/svm/vmenter.S     |   20 ++++++++++----------
 4 files changed, 16 insertions(+), 15 deletions(-)

--- a/arch/x86/kvm/kvm-asm-offsets.c
+++ b/arch/x86/kvm/kvm-asm-offsets.c
@@ -15,6 +15,8 @@ static void __used common(void)
 	if (IS_ENABLED(CONFIG_KVM_AMD)) {
 		BLANK();
 		OFFSET(SVM_vcpu_arch_regs, vcpu_svm, vcpu.arch.regs);
+		OFFSET(SVM_current_vmcb, vcpu_svm, current_vmcb);
+		OFFSET(KVM_VMCB_pa, kvm_vmcb_info, pa);
 	}
 
 	if (IS_ENABLED(CONFIG_KVM_INTEL)) {
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3915,12 +3915,11 @@ static fastpath_t svm_exit_handlers_fast
 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	unsigned long vmcb_pa = svm->current_vmcb->pa;
 
 	guest_state_enter_irqoff();
 
 	if (sev_es_guest(vcpu->kvm)) {
-		__svm_sev_es_vcpu_run(vmcb_pa);
+		__svm_sev_es_vcpu_run(svm);
 	} else {
 		struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
 
@@ -3931,7 +3930,7 @@ static noinstr void svm_vcpu_enter_exit(
 		 * vmcb02 when switching vmcbs for nested virtualization.
 		 */
 		vmload(svm->vmcb01.pa);
-		__svm_vcpu_run(vmcb_pa, svm);
+		__svm_vcpu_run(svm);
 		vmsave(svm->vmcb01.pa);
 
 		vmload(__sme_page_pa(sd->save_area));
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -683,7 +683,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *
 
 /* vmenter.S */
 
-void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
-void __svm_vcpu_run(unsigned long vmcb_pa, struct vcpu_svm *svm);
+void __svm_sev_es_vcpu_run(struct vcpu_svm *svm);
+void __svm_vcpu_run(struct vcpu_svm *svm);
 
 #endif
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -32,7 +32,6 @@
 
 /**
  * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
- * @vmcb_pa:	unsigned long
  * @svm:	struct vcpu_svm *
  */
 SYM_FUNC_START(__svm_vcpu_run)
@@ -49,16 +48,16 @@ SYM_FUNC_START(__svm_vcpu_run)
 	push %_ASM_BX
 
 	/* Save @svm. */
-	push %_ASM_ARG2
-
-	/* Save @vmcb. */
 	push %_ASM_ARG1
 
+.ifnc _ASM_ARG1, _ASM_DI
 	/* Move @svm to RDI. */
-	mov %_ASM_ARG2, %_ASM_DI
+	mov %_ASM_ARG1, %_ASM_DI
+.endif
 
-	/* "POP" @vmcb to RAX. */
-	pop %_ASM_AX
+	/* Get svm->current_vmcb->pa into RAX. */
+	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
+	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
 
 	/* Load guest registers. */
 	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
@@ -170,7 +169,7 @@ SYM_FUNC_END(__svm_vcpu_run)
 
 /**
  * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
- * @vmcb_pa:	unsigned long
+ * @svm:	struct vcpu_svm *
  */
 SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	push %_ASM_BP
@@ -185,8 +184,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 #endif
 	push %_ASM_BX
 
-	/* Move @vmcb to RAX. */
-	mov %_ASM_ARG1, %_ASM_AX
+	/* Get svm->current_vmcb->pa into RAX. */
+	mov SVM_current_vmcb(%_ASM_ARG1), %_ASM_AX
+	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
 
 	/* Enter guest mode */
 	sti
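For anyone tracing the new assembly: the two MOVs that replace the old
push/pop pair are just a pointer chase through struct vcpu_svm, using
offsets (SVM_current_vmcb, KVM_VMCB_pa) that kvm-asm-offsets.c emits at
build time.  A minimal C sketch of the equivalent logic follows; the
struct definitions here are simplified stand-ins for illustration, not
the kernel's real ones from arch/x86/kvm/svm/svm.h:

	#include <stdint.h>

	/* Simplified stand-ins for kvm_vmcb_info and vcpu_svm. */
	struct kvm_vmcb_info {
		uint64_t pa;		/* physical address of the VMCB */
	};

	struct vcpu_svm {
		struct kvm_vmcb_info *current_vmcb;
	};

	/*
	 * C equivalent of:
	 *	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	 *	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
	 */
	static uint64_t current_vmcb_pa(struct vcpu_svm *svm)
	{
		return svm->current_vmcb->pa;
	}

Doing this dereference in vmenter.S itself is what lets both entry
routines take a single struct vcpu_svm * argument instead of a
precomputed vmcb_pa.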