On Fri, Aug 04, 2023 at 12:20:05PM +0200, Paolo Bonzini wrote:
> It's not clobbered in a part that will cause unwinding; we can further
> restrict the part to a handful of instructions (and add a mov %rsp, %rbp
> at the top, see untested patch after signature).
> 
> I think the chance of this failure is similar or lower to the chance of
> a memory failure that hits the exception handler code itself.

Yes, that's very helpful; below is your patch with a few extra hints and
a comment. This seems to cure things.

Specifically, your change is needed to put UNWIND_HINT_RESTORE before we
CALL things (like entry_ibpb); otherwise objtool gets upset that we CALL
without having a valid frame pointer.

Josh, does this look ok to you?

---
 arch/x86/kvm/Makefile      |  4 ----
 arch/x86/kvm/svm/vmenter.S | 38 ++++++++++++++++++++++++++++++++------
 2 files changed, 32 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 80e3fe184d17..0c5c2f090e93 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -3,10 +3,6 @@
 ccflags-y += -I $(srctree)/arch/x86/kvm
 ccflags-$(CONFIG_KVM_WERROR) += -Werror
 
-ifeq ($(CONFIG_FRAME_POINTER),y)
-OBJECT_FILES_NON_STANDARD_vmenter.o := y
-endif
-
 include $(srctree)/virt/kvm/Makefile.kvm
 
 kvm-y			+= x86.o emulate.o i8259.o irq.o lapic.o \
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 8e8295e774f0..99b9be9a56c3 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -99,6 +99,8 @@
  */
 SYM_FUNC_START(__svm_vcpu_run)
 	push %_ASM_BP
+	mov %_ASM_SP, %_ASM_BP
+
 #ifdef CONFIG_X86_64
 	push %r15
 	push %r14
@@ -121,7 +123,18 @@ SYM_FUNC_START(__svm_vcpu_run)
 	/* Needed to restore access to percpu variables. */
 	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)
 
-	/* Finally save @svm. */
+	/*
+	 * Finally save frame pointer and @svm.
+	 *
+	 * Clobbering BP here is mostly ok since GIF will block NMIs and,
+	 * with the exception of #MC and the kvm_rebooting _ASM_EXTABLE()s
+	 * below, nothing untoward will happen until BP is restored.
+	 *
+	 * The kvm_rebooting exceptions should not want to unwind the stack,
+	 * and while #MC might want to unwind the stack, it is ultimately fatal.
+	 */
+	UNWIND_HINT_SAVE
+	push %_ASM_BP
 	push %_ASM_ARG1
 
 .ifnc _ASM_ARG1, _ASM_DI
@@ -153,7 +166,6 @@ SYM_FUNC_START(__svm_vcpu_run)
 	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
 	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
 	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
-	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
 	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
 #ifdef CONFIG_X86_64
 	mov VCPU_R8 (%_ASM_DI), %r8
@@ -165,6 +177,7 @@ SYM_FUNC_START(__svm_vcpu_run)
 	mov VCPU_R14(%_ASM_DI), %r14
 	mov VCPU_R15(%_ASM_DI), %r15
 #endif
+	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
 	mov VCPU_RDI(%_ASM_DI), %_ASM_DI
 
 	/* Enter guest mode */
@@ -177,11 +190,16 @@ SYM_FUNC_START(__svm_vcpu_run)
 	/* Pop @svm to RAX while it's the only available register. */
 	pop %_ASM_AX
 
-	/* Save all guest registers. */
+	/*
+	 * Save all guest registers. Pop the frame pointer as soon as possible
+	 * to enable unwinding.
+	 */
+	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
+	pop %_ASM_BP
+	UNWIND_HINT_RESTORE
 	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
 	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
 	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
-	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
 	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
 	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
 #ifdef CONFIG_X86_64
@@ -297,6 +315,7 @@ SYM_FUNC_END(__svm_vcpu_run)
  */
 SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	push %_ASM_BP
+	mov %_ASM_SP, %_ASM_BP
 #ifdef CONFIG_X86_64
 	push %r15
 	push %r14
@@ -316,7 +335,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
 	push %_ASM_ARG2
 
-	/* Save @svm. */
+	/* Save frame pointer and @svm. */
+	UNWIND_HINT_SAVE
+	push %_ASM_BP
 	push %_ASM_ARG1
 
 .ifnc _ASM_ARG1, _ASM_DI
@@ -341,8 +362,13 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 
 2:	cli
 
-	/* Pop @svm to RDI, guest registers have been saved already. */
+	/*
+	 * Guest registers have been saved already.
+	 * Pop @svm to RDI and restore the frame pointer to allow unwinding.
+	 */
 	pop %_ASM_DI
+	pop %_ASM_BP
+	UNWIND_HINT_RESTORE
 
 #ifdef CONFIG_RETPOLINE
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
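
For reference, the SAVE/RESTORE bracketing used above is just the generic
objtool pattern: snapshot the unwind state while the frame pointer is still
valid, clobber/reuse BP inside the bracketed region, and restore the state
before any CALL so objtool (and the unwinder) see a valid frame again. A
minimal standalone sketch of only that pattern, not part of the patch and
with made-up names (example_func, example_callee):

/* Sketch only; assumes these headers provide the macros used below. */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/unwind_hints.h>

SYM_FUNC_START(example_func)
	push %_ASM_BP
	mov %_ASM_SP, %_ASM_BP		/* establish a frame for the unwinder */

	UNWIND_HINT_SAVE		/* remember the current (valid) unwind state */
	push %_ASM_BP
	/* ... BP may be clobbered/reused here; no unwinding is expected ... */
	pop %_ASM_BP
	UNWIND_HINT_RESTORE		/* unwind state is valid again ... */

	call example_callee		/* ... so objtool accepts the CALL */

	pop %_ASM_BP
	RET
SYM_FUNC_END(example_func)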