On 8/3/23 21:07, Peter Zijlstra wrote:
The only weird thing that can happen is ud2 instructions that are executed
in case the vmload/vmrun/vmsave instructions causes a #GP, from the
exception handler.
This code is run with GIF disabled, so NMIs are out of the picture, right?
Yep.
Does GIF block #MC ?
No, #MC is an exception, not an interrupt.
So if frame pointer unwinding can be used in the absence of ORC, Nikunj's
patch should not break anything.
But framepointer unwinds rely on BP, and that is clobbered per the
objtool complaint.
It's not clobbered in a part that will cause unwinding; we can further
restrict the part to a handful of instructions (and add a mov %rsp, %rbp
at the top, see untested patch after signature).
I think the chance of this failure is similar to, or lower than, the chance of
a memory failure that hits the exception handler code itself.
Paolo
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 8e8295e774f0..58fab5e0f7ae 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -99,6 +99,8 @@
*/
SYM_FUNC_START(__svm_vcpu_run)
push %_ASM_BP
+ mov %_ASM_SP, %_ASM_BP
+
#ifdef CONFIG_X86_64
push %r15
push %r14
@@ -121,7 +123,8 @@ SYM_FUNC_START(__svm_vcpu_run)
/* Needed to restore access to percpu variables. */
__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)
- /* Finally save @svm. */
+ /* Finally save frame pointer and @svm. */
+ push %_ASM_BP
push %_ASM_ARG1
.ifnc _ASM_ARG1, _ASM_DI
@@ -153,7 +156,6 @@ SYM_FUNC_START(__svm_vcpu_run)
mov VCPU_RCX(%_ASM_DI), %_ASM_CX
mov VCPU_RDX(%_ASM_DI), %_ASM_DX
mov VCPU_RBX(%_ASM_DI), %_ASM_BX
- mov VCPU_RBP(%_ASM_DI), %_ASM_BP
mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
mov VCPU_R8 (%_ASM_DI), %r8
@@ -165,6 +167,7 @@ SYM_FUNC_START(__svm_vcpu_run)
mov VCPU_R14(%_ASM_DI), %r14
mov VCPU_R15(%_ASM_DI), %r15
#endif
+ mov VCPU_RBP(%_ASM_DI), %_ASM_BP
mov VCPU_RDI(%_ASM_DI), %_ASM_DI
/* Enter guest mode */
@@ -177,11 +180,15 @@ SYM_FUNC_START(__svm_vcpu_run)
/* Pop @svm to RAX while it's the only available register. */
pop %_ASM_AX
- /* Save all guest registers. */
+ /*
+ * Save all guest registers. Pop the frame pointer as soon as possible
+ * to enable unwinding.
+ */
+ mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
+ pop %_ASM_BP
mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
- mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
@@ -297,6 +304,7 @@ SYM_FUNC_END(__svm_vcpu_run)
*/
SYM_FUNC_START(__svm_sev_es_vcpu_run)
push %_ASM_BP
+ mov %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
push %r15
push %r14
@@ -316,7 +324,8 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
push %_ASM_ARG2
- /* Save @svm. */
+ /* Save frame pointer and @svm. */
+ push %_ASM_BP
push %_ASM_ARG1
.ifnc _ASM_ARG1, _ASM_DI
@@ -341,8 +350,12 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
2: cli
- /* Pop @svm to RDI, guest registers have been saved already. */
+ /*
+ * Guest registers have been saved already.
+ * Pop @svm to RDI and restore the frame pointer to allow unwinding.
+ */
pop %_ASM_DI
+ pop %_ASM_BP
#ifdef CONFIG_RETPOLINE
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */