Re: [PATCH v2 7/8] KVM: SVM: move MSR_IA32_SPEC_CTRL save/restore to assembly

On Tue, Nov 08, 2022, Paolo Bonzini wrote:
> diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
> index 0a4272faf80f..a02eef724379 100644
> --- a/arch/x86/kvm/svm/vmenter.S
> +++ b/arch/x86/kvm/svm/vmenter.S
> @@ -32,10 +32,70 @@
>  
>  .section .noinstr.text, "ax"
>  
> +.macro RESTORE_GUEST_SPEC_CTRL
> +	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
> +	ALTERNATIVE_2 "", \
> +		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
> +		"", X86_FEATURE_V_SPEC_CTRL
> +801:
> +.endm
> +
> +.macro RESTORE_HOST_SPEC_CTRL
> +	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
> +	ALTERNATIVE_2 "", \
> +		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
> +		"", X86_FEATURE_V_SPEC_CTRL
> +901:
> +.endm
> +
> +.macro RESTORE_SPEC_CTRL_BODY

Can we split these into separate macros?  It's a bit more typing, but it's not
immediately obvious that these are two independent chunks (I was expecting a JMP
from the 800 section into the 900 section).

E.g. RESTORE_GUEST_SPEC_CTRL_BODY and RESTORE_HOST_SPEC_CTRL_BODY

> +800:

Ugh, the multiple users make it somewhat ugly, but rather than arbitrary numbers,
what about using named labels to make it easier to understand the branches?

E.g.

---
 arch/x86/kvm/svm/vmenter.S | 43 +++++++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 19 deletions(-)

diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index a02eef724379..23fd7353f0d0 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -32,24 +32,24 @@
 
 .section .noinstr.text, "ax"
 
-.macro RESTORE_GUEST_SPEC_CTRL
+.macro RESTORE_GUEST_SPEC_CTRL name
 	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
 	ALTERNATIVE_2 "", \
-		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
+		"jmp .Lrestore_guest_spec_ctrl\name", X86_FEATURE_MSR_SPEC_CTRL, \
 		"", X86_FEATURE_V_SPEC_CTRL
-801:
+.Lpost_restore_guest_spec_ctrl\name:
 .endm
 
-.macro RESTORE_HOST_SPEC_CTRL
+.macro RESTORE_HOST_SPEC_CTRL name
 	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
 	ALTERNATIVE_2 "", \
-		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
+		"jmp .Lrestore_host_spec_ctrl\name", X86_FEATURE_MSR_SPEC_CTRL, \
 		"", X86_FEATURE_V_SPEC_CTRL
-901:
+.Lpost_restore_host_spec_ctrl\name:
 .endm
 
-.macro RESTORE_SPEC_CTRL_BODY
-800:
+.macro RESTORE_GUEST_SPEC_CTRL_BODY name
+.Lrestore_guest_spec_ctrl\name:
 	/*
 	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
 	 * host's, write the MSR.  This is kept out-of-line so that the common
@@ -61,13 +61,16 @@
 	 */
 	movl SVM_spec_ctrl(%_ASM_DI), %eax
 	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
-	je 801b
+	je .Lpost_restore_guest_spec_ctrl\name
+
 	mov $MSR_IA32_SPEC_CTRL, %ecx
 	xor %edx, %edx
 	wrmsr
-	jmp 801b
+	jmp .Lpost_restore_guest_spec_ctrl\name
+.endm
 
-900:
+.macro RESTORE_HOST_SPEC_CTRL_BODY name
+.Lrestore_host_spec_ctrl\name:
 	/* Same for after vmexit.  */
 	mov $MSR_IA32_SPEC_CTRL, %ecx
 
@@ -76,18 +79,18 @@
 	 * if it was not intercepted during guest execution.
 	 */
 	cmpb $0, (%_ASM_SP)
-	jnz 998f
+	jnz .Lskip_save_guest_spec_ctrl\name
 	rdmsr
 	movl %eax, SVM_spec_ctrl(%_ASM_DI)
-998:
 
+.Lskip_save_guest_spec_ctrl\name:
 	/* Now restore the host value of the MSR if different from the guest's.  */
 	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
 	cmp SVM_spec_ctrl(%_ASM_DI), %eax
-	je 901b
+	je .Lpost_restore_host_spec_ctrl\name
 	xor %edx, %edx
 	wrmsr
-	jmp 901b
+	jmp .Lpost_restore_host_spec_ctrl\name
 .endm
 
 
@@ -259,7 +262,8 @@ SYM_FUNC_START(__svm_vcpu_run)
 	pop %_ASM_BP
 	RET
 
-	RESTORE_SPEC_CTRL_BODY
+	RESTORE_GUEST_SPEC_CTRL_BODY
+	RESTORE_HOST_SPEC_CTRL_BODY
 
 10:	cmpb $0, kvm_rebooting
 	jne 2b
@@ -310,7 +314,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	mov %_ASM_ARG1, %_ASM_DI
 .endif
 
-	RESTORE_GUEST_SPEC_CTRL
+	RESTORE_GUEST_SPEC_CTRL sev_es
 
 	/* Get svm->current_vmcb->pa into RAX. */
 	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
@@ -331,7 +335,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 #endif
 
-	RESTORE_HOST_SPEC_CTRL
+	RESTORE_HOST_SPEC_CTRL sev_es
 
 	/*
 	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
@@ -359,7 +363,8 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	pop %_ASM_BP
 	RET
 
-	RESTORE_SPEC_CTRL_BODY
+	RESTORE_GUEST_SPEC_CTRL_BODY sev_es
+	RESTORE_HOST_SPEC_CTRL_BODY sev_es
 
 3:	cmpb $0, kvm_rebooting
 	jne 2b

base-commit: 0b242ada175d97a556866c48c80310860f634579
-- 

> @@ -61,6 +126,8 @@ SYM_FUNC_START(__svm_vcpu_run)
>  	mov %_ASM_ARG1, %_ASM_DI
>  .endif
>

Can you add a comment to all of these to call out that RAX, RCX, and RDX might
get clobbered?  It's easy to overlook that detail, and it matters on 32-bit where
the arguments use RCX and RDX.  And because the clobber is conditional, it wouldn't
be that hard for a bug to escape initial testing.

	/* This might clobber RAX, RCX, and RDX! */

> +	RESTORE_GUEST_SPEC_CTRL
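
FWIW, the failure mode I'm worried about looks something like this (purely
illustrative, not code from the patch):

	mov %_ASM_ARG1, %_ASM_DI	/* stash @svm */

	/* This might clobber RAX, RCX, and RDX! */
	RESTORE_GUEST_SPEC_CTRL		/* out-of-line body loads EAX/ECX/EDX for WRMSR */

	mov %_ASM_ARG2, %_ASM_BX	/* fine on 64-bit; on 32-bit ARG2 is %edx and may be gone */

On 64-bit, and on CPUs where the ALTERNATIVE isn't patched in, the above works
just fine, which is exactly how this kind of bug would sneak past initial
testing.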


