Re: [PATCH v8 01/40] x86/compressed/64: detect/setup SEV/SME features earlier in boot

On 2021-12-10 09:42:53 -0600, Brijesh Singh wrote:
> From: Michael Roth <michael.roth@xxxxxxx>
> 
> With upcoming SEV-SNP support, SEV-related features need to be
> initialized earlier in boot, at the same point the initial #VC handler
> is set up, so that the SEV-SNP CPUID table can be utilized during the
> initial feature checks. Also, SEV-SNP feature detection will rely on
> EFI helper functions to scan the EFI config table for the Confidential
> Computing blob, and so would need to be implemented at least partially
> in C.
> 
> Currently set_sev_encryption_mask() is used to initialize the
> sev_status and sme_me_mask globals that advertise what SEV/SME features
> are available in a guest. Rename it to sev_enable() to better reflect
> that (SME is only enabled in the case of SEV guests in the
> boot/compressed kernel), and move it to just after the stage1 #VC
> handler is set up so that it can be used to initialize SEV-SNP as well
> in future patches.
> 
> While at it, re-implement it as C code so that all SEV feature
> detection can be better consolidated with upcoming SEV-SNP feature
> detection, which will also be in C.
> 
> The 32-bit entry path remains unchanged, as it never relied on the
> set_sev_encryption_mask() initialization to begin with, possibly due to
> the normal rva() helper for accessing globals only being usable by code
> in .head.text. Either way, 32-bit entry for SEV-SNP would likely only
> be supported for non-EFI boot paths, and so wouldn't rely on existing
> EFI helper functions; it could instead be handled by a separate/simpler
> 32-bit initializer in the future if needed.
> 
> Signed-off-by: Michael Roth <michael.roth@xxxxxxx>
> Signed-off-by: Brijesh Singh <brijesh.singh@xxxxxxx>
> ---
>  arch/x86/boot/compressed/head_64.S     | 32 ++++++++++--------
>  arch/x86/boot/compressed/mem_encrypt.S | 36 ---------------------
>  arch/x86/boot/compressed/misc.h        |  4 +--
>  arch/x86/boot/compressed/sev.c         | 45 ++++++++++++++++++++++++++
>  4 files changed, 66 insertions(+), 51 deletions(-)
> 
> diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
> index 572c535cf45b..20b174adca51 100644
> --- a/arch/x86/boot/compressed/head_64.S
> +++ b/arch/x86/boot/compressed/head_64.S
> @@ -191,9 +191,8 @@ SYM_FUNC_START(startup_32)
>  	/*
>  	 * Mark SEV as active in sev_status so that startup32_check_sev_cbit()
>  	 * will do a check. The sev_status memory will be fully initialized
> -	 * with the contents of MSR_AMD_SEV_STATUS later in
> -	 * set_sev_encryption_mask(). For now it is sufficient to know that SEV
> -	 * is active.
> +	 * with the contents of MSR_AMD_SEV_STATUS later via sev_enable(). For
> +	 * now it is sufficient to know that SEV is active.
>  	 */
>  	movl	$1, rva(sev_status)(%ebp)
>  1:
> @@ -447,6 +446,23 @@ SYM_CODE_START(startup_64)
>  	call	load_stage1_idt
>  	popq	%rsi
>  
> +#ifdef CONFIG_AMD_MEM_ENCRYPT
> +	/*
> +	 * Now that the stage1 interrupt handlers are set up, #VC exceptions from
> +	 * CPUID instructions can be properly handled for SEV-ES guests.
> +	 *
> +	 * For SEV-SNP, the CPUID table also needs to be set up in advance of any
> +	 * CPUID instructions being issued, so go ahead and do that now via
> +	 * sev_enable(), which will also handle the rest of the SEV-related
> +	 * detection/setup to ensure that has been done in advance of any dependent
> +	 * code.
> +	 */
> +	pushq	%rsi
> +	movq	%rsi, %rdi		/* real mode address */
> +	call	sev_enable
> +	popq	%rsi
> +#endif
> +
>  	/*
>  	 * paging_prepare() sets up the trampoline and checks if we need to
>  	 * enable 5-level paging.
> @@ -559,17 +575,7 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
>  	shrq	$3, %rcx
>  	rep	stosq
>  
> -/*
> - * If running as an SEV guest, the encryption mask is required in the
> - * page-table setup code below. When the guest also has SEV-ES enabled
> - * set_sev_encryption_mask() will cause #VC exceptions, but the stage2
> - * handler can't map its GHCB because the page-table is not set up yet.
> - * So set up the encryption mask here while still on the stage1 #VC
> - * handler. Then load stage2 IDT and switch to the kernel's own
> - * page-table.
> - */
>  	pushq	%rsi
> -	call	set_sev_encryption_mask
>  	call	load_stage2_idt
>  
>  	/* Pass boot_params to initialize_identity_maps() */
> diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
> index c1e81a848b2a..311d40f35a4b 100644
> --- a/arch/x86/boot/compressed/mem_encrypt.S
> +++ b/arch/x86/boot/compressed/mem_encrypt.S
> @@ -187,42 +187,6 @@ SYM_CODE_END(startup32_vc_handler)
>  	.code64
>  
>  #include "../../kernel/sev_verify_cbit.S"
> -SYM_FUNC_START(set_sev_encryption_mask)
> -#ifdef CONFIG_AMD_MEM_ENCRYPT
> -	push	%rbp
> -	push	%rdx
> -
> -	movq	%rsp, %rbp		/* Save current stack pointer */
> -
> -	call	get_sev_encryption_bit	/* Get the encryption bit position */
> -	testl	%eax, %eax
> -	jz	.Lno_sev_mask
> -
> -	bts	%rax, sme_me_mask(%rip)	/* Create the encryption mask */
> -
> -	/*
> -	 * Read MSR_AMD64_SEV again and store it to sev_status. Can't do this in
> -	 * get_sev_encryption_bit() because this function is 32-bit code and
> -	 * shared between 64-bit and 32-bit boot path.
> -	 */
> -	movl	$MSR_AMD64_SEV, %ecx	/* Read the SEV MSR */
> -	rdmsr
> -
> -	/* Store MSR value in sev_status */
> -	shlq	$32, %rdx
> -	orq	%rdx, %rax
> -	movq	%rax, sev_status(%rip)
> -
> -.Lno_sev_mask:
> -	movq	%rbp, %rsp		/* Restore original stack pointer */
> -
> -	pop	%rdx
> -	pop	%rbp
> -#endif
> -
> -	xor	%rax, %rax
> -	ret
> -SYM_FUNC_END(set_sev_encryption_mask)
>  
>  	.data
>  
> diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
> index 16ed360b6692..23e0e395084a 100644
> --- a/arch/x86/boot/compressed/misc.h
> +++ b/arch/x86/boot/compressed/misc.h
> @@ -120,12 +120,12 @@ static inline void console_init(void)
>  { }
>  #endif
>  
> -void set_sev_encryption_mask(void);
> -
>  #ifdef CONFIG_AMD_MEM_ENCRYPT
> +void sev_enable(struct boot_params *bp);
>  void sev_es_shutdown_ghcb(void);
>  extern bool sev_es_check_ghcb_fault(unsigned long address);
>  #else
> +static inline void sev_enable(struct boot_params *bp) { }
>  static inline void sev_es_shutdown_ghcb(void) { }
>  static inline bool sev_es_check_ghcb_fault(unsigned long address)
>  {
> diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
> index 28bcf04c022e..8eebdf589a90 100644
> --- a/arch/x86/boot/compressed/sev.c
> +++ b/arch/x86/boot/compressed/sev.c
> @@ -204,3 +204,48 @@ void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
>  	else if (result != ES_RETRY)
>  		sev_es_terminate(GHCB_SEV_ES_GEN_REQ);
>  }
> +
> +static inline u64 rd_sev_status_msr(void)
> +{
> +	unsigned long low, high;
> +
> +	asm volatile("rdmsr" : "=a" (low), "=d" (high) :
> +			"c" (MSR_AMD64_SEV));
> +
> +	return ((high << 32) | low);
> +}
> +
> +void sev_enable(struct boot_params *bp)
> +{
> +	unsigned int eax, ebx, ecx, edx;
> +
> +	/* Check for the SME/SEV support leaf */
> +	eax = 0x80000000;
> +	ecx = 0;
> +	native_cpuid(&eax, &ebx, &ecx, &edx);
> +	if (eax < 0x8000001f)
> +		return;
> +
> +	/*
> +	 * Check for the SME/SEV feature:
> +	 *   CPUID Fn8000_001F[EAX]
> +	 *   - Bit 0 - Secure Memory Encryption support
> +	 *   - Bit 1 - Secure Encrypted Virtualization support
> +	 *   CPUID Fn8000_001F[EBX]
> +	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
> +	 */
> +	eax = 0x8000001f;
> +	ecx = 0;
> +	native_cpuid(&eax, &ebx, &ecx, &edx);
> +	/* Check whether SEV is supported */
> +	if (!(eax & BIT(1)))
> +		return;
> +
> +	/* Set the SME mask if this is an SEV guest. */
> +	sev_status   = rd_sev_status_msr();
> +
> +	if (!(sev_status & MSR_AMD64_SEV_ENABLED))
> +		return;
> +
> +	sme_me_mask = BIT_ULL(ebx & 0x3f);
> +}

I made this suggestion while reviewing v7 too, but it appears that it
fell through the cracks. Most of the code in sev_enable() is duplicated
from sme_enable(). Wouldn't it be better to put all that common code
in a different function, and call that function from sme_enable()
and sev_enable()?
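
To sketch what I mean (the helper name and interface below are purely
illustrative, not something from this patch, and this glosses over the
fact that sme_enable() and sev_enable() are built in different
compilation units, so the helper would need to live somewhere both can
build): the common CPUID parsing could look roughly like this:

/*
 * Illustrative only: returns the C-bit position from CPUID
 * Fn8000_001F[EBX] if the SME/SEV leaf reports SME or SEV support,
 * or a negative value otherwise.  *features is set to Fn8000_001F[EAX].
 */
static int sme_sev_parse_cpuid(unsigned int *features)
{
	unsigned int eax, ebx, ecx, edx;

	/* Is the SME/SEV leaf present at all? */
	eax = 0x80000000;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (eax < 0x8000001f)
		return -1;

	/*
	 * CPUID Fn8000_001F[EAX]: bit 0 = SME, bit 1 = SEV
	 * CPUID Fn8000_001F[EBX]: bits 5:0 = C-bit position
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (!(eax & (BIT(0) | BIT(1))))
		return -1;

	*features = eax;
	return ebx & 0x3f;
}

sev_enable() would then boil down to just the SEV-specific pieces:

void sev_enable(struct boot_params *bp)
{
	unsigned int features;
	int c_bit;

	c_bit = sme_sev_parse_cpuid(&features);
	if (c_bit < 0 || !(features & BIT(1)))
		return;

	sev_status = rd_sev_status_msr();
	if (!(sev_status & MSR_AMD64_SEV_ENABLED))
		return;

	sme_me_mask = BIT_ULL(c_bit);
}

and sme_enable() could reuse the same helper for its SME/SEV decision
instead of open-coding the same CPUID checks.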

Venu



