This is a note to let you know that I've just added the patch titled

    x86/boot/compressed: Move startup32_check_sev_cbit() out of head_64.S

to the 6.1-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
    x86-boot-compressed-move-startup32_check_sev_cbit-out-of-head_64.s.patch
and it can be found in the queue-6.1 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


>From 9d7eaae6a071ff1f718e0aa5e610bb712f8cc632 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel <ardb@xxxxxxxxxx>
Date: Tue, 22 Nov 2022 17:10:14 +0100
Subject: x86/boot/compressed: Move startup32_check_sev_cbit() out of head_64.S

From: Ard Biesheuvel <ardb@xxxxxxxxxx>

commit 9d7eaae6a071ff1f718e0aa5e610bb712f8cc632 upstream.

Now that the startup32_check_sev_cbit() routine can execute from
anywhere and behaves like an ordinary function, it can be moved where it
belongs.

Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
Signed-off-by: Borislav Petkov <bp@xxxxxxx>
Link: https://lore.kernel.org/r/20221122161017.2426828-15-ardb@xxxxxxxxxx
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/x86/boot/compressed/head_64.S     |   71 ---------------------------------
 arch/x86/boot/compressed/mem_encrypt.S |   68 +++++++++++++++++++++++++++++++
 2 files changed, 68 insertions(+), 71 deletions(-)

--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -727,77 +727,6 @@ SYM_DATA_START(boot_idt)
 SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end)
 
 /*
- * Check for the correct C-bit position when the startup_32 boot-path is used.
- *
- * The check makes use of the fact that all memory is encrypted when paging is
- * disabled. The function creates 64 bits of random data using the RDRAND
- * instruction. RDRAND is mandatory for SEV guests, so always available. If the
- * hypervisor violates that the kernel will crash right here.
- *
- * The 64 bits of random data are stored to a memory location and at the same
- * time kept in the %eax and %ebx registers. Since encryption is always active
- * when paging is off the random data will be stored encrypted in main memory.
- *
- * Then paging is enabled. When the C-bit position is correct all memory is
- * still mapped encrypted and comparing the register values with memory will
- * succeed. An incorrect C-bit position will map all memory unencrypted, so that
- * the compare will use the encrypted random data and fail.
- */
-#ifdef CONFIG_AMD_MEM_ENCRYPT
-	.text
-SYM_FUNC_START(startup32_check_sev_cbit)
-	pushl	%ebx
-	pushl	%ebp
-
-	call	0f
-0:	popl	%ebp
-
-	/* Check for non-zero sev_status */
-	movl	(sev_status - 0b)(%ebp), %eax
-	testl	%eax, %eax
-	jz	4f
-
-	/*
-	 * Get two 32-bit random values - Don't bail out if RDRAND fails
-	 * because it is better to prevent forward progress if no random value
-	 * can be gathered.
-	 */
-1:	rdrand	%eax
-	jnc	1b
-2:	rdrand	%ebx
-	jnc	2b
-
-	/* Store to memory and keep it in the registers */
-	leal	(sev_check_data - 0b)(%ebp), %ebp
-	movl	%eax, 0(%ebp)
-	movl	%ebx, 4(%ebp)
-
-	/* Enable paging to see if encryption is active */
-	movl	%cr0, %edx			 /* Backup %cr0 in %edx */
-	movl	$(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
-	movl	%ecx, %cr0
-
-	cmpl	%eax, 0(%ebp)
-	jne	3f
-	cmpl	%ebx, 4(%ebp)
-	jne	3f
-
-	movl	%edx, %cr0	/* Restore previous %cr0 */
-
-	jmp	4f
-
-3:	/* Check failed - hlt the machine */
-	hlt
-	jmp	3b
-
-4:
-	popl	%ebp
-	popl	%ebx
-	RET
-SYM_FUNC_END(startup32_check_sev_cbit)
-#endif
-
-/*
  * Stack and heap for uncompression
  */
 	.bss
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -243,6 +243,74 @@ SYM_FUNC_START(startup32_load_idt)
 	RET
 SYM_FUNC_END(startup32_load_idt)
 
+/*
+ * Check for the correct C-bit position when the startup_32 boot-path is used.
+ *
+ * The check makes use of the fact that all memory is encrypted when paging is
+ * disabled. The function creates 64 bits of random data using the RDRAND
+ * instruction. RDRAND is mandatory for SEV guests, so always available. If the
+ * hypervisor violates that the kernel will crash right here.
+ *
+ * The 64 bits of random data are stored to a memory location and at the same
+ * time kept in the %eax and %ebx registers. Since encryption is always active
+ * when paging is off the random data will be stored encrypted in main memory.
+ *
+ * Then paging is enabled. When the C-bit position is correct all memory is
+ * still mapped encrypted and comparing the register values with memory will
+ * succeed. An incorrect C-bit position will map all memory unencrypted, so that
+ * the compare will use the encrypted random data and fail.
+ */
+SYM_FUNC_START(startup32_check_sev_cbit)
+	pushl	%ebx
+	pushl	%ebp
+
+	call	0f
+0:	popl	%ebp
+
+	/* Check for non-zero sev_status */
+	movl	(sev_status - 0b)(%ebp), %eax
+	testl	%eax, %eax
+	jz	4f
+
+	/*
+	 * Get two 32-bit random values - Don't bail out if RDRAND fails
+	 * because it is better to prevent forward progress if no random value
+	 * can be gathered.
+	 */
+1:	rdrand	%eax
+	jnc	1b
+2:	rdrand	%ebx
+	jnc	2b
+
+	/* Store to memory and keep it in the registers */
+	leal	(sev_check_data - 0b)(%ebp), %ebp
+	movl	%eax, 0(%ebp)
+	movl	%ebx, 4(%ebp)
+
+	/* Enable paging to see if encryption is active */
+	movl	%cr0, %edx			 /* Backup %cr0 in %edx */
+	movl	$(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
+	movl	%ecx, %cr0
+
+	cmpl	%eax, 0(%ebp)
+	jne	3f
+	cmpl	%ebx, 4(%ebp)
+	jne	3f
+
+	movl	%edx, %cr0	/* Restore previous %cr0 */
+
+	jmp	4f
+
+3:	/* Check failed - hlt the machine */
+	hlt
+	jmp	3b
+
+4:
+	popl	%ebp
+	popl	%ebx
+	RET
+SYM_FUNC_END(startup32_check_sev_cbit)
+
 	.code64
 #include "../../kernel/sev_verify_cbit.S"
 

Patches currently in stable-queue which might be from ardb@xxxxxxxxxx are

queue-6.1/x86-boot-compressed-avoid-touching-ecx-in-startup32_set_idt_entry.patch
queue-6.1/x86-boot-compressed-only-build-mem_encrypt.s-if-amd_mem_encrypt-y.patch
queue-6.1/efi-capsule-loader-fix-incorrect-allocation-size.patch
queue-6.1/x86-decompressor-move-global-symbol-references-to-c-code.patch
queue-6.1/x86-boot-compressed-move-startup32_load_idt-into-.text-section.patch
queue-6.1/x86-decompressor-only-call-the-trampoline-when-changing-paging-levels.patch
queue-6.1/x86-boot-compressed-rename-efi_thunk_64.s-to-efi-mixed.s.patch
queue-6.1/x86-decompressor-merge-trampoline-cleanup-with-switching-code.patch
queue-6.1/x86-boot-compressed-move-efi32_pe_entry-out-of-head_64.s.patch
queue-6.1/efi-verify-that-variable-services-are-supported.patch
queue-6.1/x86-decompressor-call-trampoline-directly-from-c-code.patch
queue-6.1/x86-boot-compressed-pull-global-variable-reference-into-startup32_load_idt.patch
queue-6.1/x86-boot-compressed-simplify-idt-gdt-preserve-restore-in-the-efi-thunk.patch
queue-6.1/x86-decompressor-assign-paging-related-global-variables-earlier.patch
queue-6.1/x86-boot-compressed-move-32-bit-entrypoint-code-into-.text-section.patch
queue-6.1/x86-boot-compressed-move-efi32_pe_entry-into-.text-section.patch
queue-6.1/x86-decompressor-avoid-the-need-for-a-stack-in-the-32-bit-trampoline.patch
queue-6.1/x86-boot-compressed-move-startup32_check_sev_cbit-out-of-head_64.s.patch
queue-6.1/x86-boot-compressed-efi-merge-multiple-definitions-of-image_offset-into-one.patch
queue-6.1/x86-boot-compressed-move-startup32_load_idt-out-of-head_64.s.patch
queue-6.1/decompress-use-8-byte-alignment.patch
queue-6.1/x86-boot-compressed-move-efi32_entry-out-of-head_64.s.patch
queue-6.1/efi-libstub-use-efi_loader_code-region-when-moving-the-kernel-in-memory.patch
queue-6.1/crypto-arm64-neonbs-fix-out-of-bounds-access-on-short-input.patch
queue-6.1/x86-boot-compressed-move-bootargs-parsing-out-of-32-bit-startup-code.patch
queue-6.1/efivarfs-request-at-most-512-bytes-for-variable-names.patch
queue-6.1/x86-boot-compressed-adhere-to-calling-convention-in-get_sev_encryption_bit.patch
queue-6.1/x86-boot-compressed-move-startup32_check_sev_cbit-into-.text.patch
queue-6.1/x86-decompressor-store-boot_params-pointer-in-callee-save-register.patch
queue-6.1/x86-efistub-branch-straight-to-kernel-entry-point-from-c-code.patch
queue-6.1/x86-decompressor-call-trampoline-as-a-normal-function.patch
queue-6.1/x86-decompressor-pass-pgtable-address-to-trampoline-directly.patch
queue-6.1/x86-decompressor-use-standard-calling-convention-for-trampoline.patch
queue-6.1/x86-efi-make-the-deprecated-efi-handover-protocol-optional.patch
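
For readers tracing the moved routine, below is a rough, illustrative C sketch of the check that startup32_check_sev_cbit() performs; it is not part of the patch. The real code is 32-bit assembly running with paging disabled, so the hardware steps (RDRAND, flipping CR0.PG, the encrypted vs. unencrypted view of memory) are modelled by hypothetical stubs whose names do not exist in the kernel sources.

/*
 * Illustrative-only sketch of the C-bit position check in the patch above.
 * The stubs (rdrand32, enable_paging, restore_cr0, halt_forever) are
 * hypothetical stand-ins for the instructions used by the assembly.
 */
#include <stdint.h>

extern uint64_t sev_status;           /* non-zero when running as an SEV guest */
extern uint32_t rdrand32(void);       /* RDRAND, retried until CF=1 in the asm */
extern void enable_paging(void);      /* set X86_CR0_PG | X86_CR0_PE in %cr0 */
extern void restore_cr0(void);        /* write back the saved %cr0 */
extern void halt_forever(void);       /* 3: hlt; jmp 3b */

static uint32_t sev_check_data[2];    /* written while paging is still off */

void check_sev_cbit_sketch(void)
{
	uint32_t a, b;

	/* Nothing to verify unless SEV is active. */
	if (!sev_status)
		return;

	/*
	 * Generate 64 bits of random data and store them while paging is
	 * disabled, i.e. while every store is encrypted in main memory.
	 */
	a = rdrand32();
	b = rdrand32();
	sev_check_data[0] = a;
	sev_check_data[1] = b;

	/*
	 * Turn paging on. With the correct C-bit the mapping is still
	 * encrypted and the values read back match the registers; with a
	 * wrong C-bit the reads return ciphertext and the compare fails.
	 */
	enable_paging();
	if (sev_check_data[0] != a || sev_check_data[1] != b)
		halt_forever();       /* check failed - halt the machine */

	restore_cr0();
}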