Patch "x86/decompressor: Avoid the need for a stack in the 32-bit trampoline" has been added to the 6.1-stable tree

This is a note to let you know that I've just added the patch titled

    x86/decompressor: Avoid the need for a stack in the 32-bit trampoline

to the 6.1-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     x86-decompressor-avoid-the-need-for-a-stack-in-the-32-bit-trampoline.patch
and it can be found in the queue-6.1 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


From bd328aa01ff77a45aeffea5fc4521854291db11f Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel <ardb@xxxxxxxxxx>
Date: Mon, 7 Aug 2023 18:27:08 +0200
Subject: x86/decompressor: Avoid the need for a stack in the 32-bit trampoline

From: Ard Biesheuvel <ardb@xxxxxxxxxx>

commit bd328aa01ff77a45aeffea5fc4521854291db11f upstream.

The 32-bit trampoline no longer uses the stack for anything except
performing a far return back to long mode, and preserving the caller's
stack pointer value. Currently, the trampoline stack is placed in the
same page that carries the trampoline code, which means this page must
be mapped writable and executable, and the stack is therefore executable
as well.

Replace the far return with a far jump, so that the return address can
be pre-calculated and patched into the code before it is called. This
removes the need for a 32-bit addressable stack entirely, and in a later
patch, this will be taken advantage of by removing writable permissions
from (and adding executable permissions to) the trampoline code page
when booting via the EFI stub.
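
As a minimal C sketch of that fix-up (purely illustrative and not taken from
the patch: install_trampoline() and its parameter names are made up here,
while the real code lives in paging_prepare() and patches the immediate at
trampoline_ljmp_imm_offset):

#include <stdint.h>
#include <string.h>

/*
 * Illustrative helper: copy the position-independent trampoline template
 * into a 32-bit addressable buffer and rebase the absolute target of its
 * LJMP onto the address the copy actually landed at.
 */
static void *install_trampoline(void *dst, const void *tmpl,
				size_t size, uint16_t ljmp_imm_offset)
{
	uint8_t *code = memcpy(dst, tmpl, size);
	uint32_t target;

	/* The template encodes the jump target relative to its own start. */
	memcpy(&target, code + ljmp_imm_offset, sizeof(target));

	/* Rebase it onto the copy's load address. */
	target += (uint32_t)(uintptr_t)code;
	memcpy(code + ljmp_imm_offset, &target, sizeof(target));

	return code;
}

The "+=" in the actual patch works the same way: the immediate initially
holds the offset (.Lret - trampoline_32bit_src), so adding the address of
the copied trampoline turns it into an absolute destination.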

Note that the value of RSP still needs to be preserved explicitly across
the switch into 32-bit mode, as the register may get truncated to 32
bits.
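
A minimal C sketch of that preservation scheme (again illustrative only; the
patch does the equivalent with MOVQ/SHRQ on entry and SHLQ/ORQ on the .Lret
path, using RBX as the scratch register):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t rsp = 0x0000000123456780ULL;	/* hypothetical stack pointer */

	/*
	 * Move the top half into a 32-bit quantity; the patch keeps it in
	 * EBX, which the compatibility mode code does not clobber.
	 */
	uint32_t hi = (uint32_t)(rsp >> 32);

	/* Only the low 32 bits (ESP) are assumed to survive the excursion
	 * into compatibility mode. */
	uint32_t lo = (uint32_t)rsp;

	/* Back in long mode, reassemble the full 64-bit value. */
	uint64_t restored = ((uint64_t)hi << 32) | lo;

	assert(restored == rsp);
	return 0;
}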

Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
Signed-off-by: Borislav Petkov (AMD) <bp@xxxxxxxxx>
Acked-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Link: https://lore.kernel.org/r/20230807162720.545787-12-ardb@xxxxxxxxxx
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/x86/boot/compressed/head_64.S    |   45 ++++++++++++++++++++--------------
 arch/x86/boot/compressed/pgtable.h    |    4 +--
 arch/x86/boot/compressed/pgtable_64.c |   12 ++++++++-
 3 files changed, 40 insertions(+), 21 deletions(-)

--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -558,6 +558,7 @@ SYM_FUNC_END(.Lrelocated)
  * trampoline memory. A non-zero second argument (ESI) means that the
  * trampoline needs to enable 5-level paging.
  */
+	.section ".rodata", "a", @progbits
 SYM_CODE_START(trampoline_32bit_src)
 	/*
 	 * Preserve live 64-bit registers on the stack: this is necessary
@@ -568,13 +569,9 @@ SYM_CODE_START(trampoline_32bit_src)
 	pushq	%rbp
 	pushq	%rbx
 
-	/* Set up 32-bit addressable stack and push the old RSP value */
-	leaq	(TRAMPOLINE_32BIT_STACK_END - 8)(%rcx), %rbx
-	movq	%rsp, (%rbx)
-	movq	%rbx, %rsp
-
-	/* Take the address of the trampoline exit code */
-	leaq	.Lret(%rip), %rbx
+	/* Preserve top half of RSP in a legacy mode GPR to avoid truncation */
+	movq	%rsp, %rbx
+	shrq	$32, %rbx
 
 	/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
 	pushq	$__KERNEL32_CS
@@ -582,9 +579,17 @@ SYM_CODE_START(trampoline_32bit_src)
 	pushq	%rax
 	lretq
 
+	/*
+	 * The 32-bit code below will do a far jump back to long mode and end
+	 * up here after reconfiguring the number of paging levels. First, the
+	 * stack pointer needs to be restored to its full 64-bit value before
+	 * the callee save register contents can be popped from the stack.
+	 */
 .Lret:
+	shlq	$32, %rbx
+	orq	%rbx, %rsp
+
 	/* Restore the preserved 64-bit registers */
-	movq	(%rsp), %rsp
 	popq	%rbx
 	popq	%rbp
 	popq	%r15
@@ -592,11 +597,6 @@ SYM_CODE_START(trampoline_32bit_src)
 
 	.code32
 0:
-	/* Set up data and stack segments */
-	movl	$__KERNEL_DS, %eax
-	movl	%eax, %ds
-	movl	%eax, %ss
-
 	/* Disable paging */
 	movl	%cr0, %eax
 	btrl	$X86_CR0_PG_BIT, %eax
@@ -651,18 +651,26 @@ SYM_CODE_START(trampoline_32bit_src)
 1:
 	movl	%eax, %cr4
 
-	/* Prepare the stack for far return to Long Mode */
-	pushl	$__KERNEL_CS
-	pushl	%ebx
-
 	/* Enable paging again. */
 	movl	%cr0, %eax
 	btsl	$X86_CR0_PG_BIT, %eax
 	movl	%eax, %cr0
 
-	lret
+	/*
+	 * Return to the 64-bit calling code using LJMP rather than LRET, to
+	 * avoid the need for a 32-bit addressable stack. The destination
+	 * address will be adjusted after the template code is copied into a
+	 * 32-bit addressable buffer.
+	 */
+.Ljmp:	ljmpl	$__KERNEL_CS, $(.Lret - trampoline_32bit_src)
 SYM_CODE_END(trampoline_32bit_src)
 
+/*
+ * This symbol is placed right after trampoline_32bit_src() so its address can
+ * be used to infer the size of the trampoline code.
+ */
+SYM_DATA(trampoline_ljmp_imm_offset, .word  .Ljmp + 1 - trampoline_32bit_src)
+
 	/*
          * The trampoline code has a size limit.
          * Make sure we fail to compile if the trampoline code grows
@@ -670,6 +678,7 @@ SYM_CODE_END(trampoline_32bit_src)
 	 */
 	.org	trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
 
+	.text
 SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
 	/* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
 1:
--- a/arch/x86/boot/compressed/pgtable.h
+++ b/arch/x86/boot/compressed/pgtable.h
@@ -8,13 +8,13 @@
 #define TRAMPOLINE_32BIT_CODE_OFFSET	PAGE_SIZE
 #define TRAMPOLINE_32BIT_CODE_SIZE	0xA0
 
-#define TRAMPOLINE_32BIT_STACK_END	TRAMPOLINE_32BIT_SIZE
-
 #ifndef __ASSEMBLER__
 
 extern unsigned long *trampoline_32bit;
 
 extern void trampoline_32bit_src(void *trampoline, bool enable_5lvl);
 
+extern const u16 trampoline_ljmp_imm_offset;
+
 #endif /* __ASSEMBLER__ */
 #endif /* BOOT_COMPRESSED_PAGETABLE_H */
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -109,6 +109,7 @@ static unsigned long find_trampoline_pla
 struct paging_config paging_prepare(void *rmode)
 {
 	struct paging_config paging_config = {};
+	void *tramp_code;
 
 	/* Initialize boot_params. Required for cmdline_find_option_bool(). */
 	boot_params = rmode;
@@ -148,10 +149,19 @@ struct paging_config paging_prepare(void
 	memset(trampoline_32bit, 0, TRAMPOLINE_32BIT_SIZE);
 
 	/* Copy trampoline code in place */
-	memcpy(trampoline_32bit + TRAMPOLINE_32BIT_CODE_OFFSET / sizeof(unsigned long),
+	tramp_code = memcpy(trampoline_32bit +
+			TRAMPOLINE_32BIT_CODE_OFFSET / sizeof(unsigned long),
 			&trampoline_32bit_src, TRAMPOLINE_32BIT_CODE_SIZE);
 
 	/*
+	 * Avoid the need for a stack in the 32-bit trampoline code, by using
+	 * LJMP rather than LRET to return back to long mode. LJMP takes an
+	 * immediate absolute address, which needs to be adjusted based on the
+	 * placement of the trampoline.
+	 */
+	*(u32 *)(tramp_code + trampoline_ljmp_imm_offset) += (unsigned long)tramp_code;
+
+	/*
 	 * The code below prepares page table in trampoline memory.
 	 *
 	 * The new page table will be used by trampoline code for switching


Patches currently in stable-queue which might be from ardb@xxxxxxxxxx are

queue-6.1/x86-boot-compressed-avoid-touching-ecx-in-startup32_set_idt_entry.patch
queue-6.1/x86-boot-compressed-only-build-mem_encrypt.s-if-amd_mem_encrypt-y.patch
queue-6.1/efi-capsule-loader-fix-incorrect-allocation-size.patch
queue-6.1/x86-decompressor-move-global-symbol-references-to-c-code.patch
queue-6.1/x86-boot-compressed-move-startup32_load_idt-into-.text-section.patch
queue-6.1/x86-decompressor-only-call-the-trampoline-when-changing-paging-levels.patch
queue-6.1/x86-boot-compressed-rename-efi_thunk_64.s-to-efi-mixed.s.patch
queue-6.1/x86-decompressor-merge-trampoline-cleanup-with-switching-code.patch
queue-6.1/x86-boot-compressed-move-efi32_pe_entry-out-of-head_64.s.patch
queue-6.1/efi-verify-that-variable-services-are-supported.patch
queue-6.1/x86-decompressor-call-trampoline-directly-from-c-code.patch
queue-6.1/x86-boot-compressed-pull-global-variable-reference-into-startup32_load_idt.patch
queue-6.1/x86-boot-compressed-simplify-idt-gdt-preserve-restore-in-the-efi-thunk.patch
queue-6.1/x86-decompressor-assign-paging-related-global-variables-earlier.patch
queue-6.1/x86-boot-compressed-move-32-bit-entrypoint-code-into-.text-section.patch
queue-6.1/x86-boot-compressed-move-efi32_pe_entry-into-.text-section.patch
queue-6.1/x86-decompressor-avoid-the-need-for-a-stack-in-the-32-bit-trampoline.patch
queue-6.1/x86-boot-compressed-move-startup32_check_sev_cbit-out-of-head_64.s.patch
queue-6.1/x86-boot-compressed-efi-merge-multiple-definitions-of-image_offset-into-one.patch
queue-6.1/x86-boot-compressed-move-startup32_load_idt-out-of-head_64.s.patch
queue-6.1/decompress-use-8-byte-alignment.patch
queue-6.1/x86-boot-compressed-move-efi32_entry-out-of-head_64.s.patch
queue-6.1/efi-libstub-use-efi_loader_code-region-when-moving-the-kernel-in-memory.patch
queue-6.1/crypto-arm64-neonbs-fix-out-of-bounds-access-on-short-input.patch
queue-6.1/x86-boot-compressed-move-bootargs-parsing-out-of-32-bit-startup-code.patch
queue-6.1/efivarfs-request-at-most-512-bytes-for-variable-names.patch
queue-6.1/x86-boot-compressed-adhere-to-calling-convention-in-get_sev_encryption_bit.patch
queue-6.1/x86-boot-compressed-move-startup32_check_sev_cbit-into-.text.patch
queue-6.1/x86-decompressor-store-boot_params-pointer-in-callee-save-register.patch
queue-6.1/x86-efistub-branch-straight-to-kernel-entry-point-from-c-code.patch
queue-6.1/x86-decompressor-call-trampoline-as-a-normal-function.patch
queue-6.1/x86-decompressor-pass-pgtable-address-to-trampoline-directly.patch
queue-6.1/x86-decompressor-use-standard-calling-convention-for-trampoline.patch
queue-6.1/x86-efi-make-the-deprecated-efi-handover-protocol-optional.patch



