Patch "x86/sme: Move early SME kernel encryption handling into .head.text" has been added to the 6.1-stable tree

This is a note to let you know that I've just added the patch titled

    x86/sme: Move early SME kernel encryption handling into .head.text

to the 6.1-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     x86-sme-move-early-sme-kernel-encryption-handling-into-.head.text.patch
and it can be found in the queue-6.1 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


From stable+bounces-40257-greg=kroah.com@xxxxxxxxxxxxxxx Fri Apr 19 10:12:18 2024
From: Ard Biesheuvel <ardb+git@xxxxxxxxxx>
Date: Fri, 19 Apr 2024 10:11:27 +0200
Subject: x86/sme: Move early SME kernel encryption handling into .head.text
To: stable@xxxxxxxxxxxxxxx
Message-ID: <20240419081105.3817596-46-ardb+git@xxxxxxxxxx>

From: Ard Biesheuvel <ardb@xxxxxxxxxx>

[ Commit 48204aba801f1b512b3abed10b8e1a63e03f3dd1 upstream ]

The .head.text section is the initial primary entrypoint of the core
kernel, and is entered with the CPU executing from a 1:1 mapping of
memory. Such code must never access global variables using absolute
references, as these are based on the kernel virtual mapping, which is
not yet active at this point.

Given that the SME startup code is also called from this early execution
context, move it into .head.text as well. This will allow more thorough
build-time checks in the future to ensure that early startup code only
uses RIP-relative references to global variables.
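
For illustration only (not part of the patch): the sketch below uses a
hypothetical global 'foo' to show why an absolute reference breaks in this
early execution context while a RIP-relative one does not. The inline-asm
form mirrors the open-coded pattern this patch removes from
sme_encrypt_kernel().

/* Hypothetical global, for illustration only. */
char foo[64];

/*
 * Absolute reference: the compiler may emit e.g. "movabs $foo, %rax",
 * i.e. the link-time kernel-virtual address of foo.  That address is
 * not yet mapped while executing from the early 1:1 mapping, so using
 * it would fault or touch the wrong memory.
 */
void *foo_absolute(void)
{
	return foo;
}

/*
 * RIP-relative reference: "lea foo(%rip), %rax" computes the address
 * relative to the executing instruction, so it is correct under both
 * the 1:1 mapping and the final kernel virtual mapping.
 */
void *foo_rip_relative(void)
{
	void *p;

	asm ("lea foo(%%rip), %0" : "=r" (p) : "p" (foo));
	return p;
}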

Also replace some occurrences of __pa_symbol() [which relies on the
compiler generating an absolute reference, which is not guaranteed] and
an open-coded RIP-relative access with RIP_REL_REF().
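
For reference, RIP_REL_REF() lives in arch/x86/include/asm/asm.h upstream.
A simplified sketch follows (the exact definition carried by this backport
may differ in detail): it takes the symbol's address through a RIP-relative
lea and dereferences the result, so that while running 1:1 mapped the
computed address is also the physical address, which is why it can stand in
for __pa_symbol() in the hunks below.

/*
 * Simplified sketch only -- see arch/x86/include/asm/asm.h for the real
 * definition.  The "i" constraint needs the symbol address to be a
 * link-time constant, so the helper must be inlined (build with
 * optimization enabled).
 */
static inline __attribute__((__always_inline__)) void *rip_rel_ptr(void *p)
{
	/* Force a RIP-relative lea so the compiler cannot fall back to
	 * an absolute reference to the symbol. */
	asm ("leaq %c1(%%rip), %0" : "=r" (p) : "i" (p));
	return p;
}

#define RIP_REL_REF(var)	(*(typeof(&(var)))rip_rel_ptr(&(var)))

/* Stand-in for a linker symbol such as _text, _end or sme_workarea. */
extern char some_linker_symbol[];

unsigned long phys_addr_while_identity_mapped(void)
{
	/*
	 * Running 1:1 mapped, the RIP-relative address of a kernel symbol
	 * is its physical address, so no __pa_symbol() fixup is needed.
	 */
	return (unsigned long)RIP_REL_REF(some_linker_symbol);
}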

Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
Signed-off-by: Borislav Petkov (AMD) <bp@xxxxxxxxx>
Tested-by: Tom Lendacky <thomas.lendacky@xxxxxxx>
Link: https://lore.kernel.org/r/20240227151907.387873-18-ardb+git@xxxxxxxxxx
Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/mem_encrypt.h |    8 +++----
 arch/x86/mm/mem_encrypt_identity.c |   42 ++++++++++++++-----------------------
 2 files changed, 21 insertions(+), 29 deletions(-)

--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -46,8 +46,8 @@ void __init sme_unmap_bootdata(char *rea
 void __init sme_early_init(void);
 void __init sev_setup_arch(void);
 
-void __init sme_encrypt_kernel(struct boot_params *bp);
-void __init sme_enable(struct boot_params *bp);
+void sme_encrypt_kernel(struct boot_params *bp);
+void sme_enable(struct boot_params *bp);
 
 int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
 int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
@@ -80,8 +80,8 @@ static inline void __init sme_unmap_boot
 static inline void __init sme_early_init(void) { }
 static inline void __init sev_setup_arch(void) { }
 
-static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
-static inline void __init sme_enable(struct boot_params *bp) { }
+static inline void sme_encrypt_kernel(struct boot_params *bp) { }
+static inline void sme_enable(struct boot_params *bp) { }
 
 static inline void sev_es_init_vc_handling(void) { }
 
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -41,6 +41,7 @@
 #include <linux/mem_encrypt.h>
 #include <linux/cc_platform.h>
 
+#include <asm/init.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cmdline.h>
@@ -98,7 +99,7 @@ static char sme_workarea[2 * PMD_SIZE] _
 static char sme_cmdline_arg[] __initdata = "mem_encrypt";
 static char sme_cmdline_on[]  __initdata = "on";
 
-static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
+static void __head sme_clear_pgd(struct sme_populate_pgd_data *ppd)
 {
 	unsigned long pgd_start, pgd_end, pgd_size;
 	pgd_t *pgd_p;
@@ -113,7 +114,7 @@ static void __init sme_clear_pgd(struct
 	memset(pgd_p, 0, pgd_size);
 }
 
-static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
+static pud_t __head *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
 {
 	pgd_t *pgd;
 	p4d_t *p4d;
@@ -150,7 +151,7 @@ static pud_t __init *sme_prepare_pgd(str
 	return pud;
 }
 
-static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+static void __head sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
 {
 	pud_t *pud;
 	pmd_t *pmd;
@@ -166,7 +167,7 @@ static void __init sme_populate_pgd_larg
 	set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
 }
 
-static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
+static void __head sme_populate_pgd(struct sme_populate_pgd_data *ppd)
 {
 	pud_t *pud;
 	pmd_t *pmd;
@@ -192,7 +193,7 @@ static void __init sme_populate_pgd(stru
 		set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
 }
 
-static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
+static void __head __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
 {
 	while (ppd->vaddr < ppd->vaddr_end) {
 		sme_populate_pgd_large(ppd);
@@ -202,7 +203,7 @@ static void __init __sme_map_range_pmd(s
 	}
 }
 
-static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
+static void __head __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
 {
 	while (ppd->vaddr < ppd->vaddr_end) {
 		sme_populate_pgd(ppd);
@@ -212,7 +213,7 @@ static void __init __sme_map_range_pte(s
 	}
 }
 
-static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
+static void __head __sme_map_range(struct sme_populate_pgd_data *ppd,
 				   pmdval_t pmd_flags, pteval_t pte_flags)
 {
 	unsigned long vaddr_end;
@@ -236,22 +237,22 @@ static void __init __sme_map_range(struc
 	__sme_map_range_pte(ppd);
 }
 
-static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
+static void __head sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
 {
 	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
 }
 
-static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
+static void __head sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
 {
 	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
 }
 
-static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
+static void __head sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
 {
 	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
 }
 
-static unsigned long __init sme_pgtable_calc(unsigned long len)
+static unsigned long __head sme_pgtable_calc(unsigned long len)
 {
 	unsigned long entries = 0, tables = 0;
 
@@ -288,7 +289,7 @@ static unsigned long __init sme_pgtable_
 	return entries + tables;
 }
 
-void __init sme_encrypt_kernel(struct boot_params *bp)
+void __head sme_encrypt_kernel(struct boot_params *bp)
 {
 	unsigned long workarea_start, workarea_end, workarea_len;
 	unsigned long execute_start, execute_end, execute_len;
@@ -323,9 +324,8 @@ void __init sme_encrypt_kernel(struct bo
 	 *     memory from being cached.
 	 */
 
-	/* Physical addresses gives us the identity mapped virtual addresses */
-	kernel_start = __pa_symbol(_text);
-	kernel_end = ALIGN(__pa_symbol(_end), PMD_SIZE);
+	kernel_start = (unsigned long)RIP_REL_REF(_text);
+	kernel_end = ALIGN((unsigned long)RIP_REL_REF(_end), PMD_SIZE);
 	kernel_len = kernel_end - kernel_start;
 
 	initrd_start = 0;
@@ -343,14 +343,6 @@ void __init sme_encrypt_kernel(struct bo
 #endif
 
 	/*
-	 * We're running identity mapped, so we must obtain the address to the
-	 * SME encryption workarea using rip-relative addressing.
-	 */
-	asm ("lea sme_workarea(%%rip), %0"
-	     : "=r" (workarea_start)
-	     : "p" (sme_workarea));
-
-	/*
 	 * Calculate required number of workarea bytes needed:
 	 *   executable encryption area size:
 	 *     stack page (PAGE_SIZE)
@@ -359,7 +351,7 @@ void __init sme_encrypt_kernel(struct bo
 	 *   pagetable structures for the encryption of the kernel
 	 *   pagetable structures for workarea (in case not currently mapped)
 	 */
-	execute_start = workarea_start;
+	execute_start = workarea_start = (unsigned long)RIP_REL_REF(sme_workarea);
 	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;
 	execute_len = execute_end - execute_start;
 
@@ -502,7 +494,7 @@ void __init sme_encrypt_kernel(struct bo
 	native_write_cr3(__native_read_cr3());
 }
 
-void __init sme_enable(struct boot_params *bp)
+void __head sme_enable(struct boot_params *bp)
 {
 	const char *cmdline_ptr, *cmdline_arg, *cmdline_on;
 	unsigned int eax, ebx, ecx, edx;


Patches currently in stable-queue which might be from kroah.com@xxxxxxxxxxxxxxx are

queue-6.1/x86-boot-drop-pe-coff-.reloc-section.patch
queue-6.1/x86-sme-move-early-sme-kernel-encryption-handling-into-.head.text.patch
queue-6.1/x86-boot-omit-compression-buffer-from-pe-coff-image-memory-footprint.patch
queue-6.1/x86-mm-remove-p-d_page_mask-and-p-d_page_size-macros.patch
queue-6.1/x86-efi-drop-alignment-flags-from-pe-section-headers.patch
queue-6.1/x86-boot-increase-section-and-file-alignment-to-4k-512.patch
queue-6.1/x86-head-64-add-missing-__head-annotation-to-startup_64_load_idt.patch
queue-6.1/x86-boot-define-setup-size-in-linker-script.patch
queue-6.1/x86-boot-split-off-pe-coff-.data-section.patch
queue-6.1/x86-boot-derive-file-size-from-_edata-symbol.patch
queue-6.1/x86-boot-construct-pe-coff-.text-section-from-assembler.patch
queue-6.1/x86-boot-grab-kernel_info-offset-from-zoffset-header-directly.patch
queue-6.1/x86-sev-move-early-startup-code-into-.head.text-section.patch
queue-6.1/x86-boot-remove-the-bugger-off-message.patch
queue-6.1/x86-head-64-move-the-__head-definition-to-asm-init.h.patch
queue-6.1/x86-boot-drop-references-to-startup_64.patch
queue-6.1/x86-efistub-use-1-1-file-memory-mapping-for-pe-coff-.compat-section.patch
queue-6.1/x86-efi-drop-efi-stub-.bss-from-.data-section.patch
queue-6.1/x86-efi-disregard-setup-header-of-loaded-image.patch
queue-6.1/x86-efistub-remap-kernel-text-read-only-before-dropping-nx-attribute.patch
queue-6.1/x86-efistub-reinstate-soft-limit-for-initrd-loading.patch
queue-6.1/x86-boot-set-efi-handover-offset-directly-in-header-asm.patch
queue-6.1/x86-boot-drop-redundant-code-setting-the-root-device.patch



