This patch adds support to the early boot code to use Secure Memory
Encryption (SME). Support is added to update the early pagetables with
the memory encryption mask and to encrypt the kernel in place. The
routines that set the encryption mask and perform the encryption are
stub routines for now, with the full implementation to be added in a
later patch.

A new file, arch/x86/kernel/mem_encrypt_init.c, is introduced to avoid
adding #ifdefs within arch/x86/kernel/head_64.S and to allow
arch/x86/mm/mem_encrypt.c to be removed from the build if SME is not
configured. The mem_encrypt_init.c file will contain the necessary
#ifdefs to allow head_64.S to successfully build and call the SME
routines.

Signed-off-by: Tom Lendacky <thomas.lendacky@xxxxxxx>
---
 arch/x86/kernel/Makefile           |    2 +
 arch/x86/kernel/head_64.S          |   46 ++++++++++++++++++++++++++++++++-
 arch/x86/kernel/mem_encrypt_init.c |   50 ++++++++++++++++++++++++++++++++++++
 3 files changed, 96 insertions(+), 2 deletions(-)
 create mode 100644 arch/x86/kernel/mem_encrypt_init.c

diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index bdcdb3b..33af80a 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -140,4 +140,6 @@ ifeq ($(CONFIG_X86_64),y)
 	obj-$(CONFIG_PCI_MMCONFIG)	+= mmconf-fam10h_64.o
 	obj-y				+= vsmp_64.o
+
+	obj-y				+= mem_encrypt_init.o
 endif
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index b467b14..4f8201b 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -91,6 +91,23 @@ startup_64:
 	jnz	bad_address
 
 	/*
+	 * Enable Secure Memory Encryption (SME), if supported and enabled.
+	 * The real_mode_data address is in %rsi and that register can be
+	 * clobbered by the called function so be sure to save it.
+	 * Save the returned mask in %r12 for later use.
+	 */
+	push	%rsi
+	call	sme_enable
+	pop	%rsi
+	movq	%rax, %r12
+
+	/*
+	 * Add the memory encryption mask to %rbp to include it in the page
+	 * table fixups.
+	 */
+	addq	%r12, %rbp
+
+	/*
 	 * Fixup the physical addresses in the page table
 	 */
 	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
@@ -113,6 +130,7 @@ startup_64:
 	shrq	$PGDIR_SHIFT, %rax
 
 	leaq	(PAGE_SIZE + _KERNPG_TABLE)(%rbx), %rdx
+	addq	%r12, %rdx
 	movq	%rdx, 0(%rbx,%rax,8)
 	movq	%rdx, 8(%rbx,%rax,8)
 
@@ -129,6 +147,7 @@ startup_64:
 	movq	%rdi, %rax
 	shrq	$PMD_SHIFT, %rdi
 	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
+	addq	%r12, %rax
 	leaq	(_end - 1)(%rip), %rcx
 	shrq	$PMD_SHIFT, %rcx
 	subq	%rdi, %rcx
@@ -162,11 +181,25 @@ startup_64:
 	cmp	%r8, %rdi
 	jne	1b
 
-	/* Fixup phys_base */
+	/*
+	 * Fixup phys_base - remove the memory encryption mask from %rbp
+	 * to obtain the true physical address.
+	 */
+	subq	%r12, %rbp
 	addq	%rbp, phys_base(%rip)
 
+	/*
+	 * Encrypt the kernel if SME is active.
+	 * The real_mode_data address is in %rsi and that register can be
+	 * clobbered by the called function so be sure to save it.
+	 */
+	push	%rsi
+	call	sme_encrypt_kernel
+	pop	%rsi
+
 .Lskip_fixup:
 	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
+	addq	%r12, %rax
 	jmp	1f
 ENTRY(secondary_startup_64)
 	/*
@@ -186,7 +219,16 @@ ENTRY(secondary_startup_64)
 	/* Sanitize CPU configuration */
 	call	verify_cpu
 
-	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
+	/*
+	 * Get the SME encryption mask.
+	 * The real_mode_data address is in %rsi and that register can be
+	 * clobbered by the called function so be sure to save it.
+	 */
+	push	%rsi
+	call	sme_get_me_mask
+	pop	%rsi
+
+	addq	$(init_level4_pgt - __START_KERNEL_map), %rax
 1:
 
 	/* Enable PAE mode and PGE */
diff --git a/arch/x86/kernel/mem_encrypt_init.c b/arch/x86/kernel/mem_encrypt_init.c
new file mode 100644
index 0000000..25af15d
--- /dev/null
+++ b/arch/x86/kernel/mem_encrypt_init.c
@@ -0,0 +1,50 @@
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@xxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+
+#include <linux/mem_encrypt.h>
+
+void __init sme_encrypt_kernel(void)
+{
+}
+
+unsigned long __init sme_get_me_mask(void)
+{
+	return sme_me_mask;
+}
+
+unsigned long __init sme_enable(void)
+{
+	return sme_me_mask;
+}
+
+#else	/* !CONFIG_AMD_MEM_ENCRYPT */
+
+void __init sme_encrypt_kernel(void)
+{
+}
+
+unsigned long __init sme_get_me_mask(void)
+{
+	return 0;
+}
+
+unsigned long __init sme_enable(void)
+{
+	return 0;
+}
+
+#endif	/* CONFIG_AMD_MEM_ENCRYPT */