This adds support to be able to either encrypt or decrypt data during
the early stages of booting the kernel. This does not change the memory
encryption attribute - it is used for ensuring that data present in
either an encrypted or un-encrypted memory area is in the proper state
(for example the initrd will have been loaded by the boot loader and
will not be encrypted, but the memory that it resides in is marked as
encrypted).

Signed-off-by: Tom Lendacky <thomas.lendacky@xxxxxxx>
---
 arch/x86/include/asm/mem_encrypt.h |   15 ++++++
 arch/x86/mm/mem_encrypt.c          |   89 ++++++++++++++++++++++++++++++++++++
 2 files changed, 104 insertions(+)

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 9f3e762..2785493 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -23,6 +23,11 @@ extern unsigned long sme_me_mask;
 
 u8 sme_get_me_loss(void);
 
+void __init sme_early_mem_enc(resource_size_t paddr,
+			      unsigned long size);
+void __init sme_early_mem_dec(resource_size_t paddr,
+			      unsigned long size);
+
 void __init sme_early_init(void);
 
 #define __sme_pa(x)		(__pa((x)) | sme_me_mask)
@@ -39,6 +44,16 @@ static inline u8 sme_get_me_loss(void)
 	return 0;
 }
 
+static inline void __init sme_early_mem_enc(resource_size_t paddr,
+					    unsigned long size)
+{
+}
+
+static inline void __init sme_early_mem_dec(resource_size_t paddr,
+					    unsigned long size)
+{
+}
+
 static inline void __init sme_early_init(void)
 {
 }
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 00eb705..5f19ede 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -14,6 +14,95 @@
 #include <linux/mm.h>
 
 #include <asm/mem_encrypt.h>
+#include <asm/tlbflush.h>
+#include <asm/fixmap.h>
+
+/* Buffer used for early in-place encryption by BSP, no locking needed */
+static char me_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
+
+void __init sme_early_mem_enc(resource_size_t paddr, unsigned long size)
+{
+	void *src, *dst;
+	size_t len;
+
+	if (!sme_me_mask)
+		return;
+
+	local_flush_tlb();
+	wbinvd();
+
+	/*
+	 * There are limited number of early mapping slots, so map (at most)
+	 * one page at time.
+	 */
+	while (size) {
+		len = min_t(size_t, sizeof(me_early_buffer), size);
+
+		/* Create a mapping for non-encrypted write-protected memory */
+		src = early_memremap_dec_wp(paddr, len);
+
+		/* Create a mapping for encrypted memory */
+		dst = early_memremap_enc(paddr, len);
+
+		/*
+		 * If a mapping can't be obtained to perform the encryption,
+		 * then encrypted access to that area will end up causing
+		 * a crash.
+		 */
+		BUG_ON(!src || !dst);
+
+		memcpy(me_early_buffer, src, len);
+		memcpy(dst, me_early_buffer, len);
+
+		early_memunmap(dst, len);
+		early_memunmap(src, len);
+
+		paddr += len;
+		size -= len;
+	}
+}
+
+void __init sme_early_mem_dec(resource_size_t paddr, unsigned long size)
+{
+	void *src, *dst;
+	size_t len;
+
+	if (!sme_me_mask)
+		return;
+
+	local_flush_tlb();
+	wbinvd();
+
+	/*
+	 * There are limited number of early mapping slots, so map (at most)
+	 * one page at time.
+	 */
+	while (size) {
+		len = min_t(size_t, sizeof(me_early_buffer), size);
+
+		/* Create a mapping for encrypted write-protected memory */
+		src = early_memremap_enc_wp(paddr, len);
+
+		/* Create a mapping for non-encrypted memory */
+		dst = early_memremap_dec(paddr, len);
+
+		/*
+		 * If a mapping can't be obtained to perform the decryption,
+		 * then un-encrypted access to that area will end up causing
+		 * a crash.
+		 */
+		BUG_ON(!src || !dst);
+
+		memcpy(me_early_buffer, src, len);
+		memcpy(dst, me_early_buffer, len);
+
+		early_memunmap(dst, len);
+		early_memunmap(src, len);
+
+		paddr += len;
+		size -= len;
+	}
+}
 
 void __init sme_early_init(void)
 {
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@xxxxxxxxx.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@xxxxxxxxx"> email@xxxxxxxxx </a>