Commit-ID:  4e237903f95db585b976e7311de2bfdaaf0f6e31
Gitweb:     http://git.kernel.org/tip/4e237903f95db585b976e7311de2bfdaaf0f6e31
Author:     Tom Lendacky <thomas.lendacky at amd.com>
AuthorDate: Fri, 28 Jul 2017 11:01:16 -0500
Committer:  Ingo Molnar <mingo at kernel.org>
CommitDate: Sun, 30 Jul 2017 12:09:12 +0200

x86/mm, kexec: Fix memory corruption with SME on successive kexecs

After issuing successive kexecs it was found that the SHA hash failed
verification when booting the kexec'd kernel. When SME is enabled, the
change from using pages that were marked encrypted to now being marked
as not encrypted (through new identity mapped page tables) results in
memory corruption if there are any cache entries for the previously
encrypted pages. This is because separate cache entries can exist for
the same physical location, tagged both with and without the
encryption bit.

To prevent this, issue a wbinvd if SME is active before copying the
pages from the source location to the destination location, to clear
any possible cache entry conflicts.

Signed-off-by: Tom Lendacky <thomas.lendacky at amd.com>
Cc: <kexec at lists.infradead.org>
Cc: Andy Lutomirski <luto at kernel.org>
Cc: Borislav Petkov <bp at alien8.de>
Cc: Brijesh Singh <brijesh.singh at amd.com>
Cc: Dave Young <dyoung at redhat.com>
Cc: Linus Torvalds <torvalds at linux-foundation.org>
Cc: Peter Zijlstra <peterz at infradead.org>
Cc: Thomas Gleixner <tglx at linutronix.de>
Link: http://lkml.kernel.org/r/e7fb8610af3a93e8f8ae6f214cd9249adc0df2b4.1501186516.git.thomas.lendacky at amd.com
Signed-off-by: Ingo Molnar <mingo at kernel.org>
---
 arch/x86/include/asm/kexec.h         |  3 ++-
 arch/x86/kernel/machine_kexec_64.c   |  3 ++-
 arch/x86/kernel/relocate_kernel_64.S | 14 ++++++++++++++
 3 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index e8183ac..942c1f4 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -147,7 +147,8 @@ unsigned long
 relocate_kernel(unsigned long indirection_page,
 		unsigned long page_list,
 		unsigned long start_address,
-		unsigned int preserve_context);
+		unsigned int preserve_context,
+		unsigned int sme_active);
 #endif
 
 #define ARCH_HAS_KIMAGE_ARCH
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 9cf8daa..1f790cf 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -335,7 +335,8 @@ void machine_kexec(struct kimage *image)
 	image->start = relocate_kernel((unsigned long)image->head,
 				       (unsigned long)page_list,
 				       image->start,
-				       image->preserve_context);
+				       image->preserve_context,
+				       sme_active());
 
 #ifdef CONFIG_KEXEC_JUMP
 	if (image->preserve_context)
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 98111b3..307d3ba 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -47,6 +47,7 @@ relocate_kernel:
 	 * %rsi page_list
 	 * %rdx start address
 	 * %rcx preserve_context
+	 * %r8  sme_active
 	 */
 
 	/* Save the CPU context, used for jumping back */
@@ -71,6 +72,9 @@ relocate_kernel:
 	pushq $0
 	popfq
 
+	/* Save SME active flag */
+	movq	%r8, %r12
+
 	/*
 	 * get physical address of control page now
 	 * this is impossible after page table switch
@@ -132,6 +136,16 @@ identity_mapped:
 	/* Flush the TLB (needed?) */
 	movq	%r9, %cr3
 
+	/*
+	 * If SME is active, there could be old encrypted cache line
+	 * entries that will conflict with the now unencrypted memory
+	 * used by kexec. Flush the caches before copying the kernel.
+	 */
+	testq	%r12, %r12
+	jz	1f
+	wbinvd
+1:
+
 	movq	%rcx, %r11
 	call	swap_pages
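
The ordering the patch enforces is easier to follow at the C level, so
here is a minimal sketch of that logic. It is illustrative only, not
part of the patch: the wbinvd() wrapper stands in for the bare WBINVD
instruction issued in relocate_kernel_64.S, and copy_kernel_pages() is
a hypothetical stand-in for the swap_pages assembly loop.

/*
 * Illustrative sketch only -- mirrors the control flow added to
 * relocate_kernel_64.S: flush all caches before copying when SME
 * is active.
 */
#include <stdbool.h>

static inline void wbinvd(void)
{
	/*
	 * WBINVD writes back and invalidates every cache line. It is
	 * a privileged instruction, so this can only run in ring 0.
	 */
	asm volatile("wbinvd" : : : "memory");
}

/* Hypothetical stand-in for the swap_pages assembly routine. */
static void copy_kernel_pages(void)
{
	/*
	 * Copy the source pages to their destinations through the
	 * new (unencrypted) identity mapping.
	 */
}

static void relocate(bool sme_active)
{
	/*
	 * With SME, the same physical line can be cached twice: once
	 * tagged encrypted and once not. Flush everything before
	 * copying so a stale encrypted line cannot be written back
	 * over the freshly copied, unencrypted data.
	 */
	if (sme_active)
		wbinvd();

	copy_kernel_pages();
}

In the patch itself this test is the testq %r12, %r12 / jz 1f / wbinvd
sequence, with %r12 holding the sme_active value saved from %r8 on
entry to relocate_kernel.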