[PATCH 3/3] x86/kasan: support KASAN_VMALLOC

When KASAN directly allocates memory to back the vmalloc space, don't
map the early shadow page over the corresponding shadow region.
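
For context, generic KASAN maps each 8 bytes of memory to one shadow
byte; a sketch of the translation, mirroring the helper in
include/linux/kasan.h (KASAN_SHADOW_OFFSET comes from the kernel
config):

	/* Each 8-byte granule of memory maps to one shadow byte. */
	static inline void *kasan_mem_to_shadow(const void *addr)
	{
		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}

The vmalloc shadow region is this translation applied to
[VMALLOC_START, VMALLOC_END); it is that range which now gets real,
dynamically allocated backing pages instead of the early shadow.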

Not mapping the early shadow page over the whole shadow space leaves
some pgds unpopulated at boot. Allow the vmalloc fault handler to
fault in the vmalloc shadow mappings as needed.
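
The fault-in path amounts to synchronising the missing top-level
entry from init_mm, along the lines of the existing vmalloc_fault()
logic (simplified sketch; 5-level paging checks and error handling
omitted):

	pgd_t *pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
	pgd_t *pgd_k = pgd_offset_k(address);	/* reference entry in init_mm */

	if (!pgd_present(*pgd_k))
		return -1;	/* init_mm has nothing here either: real fault */

	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);	/* copy entry; lower levels are shared */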

Signed-off-by: Daniel Axtens <dja@xxxxxxxxxx>
---
 arch/x86/Kconfig            |  1 +
 arch/x86/mm/fault.c         | 13 +++++++++++++
 arch/x86/mm/kasan_init_64.c | 10 ++++++++++
 3 files changed, 24 insertions(+)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 222855cc0158..40562cc3771f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -134,6 +134,7 @@ config X86
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN			if X86_64
+	select HAVE_ARCH_KASAN_VMALLOC		if X86_64
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS		if MMU
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if MMU && COMPAT
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 6c46095cd0d9..d722230121c3 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -340,8 +340,21 @@ static noinline int vmalloc_fault(unsigned long address)
 	pte_t *pte;
 
 	/* Make sure we are in vmalloc area: */
+#ifndef CONFIG_KASAN_VMALLOC
 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
 		return -1;
+#else
+	/*
+	 * Some of the shadow mappings for the vmalloc area live outside the
+	 * pgds populated by kasan_init(). They are created dynamically, so
+	 * we may need to fault them in.
+	 *
+	 * You can observe this with test_vmalloc's align_shift_alloc_test.
+	 */
+	if (!((address >= VMALLOC_START && address < VMALLOC_END) ||
+	      (address >= KASAN_SHADOW_START && address < KASAN_SHADOW_END)))
+		return -1;
+#endif
 
 	/*
 	 * Copy kernel mappings over when needed. This can also
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 296da58f3013..e2fe1c1b805c 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -352,9 +352,19 @@ void __init kasan_init(void)
 	shadow_cpu_entry_end = (void *)round_up(
 			(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);
 
+	/*
+	 * If we're in full vmalloc mode, don't back vmalloc space with early
+	 * shadow pages.
+	 */
+#ifdef CONFIG_KASAN_VMALLOC
+	kasan_populate_early_shadow(
+		kasan_mem_to_shadow((void *)VMALLOC_END + 1),
+		shadow_cpu_entry_begin);
+#else
 	kasan_populate_early_shadow(
 		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
 		shadow_cpu_entry_begin);
+#endif
 
 	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
 			      (unsigned long)shadow_cpu_entry_end, 0);
-- 
2.20.1