KMSAN assumes shadow and origin pages for every allocated page are
accessible. For pages in the vmalloc region, those metadata pages reside
in [VMALLOC_END, VMALLOC_META_END), therefore we must sync a bigger
memory region.

Signed-off-by: Alexander Potapenko <glider@xxxxxxxxxx>
To: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxx>
Cc: Vegard Nossum <vegard.nossum@xxxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Marco Elver <elver@xxxxxxxxxx>
Cc: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Cc: linux-mm@xxxxxxxxx

---
Change-Id: I0d54855489870ef1180b37fe2120b601da464bf7
---
 arch/x86/mm/fault.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index a51df516b87bf..d22e373fa2124 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -331,11 +331,21 @@ static void dump_pagetable(unsigned long address)
 
 void vmalloc_sync_mappings(void)
 {
+#ifndef CONFIG_KMSAN
 	/*
 	 * 64-bit mappings might allocate new p4d/pud pages
 	 * that need to be propagated to all tasks' PGDs.
 	 */
 	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
+#else
+	/*
+	 * For KMSAN, make sure metadata pages for vmalloc area and modules are
+	 * also synced.
+	 */
+	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_META_END);
+	sync_global_pgds(MODULES_SHADOW_START & PGDIR_MASK,
+			 MODULES_ORIGIN_END);
+#endif
 }
 
 void vmalloc_sync_unmappings(void)
@@ -360,7 +370,17 @@ static noinline int vmalloc_fault(unsigned long address)
 	pte_t *pte;
 
 	/* Make sure we are in vmalloc area: */
+#ifdef CONFIG_KMSAN
+	/*
+	 * For KMSAN, the fault may also hit metadata pages for the vmalloc
+	 * area and modules, which must be handled here as well.
+	 */
+	if (!(address >= VMALLOC_START && address < VMALLOC_META_END) &&
+	    !(address >= MODULES_SHADOW_START &&
+	      address < MODULES_ORIGIN_END))
+#else
 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
+#endif
 		return -1;
 
 	/*
-- 
2.25.1.696.g5e7596f4ac-goog
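
For readers unfamiliar with the KMSAN metadata layout, here is a small
standalone sketch of the range check this patch adds to vmalloc_fault().
It is a userspace model, not kernel code: the region boundaries below are
made-up placeholder values (the real ones come from the arch/x86 headers
in the KMSAN tree), chosen only to preserve the ordering
VMALLOC_START < VMALLOC_END <= VMALLOC_META_END <= MODULES_SHADOW_START
< MODULES_ORIGIN_END.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder boundaries; the real values live in arch/x86 headers. */
#define VMALLOC_START        0xffffc90000000000ULL
#define VMALLOC_END          0xffffe90000000000ULL
#define VMALLOC_META_END     0xfffffb0000000000ULL /* hypothetical */
#define MODULES_SHADOW_START 0xfffffb4000000000ULL /* hypothetical */
#define MODULES_ORIGIN_END   0xfffffbc000000000ULL /* hypothetical */

/*
 * Mirrors the CONFIG_KMSAN branch of the check added to vmalloc_fault():
 * a fault is considered handleable not only for [VMALLOC_START,
 * VMALLOC_END) but for the whole metadata range and the module
 * shadow/origin range as well.
 */
static bool fault_address_handled(uint64_t address)
{
	return (address >= VMALLOC_START && address < VMALLOC_META_END) ||
	       (address >= MODULES_SHADOW_START &&
		address < MODULES_ORIGIN_END);
}

int main(void)
{
	/*
	 * A shadow page just past VMALLOC_END: rejected by the old check,
	 * handled after this patch.
	 */
	uint64_t shadow_addr = VMALLOC_END + 0x1000;

	printf("vmalloc page:  %d\n", fault_address_handled(VMALLOC_START));
	printf("metadata page: %d\n", fault_address_handled(shadow_addr));
	return 0;
}

The same reasoning drives the vmalloc_sync_mappings() hunk: the synced
range is widened from VMALLOC_END to VMALLOC_META_END (plus the module
shadow/origin range) so that p4d/pud entries covering the metadata pages
propagate to every task's PGD.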