KMSAN assumes that the shadow and origin pages for every allocated page are
accessible. For pages in the vmalloc region those metadata pages reside in
[VMALLOC_END, VMALLOC_META_END), therefore we must sync a larger memory
region.

Signed-off-by: Alexander Potapenko <glider@xxxxxxxxxx>
To: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxx>
Cc: Vegard Nossum <vegard.nossum@xxxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: linux-mm@xxxxxxxxx

---
Change-Id: I0d54855489870ef1180b37fe2120b601da464bf7
---
 arch/x86/mm/fault.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9ceacd1156db..d582337ba45d 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -320,7 +320,17 @@ static void dump_pagetable(unsigned long address)
 
 void vmalloc_sync_all(void)
 {
+#ifdef CONFIG_KMSAN
+	/*
+	 * For KMSAN, make sure metadata pages for vmalloc area and modules are
+	 * also synced.
+	 */
+	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_META_END);
+	sync_global_pgds(MODULES_SHADOW_START & PGDIR_MASK,
+			 MODULES_ORIGIN_END);
+#else
 	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
+#endif
 }
 
 /*
@@ -337,7 +347,17 @@ static noinline int vmalloc_fault(unsigned long address)
 	pte_t *pte;
 
 	/* Make sure we are in vmalloc area: */
+#ifdef CONFIG_KMSAN
+	/*
+	 * For KMSAN, make sure metadata pages for vmalloc area and modules are
+	 * also synced.
+	 */
+	if (!(address >= VMALLOC_START && address < VMALLOC_META_END) &&
+	    !(address >= MODULES_SHADOW_START &&
+	      address < MODULES_ORIGIN_END))
+#else
 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
+#endif
 		return -1;
 
 	/*
-- 
2.24.0.rc0.303.g954a862665-goog