KMSAN is going to use 3/4 of the existing vmalloc space to hold its
metadata, so we lower VMALLOC_END to make sure vmalloc() doesn't
allocate past the first 1/4.

Signed-off-by: Alexander Potapenko <glider@xxxxxxxxxx>
To: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Vegard Nossum <vegard.nossum@xxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: linux-mm@xxxxxxxxx
---
Change-Id: Iaa5e8e0fc2aa66c956f937f5a1de6e5ef40d57cc
---
 arch/x86/include/asm/pgtable_64_types.h | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 52e5f5f2240d..586629e20436 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -139,7 +139,22 @@ extern unsigned int ptrs_per_p4d;
 # define VMEMMAP_START		__VMEMMAP_BASE_L4
 #endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */
 
+#ifndef CONFIG_KMSAN
 #define VMALLOC_END		(VMALLOC_START + (VMALLOC_SIZE_TB << 40) - 1)
+#else
+/*
+ * In KMSAN builds the vmalloc area is four times smaller, and the remaining
+ * 3/4 are used to keep the metadata for virtual pages.
+ */
+#define VMALLOC_QUARTER_SIZE	((VMALLOC_SIZE_TB << 40) >> 2)
+#define VMALLOC_END		(VMALLOC_START + VMALLOC_QUARTER_SIZE - 1)
+#define VMALLOC_SHADOW_OFFSET	VMALLOC_QUARTER_SIZE
+#define VMALLOC_ORIGIN_OFFSET	(VMALLOC_QUARTER_SIZE * 2)
+#define VMALLOC_META_END	(VMALLOC_END + VMALLOC_ORIGIN_OFFSET)
+#define MODULES_SHADOW_START	(VMALLOC_META_END + 1)
+#define MODULES_ORIGIN_START	(MODULES_SHADOW_START + MODULES_LEN)
+#define MODULES_ORIGIN_END	(MODULES_ORIGIN_START + MODULES_LEN)
+#endif
 
 #define MODULES_VADDR		(__START_KERNEL_map + KERNEL_IMAGE_SIZE)
 /* The module sections ends with the start of the fixmap */
-- 
2.23.0.866.gb869b98d4c-goog
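
For reference, the quartered layout these macros create can be sanity-checked
with a small standalone sketch (not part of the patch; the constant values are
the usual 4-level-paging ones, and the vmalloc_shadow()/vmalloc_origin()
helpers below are illustrative assumptions, not KMSAN's actual API). Every
vmalloc address keeps its shadow bytes one quarter above it and its origin
slots two quarters above it:

/* Standalone sketch of the VMALLOC_* arithmetic; assumes an LP64 host. */
#include <stdio.h>

/* Stand-ins for the kernel constants (4-level paging values assumed). */
#define VMALLOC_SIZE_TB		32UL
#define VMALLOC_START		0xffffc90000000000UL

#define VMALLOC_QUARTER_SIZE	((VMALLOC_SIZE_TB << 40) >> 2)
#define VMALLOC_END		(VMALLOC_START + VMALLOC_QUARTER_SIZE - 1)
#define VMALLOC_SHADOW_OFFSET	VMALLOC_QUARTER_SIZE
#define VMALLOC_ORIGIN_OFFSET	(VMALLOC_QUARTER_SIZE * 2)
#define VMALLOC_META_END	(VMALLOC_END + VMALLOC_ORIGIN_OFFSET)

/* Hypothetical helper: shadow for a vmalloc address is one quarter up. */
static unsigned long vmalloc_shadow(unsigned long addr)
{
	return addr + VMALLOC_SHADOW_OFFSET;
}

/* Hypothetical helper: origin for a vmalloc address is two quarters up. */
static unsigned long vmalloc_origin(unsigned long addr)
{
	return addr + VMALLOC_ORIGIN_OFFSET;
}

int main(void)
{
	unsigned long addr = VMALLOC_START + 0x1000;

	printf("data:            %#lx\n", addr);
	printf("shadow:          %#lx\n", vmalloc_shadow(addr));
	printf("origin:          %#lx\n", vmalloc_origin(addr));
	printf("vmalloc ends at  %#lx\n", VMALLOC_END);
	printf("metadata ends at %#lx\n", VMALLOC_META_END);
	return 0;
}

With the assumed constants this prints a data address in the first quarter and
shadow/origin addresses in the second and third quarters, matching the
1/4 + 3/4 split described in the commit message.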