This is a hack to reduce stackdepot pressure.

struct mmu_gather contains 7 1-bit fields packed into a 32-bit
unsigned int value. The remaining 25 bits remain uninitialized and are
never used, but KMSAN updates the origin for them in zap_pXX_range()
in mm/memory.c, thus creating very long origin chains. This is
technically correct, but consumes too much memory.

Unpoisoning the whole structure will prevent creating such chains.

Signed-off-by: Alexander Potapenko <glider@xxxxxxxxxx>
To: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Vegard Nossum <vegard.nossum@xxxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Marco Elver <elver@xxxxxxxxxx>
Cc: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Cc: linux-mm@xxxxxxxxx

---
v4:
 - removed a TODO, updated patch description

Change-Id: I22a201e7e4f67ed74f8129072f12e5351b26103a
---
 mm/mmu_gather.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index a3538cb2bcbee..d3d57c276e301 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -1,6 +1,7 @@
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/kernel.h>
+#include <linux/kmsan-checks.h>
 #include <linux/mmdebug.h>
 #include <linux/mm_types.h>
 #include <linux/pagemap.h>
@@ -264,6 +265,15 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 			unsigned long start, unsigned long end)
 {
+	/*
+	 * struct mmu_gather contains 7 1-bit fields packed into a 32-bit
+	 * unsigned int value. The remaining 25 bits remain uninitialized
+	 * and are never used, but KMSAN updates the origin for them in
+	 * zap_pXX_range() in mm/memory.c, thus creating very long origin
+	 * chains. This is technically correct, but consumes too much memory.
+	 * Unpoisoning the whole structure will prevent creating such chains.
+	 */
+	kmsan_unpoison_shadow(tlb, sizeof(*tlb));
 	tlb->mm = mm;

	/* Is it from 0 to ~0? */
-- 
2.25.1.696.g5e7596f4ac-goog
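
[Editor's note] For context, a rough sketch of the bit-packed flags the
description refers to. Field names are assumed from the mainline
definition of struct mmu_gather in include/asm-generic/tlb.h; the exact
set depends on kernel version and config, so treat this as illustrative
only, not the authoritative layout:

	/*
	 * Illustrative sketch only: seven 1-bit flags share a single
	 * 32-bit unsigned int, leaving 25 bits of padding that the
	 * kernel itself never reads or writes.
	 */
	struct mmu_gather {
		struct mm_struct	*mm;
		/* ... */
		unsigned int		fullmm : 1;
		unsigned int		need_flush_all : 1;
		unsigned int		freed_tables : 1;
		unsigned int		cleared_ptes : 1;
		unsigned int		cleared_pmds : 1;
		unsigned int		cleared_puds : 1;
		unsigned int		cleared_p4ds : 1;
		/* remaining 25 bits: uninitialized padding */
		/* ... */
	};

With the whole object unpoisoned once in tlb_gather_mmu(), KMSAN treats
that padding as initialized and no longer appends a new origin entry
each time the flags word is copied or updated in zap_pXX_range().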