This reverts commit ac801e7e252c5588325e3c983c7d4167fc68c024.

The patch in question was picked to -mm from the KMSAN v6 patch series
(https://lore.kernel.org/linux-mm/20220905122452.2258262-1-glider@xxxxxxxxxx/)
and sneaked into mainline despite its removal from the v7 series
(https://lore.kernel.org/linux-mm/20220915150417.722975-1-glider@xxxxxxxxxx/).

Currently KMSAN does not warn about origin chains hitting the maximum
depth, so keeping @tlb poisoned won't result in any inconveniences.

Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Eric Biggers <ebiggers@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Marco Elver <elver@xxxxxxxxxx>
Cc: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Signed-off-by: Alexander Potapenko <glider@xxxxxxxxxx>
---
 mm/mmu_gather.c | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index add4244e5790d..a71924bd38c0d 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -1,7 +1,6 @@
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/kernel.h>
-#include <linux/kmsan-checks.h>
 #include <linux/mmdebug.h>
 #include <linux/mm_types.h>
 #include <linux/mm_inline.h>
@@ -266,15 +265,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 			     bool fullmm)
 {
-	/*
-	 * struct mmu_gather contains 7 1-bit fields packed into a 32-bit
-	 * unsigned int value. The remaining 25 bits remain uninitialized
-	 * and are never used, but KMSAN updates the origin for them in
-	 * zap_pXX_range() in mm/memory.c, thus creating very long origin
-	 * chains. This is technically correct, but consumes too much memory.
-	 * Unpoisoning the whole structure will prevent creating such chains.
-	 */
-	kmsan_unpoison_memory(tlb, sizeof(*tlb));
 	tlb->mm = mm;
 	tlb->fullmm = fullmm;
-- 
2.38.1.431.g37b22c650d-goog
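
[Editor's note, not part of the patch] For readers unfamiliar with the layout
the reverted comment refers to, the minimal userspace sketch below models the
bit-field portion of struct mmu_gather as that comment describes it: seven
1-bit fields sharing a single 32-bit unsigned int, with the remaining 25 bits
left as padding that the kernel never writes (which is why KMSAN kept tracking
origins for them). Field names are modeled on the upstream structure but the
example itself is illustrative only; it does not call any KMSAN API.

	/*
	 * Illustrative sketch: 1-bit fields packed into one 32-bit unit.
	 * The seven flags occupy 7 bits; the other 25 bits are padding.
	 */
	#include <stdio.h>

	struct gather_flags {
		unsigned int fullmm		: 1;
		unsigned int need_flush_all	: 1;
		unsigned int freed_tables	: 1;
		unsigned int cleared_ptes	: 1;
		unsigned int cleared_pmds	: 1;
		unsigned int cleared_puds	: 1;
		unsigned int cleared_p4ds	: 1;
		/* 25 remaining bits of the unsigned int are never written. */
	};

	int main(void)
	{
		/* All seven flags share one 32-bit storage unit, so size is 4. */
		printf("sizeof(struct gather_flags) = %zu\n",
		       sizeof(struct gather_flags));
		return 0;
	}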