Avoid false KMSAN negatives with SLUB_DEBUG by allowing
kmsan_slab_free() to poison the freed memory, and by preventing
init_object() from unpoisoning new allocations.

Signed-off-by: Ilya Leoshkevich <iii@xxxxxxxxxxxxx>
---
 mm/kmsan/hooks.c | 2 +-
 mm/slub.c        | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
index 7b5814412e9f..7a30274b893c 100644
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
@@ -76,7 +76,7 @@ void kmsan_slab_free(struct kmem_cache *s, void *object)
 		return;
 
 	/* RCU slabs could be legally used after free within the RCU period */
-	if (unlikely(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)))
+	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
 		return;
 	/*
 	 * If there's a constructor, freed memory must remain in the same state
diff --git a/mm/slub.c b/mm/slub.c
index 63d281dfacdb..8d9aa4d7cb7e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1024,7 +1024,8 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 }
 
-static void init_object(struct kmem_cache *s, void *object, u8 val)
+__no_sanitize_memory static void
+init_object(struct kmem_cache *s, void *object, u8 val)
 {
 	u8 *p = kasan_reset_tag(object);
 	unsigned int poison_size = s->object_size;
-- 
2.41.0