From: Andrey Konovalov <andreyknvl@xxxxxxxxxx>

commit fdb54d96600aafe45951f549866cd6fc1af59954 upstream.

Commit 946fa0dbf2d8 ("mm/slub: extend redzone check to extra allocated
kmalloc space than requested") added precise kmalloc redzone poisoning
to the slub_debug functionality.

However, this commit didn't account for HW_TAGS KASAN fully initializing
the object via its built-in memory initialization feature. Even though
HW_TAGS KASAN memory initialization contains special memory initialization
handling for when slub_debug is enabled, it does not account for in-object
slub_debug redzones. As a result, HW_TAGS KASAN can overwrite these
redzones and cause false-positive slub_debug reports.

To fix the issue, avoid HW_TAGS KASAN memory initialization when
slub_debug is enabled altogether. Implement this by moving the
__slub_debug_enabled check to slab_post_alloc_hook. Common slab code
seems like a more appropriate place for a slub_debug check anyway.

Link: https://lkml.kernel.org/r/678ac92ab790dba9198f9ca14f405651b97c8502.1688561016.git.andreyknvl@xxxxxxxxxx
Fixes: 946fa0dbf2d8 ("mm/slub: extend redzone check to extra allocated kmalloc space than requested")
Signed-off-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
Reported-by: Will Deacon <will@xxxxxxxxxx>
Acked-by: Marco Elver <elver@xxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Andrey Ryabinin <ryabinin.a.a@xxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Feng Tang <feng.tang@xxxxxxxxx>
Cc: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: kasan-dev@xxxxxxxxxxxxxxxx
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: Peter Collingbourne <pcc@xxxxxxxxxx>
Cc: Roman Gushchin <roman.gushchin@xxxxxxxxx>
Cc: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: <stable@xxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 mm/kasan/kasan.h |   12 ------------
 mm/slab.h        |   16 ++++++++++++++--
 2 files changed, 14 insertions(+), 14 deletions(-)

--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -466,18 +466,6 @@ static inline void kasan_unpoison(const
 	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
 		return;
 
-	/*
-	 * Explicitly initialize the memory with the precise object size to
-	 * avoid overwriting the slab redzone. This disables initialization in
-	 * the arch code and may thus lead to performance penalty. This penalty
-	 * does not affect production builds, as slab redzones are not enabled
-	 * there.
-	 */
-	if (__slub_debug_enabled() &&
-	    init && ((unsigned long)size & KASAN_GRANULE_MASK)) {
-		init = false;
-		memzero_explicit((void *)addr, size);
-	}
 	size = round_up(size, KASAN_GRANULE_SIZE);
 
 	hw_set_mem_tag_range((void *)addr, size, tag, init);
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -684,6 +684,7 @@ static inline void slab_post_alloc_hook(
 					unsigned int orig_size)
 {
 	unsigned int zero_size = s->object_size;
+	bool kasan_init = init;
 	size_t i;
 
 	flags &= gfp_allowed_mask;
@@ -701,6 +702,17 @@ static inline void slab_post_alloc_hook(
 		zero_size = orig_size;
 
 	/*
+	 * When slub_debug is enabled, avoid memory initialization integrated
+	 * into KASAN and instead zero out the memory via the memset below with
+	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
+	 * cause false-positive reports. This does not lead to a performance
+	 * penalty on production builds, as slub_debug is not intended to be
+	 * enabled there.
+	 */
+	if (__slub_debug_enabled())
+		kasan_init = false;
+
+	/*
 	 * As memory initialization might be integrated into KASAN,
 	 * kasan_slab_alloc and initialization memset must be
 	 * kept together to avoid discrepancies in behavior.
@@ -708,8 +720,8 @@ static inline void slab_post_alloc_hook(
 	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
 	 */
 	for (i = 0; i < size; i++) {
-		p[i] = kasan_slab_alloc(s, p[i], flags, init);
-		if (p[i] && init && !kasan_has_integrated_init())
+		p[i] = kasan_slab_alloc(s, p[i], flags, kasan_init);
+		if (p[i] && init && (!kasan_init || !kasan_has_integrated_init()))
 			memset(p[i], 0, zero_size);
 		kmemleak_alloc_recursive(p[i], s->object_size, 1,
 					 s->flags, flags);