In CONFIG_SLAB builds, init_on_free currently happens too late: heap
objects go into the heap quarantine while still containing their old
(dirty) data. Let's move the memory clearing before the
kasan_slab_free() call to fix that.

Signed-off-by: Alexander Popov <alex.popov@xxxxxxxxx>
---
 mm/slab.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 3160dff6fd76..5140203c5b76 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3414,6 +3414,9 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
 					 unsigned long caller)
 {
+	if (unlikely(slab_want_init_on_free(cachep)))
+		memset(objp, 0, cachep->object_size);
+
 	/* Put the object into the quarantine, don't touch it for now. */
 	if (kasan_slab_free(cachep, objp, _RET_IP_))
 		return;
@@ -3432,8 +3435,6 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
 	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
-	if (unlikely(slab_want_init_on_free(cachep)))
-		memset(objp, 0, cachep->object_size);
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, caller);
 	memcg_slab_free_hook(cachep, virt_to_head_page(objp), objp);
--
2.26.2
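
For reference, a sketch of how __cache_free() reads with this patch
applied. It is reconstructed from the hunks above rather than copied
from the tree, and everything after the kasan_slab_free() check is
elided:

	static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
						 unsigned long caller)
	{
		/*
		 * Zero the object before kasan_slab_free() can divert it
		 * into the quarantine, so a quarantined object no longer
		 * carries its old (dirty) contents.
		 */
		if (unlikely(slab_want_init_on_free(cachep)))
			memset(objp, 0, cachep->object_size);

		/* Put the object into the quarantine, don't touch it for now. */
		if (kasan_slab_free(cachep, objp, _RET_IP_))
			return;

		/* ... rest of __cache_free() unchanged ... */
	}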