From: Roman Bobniev <roman.bobniev@xxxxxxxxxxxxxx> When kmemleak checking is enabled and CONFIG_SLUB_DEBUG is disabled, the kmemleak code for small block allocation is disabled. This results in false kmemleak errors when memory is freed. Move the kmemleak code for small block allocation out from under CONFIG_SLUB_DEBUG. Signed-off-by: Roman Bobniev <roman.bobniev@xxxxxxxxxxxxxx> Signed-off-by: Frank Rowand <frank.rowand@xxxxxxxxxxxxxx> --- mm/slub.c | 6 3 + 3 - 0 ! 1 file changed, 3 insertions(+), 3 deletions(-) Index: b/mm/slub.c =================================================================== --- a/mm/slub.c +++ b/mm/slub.c @@ -947,13 +947,10 @@ static inline void slab_post_alloc_hook( { flags &= gfp_allowed_mask; kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); - kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags); } static inline void slab_free_hook(struct kmem_cache *s, void *x) { - kmemleak_free_recursive(x, s->flags); - /* * Trouble is that we may no longer disable interupts in the fast path * So in order to make the debug calls that expect irqs to be @@ -2418,6 +2415,8 @@ redo: memset(object, 0, s->object_size); slab_post_alloc_hook(s, gfpflags, object); + kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, + gfpflags & gfp_allowed_mask); return object; } @@ -2614,6 +2613,7 @@ static __always_inline void slab_free(st struct kmem_cache_cpu *c; unsigned long tid; + kmemleak_free_recursive(x, s->flags); slab_free_hook(s, x); redo: -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@xxxxxxxxx"> email@xxxxxxxxx </a>