On 4/14/22 10:57, Hyeonggon Yoo wrote:
> Move tracepoints into kmalloc_large_node() and add missing flag fix code.
>
> Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>

Hm, so there's a problem with the tracepoint's caller. kmalloc_large() is
only called from kmalloc(), which is inline, so the callsite of kmalloc()
calls kmalloc_large() directly. Thus when kmalloc_large() does
"trace_kmalloc(_RET_IP_, ...)", the _RET_IP_ is the callsite of kmalloc(),
which is what we want. But with kmalloc_large_node()...

> ---
>  mm/slab_common.c |  6 ++++++
>  mm/slub.c        | 22 ++++------------------
>  2 files changed, 10 insertions(+), 18 deletions(-)
>
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index e72089515030..cf17be8cd9ad 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -955,6 +955,9 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node)
>  	void *ptr = NULL;
>  	unsigned int order = get_order(size);
>
> +	if (unlikely(flags & GFP_SLAB_BUG_MASK))
> +		flags = kmalloc_fix_flags(flags);
> +
>  	flags |= __GFP_COMP;
>  	page = alloc_pages_node(node, flags, order);
>  	if (page) {
> @@ -966,6 +969,9 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node)
>  	ptr = kasan_kmalloc_large(ptr, size, flags);
>  	/* As ptr might get tagged, call kmemleak hook after KASAN. */
>  	kmemleak_alloc(ptr, size, 1, flags);
> +	trace_kmalloc_node(_RET_IP_, ptr,
> +			   size, PAGE_SIZE << order,
> +			   flags, node);

... the _RET_IP_ here would be __kmalloc_node(), which is not useful.

>
>  	return ptr;
>  }
> diff --git a/mm/slub.c b/mm/slub.c
> index 640712706f2b..f10a892f1772 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -4396,15 +4396,8 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
>  	struct kmem_cache *s;
>  	void *ret;
>
> -	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
> -		ret = kmalloc_large_node(size, flags, node);
> -
> -		trace_kmalloc_node(_RET_IP_, ret,
> -				   size, PAGE_SIZE << get_order(size),
> -				   flags, node);

Here it was OK, because __kmalloc_node() is called from an inline wrapper
in slab.h, so _RET_IP_ is still the real callsite.

> -
> -		return ret;
> -	}
> +	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
> +		return kmalloc_large_node(size, flags, node);
>
>  	s = kmalloc_slab(size, flags);
>
> @@ -4861,15 +4854,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
>  	struct kmem_cache *s;
>  	void *ret;
>
> -	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
> -		ret = kmalloc_large_node(size, gfpflags, node);
> -
> -		trace_kmalloc_node(caller, ret,
> -				   size, PAGE_SIZE << get_order(size),
> -				   gfpflags, node);
> -
> -		return ret;
> -	}
> +	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
> +		return kmalloc_large_node(size, gfpflags, node);

And here it even drops the 'caller'.

>
>  	s = kmalloc_slab(size, gfpflags);
>
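
One possible way out would be to pass the caller address down explicitly
and keep kmalloc_large_node() as a thin wrapper. Just a sketch, and the
__kmalloc_large_node() helper name below is hypothetical, not something
this patch adds:

/*
 * Sketch only: pass the real callsite down so the tracepoint in the
 * large-kmalloc path can report it.  __kmalloc_large_node() is a
 * hypothetical helper, not part of this patch.
 */
void *__kmalloc_large_node(size_t size, gfp_t flags, int node,
                           unsigned long caller)
{
        struct page *page;
        void *ptr = NULL;
        unsigned int order = get_order(size);

        if (unlikely(flags & GFP_SLAB_BUG_MASK))
                flags = kmalloc_fix_flags(flags);

        flags |= __GFP_COMP;
        page = alloc_pages_node(node, flags, order);
        if (page) {
                ptr = page_address(page);
                /* ... vmstat/memcg accounting as in kmalloc_large_node() ... */
        }

        ptr = kasan_kmalloc_large(ptr, size, flags);
        /* As ptr might get tagged, call kmemleak hook after KASAN. */
        kmemleak_alloc(ptr, size, 1, flags);
        /* 'caller' is whatever the outer entry point recorded, not __kmalloc_node() */
        trace_kmalloc_node(caller, ptr, size, PAGE_SIZE << order, flags, node);

        return ptr;
}

void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
        /* _RET_IP_ is meaningful when called directly from an inline wrapper in slab.h */
        return __kmalloc_large_node(size, flags, node, _RET_IP_);
}

__kmalloc_node() would then call __kmalloc_large_node(size, flags, node,
_RET_IP_) and __kmalloc_node_track_caller() would pass its 'caller'
argument, so both paths keep reporting the real callsite (the helper would
need a declaration in mm/slab.h for the slub.c callers).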