To reduce the overhead of printing the tracepoint name, rename
trace_kmem_cache_alloc_node to trace_kmem_cache_alloc.

Suggested-by: Vlastimil Babka <vbabka@xxxxxxx>
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
---
 include/trace/events/kmem.h |  4 ++--
 mm/slab.c                   |  8 ++++----
 mm/slab_common.c            |  6 +++---
 mm/slob.c                   | 22 +++++++++++-----------
 mm/slub.c                   | 16 ++++++++--------
 5 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index ca67ba5fd76a..58edb2e3e5a4 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -9,7 +9,7 @@
 #include <linux/tracepoint.h>
 #include <trace/events/mmflags.h>
 
-DECLARE_EVENT_CLASS(kmem_alloc_node,
+DECLARE_EVENT_CLASS(kmem_alloc,
 
 	TP_PROTO(const char *name,
 		 unsigned long call_site,
@@ -51,7 +51,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 		__entry->node)
 );
 
-DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
+DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
 
 	TP_PROTO(const char *name, unsigned long call_site,
 		 const void *ptr, size_t bytes_req, size_t bytes_alloc,
diff --git a/mm/slab.c b/mm/slab.c
index b9959a6b5c48..424168b96790 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3448,7 +3448,7 @@ void *__kmem_cache_alloc_node(struct kmem_cache *cachep, struct list_lru *lru,
 	void *ret = slab_alloc_node(cachep, lru, flags, nodeid,
 				    cachep->object_size, caller);
 
-	trace_kmem_cache_alloc_node(cachep->name, caller, ret,
+	trace_kmem_cache_alloc(cachep->name, caller, ret,
 				    cachep->object_size, cachep->size,
 				    flags, nodeid);
 
@@ -3519,9 +3519,9 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 	ret = slab_alloc_node(cachep, NULL, flags, nodeid, size, _RET_IP_);
 
 	ret = kasan_kmalloc(cachep, ret, size, flags);
-	trace_kmem_cache_alloc_node(cachep->name, _RET_IP_, ret,
-				    size, cachep->size,
-				    flags, nodeid);
+	trace_kmem_cache_alloc(cachep->name, _RET_IP_, ret,
+			       size, cachep->size,
+			       flags, nodeid);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 3d1569085c54..3cd5d7a47ec7 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -957,8 +957,8 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	ptr = kasan_kmalloc_large(ptr, size, flags);
 	/* As ptr might get tagged, call kmemleak hook after KASAN. */
 	kmemleak_alloc(ptr, size, 1, flags);
-	trace_kmem_cache_alloc_node(KMALLOC_LARGE_NAME, _RET_IP_, ptr, size,
-				    PAGE_SIZE << order, flags, node);
+	trace_kmem_cache_alloc(KMALLOC_LARGE_NAME, _RET_IP_, ptr, size,
+			       PAGE_SIZE << order, flags, node);
 	return ptr;
 }
 EXPORT_SYMBOL(kmalloc_large_node);
@@ -1291,7 +1291,7 @@ size_t ksize(const void *objp)
 EXPORT_SYMBOL(ksize);
 
 /* Tracepoints definitions. */
-EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
+EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
 
 int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
diff --git a/mm/slob.c b/mm/slob.c
index b1f291128e94..1bb4c577b908 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -505,8 +505,8 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 		*m = size;
 		ret = (void *)m + minalign;
 
-		trace_kmem_cache_alloc_node(KMALLOC_NAME, caller, ret,
-					    size, size + minalign, gfp, node);
+		trace_kmem_cache_alloc(KMALLOC_NAME, caller, ret,
+				       size, size + minalign, gfp, node);
 	} else {
 		unsigned int order = get_order(size);
 
@@ -514,9 +514,9 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 			gfp |= __GFP_COMP;
 		ret = slob_new_pages(gfp, order, node);
 
-		trace_kmem_cache_alloc_node(KMALLOC_LARGE_NAME, caller,
-					    ret, size, PAGE_SIZE << order,
-					    gfp, node);
+		trace_kmem_cache_alloc(KMALLOC_LARGE_NAME, caller,
+				       ret, size, PAGE_SIZE << order,
+				       gfp, node);
 	}
 
 	kmemleak_alloc(ret, size, 1, gfp);
@@ -599,14 +599,14 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node,
 
 	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node, 0);
-		trace_kmem_cache_alloc_node(c->name, caller, b, c->object_size,
-					    SLOB_UNITS(c->size) * SLOB_UNIT,
-					    flags, node);
+		trace_kmem_cache_alloc(c->name, caller, b, c->object_size,
+				       SLOB_UNITS(c->size) * SLOB_UNIT,
+				       flags, node);
 	} else {
 		b = slob_new_pages(flags, get_order(c->size), node);
-		trace_kmem_cache_alloc_node(c->name, caller, b, c->object_size,
-					    PAGE_SIZE << get_order(c->size),
-					    flags, node);
+		trace_kmem_cache_alloc(c->name, caller, b, c->object_size,
+				       PAGE_SIZE << get_order(c->size),
+				       flags, node);
 	}
 
 	if (b && c->ctor) {
diff --git a/mm/slub.c b/mm/slub.c
index d53e9e22d67e..a088d4fa1062 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3215,8 +3215,8 @@ void *__kmem_cache_alloc_node(struct kmem_cache *s, struct list_lru *lru, gfp_t
 {
 	void *ret = slab_alloc_node(s, lru, gfpflags, node, caller, s->object_size);
 
-	trace_kmem_cache_alloc_node(s->name, caller, ret, s->object_size,
-				    s->size, gfpflags, node);
+	trace_kmem_cache_alloc(s->name, caller, ret, s->object_size,
+			       s->size, gfpflags, node);
 
 	return ret;
 }
@@ -3229,8 +3229,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
 
-	trace_kmem_cache_alloc_node(s->name, _RET_IP_, ret,
-				    size, s->size, gfpflags, node);
+	trace_kmem_cache_alloc(s->name, _RET_IP_, ret,
+			       size, s->size, gfpflags, node);
 
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
@@ -4352,8 +4352,8 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 
 	ret = slab_alloc_node(s, NULL, flags, node, _RET_IP_, size);
 
-	trace_kmem_cache_alloc_node(s->name, _RET_IP_, ret, size,
-				    s->size, flags, node);
+	trace_kmem_cache_alloc(s->name, _RET_IP_, ret, size,
+			       s->size, flags, node);
 
 	ret = kasan_kmalloc(s, ret, size, flags);
 
@@ -4815,8 +4815,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	ret = slab_alloc_node(s, NULL, gfpflags, node, caller, size);
 
 	/* Honor the call site pointer we received. */
-	trace_kmem_cache_alloc_node(s->name, caller, ret, size,
-				    s->size, gfpflags, node);
+	trace_kmem_cache_alloc(s->name, caller, ret, size,
+			       s->size, gfpflags, node);
 
 	return ret;
 }
-- 
2.32.0
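
Because EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc) keeps the renamed
tracepoint visible to modules, a module can still attach a probe to it via
the register_trace_kmem_cache_alloc() helper that the tracepoint machinery
generates from the event name. Below is a minimal sketch (not part of the
patch) of such a probe module; the probe signature mirrors the TP_PROTO of
the kmem_alloc event class above, and the probe/module names are purely
illustrative:

/*
 * Hypothetical example module: attach a probe to the renamed
 * kmem_cache_alloc tracepoint. register_trace_<name>() passes the
 * private data pointer first, then the TP_PROTO arguments.
 */
#include <linux/module.h>
#include <trace/events/kmem.h>

/* Probe arguments follow TP_PROTO of the kmem_alloc event class. */
static void kmem_alloc_probe(void *data, const char *name,
			     unsigned long call_site, const void *ptr,
			     size_t bytes_req, size_t bytes_alloc,
			     gfp_t gfp_flags, int node)
{
	pr_debug("%s: %zu bytes requested, %zu allocated on node %d\n",
		 name, bytes_req, bytes_alloc, node);
}

static int __init kmem_alloc_probe_init(void)
{
	/* Succeeds only because the tracepoint symbol is exported. */
	return register_trace_kmem_cache_alloc(kmem_alloc_probe, NULL);
}

static void __exit kmem_alloc_probe_exit(void)
{
	unregister_trace_kmem_cache_alloc(kmem_alloc_probe, NULL);
	/* Wait for in-flight probe calls before the module text goes away. */
	tracepoint_synchronize_unregister();
}

module_init(kmem_alloc_probe_init);
module_exit(kmem_alloc_probe_exit);
MODULE_LICENSE("GPL");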