slab_alloc() is a thin wrapper around slab_alloc_node() with only one
caller. Replace it with a direct call to slab_alloc_node().
__kmem_cache_alloc_lru() itself is a thin wrapper with two callers, so
replace it with direct calls to slab_alloc_node() and
trace_kmem_cache_alloc().

This also makes sure _RET_IP_ always has the expected value, rather
than depending on inlining decisions.

Reviewed-by: Kees Cook <keescook@xxxxxxxxxxxx>
Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
---
 mm/slub.c | 25 +++++++++----------------
 1 file changed, 9 insertions(+), 16 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index d6bc15929d22..5683f1d02e4f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3821,33 +3821,26 @@ static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list
 	return object;
 }
 
-static __fastpath_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *lru,
-		gfp_t gfpflags, unsigned long addr, size_t orig_size)
-{
-	return slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, addr, orig_size);
-}
-
-static __fastpath_inline
-void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
-			     gfp_t gfpflags)
+void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size);
+	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_,
+				    s->object_size);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
 
 	return ret;
 }
-
-void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
-{
-	return __kmem_cache_alloc_lru(s, NULL, gfpflags);
-}
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
 			   gfp_t gfpflags)
 {
-	return __kmem_cache_alloc_lru(s, lru, gfpflags);
+	void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_,
+				    s->object_size);
+
+	trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
+
+	return ret;
+}
 EXPORT_SYMBOL(kmem_cache_alloc_lru);
-- 
2.42.1
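
For illustration, a minimal user-space sketch (not kernel code) of the
_RET_IP_/inlining behavior the commit message refers to:
__builtin_return_address(0), which _RET_IP_ wraps, reports the return
address of the frame it is evaluated in. So if the compiler declined to
inline a wrapper, the recorded address would point inside the exported
function rather than at its external caller. The names api_alloc() and
wrapper() below are hypothetical, chosen only for this sketch.

/*
 * Hypothetical user-space sketch: shows that __builtin_return_address(0)
 * depends on which frame evaluates it, mirroring why evaluating _RET_IP_
 * directly in the exported functions avoids depending on inlining.
 */
#include <stdio.h>

static __attribute__((noinline)) void *wrapper(void)
{
	/* Evaluated in wrapper(): points back into api_alloc(). */
	printf("in wrapper:   ret addr %p\n", __builtin_return_address(0));
	return NULL;
}

void *api_alloc(void)
{
	/* Evaluated in api_alloc(): points at the external caller,
	 * which is the value a tracepoint would want to record. */
	printf("in api_alloc: ret addr %p\n", __builtin_return_address(0));
	return wrapper();
}

int main(void)
{
	api_alloc();
	return 0;
}

Running this prints two different addresses: the one from wrapper() lies
inside api_alloc(), while the one from api_alloc() identifies main() as
the caller, which is the distinction the patch makes irrelevant.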