Move allocation tagging specific code in the allocation path into
alloc_tagging_slab_alloc_hook, similar to how the freeing path uses
alloc_tagging_slab_free_hook. No functional changes, just code cleanup.

Suggested-by: Vlastimil Babka <vbabka@xxxxxxx>
Signed-off-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
---
 mm/slub.c | 34 +++++++++++++++++++++++++++++-----
 1 file changed, 29 insertions(+), 5 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 4927edec6a8c..99d53190cfcf 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2033,11 +2033,18 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
 	return slab_obj_exts(slab) + obj_to_index(s, slab, p);
 }
 
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+
+static inline void
+alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
+{
+	alloc_tag_add(&obj_exts->ref, current->alloc_tag, size);
+}
+
 static inline void
 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 			     int objects)
 {
-#ifdef CONFIG_MEM_ALLOC_PROFILING
 	struct slabobj_ext *obj_exts;
 	int i;
 
@@ -2053,9 +2060,23 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 
 		alloc_tag_sub(&obj_exts[off].ref, s->size);
 	}
-#endif
 }
 
+#else /* CONFIG_MEM_ALLOC_PROFILING */
+
+static inline void
+alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
+{
+}
+
+static inline void
+alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
+			     int objects)
+{
+}
+
+#endif /* CONFIG_MEM_ALLOC_PROFILING */
+
 #else /* CONFIG_SLAB_OBJ_EXT */
 
 static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
@@ -2079,6 +2100,11 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
 	return NULL;
 }
 
+static inline void
+alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
+{
+}
+
 static inline void
 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 			     int objects)
@@ -3944,7 +3970,6 @@ bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
 		kmemleak_alloc_recursive(p[i], s->object_size, 1,
 					 s->flags, init_flags);
 		kmsan_slab_alloc(s, p[i], init_flags);
-#ifdef CONFIG_MEM_ALLOC_PROFILING
 		if (need_slab_obj_ext()) {
 			struct slabobj_ext *obj_exts;
 
@@ -3955,9 +3980,8 @@ bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
 			 * check should be added before alloc_tag_add().
 			 */
 			if (likely(obj_exts))
-				alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
+				alloc_tagging_slab_alloc_hook(obj_exts, s->size);
 		}
-#endif
 	}
 
 	return memcg_slab_post_alloc_hook(s, lru, flags, size, p);

base-commit: e9d22f7a6655941fc8b2b942ed354ec780936b3e
-- 
2.45.2.803.g4e1b14247a-goog
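
A minimal, standalone sketch of the stub-hook pattern the patch relies on: the
hook carries the config-dependent code, so the call site needs no #ifdef and
compiles to nothing when the option is off. The struct and function names
below are illustrative only and do not exist in mm/slub.c; they merely mimic
the shape of alloc_tagging_slab_alloc_hook and its empty counterpart.

	#include <stdio.h>

	#define MEM_ALLOC_PROFILING	/* comment out to compile the empty stub instead */

	struct obj_ext {
		unsigned long bytes;	/* stands in for the per-object alloc_tag reference */
	};

	#ifdef MEM_ALLOC_PROFILING
	static inline void tagging_alloc_hook(struct obj_ext *ext, unsigned int size)
	{
		ext->bytes += size;	/* stands in for alloc_tag_add() */
	}
	#else
	static inline void tagging_alloc_hook(struct obj_ext *ext, unsigned int size)
	{
		/* no-op: the call below costs nothing when profiling is disabled */
	}
	#endif

	int main(void)
	{
		struct obj_ext ext = { 0 };

		/* the call site is identical whether or not profiling is enabled */
		tagging_alloc_hook(&ext, 64);
		printf("tagged bytes: %lu\n", ext.bytes);
		return 0;
	}

This mirrors how the freeing path already keeps its CONFIG_MEM_ALLOC_PROFILING
dependence inside alloc_tagging_slab_free_hook rather than at the caller.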