Move allocation tagging specific code in the allocation path into
alloc_tagging_slab_alloc_hook, similar to how freeing path uses
alloc_tagging_slab_free_hook. No functional changes, just code cleanup.

Suggested-by: Vlastimil Babka <vbabka@xxxxxxx>
Signed-off-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
---
Changes since v1 [1]
- Moved entire profiling code portion from the slab_post_alloc_hook into
alloc_tagging_slab_alloc_hook, per Vlastimil Babka
- Moved alloc_tagging_slab_free_hook out of CONFIG_SLAB_OBJ_EXT section
and into CONFIG_MEM_ALLOC_PROFILING one, per Vlastimil Babka

[1] https://lore.kernel.org/all/20240703015354.3370503-1-surenb@xxxxxxxxxx/

 mm/slub.c | 86 ++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 50 insertions(+), 36 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 4927edec6a8c..98c47ad7ceba 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2033,11 +2033,54 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
 	return slab_obj_exts(slab) + obj_to_index(s, slab, p);
 }
 
+#else /* CONFIG_SLAB_OBJ_EXT */
+
+static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
+			       gfp_t gfp, bool new_slab)
+{
+	return 0;
+}
+
+static inline void free_slab_obj_exts(struct slab *slab)
+{
+}
+
+static inline bool need_slab_obj_ext(void)
+{
+	return false;
+}
+
+static inline struct slabobj_ext *
+prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
+{
+	return NULL;
+}
+
+#endif /* CONFIG_SLAB_OBJ_EXT */
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+
+static inline void
+alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
+{
+	if (need_slab_obj_ext()) {
+		struct slabobj_ext *obj_exts;
+
+		obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
+		/*
+		 * Currently obj_exts is used only for allocation profiling.
+		 * If other users appear then mem_alloc_profiling_enabled()
+		 * check should be added before alloc_tag_add().
+		 */
+		if (likely(obj_exts))
+			alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
+	}
+}
+
 static inline void
 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 		int objects)
 {
-#ifdef CONFIG_MEM_ALLOC_PROFILING
 	struct slabobj_ext *obj_exts;
 	int i;
 
@@ -2053,30 +2096,13 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 
 		alloc_tag_sub(&obj_exts[off].ref, s->size);
 	}
-#endif
-}
-
-#else /* CONFIG_SLAB_OBJ_EXT */
-
-static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
-			       gfp_t gfp, bool new_slab)
-{
-	return 0;
-}
-
-static inline void free_slab_obj_exts(struct slab *slab)
-{
 }
 
-static inline bool need_slab_obj_ext(void)
-{
-	return false;
-}
+#else /* CONFIG_MEM_ALLOC_PROFILING */
 
-static inline struct slabobj_ext *
-prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
+static inline void
+alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
 {
-	return NULL;
 }
 
 static inline void
@@ -2085,7 +2111,8 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 {
 }
 
-#endif /* CONFIG_SLAB_OBJ_EXT */
+#endif /* CONFIG_MEM_ALLOC_PROFILING */
+
 
 #ifdef CONFIG_MEMCG_KMEM
 
@@ -3944,20 +3971,7 @@ bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
 		kmemleak_alloc_recursive(p[i], s->object_size, 1,
 					 s->flags, init_flags);
 		kmsan_slab_alloc(s, p[i], init_flags);
-#ifdef CONFIG_MEM_ALLOC_PROFILING
-		if (need_slab_obj_ext()) {
-			struct slabobj_ext *obj_exts;
-
-			obj_exts = prepare_slab_obj_exts_hook(s, flags, p[i]);
-			/*
-			 * Currently obj_exts is used only for allocation profiling.
-			 * If other users appear then mem_alloc_profiling_enabled()
-			 * check should be added before alloc_tag_add().
-			 */
-			if (likely(obj_exts))
-				alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
-		}
-#endif
+		alloc_tagging_slab_alloc_hook(s, p[i], flags);
 	}
 
 	return memcg_slab_post_alloc_hook(s, lru, flags, size, p);

base-commit: 795c58e4c7fc6163d8fb9f2baa86cfe898fa4b19
-- 
2.45.2.803.g4e1b14247a-goog