Account slab allocations using codetag reference embedded into slabobj_ext.

Signed-off-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Co-developed-by: Kent Overstreet <kent.overstreet@xxxxxxxxx>
Signed-off-by: Kent Overstreet <kent.overstreet@xxxxxxxxx>
---
 include/linux/slab_def.h |  2 +-
 include/linux/slub_def.h |  4 ++--
 mm/slab.c                |  4 +++-
 mm/slab.h                | 35 +++++++++++++++++++++++++++++++++++
 4 files changed, 41 insertions(+), 4 deletions(-)

diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index a61e7d55d0d3..23f14dcb8d5b 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -107,7 +107,7 @@ static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *sla
  * reciprocal_divide(offset, cache->reciprocal_buffer_size)
  */
 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-					const struct slab *slab, void *obj)
+					const struct slab *slab, const void *obj)
 {
 	u32 offset = (obj - slab->s_mem);
 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index f6df03f934e5..e8be5b368857 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -176,14 +176,14 @@ static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *sla

 /* Determine object index from a given position */
 static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
-					  void *addr, void *obj)
+					  void *addr, const void *obj)
 {
 	return reciprocal_divide(kasan_reset_tag(obj) - addr,
 				 cache->reciprocal_size);
 }

 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-					const struct slab *slab, void *obj)
+					const struct slab *slab, const void *obj)
 {
 	if (is_kfence_address(obj))
 		return 0;
diff --git a/mm/slab.c b/mm/slab.c
index ccc76f7455e9..026f0c08708a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3367,9 +3367,11 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
 					 unsigned long caller)
 {
+	struct slab *slab = virt_to_slab(objp);
 	bool init;

-	memcg_slab_free_hook(cachep, virt_to_slab(objp), &objp, 1);
+	memcg_slab_free_hook(cachep, slab, &objp, 1);
+	alloc_tagging_slab_free_hook(cachep, slab, &objp, 1);

 	if (is_kfence_address(objp)) {
 		kmemleak_free_recursive(objp, cachep->flags);
diff --git a/mm/slab.h b/mm/slab.h
index f953e7c81e98..f9442d3a10b2 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -494,6 +494,35 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)

 #endif /* CONFIG_SLAB_OBJ_EXT */

+#ifdef CONFIG_MEM_ALLOC_PROFILING
+
+static inline void alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab,
+					void **p, int objects)
+{
+	struct slabobj_ext *obj_exts;
+	int i;
+
+	if (!mem_alloc_profiling_enabled())
+		return;
+
+	obj_exts = slab_obj_exts(slab);
+	if (!obj_exts)
+		return;
+
+	for (i = 0; i < objects; i++) {
+		unsigned int off = obj_to_index(s, slab, p[i]);
+
+		alloc_tag_sub(&obj_exts[off].ref, s->size);
+	}
+}
+
+#else
+
+static inline void alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab,
+					void **p, int objects) {}
+
+#endif /* CONFIG_MEM_ALLOC_PROFILING */
+
 #ifdef CONFIG_MEMCG_KMEM
 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
 		     enum node_stat_item idx, int nr);
@@ -776,6 +805,12 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 						 s->flags, flags);
 			kmsan_slab_alloc(s, p[i], flags);
 			obj_exts = prepare_slab_obj_exts_hook(s, flags, p[i]);
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+			/* obj_exts can be allocated for other reasons */
+			if (likely(obj_exts) && mem_alloc_profiling_enabled())
+				alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
+#endif
 		}

 		memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
--
2.40.1.495.gc816e09b53d-goog
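
A note for readers following the series from outside the tree: the two hooks above form one accounting pair. slab_post_alloc_hook() charges each new object to the caller's codetag via alloc_tag_add(), and alloc_tagging_slab_free_hook() finds the same codetag again purely from the object's address (obj_to_index() picks the slabobj_ext slot) and uncharges it via alloc_tag_sub(). Below is a minimal, compilable userspace sketch of that scheme; tagged_alloc(), tagged_free(), demo_tag, profiling_enabled and the fixed pool are hypothetical stand-ins for illustration, not kernel API.

/*
 * Userspace model of the accounting scheme -- illustrative only.
 * The kernel equivalents are alloc_tag_add()/alloc_tag_sub() operating
 * on slabobj_ext, as in the diff above.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

struct alloc_tag {
	const char *site;	/* allocation site, e.g. "file.c:line" */
	size_t bytes;		/* bytes currently charged to the site */
};

struct obj_ext {
	struct alloc_tag *ref;	/* stands in for slabobj_ext->ref */
};

#define NR_OBJS 4
static char pool[NR_OBJS][64];		/* one "slab" of equal-sized objects */
static struct obj_ext exts[NR_OBJS];	/* stands in for slab_obj_exts(slab) */
static bool profiling_enabled = true;	/* mem_alloc_profiling_enabled() */

/* Like obj_to_index(): recover the extension slot from the address alone. */
static unsigned int obj_to_index(const void *obj)
{
	return (unsigned int)(((const char *)obj - &pool[0][0]) / sizeof(pool[0]));
}

/* Alloc path: charge the object to the caller's tag (alloc_tag_add()). */
static void *tagged_alloc(struct alloc_tag *tag, unsigned int free_idx)
{
	void *obj = pool[free_idx];	/* a real allocator picks a free slot */

	if (profiling_enabled) {
		exts[free_idx].ref = tag;
		tag->bytes += sizeof(pool[0]);
	}
	return obj;
}

/* Free path: the address alone identifies the tag to uncharge (alloc_tag_sub()). */
static void tagged_free(void *obj)
{
	unsigned int idx = obj_to_index(obj);
	struct alloc_tag *tag = exts[idx].ref;

	if (profiling_enabled && tag) {
		tag->bytes -= sizeof(pool[0]);
		exts[idx].ref = NULL;
	}
}

int main(void)
{
	struct alloc_tag demo_tag = { .site = "demo.c:42", .bytes = 0 };
	void *a = tagged_alloc(&demo_tag, 0);
	void *b = tagged_alloc(&demo_tag, 1);

	printf("%s: %zu bytes live\n", demo_tag.site, demo_tag.bytes);	/* 128 */
	tagged_free(a);
	tagged_free(b);
	printf("%s: %zu bytes live\n", demo_tag.site, demo_tag.bytes);	/* 0 */
	return 0;
}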
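
The point of embedding the reference in slabobj_ext rather than passing it around is visible in the free path: a kfree()-style caller has no idea which allocation site charged the object, so the per-object reference recovered through obj_to_index() is the only link back to the tag. The const-qualification of obj in obj_to_index()/__obj_to_index() fits the same picture: computing an index only reads the pointer value, so callers holding const pointers can use it too. And since both hooks bail out early when mem_alloc_profiling_enabled() is false, the overhead with profiling disabled is essentially just that check.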