As suggested by Linus, introduce a slab API function to memcg-charge an
object that was previously allocated without __GFP_ACCOUNT and from a
cache that's not SLAB_ACCOUNT. This may be useful when it's likely the
object is to be freed soon, and thus the charging/uncharging overhead
can be avoided.

In case kmem_cache_charge() is called on an already-charged object,
it's a no-op.

Suggested-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Link: https://lore.kernel.org/all/CAHk-=whYOOdM7jWy5jdrAm8LxcgCMFyk2bt8fYYvZzM4U-zAQA@xxxxxxxxxxxxxx/
Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
---
 include/linux/slab.h | 10 ++++++++++
 mm/slub.c            | 29 +++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index b5f5ee8308d0..0c3acb2fa3e6 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -491,6 +491,16 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_ali
 void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
 			   gfp_t gfpflags) __assume_slab_alignment __malloc;
 void kmem_cache_free(struct kmem_cache *s, void *objp);
 
+#ifdef CONFIG_MEMCG_KMEM
+int kmem_cache_charge(struct kmem_cache *s, gfp_t flags, void *objp);
+#else
+static inline int
+kmem_cache_charge(struct kmem_cache *s, gfp_t flags, void *objp)
+{
+	return 0;
+}
+#endif
+
 /*
  * Bulk allocation and freeing operations. These are accelerated in an
diff --git a/mm/slub.c b/mm/slub.c
index 64da169d672a..72b61b379ba1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4241,6 +4241,35 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
+#ifdef CONFIG_MEMCG_KMEM
+int kmem_cache_charge(struct kmem_cache *s, gfp_t flags, void *x)
+{
+	struct obj_cgroup **objcg;
+	struct slab *slab;
+
+	s = cache_from_obj(s, x);
+	if (!s)
+		return -EINVAL;
+
+	if (likely(!memcg_kmem_online()))
+		return 0;
+
+	/* was it already accounted? */
+	slab = virt_to_slab(x);
+	if ((objcg = slab_objcgs(slab))) {
+		unsigned int off = obj_to_index(s, slab, x);
+
+		if (objcg[off])
+			return 0;
+	}
+
+	if (!memcg_slab_post_alloc_hook(s, NULL, flags, 1, &x))
+		return -ENOMEM;
+
+	return 0;
+}
+#endif
+
 static void free_large_kmalloc(struct folio *folio, void *object)
 {
 	unsigned int order = folio_order(folio);
-- 
2.44.0
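
A minimal caller sketch of the intended usage pattern, assuming a cache
created without SLAB_ACCOUNT; the my_obj names below are illustrative
only and not part of this patch or any existing kernel API:

#include <linux/slab.h>

struct my_obj {
	int data;
};

/* hypothetical cache, created without SLAB_ACCOUNT */
static struct kmem_cache *my_obj_cachep;

static struct my_obj *my_obj_create(gfp_t gfp)
{
	/* allocate uncharged; a short-lived object skips memcg overhead */
	struct my_obj *obj = kmem_cache_alloc(my_obj_cachep, gfp);

	if (!obj)
		return NULL;

	/* ... setup that may fail and free obj early would go here ... */

	/*
	 * The object turned out to be long-lived, so charge it to the
	 * current memcg now; this is a no-op if it was already charged.
	 */
	if (kmem_cache_charge(my_obj_cachep, gfp, obj)) {
		kmem_cache_free(my_obj_cachep, obj);
		return NULL;
	}

	return obj;
}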