Re: [PATCH RFC 1/4] mm, slab: move memcg charging to post-alloc hook

On 2024/3/2 01:07, Vlastimil Babka wrote:
> The MEMCG_KMEM integration with slab currently relies on two hooks
> during allocation. memcg_slab_pre_alloc_hook() determines the objcg and
> charges it, and memcg_slab_post_alloc_hook() assigns the objcg pointer
> to the allocated object(s).
> 
> As Linus pointed out, this is unnecessarily complex. Failing to charge
> due to memcg limits should be rare, so we can optimistically allocate
> the object(s) and do the charging together with assigning the objcg
> pointer in a single post_alloc hook. In the rare case the charging
> fails, we can free the object(s) back.
> 
> This simplifies the code (no need to pass around the objcg pointer) and
> potentially allows separating charging from allocation in cases where
> it's common for the allocation to be freed immediately, so the memcg
> handling overhead could be saved.
> 
> Suggested-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
> Link: https://lore.kernel.org/all/CAHk-=whYOOdM7jWy5jdrAm8LxcgCMFyk2bt8fYYvZzM4U-zAQA@xxxxxxxxxxxxxx/
> Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>

Nice!

Reviewed-by: Chengming Zhou <chengming.zhou@xxxxxxxxx>

Thanks.
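
For anyone skimming the thread, here is a minimal userspace sketch of the
flow the changelog describes: allocate the object optimistically, charge it
in the post-alloc step, and free it back only in the rare case the charge
fails. try_charge() and alloc_charged() below are illustrative stand-ins,
not the hooks from the patch:

    /*
     * Simplified model of "allocate first, charge later". Not kernel code;
     * the helper names are made up for illustration.
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static bool try_charge(size_t bytes)
    {
        /* Pretend the memcg limit is almost never hit. */
        return bytes < (1UL << 20);
    }

    static void *alloc_charged(size_t size)
    {
        void *obj = malloc(size);       /* optimistic allocation first */

        if (!obj)
            return NULL;

        if (!try_charge(size)) {        /* post-alloc charging */
            free(obj);                  /* rare failure: free back */
            return NULL;
        }
        return obj;
    }

    int main(void)
    {
        void *p = alloc_charged(128);

        printf("alloc_charged(128) -> %s\n", p ? "object" : "NULL");
        free(p);
        return 0;
    }

The real hook in the diff below additionally assigns the objcg pointer into
the slab's objcg array while charging, which is exactly what used to be
split across the pre/post hooks.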

> ---
>  mm/slub.c | 180 +++++++++++++++++++++++++++-----------------------------------
>  1 file changed, 77 insertions(+), 103 deletions(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index 2ef88bbf56a3..7022a1246bab 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -1897,23 +1897,36 @@ static inline size_t obj_full_size(struct kmem_cache *s)
>  	return s->size + sizeof(struct obj_cgroup *);
>  }
>  
> -/*
> - * Returns false if the allocation should fail.
> - */
> -static bool __memcg_slab_pre_alloc_hook(struct kmem_cache *s,
> -					struct list_lru *lru,
> -					struct obj_cgroup **objcgp,
> -					size_t objects, gfp_t flags)
> +static bool __memcg_slab_post_alloc_hook(struct kmem_cache *s,
> +					 struct list_lru *lru,
> +					 gfp_t flags, size_t size,
> +					 void **p)
>  {
> +	struct obj_cgroup *objcg;
> +	struct slab *slab;
> +	unsigned long off;
> +	size_t i;
> +
>  	/*
>  	 * The obtained objcg pointer is safe to use within the current scope,
>  	 * defined by current task or set_active_memcg() pair.
>  	 * obj_cgroup_get() is used to get a permanent reference.
>  	 */
> -	struct obj_cgroup *objcg = current_obj_cgroup();
> +	objcg = current_obj_cgroup();
>  	if (!objcg)
>  		return true;
>  
> +	/*
> +	 * slab_alloc_node() avoids the NULL check, so we might be called with a
> +	 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
> +	 * the whole requested size.
> +	 * return success as there's nothing to free back
> +	 */
> +	if (unlikely(*p == NULL))
> +		return true;
> +
> +	flags &= gfp_allowed_mask;
> +
>  	if (lru) {
>  		int ret;
>  		struct mem_cgroup *memcg;
> @@ -1926,71 +1939,51 @@ static bool __memcg_slab_pre_alloc_hook(struct kmem_cache *s,
>  			return false;
>  	}
>  
> -	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
> +	if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s)))
>  		return false;
>  
> -	*objcgp = objcg;
> +	for (i = 0; i < size; i++) {
> +		slab = virt_to_slab(p[i]);
> +
> +		if (!slab_objcgs(slab) &&
> +		    memcg_alloc_slab_cgroups(slab, s, flags, false)) {
> +			obj_cgroup_uncharge(objcg, obj_full_size(s));
> +			continue;
> +		}
> +
> +		off = obj_to_index(s, slab, p[i]);
> +		obj_cgroup_get(objcg);
> +		slab_objcgs(slab)[off] = objcg;
> +		mod_objcg_state(objcg, slab_pgdat(slab),
> +				cache_vmstat_idx(s), obj_full_size(s));
> +	}
> +
>  	return true;
>  }
>  
> -/*
> - * Returns false if the allocation should fail.
> - */
> +static void memcg_alloc_abort_single(struct kmem_cache *s, void *object);
> +
>  static __fastpath_inline
> -bool memcg_slab_pre_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
> -			       struct obj_cgroup **objcgp, size_t objects,
> -			       gfp_t flags)
> +bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
> +				gfp_t flags, size_t size, void **p)
>  {
> -	if (!memcg_kmem_online())
> +	if (likely(!memcg_kmem_online()))
>  		return true;
>  
>  	if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT)))
>  		return true;
>  
> -	return likely(__memcg_slab_pre_alloc_hook(s, lru, objcgp, objects,
> -						  flags));
> -}
> -
> -static void __memcg_slab_post_alloc_hook(struct kmem_cache *s,
> -					 struct obj_cgroup *objcg,
> -					 gfp_t flags, size_t size,
> -					 void **p)
> -{
> -	struct slab *slab;
> -	unsigned long off;
> -	size_t i;
> -
> -	flags &= gfp_allowed_mask;
> -
> -	for (i = 0; i < size; i++) {
> -		if (likely(p[i])) {
> -			slab = virt_to_slab(p[i]);
> -
> -			if (!slab_objcgs(slab) &&
> -			    memcg_alloc_slab_cgroups(slab, s, flags, false)) {
> -				obj_cgroup_uncharge(objcg, obj_full_size(s));
> -				continue;
> -			}
> +	if (likely(__memcg_slab_post_alloc_hook(s, lru, flags, size, p)))
> +		return true;
>  
> -			off = obj_to_index(s, slab, p[i]);
> -			obj_cgroup_get(objcg);
> -			slab_objcgs(slab)[off] = objcg;
> -			mod_objcg_state(objcg, slab_pgdat(slab),
> -					cache_vmstat_idx(s), obj_full_size(s));
> -		} else {
> -			obj_cgroup_uncharge(objcg, obj_full_size(s));
> -		}
> +	if (likely(size == 1)) {
> +		memcg_alloc_abort_single(s, p);
> +		*p = NULL;
> +	} else {
> +		kmem_cache_free_bulk(s, size, p);
>  	}
> -}
> -
> -static __fastpath_inline
> -void memcg_slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg,
> -				gfp_t flags, size_t size, void **p)
> -{
> -	if (likely(!memcg_kmem_online() || !objcg))
> -		return;
>  
> -	return __memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
> +	return false;
>  }
>  
>  static void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
> @@ -2029,14 +2022,6 @@ void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
>  
>  	__memcg_slab_free_hook(s, slab, p, objects, objcgs);
>  }
> -
> -static inline
> -void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects,
> -			   struct obj_cgroup *objcg)
> -{
> -	if (objcg)
> -		obj_cgroup_uncharge(objcg, objects * obj_full_size(s));
> -}
>  #else /* CONFIG_MEMCG_KMEM */
>  static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
>  {
> @@ -2047,31 +2032,18 @@ static inline void memcg_free_slab_cgroups(struct slab *slab)
>  {
>  }
>  
> -static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
> -					     struct list_lru *lru,
> -					     struct obj_cgroup **objcgp,
> -					     size_t objects, gfp_t flags)
> -{
> -	return true;
> -}
> -
> -static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
> -					      struct obj_cgroup *objcg,
> +static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s,
> +					      struct list_lru *lru,
>  					      gfp_t flags, size_t size,
>  					      void **p)
>  {
> +	return true;
>  }
>  
>  static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
>  					void **p, int objects)
>  {
>  }
> -
> -static inline
> -void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects,
> -				 struct obj_cgroup *objcg)
> -{
> -}
>  #endif /* CONFIG_MEMCG_KMEM */
>  
>  /*
> @@ -3751,10 +3723,7 @@ noinline int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
>  ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
>  
>  static __fastpath_inline
> -struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
> -				       struct list_lru *lru,
> -				       struct obj_cgroup **objcgp,
> -				       size_t size, gfp_t flags)
> +struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
>  {
>  	flags &= gfp_allowed_mask;
>  
> @@ -3763,14 +3732,11 @@ struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
>  	if (unlikely(should_failslab(s, flags)))
>  		return NULL;
>  
> -	if (unlikely(!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags)))
> -		return NULL;
> -
>  	return s;
>  }
>  
>  static __fastpath_inline
> -void slab_post_alloc_hook(struct kmem_cache *s,	struct obj_cgroup *objcg,
> +bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
>  			  gfp_t flags, size_t size, void **p, bool init,
>  			  unsigned int orig_size)
>  {
> @@ -3819,7 +3785,7 @@ void slab_post_alloc_hook(struct kmem_cache *s,	struct obj_cgroup *objcg,
>  		kmsan_slab_alloc(s, p[i], init_flags);
>  	}
>  
> -	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
> +	return memcg_slab_post_alloc_hook(s, lru, flags, size, p);
>  }
>  
>  /*
> @@ -3836,10 +3802,9 @@ static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list
>  		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
>  {
>  	void *object;
> -	struct obj_cgroup *objcg = NULL;
>  	bool init = false;
>  
> -	s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags);
> +	s = slab_pre_alloc_hook(s, gfpflags);
>  	if (unlikely(!s))
>  		return NULL;
>  
> @@ -3856,8 +3821,10 @@ static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list
>  	/*
>  	 * When init equals 'true', like for kzalloc() family, only
>  	 * @orig_size bytes might be zeroed instead of s->object_size
> +	 * In case this fails due to memcg_slab_post_alloc_hook(),
> +	 * object is set to NULL
>  	 */
> -	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init, orig_size);
> +	slab_post_alloc_hook(s, lru, gfpflags, 1, &object, init, orig_size);
>  
>  	return object;
>  }
> @@ -4300,6 +4267,16 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
>  		do_slab_free(s, slab, object, object, 1, addr);
>  }
>  
> +#ifdef CONFIG_MEMCG_KMEM
> +/* Do not inline the rare memcg charging failed path into the allocation path */
> +static noinline
> +void memcg_alloc_abort_single(struct kmem_cache *s, void *object)
> +{
> +	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
> +		do_slab_free(s, virt_to_slab(object), object, object, 1, _RET_IP_);
> +}
> +#endif
> +
>  static __fastpath_inline
>  void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
>  		    void *tail, void **p, int cnt, unsigned long addr)
> @@ -4635,29 +4612,26 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
>  			  void **p)
>  {
>  	int i;
> -	struct obj_cgroup *objcg = NULL;
>  
>  	if (!size)
>  		return 0;
>  
> -	/* memcg and kmem_cache debug support */
> -	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
> +	s = slab_pre_alloc_hook(s, flags);
>  	if (unlikely(!s))
>  		return 0;
>  
>  	i = __kmem_cache_alloc_bulk(s, flags, size, p);
> +	if (unlikely(i == 0))
> +		return 0;
>  
>  	/*
>  	 * memcg and kmem_cache debug support and memory initialization.
>  	 * Done outside of the IRQ disabled fastpath loop.
>  	 */
> -	if (likely(i != 0)) {
> -		slab_post_alloc_hook(s, objcg, flags, size, p,
> -			slab_want_init_on_alloc(flags, s), s->object_size);
> -	} else {
> -		memcg_slab_alloc_error_hook(s, size, objcg);
> +	if (unlikely(!slab_post_alloc_hook(s, NULL, flags, size, p,
> +		    slab_want_init_on_alloc(flags, s), s->object_size))) {
> +		return 0;
>  	}
> -
>  	return i;
>  }
>  EXPORT_SYMBOL(kmem_cache_alloc_bulk);
> 
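
And a similar sketch for the bulk path, with the same simplifications: the
whole batch is allocated first, charged once in the post-alloc step, and
freed back (the equivalent of kmem_cache_free_bulk() in the patch) if the
charge fails, so the caller still sees either the full batch or 0. Again,
these helpers are illustrative stand-ins only:

    /* Userspace model of the bulk allocation path after this change. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static bool try_charge(size_t bytes)
    {
        return bytes < (1UL << 20);     /* pretend the limit is rarely hit */
    }

    static void free_bulk(size_t n, void **p)
    {
        for (size_t i = 0; i < n; i++)
            free(p[i]);
    }

    /* Returns the number of objects handed to the caller: either n or 0. */
    static size_t alloc_bulk_charged(size_t obj_size, size_t n, void **p)
    {
        for (size_t i = 0; i < n; i++) {
            p[i] = malloc(obj_size);
            if (!p[i]) {
                free_bulk(i, p);        /* can't fill the batch: abort */
                return 0;
            }
        }

        /* One charge for the whole batch, after the objects exist. */
        if (!try_charge(obj_size * n)) {
            free_bulk(n, p);
            return 0;
        }
        return n;
    }

    int main(void)
    {
        void *objs[8];
        size_t got = alloc_bulk_charged(64, 8, objs);

        printf("got %zu objects\n", got);
        free_bulk(got, objs);
        return 0;
    }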