Re: [PATCH v2 07/15] slub: pull kmem_cache_open() into do_kmem_cache_create()

On Tue, Sep 03, 2024 at 04:20:48PM +0200, Christian Brauner wrote:
> do_kmem_cache_create() is the only caller and we're going to pass down
> struct kmem_cache_args in a follow-up patch.
> 
> Signed-off-by: Christian Brauner <brauner@xxxxxxxxxx>

The error handling in kmem_cache_open() begs for improvement, but that's
not related to this patch.
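
For example (purely illustrative, not something this patch needs to
address): the out label collapses every failure into the initial
err = -EINVAL, even when the underlying cause is an allocation failure
where -ENOMEM would be more accurate, along the lines of:

	if (!init_kmem_cache_nodes(s)) {
		err = -ENOMEM;	/* allocation failure, not bad arguments */
		goto out;
	}

	if (!alloc_kmem_cache_cpus(s)) {
		err = -ENOMEM;
		goto out;
	}

And init_cache_random_seq() already returns an errno that could simply
be propagated instead of discarded:

	err = init_cache_random_seq(s);
	if (err)
		goto out;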

Reviewed-by: Mike Rapoport (Microsoft) <rppt@xxxxxxxxxx>

> ---
>  mm/slub.c | 132 +++++++++++++++++++++++++++++---------------------------------
>  1 file changed, 62 insertions(+), 70 deletions(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index 23d9d783ff26..30f4ca6335c7 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -5290,65 +5290,6 @@ static int calculate_sizes(struct kmem_cache *s)
>  	return !!oo_objects(s->oo);
>  }
>  
> -static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
> -{
> -	s->flags = kmem_cache_flags(flags, s->name);
> -#ifdef CONFIG_SLAB_FREELIST_HARDENED
> -	s->random = get_random_long();
> -#endif
> -
> -	if (!calculate_sizes(s))
> -		goto error;
> -	if (disable_higher_order_debug) {
> -		/*
> -		 * Disable debugging flags that store metadata if the min slab
> -		 * order increased.
> -		 */
> -		if (get_order(s->size) > get_order(s->object_size)) {
> -			s->flags &= ~DEBUG_METADATA_FLAGS;
> -			s->offset = 0;
> -			if (!calculate_sizes(s))
> -				goto error;
> -		}
> -	}
> -
> -#ifdef system_has_freelist_aba
> -	if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
> -		/* Enable fast mode */
> -		s->flags |= __CMPXCHG_DOUBLE;
> -	}
> -#endif
> -
> -	/*
> -	 * The larger the object size is, the more slabs we want on the partial
> -	 * list to avoid pounding the page allocator excessively.
> -	 */
> -	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
> -	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
> -
> -	set_cpu_partial(s);
> -
> -#ifdef CONFIG_NUMA
> -	s->remote_node_defrag_ratio = 1000;
> -#endif
> -
> -	/* Initialize the pre-computed randomized freelist if slab is up */
> -	if (slab_state >= UP) {
> -		if (init_cache_random_seq(s))
> -			goto error;
> -	}
> -
> -	if (!init_kmem_cache_nodes(s))
> -		goto error;
> -
> -	if (alloc_kmem_cache_cpus(s))
> -		return 0;
> -
> -error:
> -	__kmem_cache_release(s);
> -	return -EINVAL;
> -}
> -
>  static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
>  			      const char *text)
>  {
> @@ -5904,26 +5845,77 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
>  
>  int do_kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
>  {
> -	int err;
> +	int err = -EINVAL;
>  
> -	err = kmem_cache_open(s, flags);
> -	if (err)
> -		return err;
> +	s->flags = kmem_cache_flags(flags, s->name);
> +#ifdef CONFIG_SLAB_FREELIST_HARDENED
> +	s->random = get_random_long();
> +#endif
> +
> +	if (!calculate_sizes(s))
> +		goto out;
> +	if (disable_higher_order_debug) {
> +		/*
> +		 * Disable debugging flags that store metadata if the min slab
> +		 * order increased.
> +		 */
> +		if (get_order(s->size) > get_order(s->object_size)) {
> +			s->flags &= ~DEBUG_METADATA_FLAGS;
> +			s->offset = 0;
> +			if (!calculate_sizes(s))
> +				goto out;
> +		}
> +	}
> +
> +#ifdef system_has_freelist_aba
> +	if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
> +		/* Enable fast mode */
> +		s->flags |= __CMPXCHG_DOUBLE;
> +	}
> +#endif
> +
> +	/*
> +	 * The larger the object size is, the more slabs we want on the partial
> +	 * list to avoid pounding the page allocator excessively.
> +	 */
> +	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
> +	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
> +
> +	set_cpu_partial(s);
> +
> +#ifdef CONFIG_NUMA
> +	s->remote_node_defrag_ratio = 1000;
> +#endif
> +
> +	/* Initialize the pre-computed randomized freelist if slab is up */
> +	if (slab_state >= UP) {
> +		if (init_cache_random_seq(s))
> +			goto out;
> +	}
> +
> +	if (!init_kmem_cache_nodes(s))
> +		goto out;
> +
> +	if (!alloc_kmem_cache_cpus(s))
> +		goto out;
>  
>  	/* Mutex is not taken during early boot */
> -	if (slab_state <= UP)
> -		return 0;
> +	if (slab_state <= UP) {
> +		err = 0;
> +		goto out;
> +	}
>  
>  	err = sysfs_slab_add(s);
> -	if (err) {
> -		__kmem_cache_release(s);
> -		return err;
> -	}
> +	if (err)
> +		goto out;
>  
>  	if (s->flags & SLAB_STORE_USER)
>  		debugfs_slab_add(s);
>  
> -	return 0;
> +out:
> +	if (err)
> +		__kmem_cache_release(s);
> +	return err;
>  }
>  
>  #ifdef SLAB_SUPPORTS_SYSFS
> 
> -- 
> 2.45.2
> 

-- 
Sincerely yours,
Mike.



