Re: [PATCH v2] mm/slab: fix warning caused by duplicate kmem_cache creation in kmem_buckets_create

On Tue, Nov 05, 2024 at 11:27:47AM +0900, Koichiro Den wrote:
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 3d26c257ed8b..db6ffe53c23e 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -380,8 +380,11 @@ kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
>  				  unsigned int usersize,
>  				  void (*ctor)(void *))
>  {
> +	unsigned long mask = 0;
> +	unsigned int idx;
>  	kmem_buckets *b;
> -	int idx;
> +
> +	BUILD_BUG_ON(ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]) > BITS_PER_LONG);
>  
>  	/*
>  	 * When the separate buckets API is not built in, just return
> @@ -403,7 +406,7 @@ kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
>  	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
>  		char *short_size, *cache_name;
>  		unsigned int cache_useroffset, cache_usersize;
> -		unsigned int size;
> +		unsigned int size, aligned_idx;
>  
>  		if (!kmalloc_caches[KMALLOC_NORMAL][idx])
>  			continue;
> @@ -416,10 +419,6 @@ kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
>  		if (WARN_ON(!short_size))
>  			goto fail;
>  
> -		cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
> -		if (WARN_ON(!cache_name))
> -			goto fail;
> -
>  		if (useroffset >= size) {
>  			cache_useroffset = 0;
>  			cache_usersize = 0;
> @@ -427,18 +426,29 @@ kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
>  			cache_useroffset = useroffset;
>  			cache_usersize = min(size - cache_useroffset, usersize);
>  		}
> -		(*b)[idx] = kmem_cache_create_usercopy(cache_name, size,
> -					0, flags, cache_useroffset,
> -					cache_usersize, ctor);
> -		kfree(cache_name);
> -		if (WARN_ON(!(*b)[idx]))
> -			goto fail;
> +
> +		aligned_idx = __kmalloc_index(size, false);
> +		if (!(*b)[aligned_idx]) {
> +			cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
> +			if (WARN_ON(!cache_name))
> +				goto fail;
> +			(*b)[aligned_idx] = kmem_cache_create_usercopy(cache_name, size,
> +						0, flags, cache_useroffset,
> +						cache_usersize, ctor);
> +			if (WARN_ON(!(*b)[aligned_idx])) {
> +				kfree(cache_name);
> +				goto fail;
> +			}
> +			set_bit(aligned_idx, &mask);
> +		}
> +		if (idx != aligned_idx)
> +			(*b)[idx] = (*b)[aligned_idx];
>  	}

It looks fine. This pretty much matches the logic in new_kmalloc_cache()
(from commit 963e84b0f262).
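
For anyone reading along, here is a minimal standalone sketch of that
pattern (plain userspace C with a made-up bucket table; NR_BUCKETS,
MIN_ALIGN and fake_cache are stand-ins, not the kernel API): create one
backing cache per distinct rounded-up size, alias the smaller indices to
it, and record the real creations in a bitmask so only those get torn
down afterwards.

#include <stdio.h>
#include <stdlib.h>

#define NR_BUCKETS 8        /* made-up number of size buckets           */
#define MIN_ALIGN  64       /* pretend minimum allocation alignment     */

struct fake_cache { size_t size; };  /* stand-in for a real cache object */

/* nominal bucket sizes: 8, 16, 32, 64, ... (illustrative only) */
static size_t bucket_size(unsigned int idx)
{
    return (size_t)8 << idx;
}

/*
 * Index of the bucket a request of this size actually lands in once the
 * minimum alignment is applied; stands in for looking up the existing
 * cache's object size and feeding it to __kmalloc_index().
 */
static unsigned int canonical_index(size_t size)
{
    unsigned int idx = 0;

    if (size < MIN_ALIGN)
        size = MIN_ALIGN;
    while (bucket_size(idx) < size)
        idx++;
    return idx;
}

int main(void)
{
    struct fake_cache *buckets[NR_BUCKETS] = { NULL };
    unsigned long created_mask = 0;
    unsigned int idx;

    for (idx = 0; idx < NR_BUCKETS; idx++) {
        unsigned int aligned_idx = canonical_index(bucket_size(idx));

        if (!buckets[aligned_idx]) {
            buckets[aligned_idx] = malloc(sizeof(*buckets[aligned_idx]));
            if (!buckets[aligned_idx])
                goto cleanup;
            buckets[aligned_idx]->size = bucket_size(aligned_idx);
            created_mask |= 1UL << aligned_idx;  /* remember real creations */
        }
        if (idx != aligned_idx)
            buckets[idx] = buckets[aligned_idx]; /* alias, not a new cache */
    }

    for (idx = 0; idx < NR_BUCKETS; idx++)
        printf("bucket %u -> cache of size %zu\n", idx, buckets[idx]->size);

cleanup:
    /* only slots recorded in the mask were really allocated */
    for (idx = 0; idx < NR_BUCKETS; idx++)
        if (created_mask & (1UL << idx))
            free(buckets[idx]);
    return 0;
}

With MIN_ALIGN at 64, indices 0-3 all end up sharing the 64-byte cache
and only bits 3-7 are set in the mask, which mirrors why the fail path
above can rely on for_each_set_bit().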

>  	return b;
>  
>  fail:
> -	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++)
> +	for_each_set_bit(idx, &mask, ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]))
>  		kmem_cache_destroy((*b)[idx]);
>  	kmem_cache_free(kmem_buckets_cache, b);

I gave this a try with swiotlb=noforce as well (which pushes the minimum
kmalloc alignment up to 64 bytes). So:

Reviewed-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Tested-by: Catalin Marinas <catalin.marinas@xxxxxxx>
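
As a footnote, a tiny standalone snippet (made-up sizes and name format,
not the kernel's tables) of why that configuration is a good test: with a
64-byte minimum alignment, several nominal bucket sizes collapse to the
same object size, so the old loop generated the same "<name>-<size>"
string more than once and asked for duplicate caches, which is what
produced the warning.

#include <stdio.h>

int main(void)
{
    const unsigned int min_align = 64;  /* e.g. with swiotlb=noforce here */
    const unsigned int nominal[] = { 8, 16, 32, 64, 128, 256 };
    char cache_name[32];
    unsigned int i;

    for (i = 0; i < sizeof(nominal) / sizeof(nominal[0]); i++) {
        /* effective object size after rounding up to the minimum alignment */
        unsigned int size = nominal[i] < min_align ? min_align : nominal[i];

        snprintf(cache_name, sizeof(cache_name), "mybuckets-%u", size);
        printf("nominal %3u -> %s\n", nominal[i], cache_name);
    }
    return 0;
}

Running it prints "mybuckets-64" for the nominal 8/16/32/64 entries,
i.e. four requests for the same cache name.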



