Re: [RFC PATCH v1 01/15] mm/slab: cleanup slab_alloc() and slab_alloc_node()

On 3/8/22 12:41, Hyeonggon Yoo wrote:
> +
>  static __always_inline void *
> -slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned long caller)
> +slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
> +		   unsigned long caller)
>  {
>  	unsigned long save_flags;
> -	void *objp;
> +	void *ptr;
> +	int slab_node = numa_mem_id();
>  	struct obj_cgroup *objcg = NULL;
>  	bool init = false;
>  
> @@ -3299,21 +3255,49 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned lo
>  	if (unlikely(!cachep))
>  		return NULL;
>  
> -	objp = kfence_alloc(cachep, orig_size, flags);
> -	if (unlikely(objp))
> -		goto out;
> +	ptr = kfence_alloc(cachep, orig_size, flags);
> +	if (unlikely(ptr))
> +		goto out_hooks;
>  
>  	cache_alloc_debugcheck_before(cachep, flags);
>  	local_irq_save(save_flags);
> -	objp = __do_cache_alloc(cachep, flags);

Looks like after this patch, slab_alloc() (without a node specified) no
longer ends up in __do_cache_alloc(), so there's no longer any
possibility of hitting alternate_node_alloc(), which looks like a
functional regression?
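
For reference, this is roughly what the NUMA variant of
__do_cache_alloc() looks like before this patch (paraphrased from
mm/slab.c, so take it as a sketch rather than an exact quote):

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	void *objp;

	/* Honor mempolicy and cpuset memory spreading first */
	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
		objp = alternate_node_alloc(cachep, flags);
		if (objp)
			goto out;
	}
	objp = ____cache_alloc(cachep, flags);

	/*
	 * We may just have run out of memory on the local node.
	 * ____cache_alloc_node() knows how to locate memory on other nodes.
	 */
	if (!objp)
		objp = ____cache_alloc_node(cachep, flags, numa_mem_id());
out:
	return objp;
}

With the new slab_alloc() passing NUMA_NO_NODE, node_match() presumably
succeeds for the local node, so only ____cache_alloc() is tried and the
mempolicy/cpuset_do_slab_mem_spread() case above is never considered.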

> +
> +	if (node_match(nodeid, slab_node)) {
> +		/*
> +		 * Use the locally cached objects if possible.
> +		 * However ____cache_alloc does not allow fallback
> +		 * to other nodes. It may fail while we still have
> +		 * objects on other nodes available.
> +		 */
> +		ptr = ____cache_alloc(cachep, flags);
> +		if (ptr)
> +			goto out;
> +	}
> +#ifdef CONFIG_NUMA
> +	else if (unlikely(!get_node(cachep, nodeid))) {
> +		/* Node not bootstrapped yet */
> +		ptr = fallback_alloc(cachep, flags);
> +		goto out;
> +	}
> +
> +	/* ___cache_alloc_node can fall back to other nodes */
> +	ptr = ____cache_alloc_node(cachep, flags, nodeid);
> +#endif
> +out:
>  	local_irq_restore(save_flags);
> -	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
> -	prefetchw(objp);
> +	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
> +	prefetchw(ptr);
>  	init = slab_want_init_on_alloc(flags, cachep);
>  
> -out:
> -	slab_post_alloc_hook(cachep, objcg, flags, 1, &objp, init);
> -	return objp;
> +out_hooks:
> +	slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr, init);
> +	return ptr;
> +}
> +
> +static __always_inline void *
> +slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned long caller)
> +{
> +	return slab_alloc_node(cachep, flags, NUMA_NO_NODE, orig_size, caller);
>  }
>  
>  /*

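One way to keep the old behaviour in the merged function might be to
handle NUMA_NO_NODE explicitly before the node_match() check, along
these lines (untested sketch, CONFIG_NUMA case only, just to illustrate
the idea):

	if (nodeid == NUMA_NO_NODE) {
		if (current->mempolicy || cpuset_do_slab_mem_spread()) {
			ptr = alternate_node_alloc(cachep, flags);
			if (ptr)
				goto out;
		}
		nodeid = slab_node;
	}

i.e. node-less allocations would first try alternate_node_alloc() as
before, and only then fall through to the local-node fast path.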