Re: [PATCH 03/10] mm: remove __vfree_deferred

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Thu, Jan 19, 2023 at 11:02:19AM +0100, Christoph Hellwig wrote:
> Fold __vfree_deferred into vfree_atomic, and call vfree_atomic early on
> from vfree if called from interrupt context so that the extra low-level
> helper can be avoided.
> 
> Signed-off-by: Christoph Hellwig <hch@xxxxxx>
> ---
>  mm/vmalloc.c | 43 +++++++++++++++++--------------------------
>  1 file changed, 17 insertions(+), 26 deletions(-)
> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index b989828b45109a..fafb6227f4428f 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -2769,20 +2769,6 @@ static void __vunmap(const void *addr, int deallocate_pages)
>  	kfree(area);
>  }
>  
> -static inline void __vfree_deferred(const void *addr)
> -{
> -	/*
> -	 * Use raw_cpu_ptr() because this can be called from preemptible
> -	 * context. Preemption is absolutely fine here, because the llist_add()
> -	 * implementation is lockless, so it works even if we are adding to
> -	 * another cpu's list. schedule_work() should be fine with this too.
> -	 */
> -	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
> -
> -	if (llist_add((struct llist_node *)addr, &p->list))
> -		schedule_work(&p->wq);
> -}
> -
>  /**
>   * vfree_atomic - release memory allocated by vmalloc()
>   * @addr:	  memory base address
> @@ -2792,13 +2778,19 @@ static inline void __vfree_deferred(const void *addr)
>   */
>  void vfree_atomic(const void *addr)
>  {
> -	BUG_ON(in_nmi());
> +	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
>  
> +	BUG_ON(in_nmi());
>  	kmemleak_free(addr);
>  
> -	if (!addr)
> -		return;
> -	__vfree_deferred(addr);
> +	/*
> +	 * Use raw_cpu_ptr() because this can be called from preemptible
> +	 * context. Preemption is absolutely fine here, because the llist_add()
> +	 * implementation is lockless, so it works even if we are adding to
> +	 * another cpu's list. schedule_work() should be fine with this too.
> +	 */
> +	if (addr && llist_add((struct llist_node *)addr, &p->list))
> +		schedule_work(&p->wq);
>  }
>  
>  /**
> @@ -2820,17 +2812,16 @@ void vfree_atomic(const void *addr)
>   */
>  void vfree(const void *addr)
>  {
> -	BUG_ON(in_nmi());
> +	if (unlikely(in_interrupt())) {
> +		vfree_atomic(addr);
> +		return;
> +	}
>  
> +	BUG_ON(in_nmi());
>  	kmemleak_free(addr);
> +	might_sleep();
>  
> -	might_sleep_if(!in_interrupt());
> -
> -	if (!addr)
> -		return;
> -	if (unlikely(in_interrupt()))
> -		__vfree_deferred(addr);
> -	else
> +	if (addr)
>  		__vunmap(addr, 1);
>  }
>  EXPORT_SYMBOL(vfree);
> -- 
> 2.39.0
> 
This folding makes sense to me.

Reviewed-by: Uladzislau Rezki (Sony) <urezki@xxxxxxxxx>

--
Uladzislau Rezki




[Index of Archives]     [Linux ARM Kernel]     [Linux ARM]     [Linux Omap]     [Fedora ARM]     [IETF Annouce]     [Bugtraq]     [Linux OMAP]     [Linux MIPS]     [eCos]     [Asterisk Internet PBX]     [Linux API]

  Powered by Linux