Re: [PATCH] mm/vmalloc: Remove WARN_ON_ONCE related to adjust_va_to_fit_type

On 09/27/23 at 01:49pm, Uladzislau Rezki wrote:
> > > Yes, but a GFP_NOWAIT allocation error can easily occur on a low-memory device.
> > >
> > Agree. You are really in a low memory condition. We end up here only if
> > pre-loading also has not succeeded, i.e. GFP_KERNEL also fails.
> > 
> > But i agree with you, we should "improve the warning" because we drain
> > and repeat.
> > 
> > > How about changing the fix as below?
> > > 
> > > <snip>
> > > --- a/mm/vmalloc.c
> > > +++ b/mm/vmalloc.c
> > > @@ -1468,6 +1468,7 @@ adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
> > >                  */
> > >                 va->va_start = nva_start_addr + size;
> > >         } else {
> > > +               WARN_ON_ONCE(1);
> > >                 return -1;
> > >         }
> > >  
> > > @@ -1522,7 +1523,7 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
> > >  
> > >         /* Update the free vmap_area. */
> > >         ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
> > > -       if (WARN_ON_ONCE(ret))
> > > +       if (ret)
> > >                 return vend;
> > >  
> > >  #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
> > > @@ -4143,7 +4144,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
> > >                 ret = adjust_va_to_fit_type(&free_vmap_area_root,
> > >                                             &free_vmap_area_list,
> > >                                             va, start, size);
> > > -               if (WARN_ON_ONCE(unlikely(ret)))
> > > +               if (unlikely(ret))
> > >                         /* It is a BUG(), but trigger recovery instead. */
> > >                         goto recovery;
> > >  
> > > <snip>
> > > It will WARN_ON_ONCE() only if classify_va_fit_type() yields NOTHING_FIT.
> > > 
> > This is good, but i think it should be improved further. We need to
> > be able to tell from the warning whether we ran out of memory or out
> > of vmap space, so:
> > 
> > - if NOTHING_FIT, we should WARN() for sure;
> > - In the second place, pcpu_get_vm_areas(), we do not use NE_FIT, only
> >   in the beginning after boot; but potentially we can trigger -ENOMEM
> >   and we should warn in this case. Otherwise you just hide it;
> > - And the last one: if after repeating we still do not manage to allocate.
> > 
> 
> We should understand the reason for the failure. I think the error
> handling should be improved. Something like:

This looks good to me, though the output parameter 'error' looks a little
ugly. How about this?

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ef8599d394fd..32805c82373b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1454,7 +1454,7 @@ adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
 			 */
 			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
 			if (!lva)
-				return -1;
+				return -ENOMEM;
 		}
 
 		/*
@@ -1468,7 +1468,7 @@ adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
 		 */
 		va->va_start = nva_start_addr + size;
 	} else {
-		return -1;
+		return -EINVAL;
 	}
 
 	if (type != FL_FIT_TYPE) {
@@ -1509,7 +1509,7 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
 
 	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
 	if (unlikely(!va))
-		return vend;
+		return -ENOENT;
 
 	if (va->va_start > vstart)
 		nva_start_addr = ALIGN(va->va_start, align);
@@ -1518,12 +1518,12 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
 
 	/* Check the "vend" restriction. */
 	if (nva_start_addr + size > vend)
-		return vend;
+		return -ERANGE;
 
 	/* Update the free vmap_area. */
 	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
-	if (WARN_ON_ONCE(ret))
-		return vend;
+	if (ret)
+		return ret;
 
 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
 	find_vmap_lowest_match_check(root, head, size, align);
@@ -1616,13 +1616,13 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 		size, align, vstart, vend);
 	spin_unlock(&free_vmap_area_lock);
 
-	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
+	trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr));
 
 	/*
-	 * If an allocation fails, the "vend" address is
+	 * If an allocation fails, the error value is
 	 * returned. Therefore trigger the overflow path.
 	 */
-	if (unlikely(addr == vend))
+	if (unlikely(IS_ERR_VALUE(addr)))
 		goto overflow;
 
 	va->va_start = addr;
@@ -1662,8 +1662,9 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	}
 
 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
-		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
-			size);
+		pr_warn("vmap allocation for size %lu failed: "
+			"use vmalloc=<size> to increase size, errno: (%d)\n",
+			size, (int)addr);
 
 	kmem_cache_free(vmap_area_cachep, va);
 	return ERR_PTR(-EBUSY);
@@ -4143,8 +4144,9 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 		ret = adjust_va_to_fit_type(&free_vmap_area_root,
 					    &free_vmap_area_list,
 					    va, start, size);
-		if (WARN_ON_ONCE(unlikely(ret)))
-			/* It is a BUG(), but trigger recovery instead. */
+		if (unlikely(ret)) {
+			WARN_ONCE(1, "%s error: errno (%d)\n", __func__, ret);
 			goto recovery;
+		}
 
 		/* Allocated area. */

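By the way, the reason IS_ERR_VALUE() (rather than IS_ERR(), which takes a
pointer) is used above is that the errno is encoded directly in the returned
unsigned long, the same trick ERR_PTR()/PTR_ERR() play for pointers. A rough
user-space sketch of the round trip, with MAX_ERRNO and the check simplified
from include/linux/err.h, just to illustrate:

<snip>
/*
 * Illustration only: a negative errno stored in an unsigned long stays
 * distinguishable from any real vmalloc address, which is what
 * IS_ERR_VALUE() relies on. Builds as plain user-space C; MAX_ERRNO
 * and the check mirror include/linux/err.h.
 */
#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	/* Failure path: __alloc_vmap_area() returns a negative errno. */
	unsigned long addr = (unsigned long)-12;	/* -ENOMEM */

	printf("errno=%d is_err=%d\n", (int)addr, (int)IS_ERR_VALUE(addr));

	/* Success path: a plausible vmalloc address on x86-64. */
	addr = 0xffffc90000000000UL;
	printf("is_err=%d\n", (int)IS_ERR_VALUE(addr));

	return 0;
}
<snip>

So the callers only need the single return value, and the extra out-parameter
from the earlier version becomes unnecessary.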
> 
> <snip>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index ef8599d394fd..03a36921a3fc 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -1454,7 +1454,7 @@ adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
>  			 */
>  			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
>  			if (!lva)
> -				return -1;
> +				return -ENOMEM;
>  		}
>  
>  		/*
> @@ -1468,7 +1468,7 @@ adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
>  		 */
>  		va->va_start = nva_start_addr + size;
>  	} else {
> -		return -1;
> +		return -EINVAL;
>  	}
>  
>  	if (type != FL_FIT_TYPE) {
> @@ -1488,7 +1488,8 @@ adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
>  static __always_inline unsigned long
>  __alloc_vmap_area(struct rb_root *root, struct list_head *head,
>  	unsigned long size, unsigned long align,
> -	unsigned long vstart, unsigned long vend)
> +	unsigned long vstart, unsigned long vend,
> +	int *error)
>  {
>  	bool adjust_search_size = true;
>  	unsigned long nva_start_addr;
> @@ -1508,8 +1509,10 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
>  		adjust_search_size = false;
>  
>  	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
> -	if (unlikely(!va))
> +	if (unlikely(!va)) {
> +		*error = -ENOENT;
>  		return vend;
> +	}
>  
>  	if (va->va_start > vstart)
>  		nva_start_addr = ALIGN(va->va_start, align);
> @@ -1517,13 +1520,17 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
>  		nva_start_addr = ALIGN(vstart, align);
>  
>  	/* Check the "vend" restriction. */
> -	if (nva_start_addr + size > vend)
> +	if (nva_start_addr + size > vend) {
> +		*error = -ERANGE;
>  		return vend;
> +	}
>  
>  	/* Update the free vmap_area. */
>  	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
> -	if (WARN_ON_ONCE(ret))
> +	if (ret) {
> +		*error = ret;
>  		return vend;
> +	}
>  
>  #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
>  	find_vmap_lowest_match_check(root, head, size, align);
> @@ -1589,7 +1596,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>  	unsigned long freed;
>  	unsigned long addr;
>  	int purged = 0;
> -	int ret;
> +	int ret, error;
>  
>  	if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
>  		return ERR_PTR(-EINVAL);
> @@ -1613,7 +1620,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>  retry:
>  	preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
>  	addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
> -		size, align, vstart, vend);
> +		size, align, vstart, vend, &error);
>  	spin_unlock(&free_vmap_area_lock);
>  
>  	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
> @@ -1662,8 +1669,9 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>  	}
>  
>  	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
> -		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
> -			size);
> +		pr_warn("vmap allocation for size %lu failed: "
> +			"use vmalloc=<size> to increase size, errno: (%d)\n",
> +			size, error);
>  
>  	kmem_cache_free(vmap_area_cachep, va);
>  	return ERR_PTR(-EBUSY);
> @@ -4143,9 +4151,10 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
>  		ret = adjust_va_to_fit_type(&free_vmap_area_root,
>  					    &free_vmap_area_list,
>  					    va, start, size);
> -		if (WARN_ON_ONCE(unlikely(ret)))
> -			/* It is a BUG(), but trigger recovery instead. */
> +		if (unlikely(ret)) {
> +			WARN_ONCE(1, "%s error: errno (%d)\n", __func__, ret);
>  			goto recovery;
> +		}
>  
>  		/* Allocated area. */
>  		va = vas[area];
> <snip>
> 
> Any thoughts?
