Re: [PATCH 1/2] mm/page-alloc: Rename gfp_mask to gfp

On 1/24/21 1:03 PM, Matthew Wilcox (Oracle) wrote:
> Shorten some overly-long lines by renaming this identifier.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>

Acked-by: Vlastimil Babka <vbabka@xxxxxxx>

> ---
>  mm/page_alloc.c | 19 ++++++++++---------
>  1 file changed, 10 insertions(+), 9 deletions(-)
> 
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index b031a5ae0bd5..d72ef706f6e6 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -4963,7 +4963,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
>   * This is the 'heart' of the zoned buddy allocator.
>   */
>  struct page *
> -__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
> +__alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
>  							nodemask_t *nodemask)
>  {
>  	struct page *page;
> @@ -4976,20 +4976,21 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
>  	 * so bail out early if the request is out of bound.
>  	 */
>  	if (unlikely(order >= MAX_ORDER)) {
> -		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
> +		WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
>  		return NULL;
>  	}
>  
> -	gfp_mask &= gfp_allowed_mask;
> -	alloc_mask = gfp_mask;
> -	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
> +	gfp &= gfp_allowed_mask;
> +	alloc_mask = gfp;
> +	if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
> +					&alloc_mask, &alloc_flags))
>  		return NULL;
>  
>  	/*
>  	 * Forbid the first pass from falling back to types that fragment
>  	 * memory until all local zones are considered.
>  	 */
> -	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
> +	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
>  
>  	/* First allocation attempt */
>  	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
> @@ -5002,7 +5003,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
>  	 * from a particular context which has been marked by
>  	 * memalloc_no{fs,io}_{save,restore}.
>  	 */
> -	alloc_mask = current_gfp_context(gfp_mask);
> +	alloc_mask = current_gfp_context(gfp);
>  	ac.spread_dirty_pages = false;
>  
>  	/*
> @@ -5014,8 +5015,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
>  	page = __alloc_pages_slowpath(alloc_mask, order, &ac);
>  
>  out:
> -	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
> -	    unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
> +	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
> +	    unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
>  		__free_pages(page, order);
>  		page = NULL;
>  	}
> 
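For readers skimming the archive: below is a minimal, hedged sketch of how a
caller reaches this entry point with the renamed parameter. The function
signature is taken from the hunk above; the helper name and the exact headers
are illustrative assumptions on my part, not part of the patch.

	#include <linux/gfp.h>		/* gfp_t, GFP_KERNEL, __alloc_pages_nodemask() */
	#include <linux/topology.h>	/* numa_node_id() */

	/*
	 * Hypothetical caller, not from the patch: allocate a single page
	 * (order 0) preferring the local node, allowing the allocator to
	 * sleep and reclaim (GFP_KERNEL), with no nodemask restriction.
	 */
	static struct page *grab_one_page(void)
	{
		return __alloc_pages_nodemask(GFP_KERNEL, 0, numa_node_id(),
					      NULL);
	}

The gfp value the caller passes here is what the renamed 'gfp' parameter in
the hunk receives before being masked with gfp_allowed_mask and copied into
alloc_mask.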




