Re: [RFC PATCH] mm, page_alloc: avoid page_to_pfn() in move_freepages()

On Wed 27-11-19 18:28:00, Kefeng Wang wrote:
> The start_pfn and end_pfn are already available in move_freepages_block(),
> pfn_valid_within() should validate the pfn before the page is touched,
> or we might access an uninitialized page with CONFIG_HOLES_IN_ZONE enabled.
> 
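For reference, a minimal sketch (illustration only, not the kernel code
itself) of the iteration pattern the patch switches to: validate the pfn
first, and only then derive the struct page from it. It assumes the usual
pfn_valid_within()/pfn_to_page() helpers and leaves out the buddy handling,
where the real loop advances by 1 << order:

	struct page *page;
	unsigned long pfn;

	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
		/* Skip pfns inside a memory hole before touching any page. */
		if (!pfn_valid_within(pfn))
			continue;

		/* Safe now: the pfn is known to be valid. */
		page = pfn_to_page(pfn);
		/* ... inspect the page and move it if it is free ... */
	}
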
> Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
> Cc: Michal Hocko <mhocko@xxxxxxxx>
> Cc: Vlastimil Babka <vbabka@xxxxxxx>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
> ---
> 
> Here is an oops in 4.4 (arm64 with CONFIG_HOLES_IN_ZONE enabled),

Is this reproducible with the current upstream kernel? There were large
changes in this area since 4.4.

Btw. the below should be part of the changelog.

> Unable to handle kernel NULL pointer dereference at virtual address 00000000
> pgd = ffffff8008f7e000
> [00000000] *pgd=0000000017ffe003, *pud=0000000017ffe003, *pmd=0000000000000000
> Internal error: Oops: 96000007 [#1] SMP
> CPU: 0 PID: 0 Comm: swapper/0 Tainted: G        W  O    4.4.185 #1
> 
> PC is at move_freepages+0x80/0x10c
> LR is at move_freepages_block+0xd4/0xf4
> pc : [<ffffff80083332e8>] lr : [<ffffff8008333448>] pstate: 80000085
> [...]
> [<ffffff80083332e8>] move_freepages+0x80/0x10c
> [<ffffff8008333448>] move_freepages_block+0xd4/0xf4
> [<ffffff8008335414>] __rmqueue+0x2bc/0x44c
> [<ffffff800833580c>] get_page_from_freelist+0x268/0x600
> [<ffffff8008335e84>] __alloc_pages_nodemask+0x184/0x88c
> [<ffffff800837fae8>] new_slab+0xd0/0x494
> [<ffffff8008381834>] ___slab_alloc.constprop.29+0x1c8/0x2e8
> [<ffffff80083819a8>] __slab_alloc.constprop.28+0x54/0x84
> [<ffffff8008381e68>] kmem_cache_alloc+0x64/0x198
> [<ffffff80085b04e0>] __build_skb+0x44/0xa4
> [<ffffff80085b06e4>] __netdev_alloc_skb+0xe4/0x134
> 
>  mm/page_alloc.c | 25 ++++++++++++-------------
>  1 file changed, 12 insertions(+), 13 deletions(-)
> 
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index f391c0c4ed1d..59f2c2b860fe 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -2246,19 +2246,21 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
>   * boundary. If alignment is required, use move_freepages_block()
>   */
>  static int move_freepages(struct zone *zone,
> -			  struct page *start_page, struct page *end_page,
> +			  unsigned long start_pfn, unsigned long end_pfn,
>  			  int migratetype, int *num_movable)
>  {
>  	struct page *page;
> +	unsigned long pfn;
>  	unsigned int order;
>  	int pages_moved = 0;
>  
> -	for (page = start_page; page <= end_page;) {
> -		if (!pfn_valid_within(page_to_pfn(page))) {
> -			page++;
> +	for (pfn = start_pfn; pfn <= end_pfn;) {
> +		if (!pfn_valid_within(pfn)) {
> +			pfn++;
>  			continue;
>  		}
>  
> +		page = pfn_to_page(pfn);
>  		if (!PageBuddy(page)) {
>  			/*
>  			 * We assume that pages that could be isolated for
> @@ -2268,8 +2270,7 @@ static int move_freepages(struct zone *zone,
>  			if (num_movable &&
>  					(PageLRU(page) || __PageMovable(page)))
>  				(*num_movable)++;
> -
> -			page++;
> +			pfn++;
>  			continue;
>  		}
>  
> @@ -2280,6 +2281,7 @@ static int move_freepages(struct zone *zone,
>  		order = page_order(page);
>  		move_to_free_area(page, &zone->free_area[order], migratetype);
>  		page += 1 << order;
> +		pfn += 1 << order;
>  		pages_moved += 1 << order;
>  	}
>  
> @@ -2289,25 +2291,22 @@ static int move_freepages(struct zone *zone,
>  int move_freepages_block(struct zone *zone, struct page *page,
>  				int migratetype, int *num_movable)
>  {
> -	unsigned long start_pfn, end_pfn;
> -	struct page *start_page, *end_page;
> +	unsigned long start_pfn, end_pfn, pfn;
>  
>  	if (num_movable)
>  		*num_movable = 0;
>  
> -	start_pfn = page_to_pfn(page);
> +	pfn = start_pfn = page_to_pfn(page);
>  	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
> -	start_page = pfn_to_page(start_pfn);
> -	end_page = start_page + pageblock_nr_pages - 1;
>  	end_pfn = start_pfn + pageblock_nr_pages - 1;
>  
>  	/* Do not cross zone boundaries */
>  	if (!zone_spans_pfn(zone, start_pfn))
> -		start_page = page;
> +		start_pfn = pfn;
>  	if (!zone_spans_pfn(zone, end_pfn))
>  		return 0;
>  
> -	return move_freepages(zone, start_page, end_page, migratetype,
> +	return move_freepages(zone, start_pfn, end_pfn, migratetype,
>  								num_movable);
>  }
>  
> -- 
> 2.20.1
> 

-- 
Michal Hocko
SUSE Labs



