From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx> Matches the change to the __alloc_pages_nodemask API. Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx> --- arch/x86/mm/init.c | 3 ++- arch/x86/mm/pgtable.c | 7 ++++--- drivers/base/devres.c | 2 +- include/linux/gfp.h | 6 +++--- mm/mmu_gather.c | 2 +- mm/page_alloc.c | 8 ++++---- 6 files changed, 15 insertions(+), 13 deletions(-) diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index f905a2371080..963f30581291 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -94,7 +94,8 @@ __ref void *alloc_low_pages(unsigned int num) unsigned int order; order = get_order((unsigned long)num << PAGE_SHIFT); - return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order); + return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO | + __GFP_ORDER(order)); } if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) { diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 7bd01709a091..3d3d13f859e5 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -401,8 +401,8 @@ static inline pgd_t *_pgd_alloc(void) * We allocate one page for pgd. */ if (!SHARED_KERNEL_PMD) - return (pgd_t *)__get_free_pages(PGALLOC_GFP, - PGD_ALLOCATION_ORDER); + return (pgd_t *)__get_free_pages(PGALLOC_GFP | + __GFP_ORDER(PGD_ALLOCATION_ORDER)); /* * Now PAE kernel is not running as a Xen domain. We can allocate @@ -422,7 +422,8 @@ static inline void _pgd_free(pgd_t *pgd) static inline pgd_t *_pgd_alloc(void) { - return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER); + return (pgd_t *)__get_free_pages(PGALLOC_GFP | + __GFP_ORDER(PGD_ALLOCATION_ORDER)); } static inline void _pgd_free(pgd_t *pgd) diff --git a/drivers/base/devres.c b/drivers/base/devres.c index e038e2b3b7ea..572e81282285 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -992,7 +992,7 @@ unsigned long devm_get_free_pages(struct device *dev, struct pages_devres *devres; unsigned long addr; - addr = __get_free_pages(gfp_mask, order); + addr = __get_free_pages(gfp_mask | __GFP_ORDER(order)); if (unlikely(!addr)) return 0; diff --git a/include/linux/gfp.h b/include/linux/gfp.h index e7845c2510db..23fbd6da1fb6 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -536,7 +536,7 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, struct vm_area_struct *vma, #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ alloc_pages_vma(gfp_mask, vma, addr, node, false) -extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); +extern unsigned long __get_free_pages(gfp_t gfp_mask); extern unsigned long get_zeroed_page(gfp_t gfp_mask); void *alloc_pages_exact(size_t size, gfp_t gfp_mask); @@ -544,10 +544,10 @@ void free_pages_exact(void *virt, size_t size); void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); #define __get_free_page(gfp_mask) \ - __get_free_pages((gfp_mask), 0) + __get_free_pages(gfp_mask) #define __get_dma_pages(gfp_mask, order) \ - __get_free_pages((gfp_mask) | GFP_DMA, (order)) + __get_free_pages((gfp_mask) | GFP_DMA | __GFP_ORDER(order)) extern void __free_pages(struct page *page, unsigned int order); extern void free_pages(unsigned long addr, unsigned int order); diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c index f2f03c655807..d370621c8c5d 100644 --- a/mm/mmu_gather.c +++ b/mm/mmu_gather.c @@ -26,7 +26,7 @@ static bool tlb_next_batch(struct mmu_gather *tlb) if (tlb->batch_count == MAX_GATHER_BATCH_COUNT) return false; - batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 
0); + batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN); if (!batch) return false; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 13191fe2f19d..e26536825a0b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4681,11 +4681,11 @@ EXPORT_SYMBOL(__alloc_pages_nodemask); * address cannot represent highmem pages. Use alloc_pages and then kmap if * you need to access high mem. */ -unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) +unsigned long __get_free_pages(gfp_t gfp_mask) { struct page *page; - page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); + page = __alloc_pages(gfp_mask & ~__GFP_HIGHMEM, numa_mem_id()); if (!page) return 0; return (unsigned long) page_address(page); @@ -4694,7 +4694,7 @@ EXPORT_SYMBOL(__get_free_pages); unsigned long get_zeroed_page(gfp_t gfp_mask) { - return __get_free_pages(gfp_mask | __GFP_ZERO, 0); + return __get_free_page(gfp_mask | __GFP_ZERO); } EXPORT_SYMBOL(get_zeroed_page); @@ -4869,7 +4869,7 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask) if (WARN_ON_ONCE(gfp_mask & __GFP_COMP)) gfp_mask &= ~__GFP_COMP; - addr = __get_free_pages(gfp_mask, order); + addr = __get_free_pages(gfp_mask | __GFP_ORDER(order)); return make_alloc_exact(addr, order, size); } EXPORT_SYMBOL(alloc_pages_exact); -- 2.20.1
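For anyone converting out-of-tree callers, the change is mechanical. A sketch
of the before/after calling convention (illustrative only; the GFP_KERNEL call
sites below are invented for the example, and __GFP_ORDER() is the
order-in-gfp_t encoding this series introduces for __alloc_pages_nodemask):

	/* Before this series: the order is a separate argument. */
	addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);

	/* After: the order travels in the gfp_t itself. */
	addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO | __GFP_ORDER(2));

	/* Order-0 callers can keep using the __get_free_page() wrapper. */
	addr = __get_free_page(GFP_KERNEL);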