From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx> Remove the order parameter from alloc_page_interleave() and have its callers encode the order in the gfp flags via __GFP_ORDER() instead, matching the change to the __alloc_pages_nodemask() API. Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx> --- mm/mempolicy.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 0a22f106edb2..8d5375cdd928 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2006,12 +2006,11 @@ bool mempolicy_nodemask_intersects(struct task_struct *tsk, /* Allocate a page in interleaved policy. Own path because it needs to do special accounting. */ -static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, - unsigned nid) +static struct page *alloc_page_interleave(gfp_t gfp, unsigned nid) { struct page *page; - page = __alloc_pages(gfp | __GFP_ORDER(order), nid); + page = __alloc_pages(gfp, nid); /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ if (!static_branch_likely(&vm_numa_stat_key)) return page; @@ -2062,7 +2061,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); mpol_cond_put(pol); - page = alloc_page_interleave(gfp, order, nid); + page = alloc_page_interleave(gfp | __GFP_ORDER(order), nid); goto out; } @@ -2128,7 +2127,8 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order) * nor system default_policy */ if (pol->mode == MPOL_INTERLEAVE) - page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); + page = alloc_page_interleave(gfp | __GFP_ORDER(order), + interleave_nodes(pol)); else page = __alloc_pages_nodemask(gfp | __GFP_ORDER(order), policy_node(gfp, pol, numa_node_id()), -- 2.20.1