From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx> Matches the change to the __alloc_pages_nodemask API. Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx> --- include/linux/gfp.h | 8 +++----- mm/mempolicy.c | 2 +- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index c466b08df0ec..9ddc7703ea81 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -478,11 +478,9 @@ static inline void arch_alloc_page(struct page *page, int order) { } struct page * __alloc_pages_nodemask(gfp_t gfp_mask, int preferred_nid, nodemask_t *nodemask); -static inline struct page * -__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid) +static inline struct page *__alloc_pages(gfp_t gfp, int preferred_nid) { - return __alloc_pages_nodemask(gfp_mask | __GFP_ORDER(order), - preferred_nid, NULL); + return __alloc_pages_nodemask(gfp, preferred_nid, NULL); } /* @@ -495,7 +493,7 @@ __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid)); - return __alloc_pages(gfp_mask, order, nid); + return __alloc_pages(gfp_mask | __GFP_ORDER(order), nid); } /* diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 310ad69effdd..0a22f106edb2 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2011,7 +2011,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, { struct page *page; - page = __alloc_pages(gfp, order, nid); + page = __alloc_pages(gfp | __GFP_ORDER(order), nid); /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ if (!static_branch_likely(&vm_numa_stat_key)) return page; -- 2.20.1