From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx> Matches the change to the __alloc_pages_nodemask API. Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx> --- mm/page_alloc.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4705d0e7cf6f..cf71547be903 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3482,13 +3482,14 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) * a page. */ static struct page * -get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, - const struct alloc_context *ac) +get_page_from_freelist(gfp_t gfp_mask, int alloc_flags, + const struct alloc_context *ac) { struct zoneref *z; struct zone *zone; struct pglist_data *last_pgdat_dirty_limit = NULL; bool no_fallback; + unsigned int order = gfp_order(gfp_mask); retry: /* @@ -3684,15 +3685,13 @@ __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, { struct page *page; - page = get_page_from_freelist(gfp_mask, order, - alloc_flags|ALLOC_CPUSET, ac); + page = get_page_from_freelist(gfp_mask, alloc_flags|ALLOC_CPUSET, ac); /* * fallback to ignore cpuset restriction if our nodes * are depleted */ if (!page) - page = get_page_from_freelist(gfp_mask, order, - alloc_flags, ac); + page = get_page_from_freelist(gfp_mask, alloc_flags, ac); return page; } @@ -3730,7 +3729,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, * allocation which will never fail due to oom_lock already held. */ page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & - ~__GFP_DIRECT_RECLAIM, order, + ~__GFP_DIRECT_RECLAIM, ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); if (page) goto out; @@ -3831,7 +3830,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, /* Try get a page from the freelist if available */ if (!page) - page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); + page = get_page_from_freelist(gfp_mask, alloc_flags, ac); if (page) { struct zone *zone = page_zone(page); @@ -4058,7 +4057,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, return NULL; retry: - page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); + page = get_page_from_freelist(gfp_mask, alloc_flags, ac); /* * If an allocation failed after direct reclaim, it could be because @@ -4363,7 +4362,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, * The adjusted alloc_flags might result in immediate success, so try * that first */ - page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); + page = get_page_from_freelist(gfp_mask, alloc_flags, ac); if (page) goto got_pg; @@ -4433,7 +4432,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, } /* Attempt with potentially adjusted zonelist and alloc_flags */ - page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); + page = get_page_from_freelist(gfp_mask, alloc_flags, ac); if (page) goto got_pg; @@ -4640,7 +4639,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, int preferred_nid, nodemask_t *nodemask) alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask); /* First allocation attempt */ - page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); + page = get_page_from_freelist(alloc_mask, alloc_flags, &ac); if (likely(page)) goto out; -- 2.20.1