In preparation for allocating frozen pages, stop initialising the page
refcount in __alloc_pages_slowpath().

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 mm/page_alloc.c | 30 +++++++++---------------------
 1 file changed, 9 insertions(+), 21 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7c306231b336..26f8ed480ebb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5055,10 +5055,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * that first
 	 */
 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
-	if (page) {
-		set_page_refcounted(page);
+	if (page)
 		goto got_pg;
-	}
 
 	/*
 	 * For costly allocations, try direct compaction first, as it's likely
@@ -5077,10 +5075,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 						alloc_flags, ac,
 						INIT_COMPACT_PRIORITY,
 						&compact_result);
-		if (page) {
-			set_page_refcounted(page);
+		if (page)
 			goto got_pg;
-		}
 
 		/*
 		 * Checks for costly allocations with __GFP_NORETRY, which
@@ -5139,10 +5135,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 	/* Attempt with potentially adjusted zonelist and alloc_flags */
 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
-	if (page) {
-		set_page_refcounted(page);
+	if (page)
 		goto got_pg;
-	}
 
 	/* Caller is not willing to reclaim, we can't balance anything */
 	if (!can_direct_reclaim)
@@ -5155,18 +5149,14 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
 							&did_some_progress);
-	if (page) {
-		set_page_refcounted(page);
+	if (page)
 		goto got_pg;
-	}
 
 	/* Try direct compaction and then allocating */
 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
 					compact_priority, &compact_result);
-	if (page) {
-		set_page_refcounted(page);
+	if (page)
 		goto got_pg;
-	}
 
 	/* Do not loop if specifically requested */
 	if (gfp_mask & __GFP_NORETRY)
@@ -5202,10 +5192,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 	/* Reclaim has failed us, start killing things */
 	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
-	if (page) {
-		set_page_refcounted(page);
+	if (page)
 		goto got_pg;
-	}
 
 	/* Avoid allocations with no watermarks from looping endlessly */
 	if (tsk_is_oom_victim(current) &&
@@ -5258,10 +5246,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * the situation worse
 	 */
 	page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
-	if (page) {
-		set_page_refcounted(page);
+	if (page)
 		goto got_pg;
-	}
 
 	cond_resched();
 	goto retry;
@@ -5542,6 +5528,8 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 	ac.nodemask = nodemask;
 
 	page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
+	if (page)
+		set_page_refcounted(page);
 
 out:
 	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
-- 
2.35.1
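
As an aside for readers following the series: after this patch, __alloc_pages_slowpath() hands back pages whose refcount has not yet been initialised, and the single remaining set_page_refcounted() call in __alloc_pages() sets it once on success. The standalone sketch below (hypothetical names throughout: frozen_page, try_fast_alloc, try_reclaim_alloc, alloc_page_refcounted; it is not the kernel code) illustrates that pattern of hoisting refcount initialisation out of the individual allocation attempts into the one entry point:

/*
 * Minimal standalone sketch (not kernel code): every allocation
 * attempt returns a "frozen" page with refcount 0, and only the
 * single entry point initialises the refcount, mirroring how
 * set_page_refcounted() moves out of __alloc_pages_slowpath()
 * and into __alloc_pages() in the patch above.
 */
#include <stddef.h>
#include <stdio.h>

struct frozen_page {
	int refcount;			/* 0 while the page is still frozen */
};

static struct frozen_page reclaimed_page;

/* One attempt inside the slow path; it never touches the refcount. */
static struct frozen_page *try_fast_alloc(void)
{
	return NULL;			/* pretend the freelist is empty */
}

/* A later attempt; still returns the page frozen. */
static struct frozen_page *try_reclaim_alloc(void)
{
	return &reclaimed_page;		/* pretend reclaim freed a page */
}

/* Analogue of __alloc_pages_slowpath(): returns a frozen page or NULL. */
static struct frozen_page *alloc_slowpath(void)
{
	struct frozen_page *page;

	page = try_fast_alloc();
	if (page)
		return page;

	return try_reclaim_alloc();
}

/* Analogue of __alloc_pages(): the one place the refcount is set. */
static struct frozen_page *alloc_page_refcounted(void)
{
	struct frozen_page *page = alloc_slowpath();

	if (page)
		page->refcount = 1;	/* stand-in for set_page_refcounted() */
	return page;
}

int main(void)
{
	struct frozen_page *page = alloc_page_refcounted();

	printf("refcount after allocation: %d\n", page ? page->refcount : -1);
	return 0;
}

Concentrating the refcount store in one caller is presumably what later lets users who want frozen (refcount-zero) pages skip that step entirely, per the "in preparation for allocating frozen pages" note in the commit message.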