[PATCH 16/18] alloc_pages_nodemask: turn preferred nid into a nodemask

The guts of the page allocator already understand that the memory
policy might provide multiple preferred nodes. Ideally, the allocation
function itself wouldn't take multiple nodes until one of the callers
decided it would be useful. Unfortunately, as the call stack stands
today, the caller of __alloc_pages_nodemask() is responsible for
figuring out the preferred nodes (when no policy is in place, this is
almost always numa_node_id()). The purpose of this patch is to allow
multiple preferred nodes while keeping the existing logical preference
assignments in place.

In other words, everything at and below __alloc_pages_nodemask() has no
concept of policy, and this patch maintains that division.
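
Below is a minimal sketch, not part of this patch, of what a
policy-free caller looks like under the new signature: the preferred
mask is built from the local node on the caller's stack and handed
down, so nothing below __alloc_pages_nodemask() ever sees policy (the
function name alloc_local_preferred() is made up for illustration):

        static struct page *alloc_local_preferred(gfp_t gfp_mask, unsigned int order)
        {
                /* No policy: prefer the local node; NULL bindmask allows any node. */
                nodemask_t pmask = nodemask_of_node(numa_node_id());

                return __alloc_pages_nodemask(gfp_mask, order, &pmask, NULL);
        }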

As with the bind nodemask, a NULL or empty preference nodemask is
allowed.
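
For example, a caller that wants neither binding nor preference should
be able to pass NULL for both masks (a hedged sketch; the NULL/empty
fallback itself is implemented earlier in this series, not here):

        /* No binding, no preference: let the allocator pick. */
        page = __alloc_pages_nodemask(GFP_KERNEL, 0, NULL, NULL);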

A note on allocation: one of the obvious fallouts from this is that
some callers now build nodemasks on their stack. When no policy is in
place, these nodemasks are simply nodemask_of_node(numa_node_id()).
Some of this is addressed in the next patch. The alternatives are
kmalloc(), which is unsafe in these paths; a percpu variable, which
can't work because a nodemask can be up to 128B at the maximum
NODES_SHIFT of 10 on x86 and ia64, too large for a percpu variable; or
a lookup table. There's no reason a lookup table couldn't work, but it
seems like a premature optimization: for the more extreme case of a
large system with 1024 nodes, each nodemask would be 128B, so the table
alone would be 128K.
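
For scale, here is a rough sketch of what that lookup table would look
like (node_to_mask/init_node_to_mask are made-up names for
illustration); with NODES_SHIFT == 10 a nodemask is 1024 bits == 128B,
so the array alone costs 1024 * 128B == 128K:

        /* Hypothetical table: one preallocated nodemask per possible node. */
        static nodemask_t node_to_mask[MAX_NUMNODES];   /* 1024 * 128B == 128K */

        static void __init init_node_to_mask(void)
        {
                int nid;

                for_each_node(nid)
                        node_to_mask[nid] = nodemask_of_node(nid);
        }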

I'm very open to better solutions.

Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Jason Gunthorpe <jgg@xxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Signed-off-by: Ben Widawsky <ben.widawsky@xxxxxxxxx>
---
 include/linux/gfp.h     |  8 +++-----
 include/linux/migrate.h |  4 ++--
 mm/hugetlb.c            |  3 +--
 mm/mempolicy.c          | 27 ++++++---------------------
 mm/page_alloc.c         | 10 ++++------
 5 files changed, 16 insertions(+), 36 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 9ab5c07579bd..47e9c02c17ae 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -499,15 +499,13 @@ static inline int arch_make_page_accessible(struct page *page)
 }
 #endif
 
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
-							nodemask_t *nodemask);
+struct page *__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+				    nodemask_t *prefmask, nodemask_t *nodemask);
 
 static inline struct page *
 __alloc_pages_nodes(nodemask_t *nodes, gfp_t gfp_mask, unsigned int order)
 {
-	return __alloc_pages_nodemask(gfp_mask, order, first_node(*nodes),
-				      NULL);
+	return __alloc_pages_nodemask(gfp_mask, order, nodes, NULL);
 }
 
 /*
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 3e546cbf03dd..91b399ec9249 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -37,6 +37,7 @@ static inline struct page *new_page_nodemask(struct page *page,
 	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
 	unsigned int order = 0;
 	struct page *new_page = NULL;
+	nodemask_t pmask = nodemask_of_node(preferred_nid);
 
 	if (PageHuge(page))
 		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
@@ -50,8 +51,7 @@ static inline struct page *new_page_nodemask(struct page *page,
 	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
 		gfp_mask |= __GFP_HIGHMEM;
 
-	new_page = __alloc_pages_nodemask(gfp_mask, order,
-				preferred_nid, nodemask);
+	new_page = __alloc_pages_nodemask(gfp_mask, order, &pmask, nodemask);
 
 	if (new_page && PageTransHuge(new_page))
 		prep_transhuge_page(new_page);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 71b6750661df..52e097aed7ed 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1706,8 +1706,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
 	if (alloc_try_hard)
 		gfp_mask |= __GFP_RETRY_MAYFAIL;
-	page = __alloc_pages_nodemask(gfp_mask, order, first_node(pmask),
-				      nmask);
+	page = __alloc_pages_nodemask(gfp_mask, order, &pmask, nmask);
 	if (page)
 		__count_vm_event(HTLB_BUDDY_PGALLOC);
 	else
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9521bb46aa00..fb49bea41ab8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2274,7 +2274,6 @@ static struct page *alloc_pages_vma_thp(gfp_t gfp, struct mempolicy *pol,
 {
 	nodemask_t *nmask;
 	struct page *page;
-	int hpage_node = first_node(*prefmask);
 
 	/*
 	 * For hugepage allocation and non-interleave policy which allows the
@@ -2282,9 +2281,6 @@ static struct page *alloc_pages_vma_thp(gfp_t gfp, struct mempolicy *pol,
 	 * allocate from the current/preferred node and don't fall back to other
 	 * nodes, as the cost of remote accesses would likely offset THP
 	 * benefits.
-	 *
-	 * If the policy is interleave or multiple preferred nodes, or does not
-	 * allow the current node in its nodemask, we allocate the standard way.
 	 */
 	nmask = policy_nodemask(gfp, pol);
 
@@ -2293,7 +2289,7 @@ static struct page *alloc_pages_vma_thp(gfp_t gfp, struct mempolicy *pol,
 	 * unnecessarily, just compact.
 	 */
 	page = __alloc_pages_nodemask(gfp | __GFP_THISNODE | __GFP_NORETRY,
-				      order, hpage_node, nmask);
+				      order, prefmask, nmask);
 
 	/*
 	 * If hugepage allocations are configured to always synchronous compact
@@ -2301,7 +2297,7 @@ static struct page *alloc_pages_vma_thp(gfp_t gfp, struct mempolicy *pol,
 	 * allowing remote memory with both reclaim and compact as well.
 	 */
 	if (!page && (gfp & __GFP_DIRECT_RECLAIM))
-		page = __alloc_pages_nodemask(gfp, order, hpage_node, nmask);
+		page = __alloc_pages_nodemask(gfp, order, prefmask, nmask);
 
 	VM_BUG_ON(page && nmask && !node_isset(page_to_nid(page), *nmask));
 
@@ -2337,14 +2333,10 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 {
 	struct mempolicy *pol;
 	struct page *page;
-	nodemask_t *nmask, *pmask, tmp;
+	nodemask_t *nmask, *pmask;
 
 	pol = get_vma_policy(vma, addr);
 	pmask = policy_preferred_nodes(gfp, pol);
-	if (!pmask) {
-		tmp = nodemask_of_node(node);
-		pmask = &tmp;
-	}
 
 	if (pol->mode == MPOL_INTERLEAVE) {
 		unsigned nid;
@@ -2358,9 +2350,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 		mpol_cond_put(pol);
 	} else {
 		nmask = policy_nodemask(gfp, pol);
-		page = __alloc_pages_nodemask(gfp, order, first_node(*pmask),
-					      nmask);
 		mpol_cond_put(pol);
+		page = __alloc_pages_nodemask(gfp, order, pmask, nmask);
 	}
 
 	return page;
@@ -2397,14 +2388,8 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 	if (pol->mode == MPOL_INTERLEAVE) {
 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
 	} else {
-		nodemask_t tmp, *pmask;
-
-		pmask = policy_preferred_nodes(gfp, pol);
-		if (!pmask) {
-			tmp = nodemask_of_node(numa_node_id());
-			pmask = &tmp;
-		}
-		page = __alloc_pages_nodemask(gfp, order, first_node(*pmask),
+		page = __alloc_pages_nodemask(gfp, order,
+					      policy_preferred_nodes(gfp, pol),
 					      policy_nodemask(gfp, pol));
 	}
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c6f8f112a5d4..0f90419fe0d8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4967,15 +4967,13 @@ static inline void finalise_ac(gfp_t gfp_mask, struct alloc_context *ac)
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
-							nodemask_t *nodemask)
+struct page *__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+				    nodemask_t *prefmask, nodemask_t *nodemask)
 {
 	struct page *page;
 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
 	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
 	struct alloc_context ac = { };
-	nodemask_t prefmask = nodemask_of_node(preferred_nid);
 
 	/*
 	 * There are several places where we assume that the order value is sane
@@ -4988,11 +4986,11 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 
 	gfp_mask &= gfp_allowed_mask;
 	alloc_mask = gfp_mask;
-	if (!prepare_alloc_pages(gfp_mask, order, &prefmask, nodemask, &ac,
+	if (!prepare_alloc_pages(gfp_mask, order, prefmask, nodemask, &ac,
 				 &alloc_mask, &alloc_flags))
 		return NULL;
 
-	ac.prefmask = &prefmask;
+	ac.prefmask = prefmask;
 
 	finalise_ac(gfp_mask, &ac);
 
-- 
2.27.0




