[PATCH 15/18] mm: convert callers of __alloc_pages_nodemask to pmask

Now that the infrastructure is in place to both select, and allocate from, a
set of preferred nodes as specified by policy (or, perhaps in the future, by
the calling function), start transitioning over the functions that can
benefit from this.

This patch looks stupid. It seems to artificially build a nodemask on the
stack and then just use the first node from that mask - in other words, a
nop that only adds overhead. It does. The reason is that this is a
preparatory patch for when __alloc_pages_nodemask() itself is switched over
to taking a mask of preferred nodes. Doing it this way helps with
readability and bisectability.
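
For illustration only (not part of the diff below), the pattern introduced in
each caller boils down to roughly the following, reusing the existing
nodemask_of_node() and first_node() helpers:

	nodemask_t pmask;

	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	/* Build a single-node mask on the stack ... */
	pmask = nodemask_of_node(nid);

	/*
	 * ... then collapse it straight back to a node id, so behaviour is
	 * unchanged until __alloc_pages_nodemask() itself accepts a mask of
	 * preferences.
	 */
	page = __alloc_pages_nodemask(gfp_mask, order, first_node(pmask), nmask);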

Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Cc: Mina Almasry <almasrymina@xxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Signed-off-by: Ben Widawsky <ben.widawsky@xxxxxxxxx>
---
 mm/hugetlb.c   | 11 ++++++++---
 mm/mempolicy.c | 38 +++++++++++++++++++++++---------------
 2 files changed, 31 insertions(+), 18 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 57ece74e3aae..71b6750661df 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1687,6 +1687,12 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 	int order = huge_page_order(h);
 	struct page *page;
 	bool alloc_try_hard = true;
+	nodemask_t pmask;
+
+	if (nid == NUMA_NO_NODE)
+		nid = numa_mem_id();
+
+	pmask = nodemask_of_node(nid);
 
 	/*
 	 * By default we always try hard to allocate the page with
@@ -1700,9 +1706,8 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
 	if (alloc_try_hard)
 		gfp_mask |= __GFP_RETRY_MAYFAIL;
-	if (nid == NUMA_NO_NODE)
-		nid = numa_mem_id();
-	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
+	page = __alloc_pages_nodemask(gfp_mask, order, first_node(pmask),
+				      nmask);
 	if (page)
 		__count_vm_event(HTLB_BUDDY_PGALLOC);
 	else
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3c48f299d344..9521bb46aa00 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2270,11 +2270,11 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
 }
 
 static struct page *alloc_pages_vma_thp(gfp_t gfp, struct mempolicy *pol,
-					int order, int node)
+					int order, nodemask_t *prefmask)
 {
 	nodemask_t *nmask;
 	struct page *page;
-	int hpage_node = node;
+	int hpage_node = first_node(*prefmask);
 
 	/*
 	 * For hugepage allocation and non-interleave policy which allows the
@@ -2286,9 +2286,6 @@ static struct page *alloc_pages_vma_thp(gfp_t gfp, struct mempolicy *pol,
 	 * If the policy is interleave or multiple preferred nodes, or does not
 	 * allow the current node in its nodemask, we allocate the standard way.
 	 */
-	if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
-		hpage_node = first_node(pol->v.preferred_nodes);
-
 	nmask = policy_nodemask(gfp, pol);
 
 	/*
@@ -2340,10 +2337,14 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 {
 	struct mempolicy *pol;
 	struct page *page;
-	int preferred_nid;
-	nodemask_t *nmask;
+	nodemask_t *nmask, *pmask, tmp;
 
 	pol = get_vma_policy(vma, addr);
+	pmask = policy_preferred_nodes(gfp, pol);
+	if (!pmask) {
+		tmp = nodemask_of_node(node);
+		pmask = &tmp;
+	}
 
 	if (pol->mode == MPOL_INTERLEAVE) {
 		unsigned nid;
@@ -2353,12 +2354,12 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 		page = alloc_page_interleave(gfp, order, nid);
 	} else if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
 			    hugepage)) {
-		page = alloc_pages_vma_thp(gfp, pol, order, node);
+		page = alloc_pages_vma_thp(gfp, pol, order, pmask);
 		mpol_cond_put(pol);
 	} else {
 		nmask = policy_nodemask(gfp, pol);
-		preferred_nid = policy_node(gfp, pol, node);
-		page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
+		page = __alloc_pages_nodemask(gfp, order, first_node(*pmask),
+					      nmask);
 		mpol_cond_put(pol);
 	}
 
@@ -2393,12 +2394,19 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 	 * No reference counting needed for current->mempolicy
 	 * nor system default_policy
 	 */
-	if (pol->mode == MPOL_INTERLEAVE)
+	if (pol->mode == MPOL_INTERLEAVE) {
 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
-	else
-		page = __alloc_pages_nodemask(gfp, order,
-				policy_node(gfp, pol, numa_node_id()),
-				policy_nodemask(gfp, pol));
+	} else {
+		nodemask_t tmp, *pmask;
+
+		pmask = policy_preferred_nodes(gfp, pol);
+		if (!pmask) {
+			tmp = nodemask_of_node(numa_node_id());
+			pmask = &tmp;
+		}
+		page = __alloc_pages_nodemask(gfp, order, first_node(*pmask),
+					      policy_nodemask(gfp, pol));
+	}
 
 	return page;
 }
-- 
2.27.0