[PATCH 11/18] mm: Extract THP hugepage allocation

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



The next patch is going to rework this code to support
MPOL_PREFERRED_MANY. This refactor makes that change much more
readable.

After the extraction, the resulting code makes it apparent that this can
be converted to a simple if ladder and thus allows removing the goto.

There are not meant to be any functional or behavioral changes.

Note that at this point MPOL_PREFERRED_MANY still isn't specially
handled for huge pages.

Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Signed-off-by: Ben Widawsky <ben.widawsky@xxxxxxxxx>
---
 mm/mempolicy.c | 96 ++++++++++++++++++++++++++------------------------
 1 file changed, 49 insertions(+), 47 deletions(-)

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 408ba78c8424..3ce2354fed44 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2232,6 +2232,48 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
 	return page;
 }
 
+static struct page *alloc_pages_vma_thp(gfp_t gfp, struct mempolicy *pol,
+					int order, int node)
+{
+	nodemask_t *nmask;
+	struct page *page;
+	int hpage_node = node;
+
+	/*
+	 * For hugepage allocation and non-interleave policy which allows the
+	 * current node (or other explicitly preferred node) we only try to
+	 * allocate from the current/preferred node and don't fall back to other
+	 * nodes, as the cost of remote accesses would likely offset THP
+	 * benefits.
+	 *
+	 * If the policy is interleave or multiple preferred nodes, or does not
+	 * allow the current node in its nodemask, we allocate the standard way.
+	 */
+	if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
+		hpage_node = first_node(pol->v.preferred_nodes);
+
+	nmask = policy_nodemask(gfp, pol);
+
+	/*
+	 * First, try to allocate THP only on local node, but don't reclaim
+	 * unnecessarily, just compact.
+	 */
+	page = __alloc_pages_nodemask(gfp | __GFP_THISNODE | __GFP_NORETRY,
+				      order, hpage_node, nmask);
+
+	/*
+	 * If hugepage allocations are configured to always synchronous compact
+	 * or the vma has been madvised to prefer hugepage backing, retry
+	 * allowing remote memory with both reclaim and compact as well.
+	 */
+	if (!page && (gfp & __GFP_DIRECT_RECLAIM))
+		page = __alloc_pages_nodemask(gfp, order, hpage_node, nmask);
+
+	VM_BUG_ON(page && nmask && !node_isset(page_to_nid(page), *nmask));
+
+	return page;
+}
+
 /**
  * 	alloc_pages_vma	- Allocate a page for a VMA.
  *
@@ -2272,57 +2314,17 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
 		mpol_cond_put(pol);
 		page = alloc_page_interleave(gfp, order, nid);
-		goto out;
-	}
-
-	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
-		int hpage_node = node;
-
-		/*
-		 * For hugepage allocation and non-interleave policy which
-		 * allows the current node (or other explicitly preferred
-		 * node) we only try to allocate from the current/preferred
-		 * node and don't fall back to other nodes, as the cost of
-		 * remote accesses would likely offset THP benefits.
-		 *
-		 * If the policy is interleave or multiple preferred nodes, or
-		 * does not allow the current node in its nodemask, we allocate
-		 * the standard way.
-		 */
-		if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
-			hpage_node = first_node(pol->v.preferred_nodes);
-
+	} else if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
+			    hugepage)) {
+		page = alloc_pages_vma_thp(gfp, pol, order, node);
+		mpol_cond_put(pol);
+	} else {
 		nmask = policy_nodemask(gfp, pol);
+		preferred_nid = policy_node(gfp, pol, node);
+		page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
 		mpol_cond_put(pol);
-
-		/*
-		 * First, try to allocate THP only on local node, but
-		 * don't reclaim unnecessarily, just compact.
-		 */
-		page = __alloc_pages_nodemask(gfp | __GFP_THISNODE |
-						      __GFP_NORETRY,
-					      order, hpage_node, nmask);
-
-		/*
-		 * If hugepage allocations are configured to always synchronous
-		 * compact or the vma has been madvised to prefer hugepage
-		 * backing, retry allowing remote memory with both reclaim and
-		 * compact as well.
-		 */
-		if (!page && (gfp & __GFP_DIRECT_RECLAIM))
-			page = __alloc_pages_nodemask(gfp, order, hpage_node,
-						      nmask);
-
-		VM_BUG_ON(page && nmask &&
-			  !node_isset(page_to_nid(page), *nmask));
-		goto out;
 	}
 
-	nmask = policy_nodemask(gfp, pol);
-	preferred_nid = policy_node(gfp, pol, node);
-	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
-	mpol_cond_put(pol);
-out:
 	return page;
 }
 EXPORT_SYMBOL(alloc_pages_vma);
-- 
2.27.0





[Index of Archives]     [Linux ARM Kernel]     [Linux ARM]     [Linux Omap]     [Fedora ARM]     [IETF Annouce]     [Bugtraq]     [Linux OMAP]     [Linux MIPS]     [eCos]     [Asterisk Internet PBX]     [Linux API]

  Powered by Linux