[PATCH 21/21] mm/hugetlb: introduce hugetlb_dmb

If specified on the kernel command line, the hugetlb_dmb parameter
modifies the behavior of the hugetlb_cma parameter so that the
Contiguous Memory Allocator areas it reserves for gigantic page
allocation are placed within Designated Movable Blocks.

This allows the kernel page allocator to use the memory more
aggressively than traditional CMA memory pools, at the cost of
potentially increased allocation latency.
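
For illustration only (the 4G size below is hypothetical and not taken
from this patch), a boot command line combining the two parameters
might look like:

    hugetlb_cma=4G hugetlb_dmb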

Signed-off-by: Doug Berger <opendmb@xxxxxxxxx>
---
 Documentation/admin-guide/kernel-parameters.txt |  3 +++
 mm/hugetlb.c                                    | 16 +++++++++++++---
 2 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 8141fac7c7cb..b29d1fa253d6 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1732,6 +1732,9 @@
 			hugepages using the CMA allocator. If enabled, the
 			boot-time allocation of gigantic hugepages is skipped.
 
+	hugetlb_dmb	[HW,CMA] Causes hugetlb_cma to use Designated Movable
+			Blocks for any CMA areas it reserves.
+
 	hugetlb_free_vmemmap=
 			[KNL] Requires CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 			enabled.
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2f354423f50f..d3fb8b1f443f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -54,6 +54,7 @@ struct hstate hstates[HUGE_MAX_HSTATE];
 #ifdef CONFIG_CMA
 static struct cma *hugetlb_cma[MAX_NUMNODES];
 static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
+static bool hugetlb_dmb __initdata;
 static bool hugetlb_cma_page(struct page *page, unsigned int order)
 {
 	return cma_pages_valid(hugetlb_cma[page_to_nid(page)], page,
@@ -7321,6 +7322,14 @@ static int __init cmdline_parse_hugetlb_cma(char *p)
 
 early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
 
+static int __init cmdline_parse_hugetlb_dmb(char *p)
+{
+	hugetlb_dmb = true;
+	return 0;
+}
+
+early_param("hugetlb_dmb", cmdline_parse_hugetlb_dmb);
+
 void __init hugetlb_cma_reserve(int order)
 {
 	unsigned long size, reserved, per_node;
@@ -7396,10 +7405,11 @@ void __init hugetlb_cma_reserve(int order)
 		 * may be returned to CMA allocator in the case of
 		 * huge page demotion.
 		 */
-		res = cma_declare_contiguous_nid(0, size, 0,
+		res = __cma_declare_contiguous_nid(0, size, 0,
 						PAGE_SIZE << HUGETLB_PAGE_ORDER,
-						 0, false, name,
-						 &hugetlb_cma[nid], nid);
+						0, false, name,
+						&hugetlb_cma[nid], nid,
+						hugetlb_dmb);
 		if (res) {
 			pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
 				res, nid);
-- 
2.25.1



