[PATCH 4/5] page_alloc: Make movablecore_map have higher priority.

If kernelcore or movablecore is specified together with movablecore_map,
movablecore_map takes higher priority: the ZONE_MOVABLE boundaries it
specifies are satisfied first, and kernelcore is only spread over the
memory below those boundaries.
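
For example, assuming the movablecore_map=size@start syntax introduced
earlier in this series (a sketch only; the exact option format is whatever
the earlier patches document), booting with something like:

	movablecore_map=2G@4G kernelcore=1G

would first reserve the 2G range starting at 4G for ZONE_MOVABLE, and only
then spread the requested 1G of kernelcore over the memory below that
boundary on the node that owns the range.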

Signed-off-by: Tang Chen <tangchen@xxxxxxxxxxxxxx>
Reviewed-by: Wen Congyang <wency@xxxxxxxxxxxxxx>
Tested-by: Lin Feng <linfeng@xxxxxxxxxxxxxx>
---
 mm/page_alloc.c |   29 +++++++++++++++++++++++++++--
 1 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ae29970..c8dfb1e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4774,7 +4774,7 @@ static unsigned long __init early_calculate_totalpages(void)
 static void __init find_zone_movable_pfns_for_nodes(void)
 {
 	int i, nid;
-	unsigned long usable_startpfn;
+	unsigned long usable_startpfn, node_movable_limit;
 	unsigned long kernelcore_node, kernelcore_remaining;
 	/* save the state before borrow the nodemask */
 	nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
@@ -4803,7 +4803,6 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 		required_kernelcore = max(required_kernelcore, corepages);
 	}
 
-	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
 	if (!required_kernelcore)
 		goto out;
 
@@ -4817,6 +4816,9 @@ restart:
 	for_each_node_state(nid, N_HIGH_MEMORY) {
 		unsigned long start_pfn, end_pfn;
 
+		node_movable_limit = zone_movable_pfn[nid];
+		zone_movable_pfn[nid] = 0;
+
 		/*
 		 * Recalculate kernelcore_node if the division per node
 		 * now exceeds what is necessary to satisfy the requested
@@ -4840,6 +4842,29 @@ restart:
 			if (start_pfn >= end_pfn)
 				continue;
 
+			/*
+			 * If movablecore_map was specified together with
+			 * kernelcore or movablecore, it takes priority and
+			 * must be satisfied first.
+			 */
+			if (start_pfn >= node_movable_limit) {
+				/*
+				 * We have reached the ZONE_MOVABLE boundary
+				 * specified by movablecore_map. Do not spread
+				 * kernelcore any further on this node; keep
+				 * the rest of kernelcore_remaining, decrease
+				 * usable_nodes, and break out.
+				 */
+				usable_nodes--;
+				break;
+			}
+
+			/*
+			 * If the ZONE_MOVABLE start pfn lies in this range,
+			 * shrink end_pfn so kernelcore stops at that boundary.
+			 */
+			end_pfn = min(end_pfn, node_movable_limit);
+
 			/* Account for what is only usable for kernelcore */
 			if (start_pfn < usable_startpfn) {
 				unsigned long kernel_pages;
-- 
1.7.1
