[PATCH v1] mm/memory_hotplug: don't check the nid in find_(smallest|biggest)_section_pfn


Now that we always check against a zone, we can stop checking against
the nid; it is implicitly covered by the zone.
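
For illustration only (not part of the patch; the helper name below is
hypothetical): a zone is always contained within a single node, so once
page_zone() matches, the nid comparison can never fail for an online pfn:

	/*
	 * Illustrative sketch: if the page's zone matches, its nid matches
	 * as well, because zone_to_nid(zone) is fixed per zone.
	 */
	static inline bool pfn_matches_zone(struct zone *zone, unsigned long pfn)
	{
		struct page *page = pfn_to_online_page(pfn);

		if (!page || page_zone(page) != zone)
			return false;
		/* Here page_to_nid(page) == zone_to_nid(zone) always holds. */
		return true;
	}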

Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Oscar Salvador <osalvador@xxxxxxx>
Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
---
 mm/memory_hotplug.c | 23 ++++++++---------------
 1 file changed, 8 insertions(+), 15 deletions(-)

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 46b2e056a43f..602f753c662c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -344,17 +344,14 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
 }
 
 /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
-static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
-				     unsigned long start_pfn,
-				     unsigned long end_pfn)
+static unsigned long find_smallest_section_pfn(struct zone *zone,
+					       unsigned long start_pfn,
+					       unsigned long end_pfn)
 {
 	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
 		if (unlikely(!pfn_to_online_page(start_pfn)))
 			continue;
 
-		if (unlikely(pfn_to_nid(start_pfn) != nid))
-			continue;
-
 		if (zone != page_zone(pfn_to_page(start_pfn)))
 			continue;
 
@@ -365,9 +362,9 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
 }
 
 /* find the biggest valid pfn in the range [start_pfn, end_pfn). */
-static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
-				    unsigned long start_pfn,
-				    unsigned long end_pfn)
+static unsigned long find_biggest_section_pfn(struct zone *zone,
+					      unsigned long start_pfn,
+					      unsigned long end_pfn)
 {
 	unsigned long pfn;
 
@@ -377,9 +374,6 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
 		if (unlikely(!pfn_to_online_page(pfn)))
 			continue;
 
-		if (unlikely(pfn_to_nid(pfn) != nid))
-			continue;
-
 		if (zone != page_zone(pfn_to_page(pfn)))
 			continue;
 
@@ -393,7 +387,6 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
 			     unsigned long end_pfn)
 {
 	unsigned long pfn;
-	int nid = zone_to_nid(zone);
 
 	zone_span_writelock(zone);
 	if (zone->zone_start_pfn == start_pfn) {
@@ -403,7 +396,7 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
 		 * In this case, we find second smallest valid mem_section
 		 * for shrinking zone.
 		 */
-		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
+		pfn = find_smallest_section_pfn(zone, end_pfn,
 						zone_end_pfn(zone));
 		if (pfn) {
 			zone->spanned_pages = zone_end_pfn(zone) - pfn;
@@ -419,7 +412,7 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
 		 * In this case, we find second biggest valid mem_section for
 		 * shrinking zone.
 		 */
-		pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
+		pfn = find_biggest_section_pfn(zone, zone->zone_start_pfn,
 					       start_pfn);
 		if (pfn)
 			zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
-- 
2.21.0