---
mm/page_alloc.c | 88 +++++++++++++++----------------------------------
1 file changed, 27 insertions(+), 61 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 68669d3a5a665..990514d8f0d94 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1708,55 +1708,23 @@ deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
}
/*
- * Initialize and free pages. We do it in two loops: first we initialize
- * struct page, then free to buddy allocator, because while we are
- * freeing pages we can access pages that are ahead (computing buddy
- * page in __free_one_page()).
- *
- * In order to try and keep some memory in the cache we have the loop
- * broken along max page order boundaries. This way we will not cause
- * any issues with the buddy page computation.
+ * Initialize the struct pages and then free them to the buddy allocator, at
+ * most one max order block at a time, because while freeing a page we can
+ * access pages ahead of it (computing the buddy page in __free_one_page()).
+ * Working in max order blocks is also cache friendly.
*/
static unsigned long __init
-deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
- unsigned long *end_pfn)
+deferred_init_maxorder(struct zone *zone, unsigned long *start_pfn,
+ unsigned long end_pfn)
{
- unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
- unsigned long spfn = *start_pfn, epfn = *end_pfn;
- unsigned long nr_pages = 0;
- u64 j = *i;
-
- /* First we loop through and initialize the page values */
- for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
- unsigned long t;
-
- if (mo_pfn <= *start_pfn)
- break;
-
- t = min(mo_pfn, *end_pfn);
- nr_pages += deferred_init_pages(zone, *start_pfn, t);
-
- if (mo_pfn < *end_pfn) {
- *start_pfn = mo_pfn;
- break;
- }
- }
-
- /* Reset values and now loop through freeing pages as needed */
- swap(j, *i);
-
- for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
- unsigned long t;
-
- if (mo_pfn <= spfn)
- break;
+ unsigned long nr_pages, pfn;
- t = min(mo_pfn, epfn);
- deferred_free_pages(spfn, t);
+ pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
+ pfn = min(pfn, end_pfn);
- if (mo_pfn <= epfn)
- break;
- }
+ nr_pages = deferred_init_pages(zone, *start_pfn, pfn);
+ deferred_free_pages(*start_pfn, pfn);
+ *start_pfn = pfn;
return nr_pages;
}
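
For illustration only (not part of the patch), the stepping above can be
sanity-checked in user space. In the sketch below, ALIGN() and
MAX_ORDER_NR_PAGES are local stand-ins (MAX_ORDER_NR_PAGES is assumed to be
1UL << 10 purely for the example) and step_max_order() mirrors only the PFN
arithmetic of the new deferred_init_maxorder(), with no zone or struct page
handling:

#include <stdio.h>

/* Local stand-ins for the kernel definitions, illustration only. */
#define MAX_ORDER_NR_PAGES	(1UL << 10)	/* assumed example value */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

/* Advance *start_pfn by at most one max order block, as the new helper does. */
static unsigned long step_max_order(unsigned long *start_pfn,
				    unsigned long end_pfn)
{
	unsigned long pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);

	if (pfn > end_pfn)
		pfn = end_pfn;
	*start_pfn = pfn;
	return pfn;
}

int main(void)
{
	/* A deliberately unaligned example range. */
	unsigned long spfn = 1000, epfn = 3000;

	while (spfn < epfn) {
		unsigned long from = spfn;

		step_max_order(&spfn, epfn);
		/* Prints 1000-1024, 1024-2048, 2048-3000. */
		printf("chunk: %lu-%lu\n", from, spfn);
	}
	return 0;
}

Note that the "+ 1" inside ALIGN() makes an already aligned *start_pfn advance
by a whole block instead of zero pages, so every call makes forward progress
and the callers' inner while loops terminate.
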
@@ -1814,9 +1782,11 @@ static int __init deferred_init_memmap(void *data)
* that we can avoid introducing any issues with the buddy
* allocator.
*/
- while (spfn < epfn) {
- nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
- cond_resched();
+ for_each_free_mem_pfn_range_in_zone_from(i, zone, &spfn, &epfn) {
+ while (spfn < epfn) {
+ nr_pages += deferred_init_maxorder(zone, &spfn, epfn);
+ cond_resched();
+ }
}
zone_empty:
/* Sanity check that the next zone really is unpopulated */
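
The reworked loop in deferred_init_memmap() has the shape of the following
user-space sketch, in which for_each_free_mem_pfn_range_in_zone_from() is
mocked by a fixed array of [start, end) PFN ranges and one_chunk() is a
stand-in for deferred_init_maxorder() (the 1024-PFN block size and the range
values are assumed example numbers, not kernel API):

#include <stdio.h>

struct pfn_range { unsigned long spfn, epfn; };

/* Stand-in for deferred_init_maxorder(): advance to the next 1024-PFN
 * boundary (or the range end) and return the number of PFNs covered. */
static unsigned long one_chunk(unsigned long *spfn, unsigned long epfn)
{
	unsigned long next = (*spfn / 1024 + 1) * 1024;
	unsigned long nr;

	if (next > epfn)
		next = epfn;
	nr = next - *spfn;
	printf("  init+free pfn %lu-%lu\n", *spfn, next);
	*spfn = next;
	return nr;
}

int main(void)
{
	/* Mocked zone free ranges (the kernel gets these from memblock). */
	struct pfn_range ranges[] = { { 100, 1500 }, { 4096, 4200 } };
	unsigned long nr_pages = 0;
	unsigned long i;

	/* Outer loop walks the free ranges, inner loop steps one chunk at a
	 * time, so rescheduling can happen between chunks. */
	for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
		unsigned long spfn = ranges[i].spfn, epfn = ranges[i].epfn;

		while (spfn < epfn) {
			nr_pages += one_chunk(&spfn, epfn);
			/* cond_resched() sits here in the kernel. */
		}
	}
	printf("initialized %lu pages\n", nr_pages);
	return 0;
}
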
@@ -1883,22 +1853,18 @@ deferred_grow_zone(struct zone *zone, unsigned int order)
* that we can avoid introducing any issues with the buddy
* allocator.
*/
- while (spfn < epfn) {
- /* update our first deferred PFN for this section */
- first_deferred_pfn = spfn;
-
- nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
- touch_nmi_watchdog();
-
- /* We should only stop along section boundaries */
- if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
- continue;
-
- /* If our quota has been met we can stop here */
- if (nr_pages >= nr_pages_needed)
- break;
+ for_each_free_mem_pfn_range_in_zone_from(i, zone, &spfn, &epfn) {
+ while (spfn < epfn) {
+ nr_pages += deferred_init_maxorder(zone, &spfn, epfn);
+ touch_nmi_watchdog();
+
+ /* If our quota has been met we can stop here */
+ if (nr_pages >= nr_pages_needed)
+ goto out;
+ }
}
+out:
pgdat->first_deferred_pfn = spfn;
pgdat_resize_unlock(pgdat, &flags);
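
deferred_grow_zone() keeps the same nested loop but leaves it through the out
label as soon as the quota of pages has been initialized, and the spfn it
stores in pgdat->first_deferred_pfn is where later deferred initialization
picks up. Below is a minimal user-space sketch of just that early exit,
reusing the same kind of stand-ins as above (the chunk size, ranges and quota
are assumed example numbers):

#include <stdio.h>

struct pfn_range { unsigned long spfn, epfn; };

/* Stand-in chunk step: advance to the next 1024-PFN boundary or range end. */
static unsigned long one_chunk(unsigned long *spfn, unsigned long epfn)
{
	unsigned long next = (*spfn / 1024 + 1) * 1024;
	unsigned long nr;

	if (next > epfn)
		next = epfn;
	nr = next - *spfn;
	*spfn = next;
	return nr;
}

int main(void)
{
	struct pfn_range ranges[] = { { 100, 5000 }, { 8192, 9000 } };
	unsigned long nr_pages_needed = 2048;	/* quota, e.g. 1 << order pages */
	unsigned long nr_pages = 0, spfn = 0;
	unsigned long i;

	for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
		spfn = ranges[i].spfn;

		while (spfn < ranges[i].epfn) {
			nr_pages += one_chunk(&spfn, ranges[i].epfn);
			/* touch_nmi_watchdog() sits here in the kernel. */
			if (nr_pages >= nr_pages_needed)
				goto out;
		}
	}
out:
	/* Like pgdat->first_deferred_pfn: where the next pass resumes. */
	printf("initialized %lu pages, resume at pfn %lu\n", nr_pages, spfn);
	return 0;
}
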