This patch makes the comment for cc->wrapped longer, explaining what is
really going on. It also incorporates the comment fix pointed out by
Minchan.

Additionally, Minchan found that, when no pages get isolated, high_pfn
could end up much lower than desired, which could cause compaction to
skip a range of pages. Only assign zone->compact_cached_free_pfn if we
actually isolated free pages for compaction.

Signed-off-by: Rik van Riel <riel@xxxxxxxxxx>
---
This does not address the one bit in Minchan's review that I am not
sure about...

 include/linux/mmzone.h |    2 +-
 mm/compaction.c        |    7 ++++---
 mm/internal.h          |    6 +++++-
 3 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e629594..e957fa1 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -370,7 +370,7 @@ struct zone {
 	spinlock_t		lock;
 	int			all_unreclaimable; /* All pages pinned */
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
-	/* pfn where the last order > 0 compaction isolated free pages */
+	/* pfn where the last incremental compaction isolated free pages */
 	unsigned long		compact_cached_free_pfn;
 #endif
 #ifdef CONFIG_MEMORY_HOTPLUG
diff --git a/mm/compaction.c b/mm/compaction.c
index 2668b77..2867166 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -472,10 +472,11 @@ static void isolate_freepages(struct zone *zone,
 		 * looking for free pages, the search will restart here as
 		 * page migration may have returned some pages to the allocator
 		 */
-		if (isolated)
+		if (isolated) {
 			high_pfn = max(high_pfn, pfn);
-		if (cc->order > 0)
-			zone->compact_cached_free_pfn = high_pfn;
+			if (cc->order > 0)
+				zone->compact_cached_free_pfn = high_pfn;
+		}
 	}
 
 	/* split_free_page does not map the pages */
diff --git a/mm/internal.h b/mm/internal.h
index 0b72461..da6b9b2 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -121,7 +121,11 @@ struct compact_control {
 	unsigned long start_free_pfn;	/* where we started the search */
 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
 	bool sync;			/* Synchronous migration */
-	bool wrapped;			/* Last round for order>0 compaction */
+	bool wrapped;			/* Order > 0 compactions are
+					   incremental, once free_pfn
+					   and migrate_pfn meet, we restart
+					   from the top of the zone;
+					   remember we wrapped around. */
 
 	int order;			/* order a direct compactor needs */
 	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
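For illustration only, here is a minimal user-space sketch of the failure
mode fixed in the compaction.c hunk. Everything in it (the 1024-pfn "zone",
the migrate scanner parked at pfn 128, the 32-page scan blocks, the helper
names) is invented for the example and is not the kernel's real code; it only
models the idea that the cached free pfn should move only when a scan
actually isolated pages.

/*
 * Toy user-space model of the bug: a free-page scan that isolates
 * nothing must not update the cached free pfn, or the next incremental
 * pass starts far too low in the zone.  All names and numbers here are
 * made up for illustration; the real code also checks cc->order > 0,
 * which is omitted in this sketch.
 */
#include <stdbool.h>
#include <stdio.h>

#define ZONE_END_PFN	1024UL	/* hypothetical zone of 1024 pfns */
#define MIGRATE_PFN	128UL	/* where the migrate scanner sits */

static unsigned long cached_free_pfn = ZONE_END_PFN;

/* Pretend this pass finds no isolatable free pages anywhere. */
static unsigned long isolate_block(unsigned long pfn)
{
	(void)pfn;
	return 0;
}

static void scan_free_pages(bool cache_only_on_success)
{
	/* high_pfn starts near the migrate scanner, i.e. low in the zone */
	unsigned long high_pfn = MIGRATE_PFN;
	unsigned long pfn;

	/* the free scanner walks from the end of the zone downward */
	for (pfn = ZONE_END_PFN - 32; pfn > MIGRATE_PFN; pfn -= 32) {
		unsigned long isolated = isolate_block(pfn);

		if (cache_only_on_success) {
			/* patched behaviour: remember only real progress */
			if (isolated) {
				high_pfn = high_pfn > pfn ? high_pfn : pfn;
				cached_free_pfn = high_pfn;
			}
		} else {
			/* pre-patch behaviour: cache unconditionally */
			if (isolated)
				high_pfn = high_pfn > pfn ? high_pfn : pfn;
			cached_free_pfn = high_pfn;
		}
	}
}

int main(void)
{
	scan_free_pages(false);
	printf("pre-patch: cached_free_pfn = %lu (next pass never looks above it)\n",
	       cached_free_pfn);

	cached_free_pfn = ZONE_END_PFN;
	scan_free_pages(true);
	printf("patched:   cached_free_pfn = %lu (left alone, nothing skipped)\n",
	       cached_free_pfn);
	return 0;
}

With the unconditional assignment, a pass that isolates nothing drags the
cached value down to the migrate scanner's neighbourhood, so the following
incremental pass starts its free-page search far below the end of the zone
and never revisits the pfns above it until the scanners meet and wrap.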