Resetting the cached compaction scanner positions is now done implicitly
in __reset_isolation_suitable() and compact_finished(). Encapsulate the
functionality in a new function reset_cached_positions() and call it
explicitly where needed.

Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Michal Nazarewicz <mina86@xxxxxxxxxx>
Cc: Naoya Horiguchi <n-horiguchi@xxxxxxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Rik van Riel <riel@xxxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
---
 mm/compaction.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 7e0a814..d334bb3 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -207,6 +207,13 @@ static inline bool isolation_suitable(struct compact_control *cc,
 	return !get_pageblock_skip(page);
 }
 
+static void reset_cached_positions(struct zone *zone)
+{
+	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
+	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
+	zone->compact_cached_free_pfn = zone_end_pfn(zone);
+}
+
 /*
  * This function is called to clear all cached information on pageblocks that
  * should be skipped for page isolation when the migrate and free page scanner
@@ -218,9 +225,6 @@ static void __reset_isolation_suitable(struct zone *zone)
 	unsigned long end_pfn = zone_end_pfn(zone);
 	unsigned long pfn;
 
-	zone->compact_cached_migrate_pfn[0] = start_pfn;
-	zone->compact_cached_migrate_pfn[1] = start_pfn;
-	zone->compact_cached_free_pfn = end_pfn;
 	zone->compact_blockskip_flush = false;
 
 	/* Walk the zone and mark every pageblock as suitable for isolation */
@@ -250,8 +254,10 @@ void reset_isolation_suitable(pg_data_t *pgdat)
 			continue;
 
 		/* Only flush if a full compaction finished recently */
-		if (zone->compact_blockskip_flush)
+		if (zone->compact_blockskip_flush) {
 			__reset_isolation_suitable(zone);
+			reset_cached_positions(zone);
+		}
 	}
 }
 
@@ -1164,9 +1170,7 @@ static int __compact_finished(struct zone *zone, struct compact_control *cc,
 	/* Compaction run completes if the migrate and free scanner meet */
 	if (compact_scanners_met(cc)) {
 		/* Let the next compaction start anew. */
-		zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
-		zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
-		zone->compact_cached_free_pfn = zone_end_pfn(zone);
+		reset_cached_positions(zone);
 
 		/*
 		 * Mark that the PG_migrate_skip information should be cleared
@@ -1329,8 +1333,10 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 	 * is about to be retried after being deferred. kswapd does not do
 	 * this reset as it'll reset the cached information when going to sleep.
 	 */
-	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
+	if (compaction_restarting(zone, cc->order) && !current_is_kswapd()) {
 		__reset_isolation_suitable(zone);
+		reset_cached_positions(zone);
+	}
 
 	/*
 	 * Setup to move all movable pages to the end of the zone. Used cached
-- 
2.1.4
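
For reference, below is a minimal standalone sketch of the pattern this patch
applies: the three cached-position assignments are pulled into one helper, and
the former call sites invoke it explicitly. The simplified struct zone and the
zone_end_pfn() stand-in are assumptions made only so the example builds outside
the kernel tree; they are not the kernel definitions.

/*
 * Standalone illustration only -- not kernel code. struct zone and
 * zone_end_pfn() are simplified stand-ins so this builds on its own.
 */
#include <stdio.h>

struct zone {
	unsigned long zone_start_pfn;
	unsigned long spanned_pages;
	unsigned long compact_cached_migrate_pfn[2];	/* one slot per scanner mode */
	unsigned long compact_cached_free_pfn;
};

/* Stand-in for the kernel's zone_end_pfn() helper. */
static unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

/* The encapsulated reset introduced by the patch. */
static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn = zone_end_pfn(zone);
}

int main(void)
{
	struct zone z = {
		.zone_start_pfn			= 0x1000,
		.spanned_pages			= 0x8000,
		.compact_cached_migrate_pfn	= { 0x2400, 0x3200 },
		.compact_cached_free_pfn	= 0x5000,
	};

	/* A caller now resets the cached scanner positions explicitly. */
	reset_cached_positions(&z);

	printf("migrate: %#lx/%#lx free: %#lx\n",
	       z.compact_cached_migrate_pfn[0],
	       z.compact_cached_migrate_pfn[1],
	       z.compact_cached_free_pfn);
	return 0;
}

Keeping the reset in one place means the explicit callers in
reset_isolation_suitable() and compact_zone() cannot drift from the reset done
when the scanners meet in __compact_finished().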