In fast_find_migrateblock(), the freelist is iterated to find a proper
pageblock. But there are some misbehaviors.

First, if the page we found is equal to cc->migrate_pfn, it is considered
that we did not find a suitable pageblock.

Second, if the loop was terminated because order is less than
PAGE_ALLOC_COSTLY_ORDER, it could be considered that we found a suitable
one.

Third, if the skip bit is set on the pageblock and we continue to the next
iteration, nr_scanned is not checked.

Fourth, if the pageblock's skip bit is set, it checks whether the pageblock
is the last entry of the list, which is unnecessary.

Fixes: 70b44595eafe9 ("mm, compaction: use free lists to quickly locate a migration source")
Signed-off-by: Wonhyuk Yang <vvghjk1234@xxxxxxxxx>
---
Changes in v2:
 - Follow Vlastimil's suggestion, using a bool variable.
 - Fix new misbehaviors and remove the unnecessary check.

v1: https://lore.kernel.org/linux-mm/20210123154320.24278-1-vvghjk1234@xxxxxxxxx
---
 mm/compaction.c | 27 ++++++++++++---------------
 1 file changed, 12 insertions(+), 15 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index e5acb9714436..8e5cd9d20435 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1701,6 +1701,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
 	unsigned long pfn = cc->migrate_pfn;
 	unsigned long high_pfn;
 	int order;
+	bool found_block = false;
 
 	/* Skip hints are relied on to avoid repeats on the fast search */
 	if (cc->ignore_skip_hint)
@@ -1743,7 +1744,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
 	high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
 
 	for (order = cc->order - 1;
-	     order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit;
+	     order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
 	     order--) {
 		struct free_area *area = &cc->zone->free_area[order];
 		struct list_head *freelist;
@@ -1758,7 +1759,11 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
 		list_for_each_entry(freepage, freelist, lru) {
 			unsigned long free_pfn;
 
-			nr_scanned++;
+			if (nr_scanned++ >= limit) {
+				move_freelist_tail(freelist, freepage);
+				break;
+			}
+
 			free_pfn = page_to_pfn(freepage);
 			if (free_pfn < high_pfn) {
 				/*
@@ -1767,12 +1772,8 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
 				 * the list assumes an entry is deleted, not
 				 * reordered.
 				 */
-				if (get_pageblock_skip(freepage)) {
-					if (list_is_last(freelist, &freepage->lru))
-						break;
-
+				if (get_pageblock_skip(freepage))
 					continue;
-				}
 
 				/* Reorder to so a future search skips recent pages */
 				move_freelist_tail(freelist, freepage);
@@ -1780,15 +1781,10 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
 				update_fast_start_pfn(cc, free_pfn);
 				pfn = pageblock_start_pfn(free_pfn);
 				cc->fast_search_fail = 0;
+				found_block = true;
 				set_pageblock_skip(freepage);
 				break;
 			}
-
-			if (nr_scanned >= limit) {
-				cc->fast_search_fail++;
-				move_freelist_tail(freelist, freepage);
-				break;
-			}
 		}
 		spin_unlock_irqrestore(&cc->zone->lock, flags);
 	}
@@ -1799,9 +1795,10 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
 	 * If fast scanning failed then use a cached entry for a page block
 	 * that had free pages as the basis for starting a linear scan.
 	 */
-	if (pfn == cc->migrate_pfn)
+	if (!found_block) {
+		cc->fast_search_fail++;
 		pfn = reinit_migrate_pfn(cc);
-
+	}
 	return pfn;
 }
 
-- 
2.25.1
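
Not part of the patch itself, just an illustration: a minimal userspace C
sketch of the "found" flag pattern the fix adopts. The names here (struct
block, find_block, the sample data) are invented for the example; the real
code operates on struct compact_control and the zone free lists.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for a pageblock entry on a free list. */
struct block {
	unsigned long pfn;
	bool skip;
};

/*
 * Scan at most 'limit' entries and return the pfn of the first block
 * that is not marked skip.  Success is tracked with a dedicated flag
 * instead of comparing the result against the starting pfn, so a block
 * that happens to start at 'start_pfn' still counts as a hit.
 */
static unsigned long find_block(const struct block *blocks, size_t nr,
				unsigned long start_pfn, size_t limit)
{
	unsigned long pfn = start_pfn;
	bool found = false;
	size_t scanned = 0;

	for (size_t i = 0; i < nr && !found; i++) {
		/* Count every visited entry, skipped ones included. */
		if (scanned++ >= limit)
			break;
		if (blocks[i].skip)
			continue;

		pfn = blocks[i].pfn;
		found = true;
	}

	/* On failure the caller can fall back, e.g. to a cached pfn. */
	return found ? pfn : start_pfn;
}

int main(void)
{
	struct block blocks[] = {
		{ .pfn = 4096, .skip = true  },
		{ .pfn = 2048, .skip = false },	/* equals start_pfn, still a hit */
	};

	printf("found pfn %lu\n", find_block(blocks, 2, 2048, 16));
	return 0;
}

Testing "pfn == start_pfn" to detect failure, as the old code did, conflates
"nothing found" with "found a block that starts at the same pfn"; a dedicated
flag removes that ambiguity and gives the loop condition and the failure path
(cc->fast_search_fail++ in the patch) a single state to key off.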