On Thu, 11 Jul 2019 02:42:56 +0800 Mike Kravetz wrote: > > It is quite easy to hit the condition where: > nr_reclaimed == 0 && nr_scanned == 0 is true, but we skip the previous test > Then skipping the check of __GFP_RETRY_MAYFAIL makes no sense in your case. It is restored in the respin below. > and the compaction check: > sc->nr_reclaimed < pages_for_compaction && > inactive_lru_pages > pages_for_compaction > is true, so we return true before the below check of costly_fg_reclaim > This check is placed after COMPACT_SUCCESS; the latter is used to replace the sc->nr_reclaimed < pages_for_compaction check. And dryrun detection is added based on the result of the last round of shrinking inactive pages, particularly when their number is large enough. --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2571,18 +2571,6 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat, return false; } - /* - * If we have not reclaimed enough pages for compaction and the - * inactive lists are large enough, continue reclaiming - */ - pages_for_compaction = compact_gap(sc->order); - inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE); - if (get_nr_swap_pages() > 0) - inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON); - if (sc->nr_reclaimed < pages_for_compaction && - inactive_lru_pages > pages_for_compaction) - return true; - /* If compaction would go ahead or the allocation would succeed, stop */ for (z = 0; z <= sc->reclaim_idx; z++) { struct zone *zone = &pgdat->node_zones[z]; @@ -2598,7 +2586,21 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat, ; } } - return true; + + /* + * If we have not reclaimed enough pages for compaction and the + * inactive lists are large enough, continue reclaiming + */ + pages_for_compaction = compact_gap(sc->order); + inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE); + if (get_nr_swap_pages() > 0) + inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON); + + return inactive_lru_pages > 
pages_for_compaction && + /* + * avoid dryrun with plenty of inactive pages + */ + nr_scanned && nr_reclaimed; } static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg) --