If the arch supports page access count, the LRU has already been sorted
before collecting reclaimable pages, so reclaim them unconditionally in
shrink_folio_list().

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@xxxxxxxxxxxxx>
---
 mm/vmscan.c | 25 ++++++++-----------------
 1 file changed, 8 insertions(+), 17 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index a5f6238b3926..d9eb6a4d2975 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -5242,7 +5242,8 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
 	if (list_empty(&list))
 		return scanned;
 retry:
-	reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false);
+	reclaimed = shrink_folio_list(&list, pgdat, sc,
+				      &stat, arch_supports_page_access_count());
 	sc->nr_reclaimed += reclaimed;
 
 	list_for_each_entry_safe_reverse(folio, next, &list, lru) {
@@ -5477,22 +5478,12 @@ bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaimi
 	 */
 	if (folio_test_active(folio))
 		seq = lrugen->max_seq;
-	else {
-		/*
-		 * For a non active folio use the arch based
-		 * aging details to derive the MGLRU generation.
-		 */
-		seq = arch_get_lru_gen_seq(lruvec, folio);
-
-		if (seq == lrugen->min_seq[type]) {
-			if ((type == LRU_GEN_ANON &&
-			     !folio_test_swapcache(folio)) ||
-			    (folio_test_reclaim(folio) &&
-			     (folio_test_dirty(folio) ||
-			      folio_test_writeback(folio))))
-				seq = lrugen->min_seq[type] + 1;
-		}
-	}
+	else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
+		 (folio_test_reclaim(folio) &&
+		  (folio_test_dirty(folio) || folio_test_writeback(folio))))
+		seq = lrugen->min_seq[type] + 1;
+	else
+		seq = lrugen->min_seq[type];
 
 	gen = lru_gen_from_seq(seq);
 	flags = (gen + 1UL) << LRU_GEN_PGOFF;
-- 
2.39.2