On Fri, Mar 08, 2024 at 11:11:24AM +0800, lipeifeng@xxxxxxxx wrote:
> Commit 6d4675e60135 ("mm: don't be stuck to rmap lock on reclaim path")
> prevents the reclaim path from becoming stuck on the rmap lock. However,
> it reinserts those folios at the head of the LRU during shrink_folio_list,
> even if those folios are very cold.

This seems like a lot of new code.  Did you consider something simpler
like this?

Also, this is Minchan's patch you're complaining about.  Add him to the
cc.

+++ b/mm/vmscan.c
@@ -817,6 +817,7 @@ enum folio_references {
 	FOLIOREF_RECLAIM,
 	FOLIOREF_RECLAIM_CLEAN,
 	FOLIOREF_KEEP,
+	FOLIOREF_RESCAN,
 	FOLIOREF_ACTIVATE,
 };
 
@@ -837,9 +838,9 @@ static enum folio_references folio_check_references(struct folio *folio,
 	if (vm_flags & VM_LOCKED)
 		return FOLIOREF_ACTIVATE;
 
-	/* rmap lock contention: rotate */
+	/* rmap lock contention: keep at the tail */
 	if (referenced_ptes == -1)
-		return FOLIOREF_KEEP;
+		return FOLIOREF_RESCAN;
 
 	if (referenced_ptes) {
 		/*
@@ -1164,6 +1165,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 		case FOLIOREF_ACTIVATE:
 			goto activate_locked;
 		case FOLIOREF_KEEP:
+		case FOLIOREF_RESCAN:
 			stat->nr_ref_keep += nr_pages;
 			goto keep_locked;
 		case FOLIOREF_RECLAIM:
@@ -1446,7 +1448,10 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 keep_locked:
 		folio_unlock(folio);
 keep:
-		list_add(&folio->lru, &ret_folios);
+		if (references == FOLIOREF_RESCAN)
+			list_add(&folio->lru, &rescan_folios);
+		else
+			list_add(&folio->lru, &ret_folios);
 		VM_BUG_ON_FOLIO(folio_test_lru(folio) ||
 				folio_test_unevictable(folio), folio);
 	}
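
The hunks above leave out two pieces a compilable patch would still need:
the rescan_folios list itself, and draining it once the scan loop is done.
A rough, untested sketch of those pieces (the placement, the splice order,
and the caller-side handling are my assumptions, not something the diff
shows):

	/* next to the existing LIST_HEAD(ret_folios) in shrink_folio_list(): */
	LIST_HEAD(rescan_folios);	/* FOLIOREF_RESCAN folios, kept apart */

	/*
	 * ... and after the scan loop, where ret_folios is spliced back
	 * onto folio_list: splice the lock-contended folios in behind
	 * everything else.  For them to really end up at the tail of the
	 * inactive list, the caller would additionally have to put them
	 * back with lruvec_add_folio_tail() rather than lruvec_add_folio(),
	 * so they are rescanned first on the next pass instead of being
	 * rotated to the head.
	 */
	list_splice_tail(&rescan_folios, &ret_folios);
	list_splice(&ret_folios, folio_list);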