This patch adds two reclaim stats:

  nr_promote: number of pages skipped in shrink_folio_list() because
              folio_update_gen() promoted them.
  nr_demote:  number of pages passed on to NUMA demotion.

It also passes the correct nr_scanned from evict_folios() into
trace_mm_vmscan_lru_shrink_inactive().

Incorrect trace output before this patch:

```
kswapd0-89 [000] 64.887613: mm_vmscan_lru_shrink_inactive: nid=0 nr_scanned=64 nr_reclaimed=0 nr_dirty=0 nr_writeback=0 nr_congested=0 nr_immediate=0 nr_activate_anon=0 nr_activate_file=0 nr_ref_keep=0 nr_unmap_fail=0 priority=4 flags=RECLAIM_WB_FILE|RECLAIM_WB_ASYNC
```

Correct trace output with this patch:

```
<...>-9041 [006] 43.738481: mm_vmscan_lru_shrink_inactive: nid=0 nr_scanned=13 nr_reclaimed=0 nr_dirty=0 nr_writeback=0 nr_congested=0 nr_immediate=0 nr_activate_anon=9 nr_activate_file=0 nr_ref_keep=0 nr_unmap_fail=0 nr_promote=4 nr_demote=0 priority=12 flags=RECLAIM_WB_ANON|RECLAIM_WB_ASYNC
```

Signed-off-by: Huan Yang <link@xxxxxxxx>
---
 include/linux/vmstat.h        |  2 ++
 include/trace/events/vmscan.h |  8 +++++++-
 mm/vmscan.c                   | 26 +++++++++++++++++++++-----
 3 files changed, 30 insertions(+), 6 deletions(-)

diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index fed855bae6d8..ac2dd9168780 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -32,6 +32,8 @@ struct reclaim_stat {
 	unsigned nr_ref_keep;
 	unsigned nr_unmap_fail;
 	unsigned nr_lazyfree_fail;
+	unsigned nr_promote;
+	unsigned nr_demote;
 };
 
 enum writeback_stat_item {
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 1a488c30afa5..9b403824a293 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -366,6 +366,8 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
 		__field(unsigned int, nr_activate1)
 		__field(unsigned long, nr_ref_keep)
 		__field(unsigned long, nr_unmap_fail)
+		__field(unsigned long, nr_promote)
+		__field(unsigned long, nr_demote)
 		__field(int, priority)
 		__field(int, reclaim_flags)
 	),
@@ -382,17 +384,21 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
 		__entry->nr_activate1 = stat->nr_activate[1];
 		__entry->nr_ref_keep = stat->nr_ref_keep;
 		__entry->nr_unmap_fail = stat->nr_unmap_fail;
+		__entry->nr_promote = stat->nr_promote;
+		__entry->nr_demote = stat->nr_demote;
 		__entry->priority = priority;
 		__entry->reclaim_flags = trace_reclaim_flags(file);
 	),
 
-	TP_printk("nid=%d nr_scanned=%ld nr_reclaimed=%ld nr_dirty=%ld nr_writeback=%ld nr_congested=%ld nr_immediate=%ld nr_activate_anon=%d nr_activate_file=%d nr_ref_keep=%ld nr_unmap_fail=%ld priority=%d flags=%s",
+	TP_printk("nid=%d nr_scanned=%ld nr_reclaimed=%ld nr_dirty=%ld nr_writeback=%ld nr_congested=%ld nr_immediate=%ld nr_activate_anon=%d"
+		" nr_activate_file=%d nr_ref_keep=%ld nr_unmap_fail=%ld nr_promote=%ld nr_demote=%ld priority=%d flags=%s",
 		__entry->nid,
 		__entry->nr_scanned, __entry->nr_reclaimed,
 		__entry->nr_dirty, __entry->nr_writeback,
 		__entry->nr_congested, __entry->nr_immediate,
 		__entry->nr_activate0, __entry->nr_activate1,
 		__entry->nr_ref_keep, __entry->nr_unmap_fail,
+		__entry->nr_promote, __entry->nr_demote,
 		__entry->priority,
 		show_reclaim_flags(__entry->reclaim_flags))
 );
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2cc0cb41fb32..21099b9f21e0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1063,8 +1063,10 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 
 		/* folio_update_gen() tried to promote this page? */
 		if (lru_gen_enabled() && !ignore_references &&
-		    folio_mapped(folio) && folio_test_referenced(folio))
+		    folio_mapped(folio) && folio_test_referenced(folio)) {
+			stat->nr_promote += nr_pages;
 			goto keep_locked;
+		}
 
 		/*
 		 * The number of dirty pages determines if a node is marked
@@ -1193,6 +1195,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 		    (thp_migration_supported() || !folio_test_large(folio))) {
 			list_add(&folio->lru, &demote_folios);
 			folio_unlock(folio);
+			stat->nr_demote += nr_pages;
 			continue;
 		}
 
@@ -4495,6 +4498,8 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
 	int type;
 	int scanned;
 	int reclaimed;
+	unsigned long nr_taken = sc->nr_scanned;
+	unsigned int total_reclaimed = 0;
 	LIST_HEAD(list);
 	LIST_HEAD(clean);
 	struct folio *folio;
@@ -4521,10 +4526,7 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
 		return scanned;
 retry:
 	reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false);
-	sc->nr_reclaimed += reclaimed;
-	trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
-			scanned, reclaimed, &stat, sc->priority,
-			type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
+	total_reclaimed += reclaimed;
 
 	list_for_each_entry_safe_reverse(folio, next, &list, lru) {
 		if (!folio_evictable(folio)) {
@@ -4582,6 +4584,20 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
 		goto retry;
 	}
 
+	/*
+	 * The nr_scanned returned by MGLRU's scan_folios() does not only
+	 * count isolated folios. To get the folios actually touched in
+	 * shrink_folio_list(), record sc->nr_scanned before the call:
+	 * it is updated for each folio touched, so the new value minus
+	 * the recorded one is the right nr_taken.
+	 */
+	nr_taken = sc->nr_scanned - nr_taken;
+
+	sc->nr_reclaimed += total_reclaimed;
+	trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, nr_taken,
+					    total_reclaimed, &stat,
+					    sc->priority, type);
+
 	return scanned;
 }
 
--
2.34.1
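For readers unfamiliar with the counter-delta trick used above for nr_taken, here is a minimal standalone C sketch. It is not kernel code: the struct and function names (toy_scan_control, toy_shrink_folio_list) are hypothetical stand-ins that only mirror the kernel ones, and it builds as a normal userspace program. It shows how snapshotting a running counter before a call that increments it per item, then subtracting the snapshot afterwards, yields the per-call count rather than the lifetime total:

```
/*
 * Hypothetical illustration of the counter-delta technique used for
 * nr_taken above: snapshot a running counter before a call that bumps
 * it per item processed, then subtract the snapshot afterwards.
 */
#include <stdio.h>

struct toy_scan_control {
	unsigned long nr_scanned;	/* running total across many calls */
	unsigned long nr_reclaimed;
};

/* Toy stand-in for a reclaim pass: bumps the running counter per folio. */
static unsigned int toy_shrink_folio_list(struct toy_scan_control *sc,
					  unsigned int nr_folios)
{
	unsigned int reclaimed = 0;

	for (unsigned int i = 0; i < nr_folios; i++) {
		sc->nr_scanned++;	/* one increment per folio touched */
		if (i % 3 == 0)		/* arbitrary toy reclaim decision */
			reclaimed++;
	}
	sc->nr_reclaimed += reclaimed;
	return reclaimed;
}

int main(void)
{
	struct toy_scan_control sc = { .nr_scanned = 100 };	/* prior scans */
	unsigned long nr_taken = sc.nr_scanned;			/* snapshot */

	toy_shrink_folio_list(&sc, 13);

	/* Delta = folios touched by this call, not the lifetime total. */
	nr_taken = sc.nr_scanned - nr_taken;
	printf("nr_taken=%lu, running nr_scanned=%lu\n",
	       nr_taken, sc.nr_scanned);
	return 0;
}
```

This prints nr_taken=13 rather than the running total of 113, which is the same distinction the corrected trace_mm_vmscan_lru_shrink_inactive() call is meant to capture.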