Add vm events for scanning pages for recharge, successfully recharging
pages, and cancelling a recharge due to failure to charge the target
memcg.

Signed-off-by: Yosry Ahmed <yosryahmed@xxxxxxxxxx>
---
 include/linux/vm_event_item.h | 5 +++++
 mm/memcontrol.c               | 6 ++++++
 mm/vmstat.c                   | 6 +++++-
 3 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 8abfa1240040..cd80c00c50c2 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -60,6 +60,11 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		PAGEOUTRUN, PGROTATED,
 		DROP_PAGECACHE, DROP_SLAB,
 		OOM_KILL,
+#ifdef CONFIG_MEMCG
+		RECHARGE_PGSCANNED,
+		RECHARGE_PGMOVED,
+		RECHARGE_PGCANCELLED,
+#endif
 #ifdef CONFIG_NUMA_BALANCING
 		NUMA_PTE_UPDATES,
 		NUMA_HUGE_PTE_UPDATES,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index cf9fb51ecfcc..2fe9c6f1be80 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6394,6 +6394,8 @@ static bool mem_cgroup_recharge_folio(struct folio *folio,
 					      old_memcg, new_memcg);
 		cancel_charge(err ? new_memcg : old_memcg, nr_pages);
 out:
+	count_vm_events(err ? RECHARGE_PGCANCELLED : RECHARGE_PGMOVED,
+			nr_pages);
 	return err == 0;
 }
 
@@ -6469,6 +6471,7 @@ static bool memcg_recharge_lruvec_list(struct lruvec *lruvec,
 	int isolated_idx = NR_ISOLATED_ANON + is_file_lru(lru);
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 	unsigned long *nr_recharged = arg;
+	unsigned long nr_scanned = 0;
 	unsigned long nr_staged = 0;
 	LIST_HEAD(folios_skipped);
 	LIST_HEAD(folios_staged);
@@ -6505,6 +6508,7 @@ static bool memcg_recharge_lruvec_list(struct lruvec *lruvec,
 			continue;
 		}
 
+		nr_scanned += folio_nr_pages(folio);
 		if (unlikely(!folio_try_get(folio))) {
 			list_move(&folio->lru, &folios_skipped);
 			continue;
@@ -6543,6 +6547,7 @@ static bool memcg_recharge_lruvec_list(struct lruvec *lruvec,
 	}
 	mem_cgroup_end_move_charge(memcg);
 	mod_lruvec_state(lruvec, isolated_idx, -nr_staged);
+	count_vm_events(RECHARGE_PGSCANNED, nr_scanned);
 	return false;
 }
 
@@ -6679,6 +6684,7 @@ void folio_memcg_deferred_recharge(struct folio *folio)
 	if (unlikely(!memcg_recharge_wq))
 		return;
 
+	count_vm_events(RECHARGE_PGSCANNED, folio_nr_pages(folio));
 	if (unlikely(!folio_try_get(folio)))
 		return;
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index b731d57996c5..e425a1aa7890 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1303,7 +1303,11 @@ const char * const vmstat_text[] = {
 	"drop_pagecache",
 	"drop_slab",
 	"oom_kill",
-
+#ifdef CONFIG_MEMCG
+	"recharge_pgs_scanned",
+	"recharge_pgs_moved",
+	"recharge_pgs_cancelled",
+#endif
 #ifdef CONFIG_NUMA_BALANCING
 	"numa_pte_updates",
 	"numa_huge_pte_updates",
-- 
2.41.0.255.g8b1d071c50-goog