The patch titled Subject: memcg: pr_warn_once for unexpected events and stats has been added to the -mm mm-unstable branch. Its filename is memcg-pr_warn_once-for-unexpected-events-and-stats.patch This patch will shortly appear at https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/memcg-pr_warn_once-for-unexpected-events-and-stats.patch This patch will later appear in the mm-unstable branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm Before you just go and hit "reply", please: a) Consider who else should be cc'ed b) Prefer to cc a suitable mailing list as well c) Ideally: find the original patch on the mailing list and do a reply-to-all to that, adding suitable additional cc's *** Remember to use Documentation/process/submit-checklist.rst when testing your code *** The -mm tree is included into linux-next via the mm-everything branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm and is updated there every 2-3 working days ------------------------------------------------------ From: Shakeel Butt <shakeel.butt@xxxxxxxxx> Subject: memcg: pr_warn_once for unexpected events and stats Date: Fri, 26 Apr 2024 17:37:31 -0700 To reduce memory usage by the memcg events and stats, the kernel uses an indirection table and only allocates the stats and events which are being used by the memcg code. To make this more robust, let's add warnings where unexpected stats and events indexes are used. 
Link: https://lkml.kernel.org/r/20240427003733.3898961-6-shakeel.butt@xxxxxxxxx Signed-off-by: Shakeel Butt <shakeel.butt@xxxxxxxxx> Cc: Johannes Weiner <hannes@xxxxxxxxxxx> Cc: Michal Hocko <mhocko@xxxxxxxxxx> Cc: Muchun Song <muchun.song@xxxxxxxxx> Cc: Roman Gushchin <roman.gushchin@xxxxxxxxx> Cc: Yosry Ahmed <yosryahmed@xxxxxxxxxx> Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx> --- mm/memcontrol.c | 43 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 9 deletions(-) --- a/mm/memcontrol.c~memcg-pr_warn_once-for-unexpected-events-and-stats +++ a/mm/memcontrol.c @@ -670,9 +670,11 @@ unsigned long lruvec_page_state(struct l return node_page_state(lruvec_pgdat(lruvec), idx); i = memcg_stats_index(idx); - if (i >= 0) { + if (likely(i >= 0)) { pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); x = READ_ONCE(pn->lruvec_stats->state[i]); + } else { + pr_warn_once("%s: stat item index: %d\n", __func__, idx); } #ifdef CONFIG_SMP if (x < 0) @@ -692,9 +694,11 @@ unsigned long lruvec_page_state_local(st return node_page_state(lruvec_pgdat(lruvec), idx); i = memcg_stats_index(idx); - if (i >= 0) { + if (likely(i >= 0)) { pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); x = READ_ONCE(pn->lruvec_stats->state_local[i]); + } else { + pr_warn_once("%s: stat item index: %d\n", __func__, idx); } #ifdef CONFIG_SMP if (x < 0) @@ -921,8 +925,10 @@ unsigned long memcg_page_state(struct me long x; int i = memcg_stats_index(idx); - if (i < 0) + if (unlikely(i < 0)) { + pr_warn_once("%s: stat item index: %d\n", __func__, idx); return 0; + } x = READ_ONCE(memcg->vmstats->state[i]); #ifdef CONFIG_SMP @@ -958,8 +964,13 @@ void __mod_memcg_state(struct mem_cgroup { int i = memcg_stats_index(idx); - if (mem_cgroup_disabled() || i < 0) + if (mem_cgroup_disabled()) + return; + + if (unlikely(i < 0)) { + pr_warn_once("%s: stat item index: %d\n", __func__, idx); return; + } __this_cpu_add(memcg->vmstats_percpu->state[i], val); 
memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val)); @@ -971,8 +982,10 @@ static unsigned long memcg_page_state_lo long x; int i = memcg_stats_index(idx); - if (i < 0) + if (unlikely(i < 0)) { + pr_warn_once("%s: stat item index: %d\n", __func__, idx); return 0; + } x = READ_ONCE(memcg->vmstats->state_local[i]); #ifdef CONFIG_SMP @@ -990,8 +1003,10 @@ static void __mod_memcg_lruvec_state(str struct mem_cgroup *memcg; int i = memcg_stats_index(idx); - if (i < 0) + if (unlikely(i < 0)) { + pr_warn_once("%s: stat item index: %d\n", __func__, idx); return; + } pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); memcg = pn->memcg; @@ -1103,8 +1118,13 @@ void __count_memcg_events(struct mem_cgr { int index = memcg_events_index(idx); - if (mem_cgroup_disabled() || index < 0) + if (mem_cgroup_disabled()) + return; + + if (unlikely(index < 0)) { + pr_warn_once("%s: event item index: %d\n", __func__, idx); return; + } memcg_stats_lock(); __this_cpu_add(memcg->vmstats_percpu->events[index], count); @@ -1116,8 +1136,11 @@ static unsigned long memcg_events(struct { int index = memcg_events_index(event); - if (index < 0) + if (unlikely(index < 0)) { + pr_warn_once("%s: event item index: %d\n", __func__, event); return 0; + } + return READ_ONCE(memcg->vmstats->events[index]); } @@ -1125,8 +1148,10 @@ static unsigned long memcg_events_local( { int index = memcg_events_index(event); - if (index < 0) + if (unlikely(index < 0)) { + pr_warn_once("%s: event item index: %d\n", __func__, event); return 0; + } return READ_ONCE(memcg->vmstats->events_local[index]); } _ Patches currently in -mm which might be from shakeel.butt@xxxxxxxxx are memcg-simple-cleanup-of-stats-update-functions.patch memcg-reduce-memory-size-of-mem_cgroup_events_index.patch memcg-dynamically-allocate-lruvec_stats.patch memcg-reduce-memory-for-the-lruvec-and-memcg-stats.patch memcg-cleanup-__mod_memcg_lruvec_state.patch memcg-pr_warn_once-for-unexpected-events-and-stats.patch 
memcg-use-proper-type-for-mod_memcg_state.patch mm-cleanup-workingset_nodes-in-workingset.patch