On Tue 27-07-21 18:22:43, Shakeel Butt wrote:
> We used to have per-cpu memcg and lruvec stats, and readers had to
> traverse and sum the stats from each cpu. This summing was racy and
> could expose transient negative values, so an explicit check was added
> to avoid such scenarios. Now these stats have been moved to the rstat
> infrastructure and are no longer per-cpu, so we can remove the fixup
> for transient negative values.
> 
> Signed-off-by: Shakeel Butt <shakeelb@xxxxxxxxxx>

Acked-by: Michal Hocko <mhocko@xxxxxxxx>

> ---
>  include/linux/memcontrol.h | 15 ++-------------
>  1 file changed, 2 insertions(+), 13 deletions(-)
> 
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 7028d8e4a3d7..5f2a39a43d47 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -991,30 +991,19 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
>  
>  static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
>  {
> -	long x = READ_ONCE(memcg->vmstats.state[idx]);
> -#ifdef CONFIG_SMP
> -	if (x < 0)
> -		x = 0;
> -#endif
> -	return x;
> +	return READ_ONCE(memcg->vmstats.state[idx]);
>  }
>  
>  static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
>  					      enum node_stat_item idx)
>  {
>  	struct mem_cgroup_per_node *pn;
> -	long x;
>  
>  	if (mem_cgroup_disabled())
>  		return node_page_state(lruvec_pgdat(lruvec), idx);
>  
>  	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
> -	x = READ_ONCE(pn->lruvec_stats.state[idx]);
> -#ifdef CONFIG_SMP
> -	if (x < 0)
> -		x = 0;
> -#endif
> -	return x;
> +	return READ_ONCE(pn->lruvec_stats.state[idx]);
>  }
>  
>  static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
> -- 
> 2.32.0.432.gabb21c7263-goog

-- 
Michal Hocko
SUSE Labs
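
For readers unfamiliar with the race being removed here, below is a
minimal userspace sketch of how summing per-cpu deltas can transiently
read negative. It is illustrative only (the names are made up; this is
not the memcg code), but it shows exactly what the removed CONFIG_SMP
clamp used to paper over:

#include <stdio.h>

#define NR_CPUS	2

/* Per-cpu deltas; the logical total is the sum across all cpus. */
static long percpu_count[NR_CPUS];

/*
 * The old-style reader: walk every cpu and add the deltas up.
 * On SMP there is no atomic snapshot of all entries, so a concurrent
 * charge/uncharge pair can be observed half-applied.
 */
static long sum_percpu(void)
{
	long sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += percpu_count[cpu];
	return sum;
}

int main(void)
{
	/*
	 * An object was charged on cpu0 and later uncharged on cpu1.
	 * If the reader sees the uncharge but not (yet) the charge,
	 * the true value 0 shows up as -1.
	 */
	percpu_count[0] = 0;	/* +1 not yet visible to this reader */
	percpu_count[1] = -1;	/* -1 already visible */

	printf("summed value: %ld\n", sum_percpu());	/* prints -1 */
	return 0;
}

With rstat the per-cpu deltas are flushed into a single aggregated
counter, so memcg_page_state() and lruvec_page_state() read one coherent
value via READ_ONCE() instead of computing a racy sum across cpus, and
the clamp to zero is no longer needed.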