On Tue, Oct 15, 2024 at 01:07:30AM GMT, Yosry Ahmed wrote:
> On Wed, Oct 9, 2024 at 5:36 PM Shakeel Butt <shakeel.butt@xxxxxxxxx> wrote:
> >
> > The memcg stats are maintained in rstat infrastructure which provides
> > very fast updates side and reasonable read side. However memcg added
> > plethora of stats and made the read side, which is cgroup rstat flush,
> > very slow. To solve that, threshold was added in the memcg stats read
> > side i.e. no need to flush the stats if updates are within the
> > threshold.
> >
> > This threshold based improvement worked for sometime but more stats were
> > added to memcg and also the read codepath was getting triggered in the
> > performance sensitive paths which made threshold based ratelimiting
> > ineffective. We need more visibility into the hot and cold stats i.e.
> > stats with a lot of updates. Let's add trace to get that visibility.
> >
> > Signed-off-by: Shakeel Butt <shakeel.butt@xxxxxxxxx>
>
> One question below, otherwise:
>
> Reviewed-by: Yosry Ahmed <yosryahmed@xxxxxxxxxx>
>
> > ---
> >  include/trace/events/memcg.h | 59 ++++++++++++++++++++++++++++++++++++
> >  mm/memcontrol.c              | 13 ++++++--
> >  2 files changed, 70 insertions(+), 2 deletions(-)
> >  create mode 100644 include/trace/events/memcg.h
> >
> > diff --git a/include/trace/events/memcg.h b/include/trace/events/memcg.h
> > new file mode 100644
> > index 000000000000..913db9aba580
> > --- /dev/null
> > +++ b/include/trace/events/memcg.h
> > @@ -0,0 +1,59 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +#undef TRACE_SYSTEM
> > +#define TRACE_SYSTEM memcg
> > +
> > +#if !defined(_TRACE_MEMCG_H) || defined(TRACE_HEADER_MULTI_READ)
> > +#define _TRACE_MEMCG_H
> > +
> > +#include <linux/memcontrol.h>
> > +#include <linux/tracepoint.h>
> > +
> > +
> > +DECLARE_EVENT_CLASS(memcg_rstat,
> > +
> > +	TP_PROTO(struct mem_cgroup *memcg, int item, int val),
> > +
> > +	TP_ARGS(memcg, item, val),
> > +
> > +	TP_STRUCT__entry(
> > +		__field(u64, id)
> > +		__field(int, item)
> > +		__field(int, val)
> > +	),
> > +
> > +	TP_fast_assign(
> > +		__entry->id = cgroup_id(memcg->css.cgroup);
> > +		__entry->item = item;
> > +		__entry->val = val;
> > +	),
> > +
> > +	TP_printk("memcg_id=%llu item=%d val=%d",
> > +		  __entry->id, __entry->item, __entry->val)
> > +);
> > +
> > +DEFINE_EVENT(memcg_rstat, mod_memcg_state,
> > +
> > +	TP_PROTO(struct mem_cgroup *memcg, int item, int val),
> > +
> > +	TP_ARGS(memcg, item, val)
> > +);
> > +
> > +DEFINE_EVENT(memcg_rstat, mod_memcg_lruvec_state,
> > +
> > +	TP_PROTO(struct mem_cgroup *memcg, int item, int val),
> > +
> > +	TP_ARGS(memcg, item, val)
> > +);
> > +
> > +DEFINE_EVENT(memcg_rstat, count_memcg_events,
> > +
> > +	TP_PROTO(struct mem_cgroup *memcg, int item, int val),
> > +
> > +	TP_ARGS(memcg, item, val)
> > +);
> > +
> > +
> > +#endif /* _TRACE_MEMCG_H */
> > +
> > +/* This part must be outside protection */
> > +#include <trace/define_trace.h>
> > diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> > index c098fd7f5c5e..17af08367c68 100644
> > --- a/mm/memcontrol.c
> > +++ b/mm/memcontrol.c
> > @@ -71,6 +71,10 @@
> >
> >  #include <linux/uaccess.h>
> >
> > +#define CREATE_TRACE_POINTS
> > +#include <trace/events/memcg.h>
> > +#undef CREATE_TRACE_POINTS
> > +
> >  #include <trace/events/vmscan.h>
> >
> >  struct cgroup_subsys memory_cgrp_subsys __read_mostly;
> > @@ -682,7 +686,9 @@ void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
> >  		return;
> >
> >  	__this_cpu_add(memcg->vmstats_percpu->state[i], val);
> > -	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
> > +	val = memcg_state_val_in_pages(idx, val);
> > +	memcg_rstat_updated(memcg, val);
> > +	trace_mod_memcg_state(memcg, idx, val);
> >  }
> >
> >  /* idx can be of type enum memcg_stat_item or node_stat_item. */
> > @@ -741,7 +747,9 @@ static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
> >  	/* Update lruvec */
> >  	__this_cpu_add(pn->lruvec_stats_percpu->state[i], val);
> >
> > -	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
> > +	val = memcg_state_val_in_pages(idx, val);
> > +	memcg_rstat_updated(memcg, val);
> > +	trace_mod_memcg_lruvec_state(memcg, idx, val);
> >  	memcg_stats_unlock();
> >  }
> >
> > @@ -832,6 +840,7 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
> >  	memcg_stats_lock();
> >  	__this_cpu_add(memcg->vmstats_percpu->events[i], count);
> >  	memcg_rstat_updated(memcg, count);
> > +	trace_count_memcg_events(memcg, idx, count);
>
> count here is an unsigned long, and we are casting it to int, right?
>
> Would it be slightly better if the tracepoint uses a long instead of
> int? It's still not ideal but probably better than int.
>

Do you mean something like the following? If this looks good to you then
we can ask Andrew to squash this in the patch.

diff --git a/include/trace/events/memcg.h b/include/trace/events/memcg.h
index 913db9aba580..37812900acce 100644
--- a/include/trace/events/memcg.h
+++ b/include/trace/events/memcg.h
@@ -11,14 +11,14 @@
 
 DECLARE_EVENT_CLASS(memcg_rstat,
 
-	TP_PROTO(struct mem_cgroup *memcg, int item, int val),
+	TP_PROTO(struct mem_cgroup *memcg, int item, long val),
 
 	TP_ARGS(memcg, item, val),
 
 	TP_STRUCT__entry(
 		__field(u64, id)
 		__field(int, item)
-		__field(int, val)
+		__field(long, val)
 	),
 
 	TP_fast_assign(
@@ -33,21 +33,21 @@ DECLARE_EVENT_CLASS(memcg_rstat,
 
 DEFINE_EVENT(memcg_rstat, mod_memcg_state,
 
-	TP_PROTO(struct mem_cgroup *memcg, int item, int val),
+	TP_PROTO(struct mem_cgroup *memcg, int item, long val),
 
 	TP_ARGS(memcg, item, val)
 );
 
 DEFINE_EVENT(memcg_rstat, mod_memcg_lruvec_state,
 
-	TP_PROTO(struct mem_cgroup *memcg, int item, int val),
+	TP_PROTO(struct mem_cgroup *memcg, int item, long val),
 
 	TP_ARGS(memcg, item, val)
 );
 
 DEFINE_EVENT(memcg_rstat, count_memcg_events,
 
-	TP_PROTO(struct mem_cgroup *memcg, int item, int val),
+	TP_PROTO(struct mem_cgroup *memcg, int item, long val),
 
 	TP_ARGS(memcg, item, val)
 );
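
As an aside, a minimal, purely illustrative userspace sketch (not part of the
patch, names are made up) of the narrowing Yosry is pointing out: an unsigned
long count above INT_MAX wraps when converted to int, but keeps its value when
converted to long on LP64 targets such as 64-bit Linux.

/*
 * Standalone userspace sketch, illustrative only (not kernel code):
 * shows why a long tracepoint argument preserves a large unsigned
 * long count while an int argument does not.
 */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long count = (unsigned long)INT_MAX + 42;
	int as_int = (int)count;	/* wraps to a negative value */
	long as_long = (long)count;	/* full value preserved on LP64 */

	printf("count=%lu as_int=%d as_long=%ld\n", count, as_int, as_long);
	return 0;
}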