On Tue, Apr 13, 2021 at 09:20:24PM -0400, Waiman Long wrote:
> In memcg_slab_free_hook()/pcpu_memcg_free_hook(), obj_cgroup_uncharge()
> is followed by mod_objcg_state()/mod_memcg_state(). Each of these
> function calls goes through a separate irq_save/irq_restore cycle. That
> is inefficient. Introduce a new function obj_cgroup_uncharge_mod_state()
> that combines them with a single irq_save/irq_restore cycle.
>
> Signed-off-by: Waiman Long <longman@xxxxxxxxxx>
> Reviewed-by: Shakeel Butt <shakeelb@xxxxxxxxxx>
> Acked-by: Roman Gushchin <guro@xxxxxx>
> ---
>  include/linux/memcontrol.h |  2 ++
>  mm/memcontrol.c            | 31 +++++++++++++++++++++++++++----
>  mm/percpu.c                |  9 ++-------
>  mm/slab.h                  |  6 +++---
>  4 files changed, 34 insertions(+), 14 deletions(-)
>
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 95f12996e66c..6890f999c1a3 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -1592,6 +1592,8 @@ struct obj_cgroup *get_obj_cgroup_from_current(void);
>  
>  int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
>  void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
> +void obj_cgroup_uncharge_mod_state(struct obj_cgroup *objcg, size_t size,
> +				   struct pglist_data *pgdat, int idx);
>  
>  extern struct static_key_false memcg_kmem_enabled_key;
>  
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index d66e1e38f8ac..b19100c68aa0 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -3225,12 +3225,9 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
>  	return false;
>  }
>  
> -static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
> +static void __refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
>  {
>  	struct memcg_stock_pcp *stock;
> -	unsigned long flags;
> -
> -	local_irq_save(flags);
>  
>  	stock = this_cpu_ptr(&memcg_stock);
>  	if (stock->cached_objcg != objcg) { /* reset if necessary */
> @@ -3243,7 +3240,14 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
>  
>  	if (stock->nr_bytes > PAGE_SIZE)
>  		drain_obj_stock(stock);
> +}
> +
> +static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
> +{
> +	unsigned long flags;
>  
> +	local_irq_save(flags);
> +	__refill_obj_stock(objcg, nr_bytes);
>  	local_irq_restore(flags);
>  }
>  
> @@ -3292,6 +3296,25 @@ void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
>  	refill_obj_stock(objcg, size);
>  }
>  
> +void obj_cgroup_uncharge_mod_state(struct obj_cgroup *objcg, size_t size,
> +				   struct pglist_data *pgdat, int idx)
> +{
> +	unsigned long flags;
> +	struct mem_cgroup *memcg;
> +	struct lruvec *lruvec = NULL;
> +
> +	local_irq_save(flags);
> +	__refill_obj_stock(objcg, size);
> +
> +	rcu_read_lock();
> +	memcg = obj_cgroup_memcg(objcg);
> +	if (pgdat)
> +		lruvec = mem_cgroup_lruvec(memcg, pgdat);
> +	__mod_memcg_lruvec_state(memcg, lruvec, idx, -(int)size);
> +	rcu_read_unlock();
> +	local_irq_restore(flags);
> +}
> +
>  #endif /* CONFIG_MEMCG_KMEM */
>  
>  /*
> diff --git a/mm/percpu.c b/mm/percpu.c
> index 23308113a5ff..fd7aad6d7f90 100644
> --- a/mm/percpu.c
> +++ b/mm/percpu.c
> @@ -1631,13 +1631,8 @@ static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
>  	objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
>  	chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
>  
> -	obj_cgroup_uncharge(objcg, size * num_possible_cpus());
> -
> -	rcu_read_lock();
> -	mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
> -			-(size * num_possible_cpus()));
> -	rcu_read_unlock();
> -
> +	obj_cgroup_uncharge_mod_state(objcg, size * num_possible_cpus(),
> +				      NULL, MEMCG_PERCPU_B);
>  	obj_cgroup_put(objcg);
>  }
>  
> diff --git a/mm/slab.h b/mm/slab.h
> index bc6c7545e487..677cdc52e641 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -366,9 +366,9 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
>  			continue;
>  
>  		objcgs[off] = NULL;
> -		obj_cgroup_uncharge(objcg, obj_full_size(s));
> -		mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
> -				-obj_full_size(s));
> +		obj_cgroup_uncharge_mod_state(objcg, obj_full_size(s),
> +					      page_pgdat(page),
> +					      cache_vmstat_idx(s));
>  		obj_cgroup_put(objcg);
>  	}
>  }
> --
> 2.18.1
>

Please feel free to add:

Tested-by: Masayoshi Mizuma <m.mizuma@xxxxxxxxxxxxxx>

Thanks!
Masa