Since slab objects and kmem pages are charged to the object cgroup
instead of the memory cgroup, memcg_reparent_objcgs() will reparent
this cgroup and all its descendants to the parent cgroup. This already
makes further list_lru_add()'s add elements to the parent's list. So we
do not need to change the kmemcg_id of an offline cgroup to its
parent's id; doing so just wastes CPU cycles. Just remove the redundant
code.

Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
 mm/memcontrol.c | 20 ++------------------
 1 file changed, 2 insertions(+), 18 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 64ada9e650a5..21e12312509c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3485,8 +3485,7 @@ static int memcg_online_kmem(struct mem_cgroup *memcg)
 
 static void memcg_offline_kmem(struct mem_cgroup *memcg)
 {
-	struct cgroup_subsys_state *css;
-	struct mem_cgroup *parent, *child;
+	struct mem_cgroup *parent;
 	int kmemcg_id;
 
 	if (memcg->kmem_state != KMEM_ONLINE)
@@ -3503,22 +3502,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
 	kmemcg_id = memcg->kmemcg_id;
 	BUG_ON(kmemcg_id < 0);
 
-	/*
-	 * Change kmemcg_id of this cgroup and all its descendants to the
-	 * parent's id, and then move all entries from this cgroup's list_lrus
-	 * to ones of the parent. After we have finished, all list_lrus
-	 * corresponding to this cgroup are guaranteed to remain empty. The
-	 * ordering is imposed by list_lru_node->lock taken by
-	 * memcg_drain_all_list_lrus().
-	 */
-	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
-	css_for_each_descendant_pre(css, &memcg->css) {
-		child = mem_cgroup_from_css(css);
-		BUG_ON(child->kmemcg_id != kmemcg_id);
-		child->kmemcg_id = parent->kmemcg_id;
-	}
-	rcu_read_unlock();
-
+	/* memcg_reparent_objcgs() must be called before this. */
	memcg_drain_all_list_lrus(kmemcg_id, parent);
 
 	memcg_free_cache_id(kmemcg_id);
--
2.11.0
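
For readers outside the kernel tree, the sketch below is a minimal
standalone userspace toy model of the changelog's argument, not kernel
code: all toy_* names are hypothetical stand-ins. It only illustrates
that once the object cgroup has been redirected to the parent (as
memcg_reparent_objcgs() does), later list_lru_add()'s are accounted to
the parent anyway, so rewriting each descendant's kmemcg_id is
unnecessary.

/*
 * Toy model only (hypothetical toy_* types, no real kernel APIs).
 * Builds with a plain C compiler.
 */
#include <stdio.h>

struct toy_memcg {
	const char *name;
	struct toy_memcg *parent;
	int nr_lru_items;	/* stand-in for this memcg's list_lru entries */
};

/* Stand-in for struct obj_cgroup: the indirection kmem charging goes through. */
struct toy_objcg {
	struct toy_memcg *memcg;	/* where new list_lru items are accounted */
};

/* Models memcg_reparent_objcgs(): redirect the objcg to the parent. */
static void toy_reparent_objcgs(struct toy_objcg *objcg, struct toy_memcg *child)
{
	objcg->memcg = child->parent;
}

/* Models list_lru_add(): the item lands on whatever memcg the objcg points to. */
static void toy_list_lru_add(struct toy_objcg *objcg)
{
	objcg->memcg->nr_lru_items++;
}

int main(void)
{
	struct toy_memcg root  = { .name = "root",  .parent = NULL };
	struct toy_memcg child = { .name = "child", .parent = &root };
	struct toy_objcg objcg = { .memcg = &child };

	toy_list_lru_add(&objcg);	/* charged to child while it is online */

	/* Offline path: reparent objcgs first, then drain the child's lists. */
	toy_reparent_objcgs(&objcg, &child);
	root.nr_lru_items += child.nr_lru_items;	/* memcg_drain_all_list_lrus() */
	child.nr_lru_items = 0;

	toy_list_lru_add(&objcg);	/* already lands on the parent */

	/* The child's id never needed to change for its lists to stay empty. */
	printf("root=%d child=%d\n", root.nr_lru_items, child.nr_lru_items);
	return 0;
}

Running the toy program prints "root=2 child=0": after the reparent and
drain, nothing is ever added under the child again, which is the
property the removed descendant walk was trying to guarantee by hand.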