When we uncharge a page, we wake up oom victims if the memcg oom
handling is outsourced to userspace. uncharge_batch() does this for
normal and kmem pages, but not for slab pages, which is likely an
omission. So add the missing memcg_oom_recover() call to
__memcg_kmem_uncharge().

Also, memory.oom_control only exists on cgroup v1, so guard the
memcg->under_oom test with cgroup_subsys_on_dfl(memory_cgrp_subsys).

Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
 mm/memcontrol.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7afca9677693..a3f26522765a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3126,8 +3126,10 @@ static int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
  */
 static void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
-	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
 		page_counter_uncharge(&memcg->kmem, nr_pages);
+		memcg_oom_recover(memcg);
+	}
 
 	refill_stock(memcg, nr_pages);
 }
@@ -6806,11 +6808,15 @@ static void uncharge_batch(const struct uncharge_gather *ug)
 
 	if (!mem_cgroup_is_root(ug->memcg)) {
 		page_counter_uncharge(&ug->memcg->memory, ug->nr_pages);
-		if (do_memsw_account())
-			page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages);
-		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
-			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
-		memcg_oom_recover(ug->memcg);
+		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
+			if (!cgroup_memory_noswap)
+				page_counter_uncharge(&ug->memcg->memsw,
+						      ug->nr_pages);
+			if (ug->nr_kmem)
+				page_counter_uncharge(&ug->memcg->kmem,
+						      ug->nr_kmem);
+			memcg_oom_recover(ug->memcg);
+		}
 	}
 
 	local_irq_save(flags);
-- 
2.11.0
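
P.S. For reviewers less familiar with the wake-up path: memcg_oom_recover()
is the helper that wakes tasks sleeping on the memcg oom waitqueue when
userspace is handling the OOM, which is why calling it from the uncharge
paths matters. A simplified, paraphrased sketch of that helper (not the
verbatim mm/memcontrol.c code, which also carries ordering comments around
the lockless under_oom check):

	static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

	/*
	 * Paraphrased sketch: wake any waiters associated with @memcg
	 * after memory has been uncharged, so userspace-managed OOM
	 * handling can make progress.  under_oom is read locklessly; a
	 * stale read only costs a spurious wake-up.
	 */
	static void memcg_oom_recover(struct mem_cgroup *memcg)
	{
		if (memcg && memcg->under_oom)
			__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
	}

On cgroup v2 the memory.oom_control interface does not exist, so there are
never such waiters to recover; that is what the new
cgroup_subsys_on_dfl(memory_cgrp_subsys) guard encodes.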