The patch titled
     Subject: mm, oom: remove redundant task_in_mem_cgroup() check
has been added to the -mm tree.  Its filename is
     mm-oom-remove-redundant-task_in_mem_cgroup-check.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-oom-remove-redundant-task_in_mem_cgroup-check.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-oom-remove-redundant-task_in_mem_cgroup-check.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Shakeel Butt <shakeelb@xxxxxxxxxx>
Subject: mm, oom: remove redundant task_in_mem_cgroup() check

oom_unkillable_task() can be called from three different contexts, i.e.
global OOM, memcg OOM and the oom_score procfs interface.  At the moment
oom_unkillable_task() does a task_in_mem_cgroup() check on the given
process.  Since there is no reason to perform that check for global OOM
and the oom_score procfs interface, those contexts pass a NULL memcg and
skip it.  For the memcg OOM context, however, oom_unkillable_task() is
always called from mem_cgroup_scan_tasks(), which already restricts the
iteration to tasks in the target memcg, so the task_in_mem_cgroup() check
is redundant there as well.  Just remove the check altogether.

Link: http://lkml.kernel.org/r/20190624212631.87212-2-shakeelb@xxxxxxxxxx
Signed-off-by: Shakeel Butt <shakeelb@xxxxxxxxxx>
Signed-off-by: Tetsuo Handa <penguin-kernel@xxxxxxxxxxxxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: KOSAKI Motohiro <kosaki.motohiro@xxxxxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Nick Piggin <npiggin@xxxxxxx>
Cc: Paul Jackson <pj@xxxxxxx>
Cc: Roman Gushchin <guro@xxxxxx>
Cc: Vladimir Davydov <vdavydov.dev@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 fs/proc/base.c             |    2 +-
 include/linux/memcontrol.h |    7 -------
 include/linux/oom.h        |    2 +-
 mm/memcontrol.c            |   26 --------------------------
 mm/oom_kill.c              |   19 +++++++------------
 5 files changed, 9 insertions(+), 47 deletions(-)

--- a/fs/proc/base.c~mm-oom-remove-redundant-task_in_mem_cgroup-check
+++ a/fs/proc/base.c
@@ -532,7 +532,7 @@ static int proc_oom_score(struct seq_fil
 	unsigned long totalpages = totalram_pages() + total_swap_pages;
 	unsigned long points = 0;

-	points = oom_badness(task, NULL, NULL, totalpages) *
+	points = oom_badness(task, NULL, totalpages) *
 					1000 / totalpages;
 	seq_printf(m, "%lu\n", points);

--- a/include/linux/memcontrol.h~mm-oom-remove-redundant-task_in_mem_cgroup-check
+++ a/include/linux/memcontrol.h
@@ -394,7 +394,6 @@ out:

 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

-bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
@@ -874,12 +873,6 @@ static inline bool mm_match_cgroup(struc
 {
 	return true;
 }
-
-static inline bool task_in_mem_cgroup(struct task_struct *task,
-				      const struct mem_cgroup *memcg)
-{
-	return true;
-}

 static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 {
--- a/include/linux/oom.h~mm-oom-remove-redundant-task_in_mem_cgroup-check
+++ a/include/linux/oom.h
@@ -108,7 +108,7 @@ static inline vm_fault_t check_stable_ad
 bool __oom_reap_task_mm(struct mm_struct *mm);

 extern unsigned long oom_badness(struct task_struct *p,
-		struct mem_cgroup *memcg, const nodemask_t *nodemask,
+		const nodemask_t *nodemask,
 		unsigned long totalpages);

 extern bool out_of_memory(struct oom_control *oc);

--- a/mm/memcontrol.c~mm-oom-remove-redundant-task_in_mem_cgroup-check
+++ a/mm/memcontrol.c
@@ -1259,32 +1259,6 @@ void mem_cgroup_update_lru_size(struct l
 	*lru_size += nr_pages;
 }

-bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
-{
-	struct mem_cgroup *task_memcg;
-	struct task_struct *p;
-	bool ret;
-
-	p = find_lock_task_mm(task);
-	if (p) {
-		task_memcg = get_mem_cgroup_from_mm(p->mm);
-		task_unlock(p);
-	} else {
-		/*
-		 * All threads may have already detached their mm's, but the oom
-		 * killer still needs to detect if they have already been oom
-		 * killed to prevent needlessly killing additional tasks.
-		 */
-		rcu_read_lock();
-		task_memcg = mem_cgroup_from_task(task);
-		css_get(&task_memcg->css);
-		rcu_read_unlock();
-	}
-	ret = mem_cgroup_is_descendant(task_memcg, memcg);
-	css_put(&task_memcg->css);
-	return ret;
-}
-
 /**
  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
  * @memcg: the memory cgroup

--- a/mm/oom_kill.c~mm-oom-remove-redundant-task_in_mem_cgroup-check
+++ a/mm/oom_kill.c
@@ -153,17 +153,13 @@ static inline bool is_memcg_oom(struct o

 /* return true if the task is not adequate as candidate victim task. */
 static bool oom_unkillable_task(struct task_struct *p,
-		struct mem_cgroup *memcg, const nodemask_t *nodemask)
+		const nodemask_t *nodemask)
 {
 	if (is_global_init(p))
 		return true;
 	if (p->flags & PF_KTHREAD)
 		return true;

-	/* When mem_cgroup_out_of_memory() and p is not member of the group */
-	if (memcg && !task_in_mem_cgroup(p, memcg))
-		return true;
-
 	/* p may not have freeable memory in nodemask */
 	if (!has_intersects_mems_allowed(p, nodemask))
 		return true;
@@ -194,20 +190,19 @@ static bool is_dump_unreclaim_slabs(void
  * oom_badness - heuristic function to determine which candidate task to kill
  * @p: task struct of which task we should calculate
  * @totalpages: total present RAM allowed for page allocation
- * @memcg: task's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
-unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
+unsigned long oom_badness(struct task_struct *p,
 			  const nodemask_t *nodemask, unsigned long totalpages)
 {
 	long points;
 	long adj;

-	if (oom_unkillable_task(p, memcg, nodemask))
+	if (oom_unkillable_task(p, nodemask))
 		return 0;

 	p = find_lock_task_mm(p);
@@ -318,7 +313,7 @@ static int oom_evaluate_task(struct task
 	struct oom_control *oc = arg;
 	unsigned long points;

-	if (oom_unkillable_task(task, NULL, oc->nodemask))
+	if (oom_unkillable_task(task, oc->nodemask))
 		goto next;

 	/*
@@ -342,7 +337,7 @@ static int oom_evaluate_task(struct task
 		goto select;
 	}

-	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
+	points = oom_badness(task, oc->nodemask, oc->totalpages);
 	if (!points || points < oc->chosen_points)
 		goto next;

@@ -387,7 +382,7 @@ static int dump_task(struct task_struct
 	struct oom_control *oc = arg;
 	struct task_struct *task;

-	if (oom_unkillable_task(p, NULL, oc->nodemask))
+	if (oom_unkillable_task(p, oc->nodemask))
 		return 0;

 	task = find_lock_task_mm(p);
@@ -1085,7 +1080,7 @@ bool out_of_memory(struct oom_control *o
 	check_panic_on_oom(oc);

 	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
-	    current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
+	    current->mm && !oom_unkillable_task(current, oc->nodemask) &&
 	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
 		get_task_struct(current);
 		oc->chosen = current;
_

Patches currently in -mm which might be from shakeelb@xxxxxxxxxx are

slub-dont-panic-for-memcg-kmem-cache-creation-failure.patch
memcg-oom-no-oom-kill-for-__gfp_retry_mayfail.patch
memcg-fsnotify-no-oom-kill-for-remote-memcg-charging.patch
mm-memcg-introduce-memoryeventslocal.patch
mm-oom-refactor-dump_tasks-for-memcg-ooms.patch
mm-oom-remove-redundant-task_in_mem_cgroup-check.patch
oom-decouple-mems_allowed-from-oom_unkillable_task.patch
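
For readers following the interface change outside the kernel tree, below is a
minimal standalone sketch (plain C, not kernel code and not part of the patch)
of the simplified oom_unkillable_task() after the memcg argument is dropped.
The types and helpers here (struct task, struct nodemask, is_kthread(), the
trivial has_intersects_mems_allowed()) are hypothetical stand-ins for the real
kernel symbols; only the shape of the call matters.

/* sketch.c - illustrative model of the post-patch oom_unkillable_task() */
#include <stdbool.h>
#include <stdio.h>

struct nodemask { int dummy; };         /* stand-in for nodemask_t */
struct task {
	bool global_init;               /* stand-in for is_global_init() state */
	bool kthread;                   /* stand-in for PF_KTHREAD */
};

static bool is_global_init(const struct task *p) { return p->global_init; }
static bool is_kthread(const struct task *p) { return p->kthread; }
static bool has_intersects_mems_allowed(const struct task *p,
					const struct nodemask *nodemask)
{
	(void)p;
	(void)nodemask;
	return true;                    /* assume freeable memory in nodemask */
}

/*
 * No memcg parameter and no task_in_mem_cgroup() check: memcg OOM already
 * iterates only in-cgroup tasks via mem_cgroup_scan_tasks(), and the global
 * OOM and oom_score callers passed NULL anyway.
 */
static bool oom_unkillable_task(const struct task *p,
				const struct nodemask *nodemask)
{
	if (is_global_init(p))
		return true;
	if (is_kthread(p))
		return true;
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;
	return false;
}

int main(void)
{
	struct task init_task = { .global_init = true,  .kthread = false };
	struct task user_task = { .global_init = false, .kthread = false };

	/* Every remaining call site passes only a nodemask. */
	printf("init task unkillable: %d\n",
	       oom_unkillable_task(&init_task, NULL));
	printf("user task unkillable: %d\n",
	       oom_unkillable_task(&user_task, NULL));
	return 0;
}

The sketch is only meant to show that, after this patch, cgroup scoping is
handled before oom_unkillable_task() is ever reached, so the function itself
needs nothing beyond the task and the nodemask.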