On 2018/08/07 6:50, Tetsuo Handa wrote:
> 	list_for_each_entry_safe(p, tmp, &oom_victim_list, oom_victim_list) {
> 		struct mm_struct *mm = p->signal->oom_mm;
>
> 		if (oom_unkillable_task(p, oc->memcg, oc->nodemask))
> 			continue;
> 		ret = true;
> +		/*
> +		 * Since memcg OOM allows forced charge, we can safely wait
> +		 * until __mmput() completes.
> +		 */
> +		if (is_memcg_oom(oc))
> +			return true;

Oops. If this OOM victim was blocked on some lock which the current thread
is holding, waiting forever unconditionally is not safe.

> #ifdef CONFIG_MMU
> 		/*
> 		 * Since the OOM reaper exists, we can safely wait until
> 		 * MMF_OOM_SKIP is set.
> 		 */
> 		if (!test_bit(MMF_OOM_SKIP, &mm->flags)) {
> 			if (!oom_reap_target) {
> 				get_task_struct(p);
> 				oom_reap_target = p;
> 				trace_wake_reaper(p->pid);
> 				wake_up(&oom_reaper_wait);
> 			}
> #endif
> 			continue;
> 		}
> #endif
> 		/* We can wait as long as OOM score is decreasing over time. */
> 		if (!victim_mm_stalling(p, mm))
> 			continue;
> 		gaveup = true;
> 		list_del(&p->oom_victim_list);
> 		/* Drop a reference taken by mark_oom_victim(). */
> 		put_task_struct(p);
> 	}
> 	if (gaveup)
> 		debug_show_all_locks();
>
> 	return ret;
> }
>
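Just to illustrate what I mean (a rough sketch only, not even compile-tested,
reusing the victim_mm_stalling() helper and the give-up handling already in
the quoted loop): one way would be to keep the memcg case bounded by the same
stall check instead of returning unconditionally, e.g.

		ret = true;
		/*
		 * Even for memcg OOM, stop waiting once the victim's mm is
		 * no longer shrinking, for the victim might be blocked on a
		 * lock which the current thread is holding.
		 */
		if (is_memcg_oom(oc)) {
			/* Keep waiting while the victim makes progress. */
			if (!victim_mm_stalling(p, mm))
				continue;
			/* Otherwise give up on this victim, as below. */
			gaveup = true;
			list_del(&p->oom_victim_list);
			/* Drop a reference taken by mark_oom_victim(). */
			put_task_struct(p);
			continue;
		}

so that the charging thread is not blocked forever when __mmput() cannot
complete.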