On Sun 03-07-16 11:37:33, Tetsuo Handa wrote: > >From 3be379c6b42a0901cd81fb2c743e321b6fbdec5b Mon Sep 17 00:00:00 2001 > From: Tetsuo Handa <penguin-kernel@xxxxxxxxxxxxxxxxxxx> > Date: Sat, 2 Jul 2016 22:55:17 +0900 > Subject: [PATCH 2/8] mm,oom_reaper: Reduce find_lock_task_mm() usage. > > Since holding mm_struct with elevated mm_count for a second is harmless, > we can determine mm_struct and hold it upon entry of oom_reap_task(). > This patch has no functional change. Future patch in this series will > eliminate find_lock_task_mm() usage from the OOM reaper. OK, this seems to reduce the code size and makes the code more readable. > Signed-off-by: Tetsuo Handa <penguin-kernel@xxxxxxxxxxxxxxxxxxx> Acked-by: Michal Hocko <mhocko@xxxxxxxx> > --- > mm/oom_kill.c | 79 ++++++++++++++++++++++++++++------------------------------- > 1 file changed, 37 insertions(+), 42 deletions(-) > > diff --git a/mm/oom_kill.c b/mm/oom_kill.c > index 16340f2..76c765e 100644 > --- a/mm/oom_kill.c > +++ b/mm/oom_kill.c > @@ -451,12 +451,10 @@ static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait); > static struct task_struct *oom_reaper_list; > static DEFINE_SPINLOCK(oom_reaper_lock); > > -static bool __oom_reap_task(struct task_struct *tsk) > +static bool __oom_reap_task(struct task_struct *tsk, struct mm_struct *mm) > { > struct mmu_gather tlb; > struct vm_area_struct *vma; > - struct mm_struct *mm = NULL; > - struct task_struct *p; > struct zap_details details = {.check_swap_entries = true, > .ignore_dirty = true}; > bool ret = true; > @@ -477,22 +475,9 @@ static bool __oom_reap_task(struct task_struct *tsk) > */ > mutex_lock(&oom_lock); > > - /* > - * Make sure we find the associated mm_struct even when the particular > - * thread has already terminated and cleared its mm. > - * We might have race with exit path so consider our work done if there > - * is no mm. 
> - */ > - p = find_lock_task_mm(tsk); > - if (!p) > - goto unlock_oom; > - mm = p->mm; > - atomic_inc(&mm->mm_count); > - task_unlock(p); > - > if (!down_read_trylock(&mm->mmap_sem)) { > ret = false; > - goto mm_drop; > + goto unlock_oom; > } > > /* > @@ -502,7 +487,7 @@ static bool __oom_reap_task(struct task_struct *tsk) > */ > if (!mmget_not_zero(mm)) { > up_read(&mm->mmap_sem); > - goto mm_drop; > + goto unlock_oom; > } > > tlb_gather_mmu(&tlb, mm, 0, -1); > @@ -550,8 +535,6 @@ static bool __oom_reap_task(struct task_struct *tsk) > * put the oom_reaper out of the way. > */ > mmput_async(mm); > -mm_drop: > - mmdrop(mm); > unlock_oom: > mutex_unlock(&oom_lock); > return ret; > @@ -561,36 +544,45 @@ unlock_oom: > static void oom_reap_task(struct task_struct *tsk) > { > int attempts = 0; > + struct mm_struct *mm = NULL; > + struct task_struct *p = find_lock_task_mm(tsk); > + > + /* > + * Make sure we find the associated mm_struct even when the particular > + * thread has already terminated and cleared its mm. > + * We might have race with exit path so consider our work done if there > + * is no mm. 
> + */ > + if (!p) > + goto done; > + mm = p->mm; > + atomic_inc(&mm->mm_count); > + task_unlock(p); > > /* Retry the down_read_trylock(mmap_sem) a few times */ > - while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task(tsk)) > + while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task(tsk, mm)) > schedule_timeout_idle(HZ/10); > > - if (attempts > MAX_OOM_REAP_RETRIES) { > - struct task_struct *p; > + if (attempts <= MAX_OOM_REAP_RETRIES) > + goto done; > > - pr_info("oom_reaper: unable to reap pid:%d (%s)\n", > - task_pid_nr(tsk), tsk->comm); > + pr_info("oom_reaper: unable to reap pid:%d (%s)\n", > + task_pid_nr(tsk), tsk->comm); > > - /* > - * If we've already tried to reap this task in the past and > - * failed it probably doesn't make much sense to try yet again > - * so hide the mm from the oom killer so that it can move on > - * to another task with a different mm struct. > - */ > - p = find_lock_task_mm(tsk); > - if (p) { > - if (test_and_set_bit(MMF_OOM_NOT_REAPABLE, &p->mm->flags)) { > - pr_info("oom_reaper: giving up pid:%d (%s)\n", > - task_pid_nr(tsk), tsk->comm); > - set_bit(MMF_OOM_REAPED, &p->mm->flags); > - } > - task_unlock(p); > - } > - > - debug_show_all_locks(); > + /* > + * If we've already tried to reap this task in the past and > + * failed it probably doesn't make much sense to try yet again > + * so hide the mm from the oom killer so that it can move on > + * to another task with a different mm struct. 
> + */ > + if (test_and_set_bit(MMF_OOM_NOT_REAPABLE, &mm->flags)) { > + pr_info("oom_reaper: giving up pid:%d (%s)\n", > + task_pid_nr(tsk), tsk->comm); > + set_bit(MMF_OOM_REAPED, &mm->flags); > } > + debug_show_all_locks(); > > +done: > /* > * Clear TIF_MEMDIE because the task shouldn't be sitting on a > * reasonably reclaimable memory anymore or it is not a good candidate > @@ -602,6 +594,9 @@ static void oom_reap_task(struct task_struct *tsk) > > /* Drop a reference taken by wake_oom_reaper */ > put_task_struct(tsk); > + /* Drop a reference taken above. */ > + if (mm) > + mmdrop(mm); > } > > static int oom_reaper(void *unused) > -- > 1.8.3.1 -- Michal Hocko SUSE Labs -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href="mailto:dont@xxxxxxxxx"> email@xxxxxxxxx </a>