2011/5/20 KOSAKI Motohiro <kosaki.motohiro@xxxxxxxxxxxxxx>: > CAI Qian reported his kernel did hang-up if he ran fork intensive > workload and then invoke oom-killer. > > The problem is, current oom calculation uses 0-1000 normalized value > (The unit is a permillage of system-ram). Its low precision make > a lot of same oom score. IOW, in his case, all processes have smaller > oom score than 1 and internal calculation round it to 1. > > Thus oom-killer kill ineligible process. This regression is caused by > commit a63d83f427 (oom: badness heuristic rewrite). > > The solution is, the internal calculation just use number of pages > instead of permillage of system-ram. And convert it to permillage > value at displaying time. > > This patch doesn't change any ABI (included /proc/<pid>/oom_score_adj) > even though current logic has a lot of my dislike thing. > > Reported-by: CAI Qian <caiqian@xxxxxxxxxx> > Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@xxxxxxxxxxxxxx> > --- >  fs/proc/base.c      |   13 ++++++---- >  include/linux/oom.h |    7 +---- >  mm/oom_kill.c       |   60 +++++++++++++++++++++++++++++++++----------------- >  3 files changed, 49 insertions(+), 31 deletions(-) > > diff --git a/fs/proc/base.c b/fs/proc/base.c > index dfa5327..d6b0424 100644 > --- a/fs/proc/base.c > +++ b/fs/proc/base.c > @@ -476,14 +476,17 @@ static const struct file_operations proc_lstats_operations = { > >  static int proc_oom_score(struct task_struct *task, char *buffer) >  { > -    unsigned long points = 0; > +    unsigned long points; > +    unsigned long ratio = 0; > +    unsigned long totalpages = totalram_pages + total_swap_pages + 1; Do we need +1? oom_badness does have the check. 
> >     read_lock(&tasklist_lock); > -    if (pid_alive(task)) > -        points = oom_badness(task, NULL, NULL, > -                    totalram_pages + total_swap_pages); > +    if (pid_alive(task)) { > +        points = oom_badness(task, NULL, NULL, totalpages); > +        ratio = points * 1000 / totalpages; > +    } >     read_unlock(&tasklist_lock); > -    return sprintf(buffer, "%lu\n", points); > +    return sprintf(buffer, "%lu\n", ratio); >  } > >  struct limit_names { > diff --git a/include/linux/oom.h b/include/linux/oom.h > index 5e3aa83..0f5b588 100644 > --- a/include/linux/oom.h > +++ b/include/linux/oom.h > @@ -40,7 +40,8 @@ enum oom_constraint { >     CONSTRAINT_MEMCG, >  }; > > -extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, > +/* The badness from the OOM killer */ > +extern unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *mem, >             const nodemask_t *nodemask, unsigned long totalpages); >  extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); >  extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); > @@ -62,10 +63,6 @@ static inline void oom_killer_enable(void) >     oom_killer_disabled = false; >  } > > -/* The badness from the OOM killer */ > -extern unsigned long badness(struct task_struct *p, struct mem_cgroup *mem, > -           const nodemask_t *nodemask, unsigned long uptime); > - >  extern struct task_struct *find_lock_task_mm(struct task_struct *p); > >  /* sysctls */ > diff --git a/mm/oom_kill.c b/mm/oom_kill.c > index e6a6c6f..8bbc3df 100644 > --- a/mm/oom_kill.c > +++ b/mm/oom_kill.c > @@ -132,10 +132,12 @@ static bool oom_unkillable_task(struct task_struct *p, >  * predictable as possible.  The goal is to return the highest value for the >  * task consuming the most memory to avoid subsequent oom failures. 
>  */ > -unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, > +unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *mem, >            const nodemask_t *nodemask, unsigned long totalpages) >  { > -    int points; > +    unsigned long points; > +    unsigned long score_adj = 0; > + > >     if (oom_unkillable_task(p, mem, nodemask)) >         return 0; > @@ -160,7 +162,7 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, >      */ >     if (p->flags & PF_OOM_ORIGIN) { >         task_unlock(p); > -        return 1000; > +        return ULONG_MAX; >     } > >     /* > @@ -176,33 +178,49 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, >      */ >     points = get_mm_rss(p->mm) + p->mm->nr_ptes; >     points += get_mm_counter(p->mm, MM_SWAPENTS); > - > -    points *= 1000; > -    points /= totalpages; >     task_unlock(p); > >     /* >      * Root processes get 3% bonus, just like the __vm_enough_memory() >      * implementation used by LSMs. > +     * > +     * XXX: Too large bonus, example, if the system have tera-bytes memory.. >      */ > -    if (has_capability_noaudit(p, CAP_SYS_ADMIN)) > -        points -= 30; > +    if (has_capability_noaudit(p, CAP_SYS_ADMIN)) { > +        if (points >= totalpages / 32) > +            points -= totalpages / 32; > +        else > +            points = 0; Odd. Why do we initialize points with 0? I think the idea is good. -- Kind regards, Minchan Kim -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxxx For more info on Linux MM, see: http://www.linux-mm.org/ . Fight unfair telecom internet charges in Canada: sign http://stopthemeter.ca/ Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>