Use a helper function to check whether we need to deal with the OOM
condition, instead of open-coding the same gfp_mask test in both
memcg_charge_kmem() and the page allocator.

Signed-off-by: Qiang Huang <h.huangqiang@xxxxxxxxxx>
---
 include/linux/oom.h | 5 +++++
 mm/memcontrol.c     | 9 +--------
 mm/page_alloc.c     | 2 +-
 3 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/include/linux/oom.h b/include/linux/oom.h
index da60007..d061c63 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -82,6 +82,11 @@ static inline void oom_killer_enable(void)
 	oom_killer_disabled = false;
 }
 
+static inline bool may_oom(gfp_t gfp_mask)
+{
+	return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
+}
+
 extern struct task_struct *find_lock_task_mm(struct task_struct *p);
 
 /* sysctls */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b73988a..e07fcfa 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2910,21 +2910,14 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
 	struct res_counter *fail_res;
 	struct mem_cgroup *_memcg;
 	int ret = 0;
-	bool may_oom;
 
 	ret = res_counter_charge(&memcg->kmem, size, &fail_res);
 	if (ret)
 		return ret;
 
-	/*
-	 * Conditions under which we can wait for the oom_killer. Those are
-	 * the same conditions tested by the core page allocator
-	 */
-	may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY);
-
 	_memcg = memcg;
 	ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT,
-				      &_memcg, may_oom);
+				      &_memcg, may_oom(gfp));
 
 	if (ret == -EINTR) {
 		/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b7c612d..42af675 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2589,7 +2589,7 @@ rebalance:
 	 * running out of options and have to consider going OOM
 	 */
 	if (!did_some_progress) {
-		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
+		if (may_oom(gfp_mask)) {
 			if (oom_killer_disabled)
 				goto nopage;
 			/* Coredumps can quickly deplete all memory reserves */
-- 
1.8.3
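
For reference, a minimal userspace sketch of the condition the new may_oom()
helper encodes. The flag values below are placeholders for illustration, not
the kernel's real __GFP_* bit positions; GFP_KERNEL is modeled as including
__GFP_FS and GFP_NOFS as lacking it, as in include/linux/gfp.h.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

/* Placeholder bit values for illustration only; see include/linux/gfp.h */
#define __GFP_FS	0x01u
#define __GFP_NORETRY	0x02u
#define GFP_KERNEL	__GFP_FS	/* can sleep and re-enter the FS */
#define GFP_NOFS	0x00u		/* must not re-enter the FS */

/* Same test as the may_oom() helper added by this patch */
static bool may_oom(gfp_t gfp_mask)
{
	return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
}

int main(void)
{
	/* 1: allocation may wait for the OOM killer */
	printf("GFP_KERNEL               -> %d\n", may_oom(GFP_KERNEL));
	/* 0: no __GFP_FS, so the allocator fails instead of invoking OOM */
	printf("GFP_NOFS                 -> %d\n", may_oom(GFP_NOFS));
	/* 0: caller asked not to retry, so no OOM either */
	printf("GFP_KERNEL|__GFP_NORETRY -> %d\n",
	       may_oom(GFP_KERNEL | __GFP_NORETRY));
	return 0;
}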