Change the return type of out_of_memory() from bool to enum oom_status for
later use. The definition of enum oom_status is moved from mm/memcontrol.c
to include/linux/oom.h. No functional change.

Signed-off-by: Yafang Shao <laoar.shao@xxxxxxxxx>
---
 include/linux/memcontrol.h |  1 +
 include/linux/oom.h        |  9 ++++++++-
 mm/memcontrol.c            | 30 +++++++++++++-----------------
 mm/oom_kill.c              | 14 +++++++-------
 mm/page_alloc.c            |  3 ++-
 5 files changed, 31 insertions(+), 26 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1b4150ff64be..98bd8fb2f5c7 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -21,6 +21,7 @@
 #include <linux/vmstat.h>
 #include <linux/writeback.h>
 #include <linux/page-flags.h>
+#include <linux/oom.h>
 
 struct mem_cgroup;
 struct page;
diff --git a/include/linux/oom.h b/include/linux/oom.h
index c696c265f019..3dca5ce189e6 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -22,6 +22,13 @@ enum oom_constraint {
 	CONSTRAINT_MEMCG,
 };
 
+enum oom_status {
+	OOM_SUCCESS,
+	OOM_FAILED,
+	OOM_ASYNC,
+	OOM_SKIPPED
+};
+
 /*
  * Details of the page allocation that triggered the oom killer that are used to
  * determine what should be killed.
@@ -110,7 +117,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm);
 extern unsigned long oom_badness(struct task_struct *p,
 		unsigned long totalpages);
 
-extern bool out_of_memory(struct oom_control *oc);
+enum oom_status out_of_memory(struct oom_control *oc);
 
 extern void exit_oom_victim(void);
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5beea03dd58a..22418b55804f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1570,8 +1570,9 @@ unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
 	return page_counter_read(&memcg->memory);
 }
 
-static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
-				     int order)
+static enum oom_status mem_cgroup_out_of_memory(struct mem_cgroup *memcg,
+						gfp_t gfp_mask,
+						int order)
 {
 	struct oom_control oc = {
 		.zonelist = NULL,
@@ -1580,16 +1581,20 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
 		.gfp_mask = gfp_mask,
 		.order = order,
 	};
-	bool ret;
+	enum oom_status ret;
 
 	if (mutex_lock_killable(&oom_lock))
-		return true;
+		return OOM_SUCCESS;
 	/*
 	 * A few threads which were not waiting at mutex_lock_killable() can
 	 * fail to bail out. Therefore, check again after holding oom_lock.
 	 */
-	ret = should_force_charge() || out_of_memory(&oc);
+	if (should_force_charge())
+		ret = OOM_SUCCESS;
+	else
+		ret = out_of_memory(&oc);
 	mutex_unlock(&oom_lock);
+
 	return ret;
 }
 
@@ -1767,13 +1772,6 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
 	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
 }
 
-enum oom_status {
-	OOM_SUCCESS,
-	OOM_FAILED,
-	OOM_ASYNC,
-	OOM_SKIPPED
-};
-
 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 {
 	enum oom_status ret;
@@ -1821,10 +1819,7 @@ static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int
 		mem_cgroup_oom_notify(memcg);
 
 	mem_cgroup_unmark_under_oom(memcg);
-	if (mem_cgroup_out_of_memory(memcg, mask, order))
-		ret = OOM_SUCCESS;
-	else
-		ret = OOM_FAILED;
+	ret = mem_cgroup_out_of_memory(memcg, mask, order);
 
 	if (locked)
 		mem_cgroup_oom_unlock(memcg);
@@ -6102,7 +6097,8 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
 		}
 
 		memcg_memory_event(memcg, MEMCG_OOM);
-		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
+		if (mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0) !=
+		    OOM_SUCCESS)
 			break;
 	}
 
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index dfc357614e56..d5a941bea2d7 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -1042,18 +1042,18 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
  * OR try to be smart about which process to kill. Note that we
  * don't have to be perfect here, we just have to be good.
  */
-bool out_of_memory(struct oom_control *oc)
+enum oom_status out_of_memory(struct oom_control *oc)
 {
 	unsigned long freed = 0;
 
 	if (oom_killer_disabled)
-		return false;
+		return OOM_FAILED;
 
 	if (!is_memcg_oom(oc)) {
 		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
 		if (freed > 0)
 			/* Got some memory back in the last second. */
-			return true;
+			return OOM_SUCCESS;
 	}
 
 	/*
@@ -1064,7 +1064,7 @@ bool out_of_memory(struct oom_control *oc)
 	if (task_will_free_mem(current)) {
 		mark_oom_victim(current);
 		wake_oom_reaper(current);
-		return true;
+		return OOM_SUCCESS;
 	}
 
 	/*
@@ -1075,7 +1075,7 @@ bool out_of_memory(struct oom_control *oc)
 	 * invoke the OOM killer even if it is a GFP_NOFS allocation.
 	 */
 	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
-		return true;
+		return OOM_SUCCESS;
 
 	/*
 	 * Check if there were limitations on the allocation (only relevant for
@@ -1093,7 +1093,7 @@ bool out_of_memory(struct oom_control *oc)
 		get_task_struct(current);
 		oc->chosen = current;
 		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
-		return true;
+		return OOM_SUCCESS;
 	}
 
 	select_bad_process(oc);
@@ -1112,7 +1112,7 @@ bool out_of_memory(struct oom_control *oc)
 	if (oc->chosen && oc->chosen != (void *)-1UL)
 		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
 				 "Memory cgroup out of memory");
-	return !!oc->chosen;
+	return oc->chosen ? OOM_SUCCESS : OOM_FAILED;
 }
 
 /*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 69827d4fa052..0926117eb921 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3917,7 +3917,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 		goto out;
 
 	/* Exhausted what can be done so it's blame time */
-	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
+	if (out_of_memory(&oc) == OOM_SUCCESS ||
+	    WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
 		*did_some_progress = 1;
 
 		/*
-- 
2.18.2