Sometimes we use memory.force_empty to drop pages in a memcg to work around some memory pressure issues. When we use force_empty, we want the pages to be reclaimed ASAP; however, force_empty reclaims pages as a regular reclaimer which scans the page cache LRUs from DEF_PRIORITY priority and finally it will drop to 0 to do a full scan. That is a waste of time; we'd better do a full scan initially in force_empty. Signed-off-by: Yafang Shao <laoar.shao@xxxxxxxxx> --- include/linux/swap.h | 3 ++- mm/memcontrol.c | 16 ++++++++++------ mm/vmscan.c | 5 +++-- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index 5b3216ba39a9..d88430f1b964 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -364,7 +364,8 @@ extern int __isolate_lru_page(struct page *page, isolate_mode_t mode); extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, unsigned long nr_pages, gfp_t gfp_mask, - bool may_swap); + bool may_swap, + int priority); extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem, gfp_t gfp_mask, bool noswap, pg_data_t *pgdat, diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 13f559af1ab6..c873a98f8c7e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2237,7 +2237,8 @@ static void reclaim_high(struct mem_cgroup *memcg, READ_ONCE(memcg->memory.high)) continue; memcg_memory_event(memcg, MEMCG_HIGH); - try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true); + try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true, + DEF_PRIORITY); } while ((memcg = parent_mem_cgroup(memcg)) && !mem_cgroup_is_root(memcg)); } @@ -2515,7 +2516,8 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, memcg_memory_event(mem_over_limit, MEMCG_MAX); nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, - gfp_mask, may_swap); + gfp_mask, may_swap, + DEF_PRIORITY); if (mem_cgroup_margin(mem_over_limit) >= nr_pages) goto retry; @@ -3089,7 +3091,8 @@ static int 
mem_cgroup_resize_max(struct mem_cgroup *memcg, } if (!try_to_free_mem_cgroup_pages(memcg, 1, - GFP_KERNEL, !memsw)) { + GFP_KERNEL, !memsw, + DEF_PRIORITY)) { ret = -EBUSY; break; } @@ -3222,7 +3225,8 @@ static int mem_cgroup_force_empty(struct mem_cgroup *memcg) return -EINTR; progress = try_to_free_mem_cgroup_pages(memcg, 1, - GFP_KERNEL, true); + GFP_KERNEL, true, + 0); if (!progress) { nr_retries--; /* maybe some writeback is necessary */ @@ -6065,7 +6069,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of, } reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, - GFP_KERNEL, true); + GFP_KERNEL, true, DEF_PRIORITY); if (!reclaimed && !nr_retries--) break; @@ -6113,7 +6117,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of, if (nr_reclaims) { if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, - GFP_KERNEL, true)) + GFP_KERNEL, true, DEF_PRIORITY)) nr_reclaims--; continue; } diff --git a/mm/vmscan.c b/mm/vmscan.c index 749d239c62b2..49298bb2892d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3315,7 +3315,8 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, unsigned long nr_pages, gfp_t gfp_mask, - bool may_swap) + bool may_swap, + int priority) { unsigned long nr_reclaimed; unsigned long pflags; @@ -3326,7 +3327,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), .reclaim_idx = MAX_NR_ZONES - 1, .target_mem_cgroup = memcg, - .priority = DEF_PRIORITY, + .priority = priority, .may_writepage = !laptop_mode, .may_unmap = 1, .may_swap = may_swap, -- 2.18.1