The rotate_reclaimable_page() function moves pages that have just been written out, and which the VM wanted to reclaim, to the tail of the inactive list. That way the VM will find those pages first the next time it needs to free memory.

This patch applies the same rule to memcg: when such a page is rotated on the global LRU, it is also moved to the tail of its memcg's per-zone inactive list. This helps prevent unnecessary eviction of a memcg's working-set pages.

Acked-by: Balbir Singh <balbir@xxxxxxxxxxxxxxxxxx>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@xxxxxxxxxxxxxx>
Reviewed-by: Rik van Riel <riel@xxxxxxxxxx>
Cc: KOSAKI Motohiro <kosaki.motohiro@xxxxxxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Signed-off-by: Minchan Kim <minchan.kim@xxxxxxxxx>
---
Changelog since v4:
 - add acked-by and reviewed-by
 - change description (suggested by Rik)

 include/linux/memcontrol.h |    6 ++++++
 mm/memcontrol.c            |   27 +++++++++++++++++++++++++++
 mm/swap.c                  |    3 ++-
 3 files changed, 35 insertions(+), 1 deletions(-)
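Note for reviewers: the new memcg hook is exercised through the existing writeback rotation path. Roughly, end_page_writeback() calls rotate_reclaimable_page() for pages with PG_reclaim set; those pages are batched into the per-cpu lru_rotate_pvecs pagevec and eventually drained through pagevec_move_tail(), where the memcg rotation added below now runs as well. The sketch of rotate_reclaimable_page() here is reproduced from memory of mm/swap.c around this kernel version, is not part of this patch, and may differ in detail from the tree it applies to:

/*
 * Existing caller path (not part of this patch):
 *
 *   end_page_writeback()
 *     -> rotate_reclaimable_page(page)            page had PG_reclaim set
 *          -> pagevec_move_tail(pvec)             per-cpu lru_rotate_pvecs drain
 *               -> list_move_tail() on the global zone LRU
 *               -> mem_cgroup_rotate_reclaimable_page(page)   <- added below
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}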
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 3da48ae..5a5ce70 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -62,6 +62,7 @@ extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
 extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
 extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
+extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
 extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
 extern void mem_cgroup_del_lru(struct page *page);
 extern void mem_cgroup_move_lists(struct page *page,
@@ -215,6 +216,11 @@ static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
 	return ;
 }
 
+static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
+{
+	return ;
+}
+
 static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
 {
 	return ;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 686f1ce..ab8bdff 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -813,6 +813,33 @@ void mem_cgroup_del_lru(struct page *page)
 	mem_cgroup_del_lru_list(page, page_lru(page));
 }
 
+/*
+ * Writeback is about to end against a page which has been marked for immediate
+ * reclaim.  If it still appears to be reclaimable, move it to the tail of the
+ * inactive list.
+ */
+void mem_cgroup_rotate_reclaimable_page(struct page *page)
+{
+	struct mem_cgroup_per_zone *mz;
+	struct page_cgroup *pc;
+	enum lru_list lru = page_lru_base_type(page);
+
+	if (mem_cgroup_disabled())
+		return;
+
+	pc = lookup_page_cgroup(page);
+	/*
+	 * Used bit is set without atomic ops but after smp_wmb().
+	 * For making pc->mem_cgroup visible, insert smp_rmb() here.
+	 */
+	smp_rmb();
+	/* unused or root page is not rotated. */
+	if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
+		return;
+	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
+	list_move_tail(&pc->lru, &mz->lists[lru]);
+}
+
 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
 {
 	struct mem_cgroup_per_zone *mz;
diff --git a/mm/swap.c b/mm/swap.c
index 4aea806..1b9e4eb 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -200,8 +200,9 @@ static void pagevec_move_tail(struct pagevec *pvec)
 			spin_lock(&zone->lru_lock);
 		}
 		if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-			int lru = page_lru_base_type(page);
+			enum lru_list lru = page_lru_base_type(page);
 			list_move_tail(&page->lru, &zone->lru[lru].list);
+			mem_cgroup_rotate_reclaimable_page(page);
 			pgmoved++;
 		}
 	}
-- 
1.7.1