The patch titled
     Subject: mm/memcg: move cgroup high memory limit setting into struct page_counter
has been added to the -mm tree.  Its filename is
     mm-move-cgroup-high-memory-limit-setting-into-struct-page_counter.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-move-cgroup-high-memory-limit-setting-into-struct-page_counter.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-move-cgroup-high-memory-limit-setting-into-struct-page_counter.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Jakub Kicinski <kuba@xxxxxxxxxx>
Subject: mm/memcg: move cgroup high memory limit setting into struct page_counter

The high memory limit is currently recorded directly in struct
mem_cgroup.  We are about to add a high limit for swap, so move the
field to struct page_counter and add some helpers.

Link: http://lkml.kernel.org/r/20200527195846.102707-4-kuba@xxxxxxxxxx
Signed-off-by: Jakub Kicinski <kuba@xxxxxxxxxx>
Reviewed-by: Shakeel Butt <shakeelb@xxxxxxxxxx>
Acked-by: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Chris Down <chris@xxxxxxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/memcontrol.h   |    3 ---
 include/linux/page_counter.h |    8 ++++++++
 mm/memcontrol.c              |   19 +++++++++++--------
 3 files changed, 19 insertions(+), 11 deletions(-)

--- a/include/linux/memcontrol.h~mm-move-cgroup-high-memory-limit-setting-into-struct-page_counter
+++ a/include/linux/memcontrol.h
@@ -215,9 +215,6 @@ struct mem_cgroup {
 	struct page_counter kmem;
 	struct page_counter tcpmem;
 
-	/* Upper bound of normal memory consumption range */
-	unsigned long high;
-
 	/* Range enforcement for interrupt charges */
 	struct work_struct high_work;
 
--- a/include/linux/page_counter.h~mm-move-cgroup-high-memory-limit-setting-into-struct-page_counter
+++ a/include/linux/page_counter.h
@@ -10,6 +10,7 @@ struct page_counter {
 	atomic_long_t usage;
 	unsigned long min;
 	unsigned long low;
+	unsigned long high;
 	unsigned long max;
 	struct page_counter *parent;
@@ -55,6 +56,13 @@ bool page_counter_try_charge(struct page
 void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
 void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
 void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
+
+static inline void page_counter_set_high(struct page_counter *counter,
+					 unsigned long nr_pages)
+{
+	WRITE_ONCE(counter->high, nr_pages);
+}
+
 int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
 int page_counter_memparse(const char *buf, const char *max,
 			  unsigned long *nr_pages);
--- a/mm/memcontrol.c~mm-move-cgroup-high-memory-limit-setting-into-struct-page_counter
+++ a/mm/memcontrol.c
@@ -2252,7 +2252,8 @@ static void reclaim_high(struct mem_cgro
 			 gfp_t gfp_mask)
 {
 	do {
-		if (page_counter_read(&memcg->memory) <= READ_ONCE(memcg->high))
+		if (page_counter_read(&memcg->memory) <=
+		    READ_ONCE(memcg->memory.high))
 			continue;
 		memcg_memory_event(memcg, MEMCG_HIGH);
 		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
@@ -2345,7 +2346,7 @@ static u64 mem_find_max_overage(struct m
 	do {
 		overage = calculate_overage(page_counter_read(&memcg->memory),
-					    READ_ONCE(memcg->high));
+					    READ_ONCE(memcg->memory.high));
 		max_overage = max(overage, max_overage);
 	} while ((memcg = parent_mem_cgroup(memcg)) &&
 		 !mem_cgroup_is_root(memcg));
@@ -2604,7 +2605,8 @@ done_restock:
 	 * reclaim, the cost of mismatch is negligible.
 	 */
 	do {
-		if (page_counter_read(&memcg->memory) > READ_ONCE(memcg->high)) {
+		if (page_counter_read(&memcg->memory) >
+		    READ_ONCE(memcg->memory.high)) {
 			/* Don't bother a random interrupted task */
 			if (in_interrupt()) {
 				schedule_work(&memcg->high_work);
@@ -4349,7 +4351,7 @@ void mem_cgroup_wb_stats(struct bdi_writ
 	while ((parent = parent_mem_cgroup(memcg))) {
 		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
-					    READ_ONCE(memcg->high));
+					    READ_ONCE(memcg->memory.high));
 		unsigned long used = page_counter_read(&memcg->memory);
 
 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
@@ -5074,7 +5076,7 @@ mem_cgroup_css_alloc(struct cgroup_subsy
 	if (IS_ERR(memcg))
 		return ERR_CAST(memcg);
 
-	WRITE_ONCE(memcg->high, PAGE_COUNTER_MAX);
+	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
 	memcg->soft_limit = PAGE_COUNTER_MAX;
 	if (parent) {
 		memcg->swappiness = mem_cgroup_swappiness(parent);
@@ -5227,7 +5229,7 @@ static void mem_cgroup_css_reset(struct
 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
 	page_counter_set_min(&memcg->memory, 0);
 	page_counter_set_low(&memcg->memory, 0);
-	WRITE_ONCE(memcg->high, PAGE_COUNTER_MAX);
+	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
 	memcg->soft_limit = PAGE_COUNTER_MAX;
 	memcg_wb_domain_size_changed(memcg);
 }
@@ -6026,7 +6028,8 @@ static ssize_t memory_low_write(struct k
 
 static int memory_high_show(struct seq_file *m, void *v)
 {
-	return seq_puts_memcg_tunable(m, READ_ONCE(mem_cgroup_from_seq(m)->high));
+	return seq_puts_memcg_tunable(m,
+		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
 }
 
 static ssize_t memory_high_write(struct kernfs_open_file *of,
@@ -6043,7 +6046,7 @@ static ssize_t memory_high_write(struct
 	if (err)
 		return err;
 
-	WRITE_ONCE(memcg->high, high);
+	page_counter_set_high(&memcg->memory, high);
 
 	for (;;) {
 		unsigned long nr_pages = page_counter_read(&memcg->memory);
_

Patches currently in -mm which might be from kuba@xxxxxxxxxx are

mm-prepare-for-swap-over-high-accounting-and-penalty-calculation.patch
mm-move-penalty-delay-clamping-out-of-calculate_high_delay.patch
mm-move-cgroup-high-memory-limit-setting-into-struct-page_counter.patch
mm-automatically-penalize-tasks-with-high-swap-use.patch
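
[Editor's sketch, not part of the patch]  For readers skimming the diff,
the net effect of the new helper can be modelled in a few lines of
ordinary C.  This is only an illustrative userspace sketch: the struct
below is a cut-down stand-in for the kernel's struct page_counter, a
plain assignment stands in for WRITE_ONCE(), and main() exists only to
exercise the setter.

/* Illustration only -- not kernel code.  Models the "high" threshold
 * living in the counter alongside min/low/max, with a single setter
 * that every writer (css_alloc, css_reset, memory.high writes) uses.
 */
#include <stdio.h>

struct page_counter {
	unsigned long min;
	unsigned long low;
	unsigned long high;	/* field moved here by this patch */
	unsigned long max;
};

static inline void page_counter_set_high(struct page_counter *counter,
					 unsigned long nr_pages)
{
	counter->high = nr_pages;	/* kernel: WRITE_ONCE(counter->high, nr_pages) */
}

int main(void)
{
	struct page_counter memory = { 0 };

	/* e.g. a memory.high write of 4M with 4 KiB pages = 1024 pages */
	page_counter_set_high(&memory, 1024);
	printf("high limit: %lu pages\n", memory.high);
	return 0;
}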