When reaching shrink_slab, we should descend into the children of the
target memcg, searching for objects that could be shrunk. This is true
even if the memcg does not have kmem limits enabled, since charges to a
child's kmem res_counter are also billed against the user res_counter
of the parent. It is possible that we will free objects without freeing
any pages, which would only hurt the child groups without helping the
parent group at all. But at this point, we are prepared to pay that
price.

Signed-off-by: Glauber Costa <glommer@xxxxxxxxxx>
Cc: Dave Chinner <dchinner@xxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxx>
Cc: Rik van Riel <riel@xxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@xxxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
 include/linux/memcontrol.h |  6 ++++
 mm/memcontrol.c            | 13 +++++++++
 mm/vmscan.c                | 70 +++++++++++++++++++++++++++++++++++-----------
 3 files changed, 72 insertions(+), 17 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 3eeece8..c8b1412 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -441,6 +441,7 @@ static inline bool memcg_kmem_enabled(void)
 	return static_key_false(&memcg_kmem_enabled_key);
 }
 
+bool memcg_kmem_should_reclaim(struct mem_cgroup *memcg);
 bool memcg_kmem_is_active(struct mem_cgroup *memcg);
 
 /*
@@ -585,6 +586,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 }
 
 #else
+static inline bool memcg_kmem_should_reclaim(struct mem_cgroup *memcg)
+{
+	return false;
+}
+
 static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
 {
 	return false;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c563550..b8980d1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3043,6 +3043,19 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 }
 
 #ifdef CONFIG_MEMCG_KMEM
+bool memcg_kmem_should_reclaim(struct mem_cgroup *memcg)
+{
+	struct mem_cgroup *iter;
+
+	for_each_mem_cgroup_tree(iter, memcg) {
+		if (memcg_kmem_is_active(iter)) {
+			mem_cgroup_iter_break(memcg, iter);
+			return true;
+		}
+	}
+	return false;
+}
+
 static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
 {
 	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) &&
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eae8304..3c67f36 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -148,7 +148,7 @@ static bool global_reclaim(struct scan_control *sc)
 static bool has_kmem_reclaim(struct scan_control *sc)
 {
 	return !sc->target_mem_cgroup ||
-		memcg_kmem_is_active(sc->target_mem_cgroup);
+		memcg_kmem_should_reclaim(sc->target_mem_cgroup);
 }
 
 static unsigned long
@@ -346,12 +346,39 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
  *
  * Returns the number of slab objects which we shrunk.
 */
+static unsigned long
+shrink_slab_one(struct shrink_control *shrinkctl, struct shrinker *shrinker,
+		unsigned long nr_pages_scanned, unsigned long lru_pages)
+{
+	unsigned long freed = 0;
+
+	if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) {
+		shrinkctl->nid = 0;
+
+		return shrink_slab_node(shrinkctl, shrinker,
+				nr_pages_scanned, lru_pages,
+				&shrinker->nr_deferred);
+	}
+
+	for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) {
+		if (!node_online(shrinkctl->nid))
+			continue;
+
+		freed += shrink_slab_node(shrinkctl, shrinker,
+				nr_pages_scanned, lru_pages,
+				&shrinker->nr_deferred_node[shrinkctl->nid]);
+	}
+
+	return freed;
+}
+
 unsigned long shrink_slab(struct shrink_control *shrinkctl,
 			  unsigned long nr_pages_scanned,
 			  unsigned long lru_pages)
 {
 	struct shrinker *shrinker;
 	unsigned long freed = 0;
+	struct mem_cgroup *root = shrinkctl->target_mem_cgroup;
 
 	if (nr_pages_scanned == 0)
 		nr_pages_scanned = SWAP_CLUSTER_MAX;
@@ -363,6 +390,9 @@ unsigned long shrink_slab(struct shrink_control *shrinkctl,
 	}
 
 	list_for_each_entry(shrinker, &shrinker_list, list) {
+		struct mem_cgroup *memcg;
+
+
 		/*
 		 * If we don't have a target mem cgroup, we scan them all.
 		 * Otherwise we will limit our scan to shrinkers marked as
@@ -372,23 +402,29 @@ unsigned long shrink_slab(struct shrink_control *shrinkctl,
 		    !(shrinker->flags & SHRINKER_MEMCG_AWARE))
 			continue;
 
-		if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) {
-			shrinkctl->nid = 0;
-
-			freed += shrink_slab_node(shrinkctl, shrinker,
-					nr_pages_scanned, lru_pages,
-					&shrinker->nr_deferred);
-			continue;
-		}
-
-		for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) {
-			if (!node_online(shrinkctl->nid))
-				continue;
+		/*
+		 * In a hierarchical chain, it might be that not all
+		 * memcgs are kmem active. kmemcg design mandates that
+		 * when one memcg is active, its children will be
+		 * active as well. But it is perfectly possible that
+		 * its parent is not.
+		 *
+		 * We also need to make sure we scan at least once, for
+		 * the global case. So if we don't have a target memcg
+		 * (saved in root), we proceed normally and expect to
+		 * break in the next round.
+		 */
+		memcg = mem_cgroup_iter(root, NULL, NULL);
+		do {
+			if (!root || memcg_kmem_is_active(memcg))
+				freed += shrink_slab_one(shrinkctl, shrinker,
+					nr_pages_scanned, lru_pages);
+			memcg = mem_cgroup_iter(root, memcg, NULL);
+			shrinkctl->target_mem_cgroup = memcg;
+		} while (memcg);
 
-			freed += shrink_slab_node(shrinkctl, shrinker,
-					nr_pages_scanned, lru_pages,
-					&shrinker->nr_deferred_node[shrinkctl->nid]);
-		}
+		/* restore original state */
+		shrinkctl->target_mem_cgroup = root;
 	}
 	up_read(&shrinker_rwsem);
 out:
-- 
1.8.1.4
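
P.S., a note for reviewers, to make the opt-in side concrete: below is a
minimal sketch (not part of this patch) of a shrinker that participates in
both the per-node and the per-memcg walks above. The cache itself
(my_cache_objects and the two callbacks) is entirely made up for
illustration; count_objects/scan_objects and register_shrinker are the
interfaces from the NUMA-aware shrinker work this series builds on, and
SHRINKER_MEMCG_AWARE is the flag shrink_slab() checks in this series.

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>

/* Hypothetical per-node object counter standing in for a real cache. */
static atomic_long_t my_cache_objects[MAX_NUMNODES];

static unsigned long my_cache_count(struct shrinker *s,
				    struct shrink_control *sc)
{
	/* Report only the objects sitting on the node being scanned. */
	return atomic_long_read(&my_cache_objects[sc->nid]);
}

static unsigned long my_cache_scan(struct shrinker *s,
				   struct shrink_control *sc)
{
	/*
	 * A real cache would walk its LRU and free objects here; the
	 * sketch just drops the counter and reports what it "freed".
	 */
	unsigned long nr = min_t(unsigned long, sc->nr_to_scan,
			atomic_long_read(&my_cache_objects[sc->nid]));

	atomic_long_sub(nr, &my_cache_objects[sc->nid]);
	return nr;
}

static struct shrinker my_cache_shrinker = {
	.count_objects	= my_cache_count,
	.scan_objects	= my_cache_scan,
	.seeks		= DEFAULT_SEEKS,
	/* Opt in to both the per-node and the per-memcg iteration. */
	.flags		= SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

static int __init my_cache_init(void)
{
	register_shrinker(&my_cache_shrinker);
	return 0;
}
module_init(my_cache_init);
MODULE_LICENSE("GPL");

With both flags set, the shrink_slab() loop above invokes the callbacks
once per online node in shrinkctl->nodes_to_scan for every kmem-active
memcg in the target hierarchy (or just once per node in the global case).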