Introduce the memcg_set_shrinker_bit() function, which sets the
shrinker-related bit in a memcg's shrinker bitmap. The bit is set when
the first item is added to a list_lru, and when a destroyed memcg's
items are reparented. This will allow the next patch to call only those
shrinkers that have charged objects at the moment, improving
shrink_slab() performance.

Signed-off-by: Kirill Tkhai <ktkhai@xxxxxxxxxxxxx>
---
 include/linux/memcontrol.h | 14 ++++++++++++++
 mm/list_lru.c              | 22 ++++++++++++++++++++--
 2 files changed, 34 insertions(+), 2 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e51c6e953d7a..7ae1b94becf3 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1275,6 +1275,18 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
 
 extern int memcg_expand_shrinker_maps(int new_id);
 
+static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
+					  int nid, int shrinker_id)
+{
+	if (shrinker_id >= 0 && memcg && memcg != root_mem_cgroup) {
+		struct memcg_shrinker_map *map;
+
+		rcu_read_lock();
+		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
+		set_bit(shrinker_id, map->map);
+		rcu_read_unlock();
+	}
+}
 #else
 #define for_each_memcg_cache_index(_idx)	\
 	for (; NULL; )
@@ -1297,6 +1309,8 @@ static inline void memcg_put_cache_ids(void)
 {
 }
 
+static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
+					  int nid, int shrinker_id) { }
 #endif /* CONFIG_MEMCG_KMEM */
 
 #endif /* _LINUX_MEMCONTROL_H */
diff --git a/mm/list_lru.c b/mm/list_lru.c
index cab8fad7f7e2..7df71ab0de1c 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -31,6 +31,11 @@ static void list_lru_unregister(struct list_lru *lru)
 	mutex_unlock(&list_lrus_mutex);
 }
 
+static int lru_shrinker_id(struct list_lru *lru)
+{
+	return lru->shrinker_id;
+}
+
 static inline bool list_lru_memcg_aware(struct list_lru *lru)
 {
 	/*
@@ -94,6 +99,11 @@ static void list_lru_unregister(struct list_lru *lru)
 {
 }
 
+static int lru_shrinker_id(struct list_lru *lru)
+{
+	return -1;
+}
+
 static inline bool list_lru_memcg_aware(struct list_lru *lru)
 {
 	return false;
@@ -119,13 +129,17 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
 {
 	int nid = page_to_nid(virt_to_page(item));
 	struct list_lru_node *nlru = &lru->node[nid];
+	struct mem_cgroup *memcg;
 	struct list_lru_one *l;
 
 	spin_lock(&nlru->lock);
 	if (list_empty(item)) {
-		l = list_lru_from_kmem(nlru, item, NULL);
+		l = list_lru_from_kmem(nlru, item, &memcg);
 		list_add_tail(item, &l->list);
-		l->nr_items++;
+		/* Set shrinker bit if the first element was added */
+		if (!l->nr_items++)
+			memcg_set_shrinker_bit(memcg, nid,
+					       lru_shrinker_id(lru));
 		nlru->nr_items++;
 		spin_unlock(&nlru->lock);
 		return true;
@@ -520,6 +534,7 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
 	struct list_lru_node *nlru = &lru->node[nid];
 	int dst_idx = dst_memcg->kmemcg_id;
 	struct list_lru_one *src, *dst;
+	bool set;
 
 	/*
 	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
@@ -531,7 +546,10 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
 	dst = list_lru_from_memcg_idx(nlru, dst_idx);
 
 	list_splice_init(&src->list, &dst->list);
+	set = (!dst->nr_items && src->nr_items);
 	dst->nr_items += src->nr_items;
+	if (set)
+		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
 	src->nr_items = 0;
 
 	spin_unlock_irq(&nlru->lock);
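
For context, a minimal sketch of how a consumer like the next patch
could use these bits. This is not part of this patch: it assumes the
shrinker_nr_max counter grown by memcg_expand_shrinker_maps(), and
shrink_one() is a hypothetical placeholder for invoking shrinker i:

	/*
	 * Sketch only: walk just the shrinkers whose bit is set for this
	 * memcg/node, instead of iterating every registered shrinker.
	 */
	static void walk_memcg_shrinkers(struct mem_cgroup *memcg, int nid)
	{
		struct memcg_shrinker_map *map;
		int i;

		rcu_read_lock();
		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
		for_each_set_bit(i, map->map, shrinker_nr_max)
			shrink_one(i, memcg, nid); /* hypothetical helper */
		rcu_read_unlock();
	}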