The patch introduces the shrinker::id number, which is used to enumerate memcg-aware
shrinkers. The numbers start from 0, and the code tries to keep them as small as
possible. The id will be used to represent a memcg-aware shrinker in the memcg
shrinkers map.

Signed-off-by: Kirill Tkhai <ktkhai@xxxxxxxxxxxxx>
---
 include/linux/shrinker.h |  2 ++
 mm/vmscan.c              | 51 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 53 insertions(+)

diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index a3894918a436..86b651fa2846 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -66,6 +66,8 @@ struct shrinker {
 
 	/* These are for internal use */
 	struct list_head list;
+	/* ID in shrinkers_id_idr */
+	int id;
 	/* objs pending delete, per node */
 	atomic_long_t *nr_deferred;
 };
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8b920ce3ae02..4f02fe83537e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -169,6 +169,43 @@ unsigned long vm_total_pages;
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
+static DEFINE_IDR(shrinkers_id_idr);
+
+static int add_memcg_shrinker(struct shrinker *shrinker)
+{
+	int id, ret;
+
+	down_write(&shrinker_rwsem);
+	ret = id = idr_alloc(&shrinkers_id_idr, shrinker, 0, 0, GFP_KERNEL);
+	if (ret < 0)
+		goto unlock;
+	shrinker->id = id;
+	ret = 0;
+unlock:
+	up_write(&shrinker_rwsem);
+	return ret;
+}
+
+static void del_memcg_shrinker(struct shrinker *shrinker)
+{
+	int id = shrinker->id;
+
+	down_write(&shrinker_rwsem);
+	idr_remove(&shrinkers_id_idr, id);
+	up_write(&shrinker_rwsem);
+}
+#else /* CONFIG_MEMCG && !CONFIG_SLOB */
+static int add_memcg_shrinker(struct shrinker *shrinker)
+{
+	return 0;
+}
+
+static void del_memcg_shrinker(struct shrinker *shrinker)
+{
+}
+#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
+
 #ifdef CONFIG_MEMCG
 static bool global_reclaim(struct scan_control *sc)
 {
@@ -306,6 +343,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone
 int register_shrinker(struct shrinker *shrinker)
 {
 	size_t size = sizeof(*shrinker->nr_deferred);
+	int ret;
 
 	if (shrinker->flags & SHRINKER_NUMA_AWARE)
 		size *= nr_node_ids;
@@ -314,10 +352,21 @@ int register_shrinker(struct shrinker *shrinker)
 	if (!shrinker->nr_deferred)
 		return -ENOMEM;
 
+	if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
+		ret = add_memcg_shrinker(shrinker);
+		if (ret)
+			goto free_deferred;
+	}
+
 	down_write(&shrinker_rwsem);
 	list_add_tail(&shrinker->list, &shrinker_list);
 	up_write(&shrinker_rwsem);
 	return 0;
+
+free_deferred:
+	kfree(shrinker->nr_deferred);
+	shrinker->nr_deferred = NULL;
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(register_shrinker);
 
@@ -328,6 +377,8 @@ void unregister_shrinker(struct shrinker *shrinker)
 {
 	if (!shrinker->nr_deferred)
 		return;
+	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
+		del_memcg_shrinker(shrinker);
 	down_write(&shrinker_rwsem);
 	list_del(&shrinker->list);
 	up_write(&shrinker_rwsem);
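
For illustration only (not part of the patch), here is a minimal sketch of a
hypothetical memcg-aware shrinker module; the demo_* names are made up. With
this change applied, register_shrinker() would assign it an id from
shrinkers_id_idr via add_memcg_shrinker(), and unregister_shrinker() would
release the id via del_memcg_shrinker():

#include <linux/module.h>
#include <linux/shrinker.h>

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	/* Report how many objects could be freed (none in this sketch). */
	return 0;
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	/* Nothing to reclaim in this sketch. */
	return SHRINK_STOP;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_MEMCG_AWARE,
};

static int __init demo_init(void)
{
	/* Allocates ->nr_deferred and, being memcg-aware, an id as well. */
	return register_shrinker(&demo_shrinker);
}

static void __exit demo_exit(void)
{
	/* Releases the id before unlinking from shrinker_list. */
	unregister_shrinker(&demo_shrinker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Since idr_alloc() with start/end of 0 hands out the lowest free id, ids stay
dense as shrinkers come and go, which keeps the future per-memcg shrinkers map
small.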