Now nr_deferred is available on per memcg level for memcg aware shrinkers,
so we don't need to allocate shrinker->nr_deferred for such shrinkers
anymore.

Signed-off-by: Yang Shi <shy828301@xxxxxxxxx>
---
 mm/vmscan.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index bce8cf44eca2..8d5bfd818acd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -420,7 +420,15 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone
  */
 int prealloc_shrinker(struct shrinker *shrinker)
 {
-	unsigned int size = sizeof(*shrinker->nr_deferred);
+	unsigned int size;
+
+	if (is_deferred_memcg_aware(shrinker)) {
+		if (prealloc_memcg_shrinker(shrinker))
+			return -ENOMEM;
+		return 0;
+	}
+
+	size = sizeof(*shrinker->nr_deferred);
 
 	if (shrinker->flags & SHRINKER_NUMA_AWARE)
 		size *= nr_node_ids;
@@ -429,26 +437,18 @@ int prealloc_shrinker(struct shrinker *shrinker)
 	if (!shrinker->nr_deferred)
 		return -ENOMEM;
 
-	if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
-		if (prealloc_memcg_shrinker(shrinker))
-			goto free_deferred;
-	}
-
 	return 0;
-
-free_deferred:
-	kfree(shrinker->nr_deferred);
-	shrinker->nr_deferred = NULL;
-	return -ENOMEM;
 }
 
 void free_prealloced_shrinker(struct shrinker *shrinker)
 {
-	if (!shrinker->nr_deferred)
+	if (is_deferred_memcg_aware(shrinker)) {
+		unregister_memcg_shrinker(shrinker);
 		return;
+	}
 
-	if (shrinker->flags & SHRINKER_MEMCG_AWARE)
-		unregister_memcg_shrinker(shrinker);
+	if (!shrinker->nr_deferred)
+		return;
 
 	kfree(shrinker->nr_deferred);
 	shrinker->nr_deferred = NULL;
-- 
2.26.2
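
Note on context: the hunks above call is_deferred_memcg_aware(), which is
not defined in this patch and is assumed to come from an earlier patch in
the series. A minimal sketch of what such a helper could look like,
assuming only struct shrinker and SHRINKER_MEMCG_AWARE from
<linux/shrinker.h> (hypothetical, for illustration only):

/*
 * Hypothetical sketch, not part of this patch: a predicate telling
 * whether a shrinker tracks its deferred work in the per-memcg
 * counters (registered via prealloc_memcg_shrinker()) rather than
 * in its own nr_deferred array.
 */
static bool is_deferred_memcg_aware(struct shrinker *shrinker)
{
	return shrinker->flags & SHRINKER_MEMCG_AWARE;
}

With a helper along these lines, prealloc_shrinker() returns early for
memcg aware shrinkers and free_prealloced_shrinker() only undoes
whichever of the two setups was actually performed, which is what allows
the free_deferred error path to be dropped.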