The patch titled
     Subject: mm: memcg/slab: generalize postponed non-root kmem_cache deactivation
has been added to the -mm tree.  Its filename is
     mm-generalize-postponed-non-root-kmem_cache-deactivation.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-generalize-postponed-non-root-kmem_cache-deactivation.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-generalize-postponed-non-root-kmem_cache-deactivation.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Roman Gushchin <guro@xxxxxx>
Subject: mm: memcg/slab: generalize postponed non-root kmem_cache deactivation

Currently SLUB uses a work scheduled after an RCU grace period to
deactivate a non-root kmem_cache.  This mechanism can be reused for
releasing kmem_caches, but it requires generalization for the SLAB case.

Introduce a kmemcg_cache_deactivate() function, which calls the
allocator-specific __kmemcg_cache_deactivate() and schedules execution of
__kmemcg_cache_deactivate_after_rcu(), with all necessary locks held, in
a worker context after an RCU grace period.

Here is the new calling scheme:

kmemcg_cache_deactivate()
  __kmemcg_cache_deactivate()                  SLAB/SLUB-specific
  kmemcg_rcufn()                               rcu
    kmemcg_workfn()                            work
      __kmemcg_cache_deactivate_after_rcu()    SLAB/SLUB-specific

instead of:

__kmemcg_cache_deactivate()                    SLAB/SLUB-specific
  slab_deactivate_memcg_cache_rcu_sched()      SLUB-only
    kmemcg_rcufn()                             rcu
      kmemcg_workfn()                          work
        kmemcg_cache_deact_after_rcu()         SLUB-only

For consistency, all allocator-specific functions start with "__".
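In code, the new scheme reduces to a single entry point.  The following
is a condensed sketch of the mm/slab_common.c hunk in the diff below
(normal kernel context is assumed and the WARN_ON_ONCE sanity checks are
elided; see the full patch for the authoritative version):

static void kmemcg_cache_deactivate(struct kmem_cache *s)
{
	/* allocator-specific (SLAB or SLUB) synchronous part */
	__kmemcg_cache_deactivate(s);

	/* no point in deferring work if the root cache is going away */
	if (s->memcg_params.root_cache->memcg_params.dying)
		return;

	/* pin the memcg so @s doesn't get destroyed in the middle */
	css_get(&s->memcg_params.memcg->css);

	/*
	 * After an RCU grace period, kmemcg_rcufn() queues kmemcg_workfn(),
	 * which invokes the allocator-specific
	 * __kmemcg_cache_deactivate_after_rcu() with cpus, mems and
	 * slab_mutex held, and then drops the css reference taken above.
	 */
	s->memcg_params.work_fn = __kmemcg_cache_deactivate_after_rcu;
	call_rcu(&s->memcg_params.rcu_head, kmemcg_rcufn);
}

As the comment being removed from mm/slub.c notes, s->cpu_partial is
checked locklessly (see put_cpu_partial()), so the grace period is what
guarantees the deactivation is visible before the deferred shrink runs.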
Link: http://lkml.kernel.org/r/20190611231813.3148843-4-guro@xxxxxx
Signed-off-by: Roman Gushchin <guro@xxxxxx>
Acked-by: Vladimir Davydov <vdavydov.dev@xxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Shakeel Butt <shakeelb@xxxxxxxxxx>
Cc: Waiman Long <longman@xxxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/slab.c        |    4 ++++
 mm/slab.h        |    3 +--
 mm/slab_common.c |   27 ++++++++-------------------
 mm/slub.c        |    8 +-------
 4 files changed, 14 insertions(+), 28 deletions(-)

--- a/mm/slab.c~mm-generalize-postponed-non-root-kmem_cache-deactivation
+++ a/mm/slab.c
@@ -2252,6 +2252,10 @@ void __kmemcg_cache_deactivate(struct km
 {
 	__kmem_cache_shrink(cachep);
 }
+
+void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
+{
+}
 #endif
 
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
--- a/mm/slab_common.c~mm-generalize-postponed-non-root-kmem_cache-deactivation
+++ a/mm/slab_common.c
@@ -708,7 +708,7 @@ static void kmemcg_workfn(struct work_st
 	put_online_mems();
 	put_online_cpus();
 
-	/* done, put the ref from slab_deactivate_memcg_cache_rcu_sched() */
+	/* done, put the ref from kmemcg_cache_deactivate() */
 	css_put(&s->memcg_params.memcg->css);
 }
 
@@ -726,31 +726,21 @@ static void kmemcg_rcufn(struct rcu_head
 	queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);
 }
 
-/**
- * slab_deactivate_memcg_cache_rcu_sched - schedule deactivation after a
- *					   sched RCU grace period
- * @s: target kmem_cache
- * @work_fn: deactivation function to call
- *
- * Schedule @work_fn to be invoked with online cpus, mems and slab_mutex
- * held after a sched RCU grace period.  The slab is guaranteed to stay
- * alive until @work_fn is finished.  This is to be used from
- * __kmemcg_cache_deactivate().
- */
-void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
-					   void (*work_fn)(struct kmem_cache *))
+static void kmemcg_cache_deactivate(struct kmem_cache *s)
 {
 	if (WARN_ON_ONCE(is_root_cache(s)) ||
 	    WARN_ON_ONCE(s->memcg_params.work_fn))
 		return;
 
+	__kmemcg_cache_deactivate(s);
+
 	if (s->memcg_params.root_cache->memcg_params.dying)
 		return;
 
 	/* pin memcg so that @s doesn't get destroyed in the middle */
 	css_get(&s->memcg_params.memcg->css);
 
-	s->memcg_params.work_fn = work_fn;
+	s->memcg_params.work_fn = __kmemcg_cache_deactivate_after_rcu;
 	call_rcu(&s->memcg_params.rcu_head, kmemcg_rcufn);
 }
 
@@ -773,7 +763,7 @@ void memcg_deactivate_kmem_caches(struct
 		if (!c)
 			continue;
 
-		__kmemcg_cache_deactivate(c);
+		kmemcg_cache_deactivate(c);
 		arr->entries[idx] = NULL;
 	}
 	mutex_unlock(&slab_mutex);
@@ -866,11 +856,10 @@ static void flush_memcg_workqueue(struct
 	mutex_unlock(&slab_mutex);
 
 	/*
-	 * SLUB deactivates the kmem_caches through call_rcu. Make
+	 * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make
 	 * sure all registered rcu callbacks have been invoked.
 	 */
-	if (IS_ENABLED(CONFIG_SLUB))
-		rcu_barrier();
+	rcu_barrier();
 
 	/*
 	 * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB
--- a/mm/slab.h~mm-generalize-postponed-non-root-kmem_cache-deactivation
+++ a/mm/slab.h
@@ -172,6 +172,7 @@ int __kmem_cache_shutdown(struct kmem_ca
 void __kmem_cache_release(struct kmem_cache *);
 int __kmem_cache_shrink(struct kmem_cache *);
 void __kmemcg_cache_deactivate(struct kmem_cache *s);
+void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
 void slab_kmem_cache_release(struct kmem_cache *);
 
 struct seq_file;
@@ -290,8 +291,6 @@ static __always_inline void memcg_unchar
 
 extern void slab_init_memcg_params(struct kmem_cache *);
 extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);
-extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
-						  void (*work_fn)(struct kmem_cache *));
 
 #else /* CONFIG_MEMCG_KMEM */
 
--- a/mm/slub.c~mm-generalize-postponed-non-root-kmem_cache-deactivation
+++ a/mm/slub.c
@@ -4022,7 +4022,7 @@ int __kmem_cache_shrink(struct kmem_cach
 }
 
 #ifdef CONFIG_MEMCG
-static void kmemcg_cache_deact_after_rcu(struct kmem_cache *s)
+void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
 {
 	/*
 	 * Called with all the locks held after a sched RCU grace period.
@@ -4048,12 +4048,6 @@ void __kmemcg_cache_deactivate(struct km
 	 */
 	slub_set_cpu_partial(s, 0);
 	s->min_partial = 0;
-
-	/*
-	 * s->cpu_partial is checked locklessly (see put_cpu_partial), so
-	 * we have to make sure the change is visible before shrinking.
-	 */
-	slab_deactivate_memcg_cache_rcu_sched(s, kmemcg_cache_deact_after_rcu);
 }
 #endif	/* CONFIG_MEMCG */
 
_

Patches currently in -mm which might be from guro@xxxxxx are

mm-postpone-kmem_cache-memcg-pointer-initialization-to-memcg_link_cache.patch
mm-rename-slab-delayed-deactivation-functions-and-fields.patch
mm-generalize-postponed-non-root-kmem_cache-deactivation.patch
mm-introduce-__memcg_kmem_uncharge_memcg.patch
mm-unify-slab-and-slub-page-accounting.patch
mm-dont-check-the-dying-flag-on-kmem_cache-creation.patch
mm-synchronize-access-to-kmem_cache-dying-flag-using-a-spinlock.patch
mm-rework-non-root-kmem_cache-lifecycle-management.patch
mm-stop-setting-page-mem_cgroup-pointer-for-slab-pages.patch
mm-reparent-memcg-kmem_caches-on-cgroup-removal.patch