Currently the memcg_params.dying flag and the corresponding workqueue used
for the asynchronous deactivation of kmem_caches are synchronized using the
slab_mutex. This makes it impossible to check the flag from irq context,
which will be required in order to implement asynchronous release of
kmem_caches. So let's switch over to irq-safe, spinlock-based
synchronization.

Signed-off-by: Roman Gushchin <guro@xxxxxx>
---
 mm/slab_common.c | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)

diff --git a/mm/slab_common.c b/mm/slab_common.c
index 09b26673b63f..2914a8f0aa85 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -130,6 +130,7 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 #ifdef CONFIG_MEMCG_KMEM
 
 LIST_HEAD(slab_root_caches);
+static DEFINE_SPINLOCK(memcg_kmem_wq_lock);
 
 void slab_init_memcg_params(struct kmem_cache *s)
 {
@@ -629,6 +630,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
 	struct memcg_cache_array *arr;
 	struct kmem_cache *s = NULL;
 	char *cache_name;
+	bool dying;
 	int idx;
 
 	get_online_cpus();
@@ -640,7 +642,13 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
 	 * The memory cgroup could have been offlined while the cache
 	 * creation work was pending.
 	 */
-	if (memcg->kmem_state != KMEM_ONLINE || root_cache->memcg_params.dying)
+	if (memcg->kmem_state != KMEM_ONLINE)
+		goto out_unlock;
+
+	spin_lock_irq(&memcg_kmem_wq_lock);
+	dying = root_cache->memcg_params.dying;
+	spin_unlock_irq(&memcg_kmem_wq_lock);
+	if (dying)
 		goto out_unlock;
 
 	idx = memcg_cache_id(memcg);
@@ -735,14 +743,17 @@ static void kmemcg_cache_deactivate(struct kmem_cache *s)
 
 	__kmemcg_cache_deactivate(s);
 
+	spin_lock_irq(&memcg_kmem_wq_lock);
 	if (s->memcg_params.root_cache->memcg_params.dying)
-		return;
+		goto unlock;
 
 	/* pin memcg so that @s doesn't get destroyed in the middle */
 	css_get(&s->memcg_params.memcg->css);
 
 	s->memcg_params.work_fn = __kmemcg_cache_deactivate_after_rcu;
 	call_rcu(&s->memcg_params.rcu_head, kmemcg_rcufn);
+unlock:
+	spin_unlock_irq(&memcg_kmem_wq_lock);
 }
 
 void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
@@ -852,9 +863,9 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
 
 static void flush_memcg_workqueue(struct kmem_cache *s)
 {
-	mutex_lock(&slab_mutex);
+	spin_lock_irq(&memcg_kmem_wq_lock);
 	s->memcg_params.dying = true;
-	mutex_unlock(&slab_mutex);
+	spin_unlock_irq(&memcg_kmem_wq_lock);
 
 	/*
 	 * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make
-- 
2.20.1
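
Not part of the patch, just an illustrative note: below is a minimal,
self-contained sketch of the locking pattern the hunks above apply. The
names example_wq_lock, example_dying, example_work and example_work_fn are
hypothetical stand-ins for memcg_kmem_wq_lock, memcg_params.dying and the
real deactivation machinery; the writer sets the dying flag under an
irq-disabling spinlock, and would-be queuers check it under the same lock
before scheduling asynchronous work.

/*
 * Sketch only: hypothetical names, not the real kmem_cache code.
 */
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(example_wq_lock);	/* protects example_dying */
static bool example_dying;			/* set once, never cleared */
static struct work_struct example_work;		/* asynchronous work item */

static void example_work_fn(struct work_struct *work)
{
	/* asynchronous deactivation/release would happen here */
}

static void example_init(void)
{
	INIT_WORK(&example_work, example_work_fn);
}

/* Writer side: mark the object dying so that no new work gets queued. */
static void example_mark_dying(void)
{
	spin_lock_irq(&example_wq_lock);
	example_dying = true;
	spin_unlock_irq(&example_wq_lock);
}

/* Queuer side: only schedule asynchronous work if not dying. */
static bool example_try_queue_work(void)
{
	bool queued = false;

	spin_lock_irq(&example_wq_lock);
	if (!example_dying)
		queued = schedule_work(&example_work);
	spin_unlock_irq(&example_wq_lock);

	return queued;
}

A caller that may run with interrupts already disabled (e.g. from irq
context, as the changelog anticipates for the follow-up asynchronous
release work) would use spin_lock_irqsave()/spin_unlock_irqrestore()
instead of the _irq variants shown here.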