Commit 5a836bf6b09f ("mm, slub: move flush_cpu_slab() invocations
__free_slab() invocations out of IRQ context") moved all
flush_cpu_slab() invocations to the global workqueue to avoid a problem
related to deactivate_slab()/__free_slab() being called from an IRQ
context on PREEMPT_RT kernels.

When flush_all_cpus_locked() is called from a task context,
flush_cpu_slab() should be called directly, without deferring it to the
global workqueue; otherwise a work item running on a workqueue with the
WQ_MEM_RECLAIM flag set may end up flushing a work item queued on a
workqueue without that flag, triggering the following dependency
warning:

  workqueue: WQ_MEM_RECLAIM nvme-delete-wq:nvme_delete_ctrl_work [nvme_core] is flushing !WQ_MEM_RECLAIM events:flush_cpu_slab
  WARNING: CPU: 37 PID: 410 at kernel/workqueue.c:2637 check_flush_dependency+0x10a/0x120
  Workqueue: nvme-delete-wq nvme_delete_ctrl_work [nvme_core]
  RIP: 0010:check_flush_dependency+0x10a/0x120
  Call Trace:
   __flush_work.isra.0+0xbf/0x220
   ? __queue_work+0x1dc/0x420
   flush_all_cpus_locked+0xfb/0x120
   __kmem_cache_shutdown+0x2b/0x320
   kmem_cache_destroy+0x49/0x100
   bioset_exit+0x143/0x190
   blk_release_queue+0xb9/0x100
   kobject_cleanup+0x37/0x130
   nvme_fc_ctrl_free+0xc6/0x150 [nvme_fc]
   nvme_free_ctrl+0x1ac/0x2b0 [nvme_core]

Fixes: 5a836bf6b09f ("mm, slub: move flush_cpu_slab() invocations __free_slab() invocations out of IRQ context")
Signed-off-by: Maurizio Lombardi <mlombard@xxxxxxxxxx>
---
 mm/slub.c | 34 ++++++++++++++++++++++------------
 1 file changed, 22 insertions(+), 12 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 862dbd9af4f5..d46ee90651d2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2681,30 +2681,34 @@ struct slub_flush_work {
 	bool skip;
 };
 
+static void flush_cpu_slab(void *d)
+{
+	struct kmem_cache *s = d;
+	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
+
+	if (c->slab)
+		flush_slab(s, c);
+
+	unfreeze_partials(s);
+}
+
 /*
  * Flush cpu slab.
  *
  * Called from CPU work handler with migration disabled.
  */
-static void flush_cpu_slab(struct work_struct *w)
+static void flush_cpu_slab_work(struct work_struct *w)
 {
-	struct kmem_cache *s;
-	struct kmem_cache_cpu *c;
 	struct slub_flush_work *sfw;
 
 	sfw = container_of(w, struct slub_flush_work, work);
 
-	s = sfw->s;
-	c = this_cpu_ptr(s->cpu_slab);
-
-	if (c->slab)
-		flush_slab(s, c);
-
-	unfreeze_partials(s);
+	flush_cpu_slab(sfw->s);
 }
 
-static bool has_cpu_slab(int cpu, struct kmem_cache *s)
+static bool has_cpu_slab(int cpu, void *info)
 {
+	struct kmem_cache *s = info;
 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
 	return c->slab || slub_percpu_partial(c);
@@ -2721,13 +2725,18 @@ static void flush_all_cpus_locked(struct kmem_cache *s)
 	lockdep_assert_cpus_held();
 	mutex_lock(&flush_lock);
 
+	if (in_task()) {
+		on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
+		goto unlock_exit;
+	}
+
 	for_each_online_cpu(cpu) {
 		sfw = &per_cpu(slub_flush, cpu);
 		if (!has_cpu_slab(cpu, s)) {
 			sfw->skip = true;
 			continue;
 		}
-		INIT_WORK(&sfw->work, flush_cpu_slab);
+		INIT_WORK(&sfw->work, flush_cpu_slab_work);
 		sfw->skip = false;
 		sfw->s = s;
 		schedule_work_on(cpu, &sfw->work);
@@ -2740,6 +2749,7 @@ static void flush_all_cpus_locked(struct kmem_cache *s)
 		flush_work(&sfw->work);
 	}
 
+unlock_exit:
 	mutex_unlock(&flush_lock);
 }
 
-- 
2.31.1
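
For reference, on_each_cpu_cond(cond_func, func, info, wait) runs
func(info) on every online CPU for which cond_func(cpu, info) returns
true. Below is a minimal sketch of its semantics as a serial loop; the
real implementation in kernel/smp.c builds a cpumask of matching CPUs,
batches the cross-CPU calls, and runs func() with interrupts disabled.
The name on_each_cpu_cond_sketch() is hypothetical, used only for
illustration:

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Illustrative sketch only, not the real kernel implementation. */
static void on_each_cpu_cond_sketch(smp_cond_func_t cond_func,
				    smp_call_func_t func, void *info,
				    bool wait)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (!cond_func(cpu, info))
			continue;
		if (cpu == smp_processor_id())
			func(info);	/* run directly on the local CPU */
		else
			/* IPI the remote CPU, optionally waiting for completion */
			smp_call_function_single(cpu, func, info, wait);
	}
	preempt_enable();
}

With on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1), has_cpu_slab()
filters out CPUs with nothing to flush and flush_cpu_slab() runs
synchronously, so no work item is ever queued on the !WQ_MEM_RECLAIM
system workqueue and check_flush_dependency() is never tripped.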