Add extra wq flushing for fallback_work. It's not strictly necessary, but
it's safer if the invariants of io_fallback_req_func() ever change.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index fabb3580e27c..b61ffb1e7990 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1262,7 +1262,6 @@ static __cold void io_fallback_req_func(struct work_struct *work)
 		mutex_unlock(&ctx->uring_lock);
 	}
 	percpu_ref_put(&ctx->refs);
-
 }
 
 static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
@@ -9215,6 +9214,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	if (ctx->rsrc_backup_node)
 		io_rsrc_node_destroy(ctx->rsrc_backup_node);
 	flush_delayed_work(&ctx->rsrc_put_work);
+	flush_delayed_work(&ctx->fallback_work);
 
 	WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
 	WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
-- 
2.33.0
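
For context on the pattern being applied: flush_delayed_work() blocks until a
pending work item has finished executing, so calling it in io_ring_ctx_free()
guarantees that io_fallback_req_func() can no longer dereference the ctx after
it has been freed. Below is a minimal userspace sketch of that flush-before-free
ordering; it is not kernel code, and the names fake_ctx, fallback_fn, and
flush_work_item are hypothetical stand-ins for the workqueue machinery, not
io_uring internals.

/*
 * Userspace sketch of the ordering the patch enforces: deferred work that
 * touches an object must be flushed before that object is torn down.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_ctx {
	int refs;		/* stands in for ctx->refs */
	pthread_t worker;	/* stands in for the fallback_work item */
	int work_pending;
};

/* stands in for io_fallback_req_func(): runs async, touches the ctx */
static void *fallback_fn(void *arg)
{
	struct fake_ctx *ctx = arg;

	printf("fallback work running, refs=%d\n", ctx->refs);
	ctx->refs--;		/* mirrors percpu_ref_put(&ctx->refs) */
	return NULL;
}

/* stands in for flush_delayed_work(): wait until the work has finished */
static void flush_work_item(struct fake_ctx *ctx)
{
	if (ctx->work_pending) {
		pthread_join(ctx->worker, NULL);
		ctx->work_pending = 0;
	}
}

int main(void)
{
	struct fake_ctx *ctx = calloc(1, sizeof(*ctx));

	ctx->refs = 1;
	pthread_create(&ctx->worker, NULL, fallback_fn, ctx);
	ctx->work_pending = 1;

	/*
	 * Teardown, as in io_ring_ctx_free(): flush first, so the worker
	 * cannot touch ctx after free(). Skipping this flush is the kind
	 * of latent hazard the patch guards against.
	 */
	flush_work_item(ctx);
	free(ctx);
	return 0;
}

Removing the flush_work_item() call lets the worker race with free(); tools
such as a thread or address sanitizer will flag the resulting use-after-free,
which illustrates why the extra flush is cheap insurance even while the current
invariants make it unnecessary.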