io_ring_exit_work() already does an uring_lock lock/unlock cycle, so
there is no need to repeat it for the lock-waiting trick in
io_ring_ctx_free(). Move the waiting, together with its comment and the
completion_lock lock/unlock, into io_ring_exit_work().

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index e9bfe137270c..9ebdd288653f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8455,16 +8455,6 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
 
 static void io_ring_ctx_free(struct io_ring_ctx *ctx)
 {
-	/*
-	 * Some may use context even when all refs and requests have been put,
-	 * and they are free to do so while still holding uring_lock or
-	 * completion_lock, see __io_req_task_submit(). Wait for them to finish.
-	 */
-	mutex_lock(&ctx->uring_lock);
-	mutex_unlock(&ctx->uring_lock);
-	spin_lock_irq(&ctx->completion_lock);
-	spin_unlock_irq(&ctx->completion_lock);
-
 	io_sq_thread_finish(ctx);
 	io_sqe_buffers_unregister(ctx);
 
@@ -8615,6 +8605,12 @@ static void io_ring_exit_work(struct work_struct *work)
 		WARN_ON_ONCE(time_after(jiffies, timeout));
 	} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
 
+	/*
+	 * Some may use context even when all refs and requests have been put,
+	 * and they are free to do so while still holding uring_lock or
+	 * completion_lock, see __io_req_task_submit(). Apart from other work,
+	 * this lock/unlock section also waits for them to finish.
+	 */
 	mutex_lock(&ctx->uring_lock);
 	while (!list_empty(&ctx->tctx_list)) {
 		WARN_ON_ONCE(time_after(jiffies, timeout));
@@ -8635,6 +8631,8 @@ static void io_ring_exit_work(struct work_struct *work)
 		mutex_lock(&ctx->uring_lock);
 	}
 	mutex_unlock(&ctx->uring_lock);
+	spin_lock_irq(&ctx->completion_lock);
+	spin_unlock_irq(&ctx->completion_lock);
 
 	io_ring_ctx_free(ctx);
 }
-- 
2.24.0
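
For reference, the idiom being consolidated here is lock/unlock as a
barrier: acquiring a lock and immediately releasing it guarantees that
every thread that entered the critical section beforehand has left it,
after which the protected object may be torn down. Below is a minimal
userspace sketch of that pattern, assuming pthreads; the names (worker,
resource_valid) are hypothetical, and this is not the kernel code, just
the same idea in miniature.

/*
 * Sketch of the "lock/unlock as a wait" trick with pthreads.
 * Hypothetical example for illustration only.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int resource_valid = 1;

/* A late user that may still be inside the critical section at teardown. */
static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	if (resource_valid)
		puts("worker: using resource under lock");
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);

	/*
	 * The trick: taking and dropping the mutex ensures any thread that
	 * grabbed it before us has finished its critical section. Clearing
	 * resource_valid under the lock keeps new users out; in the kernel
	 * patch that part is already guaranteed by all refs being gone.
	 */
	pthread_mutex_lock(&lock);
	resource_valid = 0;
	pthread_mutex_unlock(&lock);

	puts("main: safe to tear the resource down now");
	pthread_join(t, NULL);
	return 0;
}

The patch applies the same barrier twice before io_ring_ctx_free():
once with uring_lock (reusing the lock/unlock that io_ring_exit_work()
performs anyway) and once with spin_lock_irq/spin_unlock_irq on
completion_lock, covering both locks a late __io_req_task_submit()
caller might still hold.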