We use system_unbound_wq to run io_ring_exit_work(), so it's hard to monitor whether removal hangs or not. Add WARN_ON_ONCE to catch hangs. Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx> --- fs/io_uring.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/fs/io_uring.c b/fs/io_uring.c index 46a2417187ff..b4820f6261fe 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -8547,6 +8547,8 @@ static void io_ring_exit_work(struct work_struct *work) struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work); struct io_tctx_exit exit; struct io_tctx_node *node; + const u64 bias_ms = MSEC_PER_SEC * 60 * 5; + ktime_t start = ktime_get(); int ret; /* @@ -8557,10 +8559,13 @@ static void io_ring_exit_work(struct work_struct *work) */ do { io_uring_try_cancel_requests(ctx, NULL, NULL); + WARN_ON_ONCE(ktime_ms_delta(ktime_get(), start) > bias_ms); } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20)); mutex_lock(&ctx->uring_lock); while (!list_empty(&ctx->tctx_list)) { + WARN_ON_ONCE(ktime_ms_delta(ktime_get(), start) > bias_ms); + node = list_first_entry(&ctx->tctx_list, struct io_tctx_node, ctx_node); exit.ctx = ctx; -- 2.24.0