Let's keep the checks for whether or not to break the iopoll loop the
same for normal and deferred tw; this includes the ->cached_cq_tail
checks guarding against polling more than asked for.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 io_uring/io_uring.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 8233a375e8c9..2fb5f1e78fb2 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1428,21 +1428,21 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 		 */
 		if (wq_list_empty(&ctx->iopoll_list) ||
 		    io_task_work_pending(ctx)) {
+			u32 tail = ctx->cached_cq_tail;
+
 			if (!llist_empty(&ctx->work_llist))
 				__io_run_local_work(ctx, true);
+
 			if (task_work_pending(current) ||
 			    wq_list_empty(&ctx->iopoll_list)) {
-				u32 tail = ctx->cached_cq_tail;
-
 				mutex_unlock(&ctx->uring_lock);
 				io_run_task_work();
 				mutex_lock(&ctx->uring_lock);
-
-				/* some requests don't go through iopoll_list */
-				if (tail != ctx->cached_cq_tail ||
-				    wq_list_empty(&ctx->iopoll_list))
-					break;
 			}
+			/* some requests don't go through iopoll_list */
+			if (tail != ctx->cached_cq_tail ||
+			    wq_list_empty(&ctx->iopoll_list))
+				break;
 		}
 		ret = io_do_iopoll(ctx, !min);
 		if (ret < 0)
-- 
2.37.2
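
[Editor's note: the following is a simplified, hedged userspace sketch of the
control flow the patch produces, not kernel code. The types and helpers
(fake_ctx, run_local_work, run_normal_work, should_break) are invented for
illustration only; it just shows that the CQ tail is snapshotted once, any
pending task_work of either flavour is run, and a single shared check then
decides whether to stop polling.]

/*
 * Userspace sketch (assumption: not the kernel source) of the loop shape
 * after the patch: one tail snapshot up front, task_work of either kind
 * run, then one shared break check for both paths.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_ctx {
	uint32_t cached_cq_tail;	/* completions posted so far */
	bool iopoll_list_empty;		/* nothing left to poll */
	bool local_work_pending;	/* deferred (DEFER_TASKRUN) tw */
	bool normal_work_pending;	/* regular task_work */
};

/* Stand-ins for __io_run_local_work() / io_run_task_work(); either may
 * post completions that never go through the iopoll list. */
static void run_local_work(struct fake_ctx *ctx)
{
	ctx->local_work_pending = false;
	ctx->cached_cq_tail++;
}

static void run_normal_work(struct fake_ctx *ctx)
{
	ctx->normal_work_pending = false;
	ctx->cached_cq_tail++;
}

/* Returns true when the iopoll loop should break, mirroring the single
 * check the patch shares between the two task_work flavours. */
static bool should_break(struct fake_ctx *ctx)
{
	uint32_t tail = ctx->cached_cq_tail;	/* snapshot first */

	if (ctx->local_work_pending)
		run_local_work(ctx);
	if (ctx->normal_work_pending)
		run_normal_work(ctx);

	/* some requests don't go through iopoll_list */
	return tail != ctx->cached_cq_tail || ctx->iopoll_list_empty;
}

int main(void)
{
	struct fake_ctx ctx = {
		.cached_cq_tail = 10,
		.iopoll_list_empty = false,
		.local_work_pending = true,	/* deferred tw only */
		.normal_work_pending = false,
	};

	/* With the unified check, deferred tw alone is enough to trip
	 * the "polled more than asked for" guard, same as normal tw. */
	printf("break: %s\n", should_break(&ctx) ? "yes" : "no");
	return 0;
}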