Now that we have multishot poll requests, one SQE can emit multiple CQEs.
Consider the example below:

    sqe0(multishot poll)-->sqe1-->sqe2(drain req)

sqe2 is designed to be issued after sqe0 and sqe1 have completed, but since
sqe0 is a multishot poll request, sqe2 may instead be issued after sqe0's
event has triggered twice, before sqe1 completes. This isn't what users
leverage drain requests for. A simple solution here is to ignore all
multishot poll CQEs, which means drain requests won't wait for those
requests to be done.

Signed-off-by: Hao Xu <haoxu@xxxxxxxxxxxxxxxxx>
---
 fs/io_uring.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 513096759445..cd6d44cf5940 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -455,6 +455,7 @@ struct io_ring_ctx {
 	struct callback_head		*exit_task_work;
 
 	struct wait_queue_head		hash_wait;
+	unsigned			multishot_cqes;
 
 	/* Keep this last, we don't need it for the fast path */
 	struct work_struct		exit_work;
@@ -1181,8 +1182,8 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq)
 	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
 		struct io_ring_ctx *ctx = req->ctx;
 
-		return seq != ctx->cached_cq_tail
-				+ READ_ONCE(ctx->cached_cq_overflow);
+		return seq + ctx->multishot_cqes != ctx->cached_cq_tail
+			+ READ_ONCE(ctx->cached_cq_overflow);
 	}
 
 	return false;
@@ -4897,6 +4898,7 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned flags = IORING_CQE_F_MORE;
+	bool multishot_poll = !(req->poll.events & EPOLLONESHOT);
 
 	if (!error && req->poll.canceled) {
 		error = -ECANCELED;
@@ -4911,6 +4913,9 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
 		req->poll.done = true;
 		flags = 0;
 	}
+	if (multishot_poll)
+		ctx->multishot_cqes++;
+
 	io_commit_cqring(ctx);
 	return !(flags & IORING_CQE_F_MORE);
 }
-- 
1.8.3.1