When none of the requests queued for batch completion needs to post a
CQE (see IOSQE_CQE_SKIP_SUCCESS), avoid grabbing ->completion_lock and
doing the commit/post work.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 172c857e8b3f..8983a5a6851a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -317,6 +317,7 @@ struct io_submit_state {
 
 	bool			plug_started;
 	bool			need_plug;
+	bool			flush_cqes;
 
 	/*
	 * Batch completion logic
@@ -1858,6 +1859,8 @@ static void io_req_complete_state(struct io_kiocb *req, long res,
 	req->result = res;
 	req->compl.cflags = cflags;
 	req->flags |= REQ_F_COMPLETE_INLINE;
+	if (!(req->flags & IOSQE_CQE_SKIP_SUCCESS))
+		req->ctx->submit_state.flush_cqes = true;
 }
 
 static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
@@ -2354,17 +2357,19 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 	int i, nr = state->compl_nr;
 	struct req_batch rb;
 
-	spin_lock(&ctx->completion_lock);
-	for (i = 0; i < nr; i++) {
-		struct io_kiocb *req = state->compl_reqs[i];
+	if (state->flush_cqes) {
+		spin_lock(&ctx->completion_lock);
+		for (i = 0; i < nr; i++) {
+			struct io_kiocb *req = state->compl_reqs[i];
 
-		if (!(req->flags & REQ_F_CQE_SKIP))
-			__io_fill_cqe(ctx, req->user_data, req->result,
-				      req->compl.cflags);
+			if (!(req->flags & REQ_F_CQE_SKIP))
+				__io_fill_cqe(ctx, req->user_data, req->result,
+					      req->compl.cflags);
+		}
+		io_commit_cqring(ctx);
+		spin_unlock(&ctx->completion_lock);
+		io_cqring_ev_posted(ctx);
 	}
-	io_commit_cqring(ctx);
-	spin_unlock(&ctx->completion_lock);
-	io_cqring_ev_posted(ctx);
 
 	io_init_req_batch(&rb);
 	for (i = 0; i < nr; i++) {
@@ -2376,6 +2381,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 	io_req_free_batch_finish(ctx, &rb);
 
 	state->compl_nr = 0;
+	state->flush_cqes = false;
 }
 
 /*
-- 
2.33.0
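
For readers unfamiliar with the flag, here is a minimal user-space sketch
(not part of the patch, and only an illustration) of how
IOSQE_CQE_SKIP_SUCCESS is consumed via liburing. It assumes a liburing and
kernel recent enough to expose the flag. Two NOPs are submitted, the first
with CQE skipping, so only the second is expected to post a CQE:

/*
 * Minimal sketch, assuming liburing with IOSQE_CQE_SKIP_SUCCESS support.
 * On kernels that do not know the flag, the flagged request is expected
 * to be rejected with -EINVAL instead of being silently skipped.
 */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0)
		return 1;

	/* first NOP: successful completion should not post a CQE */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	io_uring_sqe_set_flags(sqe, IOSQE_CQE_SKIP_SUCCESS);
	sqe->user_data = 1;

	/* second NOP: posts a CQE as usual */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	sqe->user_data = 2;

	io_uring_submit(&ring);

	/* only one CQE is expected, from the unskipped request */
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		printf("cqe: user_data=%llu res=%d\n",
		       (unsigned long long)cqe->user_data, cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}

If both NOPs carried IOSQE_CQE_SKIP_SUCCESS, flush_cqes would stay false
for the batch and __io_submit_flush_completions() would skip
->completion_lock and the commit/post entirely, which is the case this
patch optimises.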