Add a helper, io_req_commit_cqe(), to simplify
__io_submit_flush_completions() a bit.

No functional change; the added helper will be reused by the sqe group
code under the same locking rule.

Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 io_uring/io_uring.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 5d69851b5131..7597344a6440 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -861,6 +861,20 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
 	return posted;
 }
 
+static __always_inline void io_req_commit_cqe(struct io_ring_ctx *ctx,
+					      struct io_kiocb *req)
+{
+	if (unlikely(!io_fill_cqe_req(ctx, req))) {
+		if (ctx->lockless_cq) {
+			spin_lock(&ctx->completion_lock);
+			io_req_cqe_overflow(req);
+			spin_unlock(&ctx->completion_lock);
+		} else {
+			io_req_cqe_overflow(req);
+		}
+	}
+}
+
 static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -1413,16 +1427,8 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 					    comp_list);
 
-		if (!(req->flags & REQ_F_CQE_SKIP) &&
-		    unlikely(!io_fill_cqe_req(ctx, req))) {
-			if (ctx->lockless_cq) {
-				spin_lock(&ctx->completion_lock);
-				io_req_cqe_overflow(req);
-				spin_unlock(&ctx->completion_lock);
-			} else {
-				io_req_cqe_overflow(req);
-			}
-		}
+		if (!(req->flags & REQ_F_CQE_SKIP))
+			io_req_commit_cqe(ctx, req);
 	}
 
 	__io_cq_unlock_post(ctx);
-- 
2.42.0
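
As background on the locking rule the new helper captures: the CQE is
filled lock-free on the fast path, and ->completion_lock is taken only on
the overflow slow path, and only when the completion path itself runs
without that lock already held (ctx->lockless_cq). The sketch below is a
self-contained userspace analogue of that pattern, not kernel code: every
name in it (ring_ctx, request, fill_cqe(), cqe_overflow(), commit_cqe())
is invented for illustration, and a pthread mutex stands in for the
completion_lock spinlock.

/*
 * Illustration only: a userspace analogue of the locking rule that
 * io_req_commit_cqe() encapsulates.  None of the names below are real
 * io_uring APIs.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ring_ctx {
	pthread_mutex_t completion_lock;
	bool lockless_cq;	/* completion path runs without the lock held */
	int free_cqes;		/* toy stand-in for available CQ ring space */
};

struct request {
	int res;
};

/* Stand-in for io_fill_cqe_req(): the lock-free fast path, which succeeds
 * as long as the CQ ring has room. */
static bool fill_cqe(struct ring_ctx *ctx, struct request *req)
{
	(void)req;
	if (ctx->free_cqes > 0) {
		ctx->free_cqes--;
		return true;
	}
	return false;
}

/* Stand-in for io_req_cqe_overflow(): the slow path, which must run with
 * completion_lock held. */
static void cqe_overflow(struct ring_ctx *ctx, struct request *req)
{
	(void)ctx;
	printf("CQE overflowed, res=%d\n", req->res);
}

/* Same shape as io_req_commit_cqe(): take the lock only on the overflow
 * path, and only when the caller does not already hold it. */
static void commit_cqe(struct ring_ctx *ctx, struct request *req)
{
	if (!fill_cqe(ctx, req)) {
		if (ctx->lockless_cq) {
			pthread_mutex_lock(&ctx->completion_lock);
			cqe_overflow(ctx, req);
			pthread_mutex_unlock(&ctx->completion_lock);
		} else {
			/* caller already holds completion_lock */
			cqe_overflow(ctx, req);
		}
	}
}

int main(void)
{
	struct ring_ctx ctx = {
		.completion_lock = PTHREAD_MUTEX_INITIALIZER,
		.lockless_cq = true,
		.free_cqes = 1,
	};
	struct request a = { .res = 0 }, b = { .res = -11 };

	commit_cqe(&ctx, &a);	/* fast path: CQE fits in the ring */
	commit_cqe(&ctx, &b);	/* slow path: overflow handled under the lock */
	return 0;
}

Factoring the pattern into one helper means the flush path above and any
future caller that completes requests under the same rule (such as the sqe
group code mentioned in the changelog) share a single copy of the
overflow/locking logic.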