Split io_req_complete_post(). This is a prep for the next patch.

Signed-off-by: Hao Xu <haoxu@xxxxxxxxxxxxxxxxx>
---
 fs/io_uring.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 58ce58e7c65d..4ee5bbe36e3b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1793,12 +1793,11 @@ static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data
 	return __io_cqring_fill_event(ctx, user_data, res, cflags);
 }
 
-static void io_req_complete_post(struct io_kiocb *req, long res,
+static void __io_req_complete_post(struct io_kiocb *req, long res,
 				  unsigned int cflags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
-	spin_lock(&ctx->completion_lock);
 	__io_cqring_fill_event(ctx, req->user_data, res, cflags);
 	/*
 	 * If we're the last reference to this request, add to our locked
@@ -1819,6 +1818,15 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
 		ctx->locked_free_nr++;
 		percpu_ref_put(&ctx->refs);
 	}
+}
+
+static void io_req_complete_post(struct io_kiocb *req, long res,
+				  unsigned int cflags)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	spin_lock(&ctx->completion_lock);
+	__io_req_complete_post(req, res, cflags);
 	io_commit_cqring(ctx);
 	spin_unlock(&ctx->completion_lock);
 	io_cqring_ev_posted(ctx);
-- 
2.24.4
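
[Editor's note: a minimal sketch of the kind of caller this split enables, not part of the patch. With the lock acquisition factored out, a path that already holds ->completion_lock can post several completions and commit the CQ ring once. The helper name io_req_complete_post_batch() and its parameters are hypothetical; only __io_req_complete_post(), io_commit_cqring() and io_cqring_ev_posted() come from the patch above.]

static void io_req_complete_post_batch(struct io_ring_ctx *ctx,
				       struct io_kiocb **reqs, int nr,
				       long res, unsigned int cflags)
{
	int i;

	/* take the lock once for the whole batch */
	spin_lock(&ctx->completion_lock);
	for (i = 0; i < nr; i++)
		__io_req_complete_post(reqs[i], res, cflags);
	/* single commit/wakeup after all CQEs are filled */
	io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	io_cqring_ev_posted(ctx);
}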