We don't want raw fill_cqe calls. In preparation for upcoming features,
get rid of them by using io_req_task_complete().

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 41 +++++++++++++++++++++++------------------
 1 file changed, 23 insertions(+), 18 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 690bfeaa609a..0e04e0997d7d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1191,6 +1191,7 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
 
 static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
 static void io_eventfd_signal(struct io_ring_ctx *ctx);
+static void io_req_tw_queue_complete(struct io_kiocb *req, u32 res);
 
 static struct kmem_cache *req_cachep;
 
@@ -1746,8 +1747,7 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
 		atomic_set(&req->ctx->cq_timeouts,
 			atomic_read(&req->ctx->cq_timeouts) + 1);
 		list_del_init(&req->timeout.list);
-		io_fill_cqe_req(req, status, 0);
-		io_put_req_deferred(req);
+		io_req_tw_queue_complete(req, status);
 	}
 }
 
@@ -2595,6 +2595,19 @@ static void io_req_task_submit(struct io_kiocb *req, bool *locked)
 		io_req_complete_failed(req, -EFAULT);
 }
 
+static void io_req_task_complete(struct io_kiocb *req, bool *locked)
+{
+	int res = req->result;
+
+	if (*locked) {
+		io_req_complete_state(req, res, io_put_kbuf(req, 0));
+		io_req_add_compl_list(req);
+	} else {
+		io_req_complete_post(req, res,
+				     io_put_kbuf(req, IO_URING_F_UNLOCKED));
+	}
+}
+
 static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
 {
 	req->result = ret;
@@ -2602,6 +2615,13 @@ static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
 	io_req_task_work_add(req, false);
 }
 
+static void io_req_tw_queue_complete(struct io_kiocb *req, u32 res)
+{
+	req->result = res;
+	req->io_task_work.func = io_req_task_complete;
+	io_req_task_work_add(req, false);
+}
+
 static void io_req_task_queue(struct io_kiocb *req)
 {
 	req->io_task_work.func = io_req_task_submit;
@@ -2987,19 +3007,6 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 	return false;
 }
 
-static inline void io_req_task_complete(struct io_kiocb *req, bool *locked)
-{
-	int res = req->result;
-
-	if (*locked) {
-		io_req_complete_state(req, res, io_put_kbuf(req, 0));
-		io_req_add_compl_list(req);
-	} else {
-		io_req_complete_post(req, res,
-				     io_put_kbuf(req, IO_URING_F_UNLOCKED));
-	}
-}
-
 static void __io_complete_rw(struct io_kiocb *req, long res,
 			     unsigned int issue_flags)
 {
@@ -6458,9 +6465,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 	if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
 		req_set_fail(req);
 
-	req->result = -ETIME;
-	req->io_task_work.func = io_req_task_complete;
-	io_req_task_work_add(req, false);
+	io_req_tw_queue_complete(req, -ETIME);
 	return HRTIMER_NORESTART;
 }
 
-- 
2.35.1
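
For context, the pattern the patch consolidates is simple: the request records its
completion result, req->io_task_work.func is pointed at io_req_task_complete(), and
the CQE is posted later from task_work, taking either the locked or the unlocked
completion path. Below is a minimal user-space sketch of that flow; it is not kernel
code, the toy_* names and tw_run() runner are invented purely for illustration, and
it assumes nothing about the real io_uring internals beyond what the diff shows.

/* Toy model of "queue a completion through task_work". */
#include <stdbool.h>
#include <stdio.h>

struct toy_req;

struct toy_task_work {
	void (*func)(struct toy_req *req, bool *locked);
};

struct toy_req {
	int result;
	struct toy_task_work io_task_work;
};

/* Completion callback: picks the locked or unlocked completion path. */
static void toy_req_task_complete(struct toy_req *req, bool *locked)
{
	if (*locked)
		printf("complete (locked path), res=%d\n", req->result);
	else
		printf("complete (unlocked path), res=%d\n", req->result);
}

/*
 * Counterpart of io_req_tw_queue_complete(): stash the result and the
 * callback; the real helper would also queue task work at this point.
 */
static void toy_req_tw_queue_complete(struct toy_req *req, int res)
{
	req->result = res;
	req->io_task_work.func = toy_req_task_complete;
}

/* Stand-in for the task_work runner invoking the queued callback. */
static void tw_run(struct toy_req *req, bool locked)
{
	req->io_task_work.func(req, &locked);
}

int main(void)
{
	struct toy_req req = { 0 };

	toy_req_tw_queue_complete(&req, -62 /* -ETIME */);
	tw_run(&req, false);
	return 0;
}

Routing every such completion through one helper means that later changes to how
results are posted (the "upcoming features" mentioned above) only need to touch
io_req_tw_queue_complete() and io_req_task_complete(), not each call site.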