We have several spots where a call to io_fill_cqe_req() is immediately
followed by io_put_req_deferred(). Replace them with
__io_req_complete_post() and get rid of io_put_req_deferred() and
io_fill_cqe_req().

> size ./fs/io_uring.o
   text    data     bss     dec     hex filename
  86942   13734       8  100684   1894c ./fs/io_uring.o
> size ./fs/io_uring.o
   text    data     bss     dec     hex filename
  86438   13654       8  100100   18704 ./fs/io_uring.o

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 42 ++++++++----------------------------------
 1 file changed, 8 insertions(+), 34 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 986a2d640702..92d7c7a0d234 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1188,10 +1188,8 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 					 bool cancel_all);
 static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
 
-static void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags);
-
+static void __io_req_complete_post(struct io_kiocb *req, s32 res, u32 cflags);
 static void io_put_req(struct io_kiocb *req);
-static void io_put_req_deferred(struct io_kiocb *req);
 static void io_dismantle_req(struct io_kiocb *req);
 static void io_queue_linked_timeout(struct io_kiocb *req);
 static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
@@ -1773,8 +1771,7 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
 		atomic_set(&req->ctx->cq_timeouts,
 			atomic_read(&req->ctx->cq_timeouts) + 1);
 		list_del_init(&req->timeout.list);
-		io_fill_cqe_req(req, status, 0);
-		io_put_req_deferred(req);
+		__io_req_complete_post(req, status, 0);
 	}
 }
 
@@ -2137,12 +2134,6 @@ static inline bool __io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
 	return __io_fill_cqe(req->ctx, req->cqe.user_data, res, cflags);
 }
 
-static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
-{
-	if (!(req->flags & REQ_F_CQE_SKIP))
-		__io_fill_cqe_req(req, res, cflags);
-}
-
 static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
 				     s32 res, u32 cflags)
 {
@@ -2376,9 +2367,7 @@ static bool io_kill_linked_timeout(struct io_kiocb *req)
 		link->timeout.head = NULL;
 		if (hrtimer_try_to_cancel(&io->timer) != -1) {
 			list_del(&link->timeout.list);
-			/* leave REQ_F_CQE_SKIP to io_fill_cqe_req */
-			io_fill_cqe_req(link, -ECANCELED, 0);
-			io_put_req_deferred(link);
+			__io_req_complete_post(link, -ECANCELED, 0);
 			return true;
 		}
 	}
@@ -2404,11 +2393,11 @@ static void io_fail_links(struct io_kiocb *req)
 		trace_io_uring_fail_link(req->ctx, req, req->cqe.user_data,
 					req->opcode, link);
 
-		if (!ignore_cqes) {
+		if (ignore_cqes)
+			link->flags |= REQ_F_CQE_SKIP;
+		else
 			link->flags &= ~REQ_F_CQE_SKIP;
-			io_fill_cqe_req(link, res, 0);
-		}
-		io_put_req_deferred(link);
+		__io_req_complete_post(link, res, 0);
 		link = nxt;
 	}
 }
@@ -2424,9 +2413,7 @@ static bool io_disarm_next(struct io_kiocb *req)
 		req->flags &= ~REQ_F_ARM_LTIMEOUT;
 		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
 			io_remove_next_linked(req);
-			/* leave REQ_F_CQE_SKIP to io_fill_cqe_req */
-			io_fill_cqe_req(link, -ECANCELED, 0);
-			io_put_req_deferred(link);
+			__io_req_complete_post(link, -ECANCELED, 0);
 			posted = true;
 		}
 	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
@@ -2695,11 +2682,6 @@ static void io_free_req(struct io_kiocb *req)
 	__io_free_req(req);
 }
 
-static void io_free_req_work(struct io_kiocb *req, bool *locked)
-{
-	io_free_req(req);
-}
-
 static void io_free_batch_list(struct io_ring_ctx *ctx,
 				struct io_wq_work_node *node)
 	__must_hold(&ctx->uring_lock)
@@ -2799,14 +2781,6 @@ static inline void io_put_req(struct io_kiocb *req)
 		io_free_req(req);
 }
 
-static inline void io_put_req_deferred(struct io_kiocb *req)
-{
-	if (req_ref_put_and_test(req)) {
-		req->io_task_work.func = io_free_req_work;
-		io_req_task_work_add(req, false);
-	}
-}
-
 static unsigned io_cqring_events(struct io_ring_ctx *ctx)
 {
 	/* See comment at the top of this file */
-- 
2.35.2
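
For reference, the call-site transformation as a simplified before/after
sketch. This is not verbatim kernel code; it assumes, consistent with the
io_fail_links() hunk above, that __io_req_complete_post() itself honours
REQ_F_CQE_SKIP and drops the request reference:

	/* before: two calls, the CQE-skip check hidden in io_fill_cqe_req() */
	io_fill_cqe_req(req, res, 0);		/* no-op when REQ_F_CQE_SKIP is set */
	io_put_req_deferred(req);		/* ref put deferred via task work */

	/* after: one call posts the CQE (unless skipped) and puts the ref */
	__io_req_complete_post(req, res, 0);

This is also why io_fail_links() now sets REQ_F_CQE_SKIP explicitly rather
than skipping the fill call: with a single completion helper, the skip
decision has to travel through the request flags instead of the call site.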