Add the io_submit_fail_link() helper and move the link failure handling
into it. This simplifies io_submit_fail_init() and makes it easier to
add the sqe group failure handling later.

Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 io_uring/io_uring.c | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 4848cd84af3d..34020cadee03 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2095,22 +2095,17 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	return def->prep(req, sqe);
 }
 
-static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
+static __cold int io_submit_fail_link(struct io_submit_link *link,
 				      struct io_kiocb *req, int ret)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-	struct io_submit_link *link = &ctx->submit_state.link;
 	struct io_kiocb *head = link->head;
 
-	trace_io_uring_req_failed(sqe, req, ret);
-
 	/*
 	 * Avoid breaking links in the middle as it renders links with SQPOLL
 	 * unusable. Instead of failing eagerly, continue assembling the link if
 	 * applicable and mark the head with REQ_F_FAIL. The link flushing code
 	 * should find the flag and handle the rest.
 	 */
-	req_fail_link_node(req, ret);
 	if (head && !(head->flags & REQ_F_FAIL))
 		req_fail_link_node(head, -ECANCELED);
 
@@ -2129,9 +2124,24 @@ static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
 	else
 		link->head = req;
 	link->last = req;
+	return 0;
 }
 
+static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
+				      struct io_kiocb *req, int ret)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_submit_link *link = &ctx->submit_state.link;
+
+	trace_io_uring_req_failed(sqe, req, ret);
+
+	req_fail_link_node(req, ret);
+
+	/* cover both linked and non-linked request */
+	return io_submit_fail_link(link, req, ret);
+}
+
 /*
  * Return NULL if nothing to be queued, otherwise return request for queueing
  */
 static struct io_kiocb *io_link_sqe(struct io_submit_link *link,
-- 
2.42.0
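
To illustrate the "easier to add sqe group failing logic" point in the commit
message, here is a minimal sketch of how a follow-up could reuse
io_submit_fail_link(); the io_submit_fail_group() name and the grp_leader
field are hypothetical and not part of this patch:

/*
 * Hypothetical follow-up sketch (not part of this patch): fail a request
 * that belongs to an sqe group by reusing io_submit_fail_link().
 */
static __cold int io_submit_fail_group(struct io_submit_link *link,
				       struct io_kiocb *req, int ret)
{
	/* grp_leader is an assumed pointer to the group leader request */
	struct io_kiocb *lead = req->grp_leader;

	/*
	 * Mirror the link handling: mark the leader with REQ_F_FAIL so the
	 * flush code cancels the whole group instead of breaking it mid-way.
	 */
	if (lead && !(lead->flags & REQ_F_FAIL))
		req_fail_link_node(lead, -ECANCELED);

	return io_submit_fail_link(link, req, ret);
}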