It's more convenient to have it in the submission state than to pass it as a pointer, so move it. Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx> --- fs/io_uring.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 725e852e22c5..cbe639caa096 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -219,6 +219,8 @@ struct io_submit_state { struct file *ring_file; int ring_fd; + + struct io_kiocb *link; }; struct io_ring_ctx { @@ -4721,11 +4723,11 @@ static inline void io_queue_link_head(struct io_kiocb *req) #define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \ IOSQE_IO_HARDLINK | IOSQE_ASYNC) -static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **link) +static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe) { const struct cred *old_creds = NULL; struct io_ring_ctx *ctx = req->ctx; + struct io_submit_state *state = &ctx->submit_state; unsigned int sqe_flags; int ret, id; @@ -4770,8 +4772,8 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, * submitted sync once the chain is complete. If none of those * conditions are true (normal request), then just queue it. 
*/ - if (*link) { - struct io_kiocb *head = *link; + if (state->link) { + struct io_kiocb *head = state->link; /* * Taking sequential execution of a link, draining both sides @@ -4801,7 +4803,7 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, /* last request of a link, enqueue the link */ if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK))) { io_queue_link_head(head); - *link = NULL; + state->link = NULL; } } else { if (unlikely(ctx->drain_next)) { @@ -4814,7 +4816,7 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, ret = io_req_defer_prep(req, sqe); if (ret) req->flags |= REQ_F_FAIL_LINK; - *link = req; + state->link = req; } else { io_queue_sqe(req, sqe); } @@ -4836,6 +4838,8 @@ static void io_submit_end(struct io_ring_ctx *ctx) if (state->free_reqs) kmem_cache_free_bulk(req_cachep, state->free_reqs, &state->reqs[state->cur_req]); + if (state->link) + io_queue_link_head(state->link); } /* @@ -4852,6 +4856,7 @@ static void io_submit_start(struct io_ring_ctx *ctx, unsigned int max_ios, state->ring_file = ring_file; state->ring_fd = ring_fd; + state->link = NULL; } static void io_commit_sqring(struct io_ring_ctx *ctx) @@ -4915,7 +4920,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, struct mm_struct **mm, bool async) { struct blk_plug plug; - struct io_kiocb *link = NULL; int i, submitted = 0; bool mm_fault = false; @@ -4973,7 +4977,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, req->needs_fixed_file = async; trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data, true, async); - if (!io_submit_sqe(req, sqe, &link)) + if (!io_submit_sqe(req, sqe)) break; } @@ -4982,8 +4986,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, percpu_ref_put_many(&ctx->refs, nr - ref_used); } - if (link) - io_queue_link_head(link); io_submit_end(ctx); if (nr > IO_PLUG_THRESHOLD) -- 2.24.0