Don't hand code wq_stack_add_head() to ->free_list, which serves for
recycling io_kiocb, add a helper doing it for us.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3ccc13acb498..a751ca167d21 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1493,6 +1493,11 @@ static inline void req_fail_link_node(struct io_kiocb *req, int res)
 	req->cqe.res = res;
 }
 
+static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
+{
+	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
+}
+
 static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
 {
 	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
@@ -2225,7 +2230,6 @@ static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
 static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
 	__must_hold(&ctx->uring_lock)
 {
-	struct io_submit_state *state = &ctx->submit_state;
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	void *reqs[IO_REQ_ALLOC_BATCH];
 	int ret, i;
@@ -2259,7 +2263,7 @@ static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
 		struct io_kiocb *req = reqs[i];
 
 		io_preinit_req(req, ctx);
-		wq_stack_add_head(&req->comp_list, &state->free_list);
+		io_req_add_to_cache(req, ctx);
 	}
 	return true;
 }
@@ -2702,7 +2706,7 @@ static void io_free_batch_list(struct io_ring_ctx *ctx,
 		}
 		task_refs++;
 		node = req->comp_list.next;
-		wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
+		io_req_add_to_cache(req, ctx);
 	} while (node);
 
 	if (task)
@@ -7853,7 +7857,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 		req = io_alloc_req(ctx);
 		sqe = io_get_sqe(ctx);
 		if (unlikely(!sqe)) {
-			wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
+			io_req_add_to_cache(req, ctx);
 			break;
 		}
 		/* will complete beyond this point, count as submitted */
-- 
2.35.1
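
[Editor's note: for readers outside the kernel tree, below is a minimal
standalone userspace sketch of the intrusive-stack caching pattern the new
helper wraps. struct request, struct cache, and req_add_to_cache() are
simplified stand-ins invented for illustration, not the actual io_uring
definitions; only wq_stack_add_head() mirrors the push semantics of the
kernel helper of the same name.]

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel types. */
struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct request {			/* stand-in for struct io_kiocb */
	int id;
	struct io_wq_work_node comp_list;
};

struct cache {				/* stand-in for submit_state.free_list */
	struct io_wq_work_node free_list;
};

/* Same push semantics as the kernel's wq_stack_add_head(): the stack
 * head's ->next always points at the most recently pushed node. */
static void wq_stack_add_head(struct io_wq_work_node *node,
			      struct io_wq_work_node *stack)
{
	node->next = stack->next;
	stack->next = node;
}

/* The pattern the patch names: one entry point for "return this
 * request to the per-ring free cache" instead of open-coding the push. */
static void req_add_to_cache(struct request *req, struct cache *c)
{
	wq_stack_add_head(&req->comp_list, &c->free_list);
}

/* container_of in plain C: recover the request from its embedded node. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct cache c = { .free_list = { .next = NULL } };
	struct request a = { .id = 1 }, b = { .id = 2 };

	req_add_to_cache(&a, &c);
	req_add_to_cache(&b, &c);

	/* LIFO order: b was pushed last, so it is walked first, keeping
	 * recently used (cache-hot) requests at the top of the stack. */
	for (struct io_wq_work_node *n = c.free_list.next; n; n = n->next) {
		struct request *req = container_of(n, struct request, comp_list);
		printf("cached req %d\n", req->id);
	}
	return 0;
}

The LIFO push is the point of the design: a just-completed io_kiocb is the
most likely to still be hot in CPU cache, so it is the first one handed back
out on the next allocation.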