Inline struct io_comp_state into struct io_submit_state. They are
already tightly coupled, and given their mixed responsibilities,
keeping them separate only adds confusion.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 61 +++++++++++++++++++++++----------------------
 1 file changed, 27 insertions(+), 34 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 33a1c45ecd13..c14206003725 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -305,13 +305,6 @@ struct io_sq_data {
 #define IO_REQ_CACHE_SIZE		32
 #define IO_REQ_ALLOC_BATCH		8
 
-struct io_comp_state {
-	struct io_kiocb		*reqs[IO_COMPL_BATCH];
-	unsigned int		nr;
-	/* inline/task_work completion list, under ->uring_lock */
-	struct list_head	free_list;
-};
-
 struct io_submit_link {
 	struct io_kiocb		*head;
 	struct io_kiocb		*last;
@@ -332,7 +325,10 @@ struct io_submit_state {
 	/*
	 * Batch completion logic
	 */
-	struct io_comp_state	comp;
+	struct io_kiocb		*compl_reqs[IO_COMPL_BATCH];
+	unsigned int		compl_nr;
+	/* inline/task_work completion list, under ->uring_lock */
+	struct list_head	free_list;
 
 	/*
	 * File reference cache
@@ -1223,7 +1219,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
 	init_llist_head(&ctx->rsrc_put_llist);
 	INIT_LIST_HEAD(&ctx->tctx_list);
-	INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
+	INIT_LIST_HEAD(&ctx->submit_state.free_list);
 	INIT_LIST_HEAD(&ctx->locked_free_list);
 	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
 	return ctx;
@@ -1733,10 +1729,10 @@ static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
 }
 
 static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
-					struct io_comp_state *cs)
+					struct io_submit_state *state)
 {
 	spin_lock_irq(&ctx->completion_lock);
-	list_splice_init(&ctx->locked_free_list, &cs->free_list);
+	list_splice_init(&ctx->locked_free_list, &state->free_list);
 	ctx->locked_free_nr = 0;
 	spin_unlock_irq(&ctx->completion_lock);
 }
@@ -1745,7 +1741,6 @@ static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
 static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
 {
 	struct io_submit_state *state = &ctx->submit_state;
-	struct io_comp_state *cs = &state->comp;
 	int nr;
 
 	/*
@@ -1754,11 +1749,11 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
 	 * side cache.
 	 */
 	if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
-		io_flush_cached_locked_reqs(ctx, cs);
+		io_flush_cached_locked_reqs(ctx, state);
 
 	nr = state->free_reqs;
-	while (!list_empty(&cs->free_list)) {
-		struct io_kiocb *req = list_first_entry(&cs->free_list,
+	while (!list_empty(&state->free_list)) {
+		struct io_kiocb *req = list_first_entry(&state->free_list,
 					struct io_kiocb, inflight_entry);
 
 		list_del(&req->inflight_entry);
@@ -1941,7 +1936,7 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx)
 {
 	if (!ctx)
 		return;
-	if (ctx->submit_state.comp.nr) {
+	if (ctx->submit_state.compl_nr) {
 		mutex_lock(&ctx->uring_lock);
 		io_submit_flush_completions(ctx);
 		mutex_unlock(&ctx->uring_lock);
@@ -2138,19 +2133,19 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
 	if (state->free_reqs != ARRAY_SIZE(state->reqs))
 		state->reqs[state->free_reqs++] = req;
 	else
-		list_add(&req->inflight_entry, &state->comp.free_list);
+		list_add(&req->inflight_entry, &state->free_list);
 }
 
 static void io_submit_flush_completions(struct io_ring_ctx *ctx)
 	__must_hold(&req->ctx->uring_lock)
 {
-	struct io_comp_state *cs = &ctx->submit_state.comp;
-	int i, nr = cs->nr;
+	struct io_submit_state *state = &ctx->submit_state;
+	int i, nr = state->compl_nr;
 	struct req_batch rb;
 
 	spin_lock_irq(&ctx->completion_lock);
 	for (i = 0; i < nr; i++) {
-		struct io_kiocb *req = cs->reqs[i];
+		struct io_kiocb *req = state->compl_reqs[i];
 
 		__io_cqring_fill_event(ctx, req->user_data, req->result,
					req->compl.cflags);
@@ -2161,7 +2156,7 @@ static void io_submit_flush_completions(struct io_ring_ctx *ctx)
 
 	io_init_req_batch(&rb);
 	for (i = 0; i < nr; i++) {
-		struct io_kiocb *req = cs->reqs[i];
+		struct io_kiocb *req = state->compl_reqs[i];
 
 		/* submission and completion refs */
 		if (req_ref_sub_and_test(req, 2))
@@ -2169,7 +2164,7 @@ static void io_submit_flush_completions(struct io_ring_ctx *ctx)
 	}
 
 	io_req_free_batch_finish(ctx, &rb);
-	cs->nr = 0;
+	state->compl_nr = 0;
 }
 
 /*
@@ -6479,10 +6474,10 @@ static void __io_queue_sqe(struct io_kiocb *req)
 	/* drop submission reference */
 	if (req->flags & REQ_F_COMPLETE_INLINE) {
 		struct io_ring_ctx *ctx = req->ctx;
-		struct io_comp_state *cs = &ctx->submit_state.comp;
+		struct io_submit_state *state = &ctx->submit_state;
 
-		cs->reqs[cs->nr++] = req;
-		if (cs->nr == ARRAY_SIZE(cs->reqs))
+		state->compl_reqs[state->compl_nr++] = req;
+		if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
 			io_submit_flush_completions(ctx);
 	} else {
 		io_put_req(req);
@@ -6686,7 +6681,7 @@ static void io_submit_state_end(struct io_submit_state *state,
 {
 	if (state->link.head)
 		io_queue_sqe(state->link.head);
-	if (state->comp.nr)
+	if (state->compl_nr)
 		io_submit_flush_completions(ctx);
 	if (state->plug_started)
 		blk_finish_plug(&state->plug);
@@ -8646,19 +8641,17 @@ static void io_req_cache_free(struct list_head *list)
 
 static void io_req_caches_free(struct io_ring_ctx *ctx)
 {
-	struct io_submit_state *submit_state = &ctx->submit_state;
-	struct io_comp_state *cs = &ctx->submit_state.comp;
+	struct io_submit_state *state = &ctx->submit_state;
 
 	mutex_lock(&ctx->uring_lock);
 
-	if (submit_state->free_reqs) {
-		kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
-				     submit_state->reqs);
-		submit_state->free_reqs = 0;
+	if (state->free_reqs) {
+		kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
+		state->free_reqs = 0;
 	}
 
-	io_flush_cached_locked_reqs(ctx, cs);
-	io_req_cache_free(&cs->free_list);
+	io_flush_cached_locked_reqs(ctx, state);
+	io_req_cache_free(&state->free_list);
 
 	mutex_unlock(&ctx->uring_lock);
 }
-- 
2.32.0
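
For reference, a sketch of how the relevant part of struct io_submit_state
looks once this patch is applied, reconstructed from the hunks above; fields
the patch does not touch are elided:

	struct io_submit_state {
		/* ... submission-side fields elided ... */

		/*
		 * Batch completion logic: requests completed inline are
		 * stashed in compl_reqs[] and flushed in one go by
		 * io_submit_flush_completions() when compl_nr reaches
		 * IO_COMPL_BATCH (i.e. ARRAY_SIZE(compl_reqs)).
		 */
		struct io_kiocb		*compl_reqs[IO_COMPL_BATCH];
		unsigned int		compl_nr;
		/* inline/task_work completion list, under ->uring_lock */
		struct list_head	free_list;

		/* ... file reference cache fields elided ... */
	};

With io_comp_state folded in, helpers such as io_flush_cached_locked_reqs()
and io_req_caches_free() take a single struct io_submit_state * instead of
reaching through ->comp.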