Now that submit_state is retained across syscalls, we can save ourselves
from initialising it from the ground up for each io_submit_sqes(). Set
some fields during ctx allocation, and just keep them always consistent.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index f0cc5ccd6fe4..7076564aa944 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1294,6 +1294,7 @@ static inline bool io_is_timeout_noseq(struct io_kiocb *req)
 
 static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 {
+	struct io_submit_state *submit_state;
 	struct io_ring_ctx *ctx;
 	int hash_bits;
 
@@ -1345,6 +1346,12 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
 	INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
 	init_llist_head(&ctx->rsrc_put_llist);
+
+	submit_state = &ctx->submit_state;
+	INIT_LIST_HEAD(&submit_state->comp.list);
+	submit_state->comp.nr = 0;
+	submit_state->file_refs = 0;
+	submit_state->free_reqs = 0;
 	return ctx;
 err:
 	if (ctx->fallback_req)
@@ -6667,8 +6674,10 @@ static void io_submit_state_end(struct io_submit_state *state,
 	if (state->plug_started)
 		blk_finish_plug(&state->plug);
 	io_state_file_put(state);
-	if (state->free_reqs)
+	if (state->free_reqs) {
 		kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
+		state->free_reqs = 0;
+	}
 }
 
 /*
@@ -6678,10 +6687,6 @@ static void io_submit_state_start(struct io_submit_state *state,
 				  unsigned int max_ios)
 {
 	state->plug_started = false;
-	state->comp.nr = 0;
-	INIT_LIST_HEAD(&state->comp.list);
-	state->free_reqs = 0;
-	state->file_refs = 0;
 	state->ios_left = max_ios;
 }
 
-- 
2.24.0
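
For illustration only (not part of the patch): a minimal userspace C sketch of the lifecycle the commit message describes, where fields that survive across submissions are initialised once at context allocation and restored by the "end" helper, so the "start" helper only touches per-call state. All names below (demo_ctx, demo_state, demo_state_start/end, free_reqs, ios_left) are hypothetical stand-ins, not the real io_uring structures or helpers.

/*
 * Sketch of the "init once at allocation, keep consistent afterwards"
 * pattern. Retained fields (free_reqs) are set up in demo_ctx_alloc()
 * and reset in demo_state_end(); demo_state_start() only sets the
 * per-call budget.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_state {
	unsigned int free_reqs;	/* retained across submissions */
	unsigned int ios_left;	/* per-submission budget */
};

struct demo_ctx {
	struct demo_state submit_state;
};

static struct demo_ctx *demo_ctx_alloc(void)
{
	struct demo_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return NULL;
	/* one-time init of the fields that must stay consistent */
	ctx->submit_state.free_reqs = 0;
	return ctx;
}

static void demo_state_start(struct demo_state *state, unsigned int max_ios)
{
	/* only per-call state is touched; retained fields are already valid */
	state->ios_left = max_ios;
}

static void demo_state_end(struct demo_state *state)
{
	/* put retained fields back into their consistent (empty) state */
	if (state->free_reqs) {
		/* ...release cached requests here... */
		state->free_reqs = 0;
	}
}

int main(void)
{
	struct demo_ctx *ctx = demo_ctx_alloc();

	if (!ctx)
		return 1;
	demo_state_start(&ctx->submit_state, 8);
	ctx->submit_state.free_reqs = 3;	/* pretend some requests got cached */
	demo_state_end(&ctx->submit_state);
	printf("free_reqs after end: %u\n", ctx->submit_state.free_reqs);
	free(ctx);
	return 0;
}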