Just assign these fields where we set up the request anyway; splitting
the init in two doesn't really buy us anything, and it's a bit more
fragile. With this, io_init_req() handles the whole thing while the
cacheline is pulled in anyway.

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 io_uring/io_uring.c | 19 ++++---------------
 1 file changed, 4 insertions(+), 15 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index a0b64831c455..7f5d0b833955 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1034,19 +1034,6 @@ void io_req_defer_failed(struct io_kiocb *req, s32 res)
 	io_req_complete_defer(req);
 }
 
-/*
- * Don't initialise the fields below on every allocation, but do that in
- * advance and keep them valid across allocations.
- */
-static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
-{
-	req->ctx = ctx;
-	req->link = NULL;
-	req->async_data = NULL;
-	/* not necessary, but safer to zero */
-	req->cqe.res = 0;
-}
-
 static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
 					struct io_submit_state *state)
 {
@@ -1097,7 +1084,6 @@ __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
 	for (i = 0; i < ret; i++) {
 		struct io_kiocb *req = reqs[i];
 
-		io_preinit_req(req, ctx);
 		io_req_add_to_cache(req, ctx);
 	}
 	return true;
@@ -2172,14 +2158,17 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	int personality;
 	u8 opcode;
 
-	/* req is partially pre-initialised, see io_preinit_req() */
 	req->opcode = opcode = READ_ONCE(sqe->opcode);
 	/* same numerical values with corresponding REQ_F_*, safe to copy */
 	req->flags = sqe_flags = READ_ONCE(sqe->flags);
 	req->cqe.user_data = READ_ONCE(sqe->user_data);
+	req->cqe.res = 0;
+	req->ctx = ctx;
 	req->file = NULL;
 	req->rsrc_node = NULL;
 	req->task = current;
+	req->async_data = NULL;
+	req->link = NULL;
 
 	if (unlikely(opcode >= IORING_OP_LAST)) {
 		req->opcode = 0;
-- 
2.39.2
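
For readers outside the io_uring tree, a minimal standalone sketch of
the pattern this patch applies: a two-phase request init (pre-init at
allocation-cache refill time plus a per-submission init) collapsed into
a single init done while the request's cacheline is being written
anyway. All names below (struct req, struct ctx, init_req()) are made
up for illustration; this is not io_uring code.

#include <stddef.h>

struct ctx;

struct req {
	struct ctx	*ctx;
	struct req	*link;
	void		*async_data;
	int		res;
	unsigned int	opcode;
};

/*
 * After the change: every field is assigned in one place, at
 * submission time. The store to req->opcode has already pulled the
 * request's cacheline in, so the extra assignments are essentially
 * free, and there is no second init path to keep in sync.
 */
static void init_req(struct ctx *ctx, struct req *req, unsigned int opcode)
{
	req->opcode = opcode;
	req->res = 0;
	req->ctx = ctx;
	req->async_data = NULL;
	req->link = NULL;
}

The fragility the commit message refers to is that the pre-init variant
relied on fields like ->link staying valid across request recycling;
with a single init there is no such cross-allocation invariant to
maintain.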