Looking at the assembly, the compiler decided to reload req->opcode in
io_op_defs[opcode].needs_file instead of using the value it already had
in a register, so cache it in a temporary variable to let the reload be
optimised out. Also move the personality block later: it's better for
spilling/etc. as it only depends on @sqe, which we're keeping anyway.
While at it, zero req->opcode if it's over IORING_OP_LAST; that's not a
problem at the moment, but it is safer.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 31 +++++++++++++++++--------------
 1 file changed, 17 insertions(+), 14 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3f8bfa309b16..1f4aa2cdaaa2 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6978,9 +6978,10 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 {
 	unsigned int sqe_flags;
 	int personality;
+	u8 opcode;
 
 	/* req is partially pre-initialised, see io_preinit_req() */
-	req->opcode = READ_ONCE(sqe->opcode);
+	req->opcode = opcode = READ_ONCE(sqe->opcode);
 	/* same numerical values with corresponding REQ_F_*, safe to copy */
 	req->flags = sqe_flags = READ_ONCE(sqe->flags);
 	req->user_data = READ_ONCE(sqe->user_data);
@@ -6988,14 +6989,16 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	req->fixed_rsrc_refs = NULL;
 	req->task = current;
 
-	if (unlikely(req->opcode >= IORING_OP_LAST))
+	if (unlikely(opcode >= IORING_OP_LAST)) {
+		req->opcode = 0;
 		return -EINVAL;
+	}
 	if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
 		/* enforce forwards compatibility on users */
 		if (sqe_flags & ~SQE_VALID_FLAGS)
 			return -EINVAL;
 		if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
-		    !io_op_defs[req->opcode].buffer_select)
+		    !io_op_defs[opcode].buffer_select)
 			return -EOPNOTSUPP;
 		if (sqe_flags & IOSQE_IO_DRAIN)
 			io_init_req_drain(req);
@@ -7014,23 +7017,14 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		}
 	}
 
-	personality = READ_ONCE(sqe->personality);
-	if (personality) {
-		req->creds = xa_load(&ctx->personalities, personality);
-		if (!req->creds)
-			return -EINVAL;
-		get_cred(req->creds);
-		req->flags |= REQ_F_CREDS;
-	}
-
-	if (io_op_defs[req->opcode].needs_file) {
+	if (io_op_defs[opcode].needs_file) {
 		struct io_submit_state *state = &ctx->submit_state;
 
 		/*
 		 * Plug now if we have more than 2 IO left after this, and the
 		 * target is potentially a read/write to block based storage.
 		 */
-		if (state->need_plug && io_op_defs[req->opcode].plug) {
+		if (state->need_plug && io_op_defs[opcode].plug) {
 			state->plug_started = true;
 			state->need_plug = false;
 			blk_start_plug(&state->plug);
@@ -7042,6 +7036,15 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		return -EBADF;
 	}
 
+	personality = READ_ONCE(sqe->personality);
+	if (personality) {
+		req->creds = xa_load(&ctx->personalities, personality);
+		if (!req->creds)
+			return -EINVAL;
+		get_cred(req->creds);
+		req->flags |= REQ_F_CREDS;
+	}
+
 	return io_req_prep(req, sqe);
 }
 
-- 
2.33.0
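
P.S. A minimal userspace sketch of the reload issue, in case it helps
review. The request struct, the op_defs table and the opaque audit()
call are hypothetical stand-ins for io_kiocb, io_op_defs[] and the work
io_init_req() does between reading the opcode and using it; this is not
kernel code. Build both functions with -O2 and compare the assembly.

#include <stdint.h>

struct op_def {
	uint8_t needs_file;
};

struct request {
	uint8_t opcode;
};

extern const struct op_def op_defs[256];
extern void audit(struct request *req);	/* opaque: may write *req */

/* the volatile load below stands in for READ_ONCE() */

int init_reload(struct request *req, const volatile uint8_t *sqe_opcode)
{
	req->opcode = *sqe_opcode;
	audit(req);
	/*
	 * The compiler must assume audit() may have changed req->opcode,
	 * so this use is reloaded from memory.
	 */
	return op_defs[req->opcode].needs_file;
}

int init_cached(struct request *req, const volatile uint8_t *sqe_opcode)
{
	uint8_t opcode;

	req->opcode = opcode = *sqe_opcode;
	audit(req);
	/*
	 * Nothing actually modifies the opcode; caching it in a local
	 * tells the compiler so, and the value stays in a register.
	 */
	return op_defs[opcode].needs_file;
}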