From: Anuj Gupta <anuj20.g@xxxxxxxxxxx>

Add IORING_OP_URING_CMD_FIXED opcode that enables performing the
operation with previously registered buffers.

Signed-off-by: Anuj Gupta <anuj20.g@xxxxxxxxxxx>
---
 fs/io_uring.c                 | 29 ++++++++++++++++++++++++++++-
 include/uapi/linux/io_uring.h |  6 +++++-
 2 files changed, 33 insertions(+), 2 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index cc6735913c4b..2870a891e441 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1122,6 +1122,10 @@ static const struct io_op_def io_op_defs[] = {
 		.needs_file		= 1,
 		.offsets		= 1,
 	},
+	[IORING_OP_URING_CMD_FIXED] = {
+		.needs_file		= 1,
+		.offsets		= 1,
+	},
 };
 
 /* requests with any of those set should undergo io_disarm_next() */
@@ -4133,6 +4137,7 @@ EXPORT_SYMBOL_GPL(io_uring_cmd_done);
 static int io_uring_cmd_prep(struct io_kiocb *req,
 			     const struct io_uring_sqe *sqe)
 {
+	struct io_ring_ctx *ctx = req->ctx;
 	const struct io_uring_cmd_sqe *csqe = (const void *) sqe;
 	struct io_uring_cmd *cmd = &req->uring_cmd;
 
@@ -4145,7 +4150,13 @@
 	}
 
 	cmd->op = READ_ONCE(csqe->op);
-	cmd->len = READ_ONCE(csqe->len);
+	if (req->opcode == IORING_OP_URING_CMD_FIXED) {
+		req->imu = NULL;
+		io_req_set_rsrc_node(req, ctx);
+		req->buf_index = READ_ONCE(csqe->buf_index);
+		req->uring_cmd.flags |= URING_CMD_FIXEDBUFS;
+	} else
+		cmd->len = READ_ONCE(csqe->len);
 
 	/*
 	 * The payload is the last 40 bytes of an io_uring_cmd_sqe, with the
@@ -4160,6 +4171,20 @@
 	struct file *file = req->file;
 	int ret;
 
+	if (req->opcode == IORING_OP_URING_CMD_FIXED) {
+		u32 index, buf_index = req->buf_index;
+		struct io_ring_ctx *ctx = req->ctx;
+		struct io_mapped_ubuf *imu = req->imu;
+
+		if (likely(!imu)) {
+			if (unlikely(buf_index >= ctx->nr_user_bufs))
+				return -EFAULT;
+			index = array_index_nospec(buf_index, ctx->nr_user_bufs);
+			imu = READ_ONCE(ctx->user_bufs[index]);
+			req->imu = imu;
+		}
+	}
+
 	ret = file->f_op->async_cmd(&req->uring_cmd, issue_flags);
 	/* queued async, consumer will call io_uring_cmd_done() when complete */
 	if (ret == -EIOCBQUEUED)
@@ -6675,6 +6700,7 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	case IORING_OP_LINKAT:
 		return io_linkat_prep(req, sqe);
 	case IORING_OP_URING_CMD:
+	case IORING_OP_URING_CMD_FIXED:
 		return io_uring_cmd_prep(req, sqe);
 	}
 
@@ -6960,6 +6986,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 		ret = io_linkat(req, issue_flags);
 		break;
 	case IORING_OP_URING_CMD:
+	case IORING_OP_URING_CMD_FIXED:
 		ret = io_uring_cmd(req, issue_flags);
 		break;
 	default:
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 7191419f2236..cb331f201255 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -79,7 +79,10 @@ struct io_uring_cmd_sqe {
 	__u64	user_data;
 	__u16	op;
 	__u16	personality;
-	__u32	len;
+	union {
+		__u32	len;
+		__u16	buf_index;
+	};
 	__u64	pdu[5];
 };
 
@@ -164,6 +167,7 @@ enum {
 	IORING_OP_SYMLINKAT,
 	IORING_OP_LINKAT,
 	IORING_OP_URING_CMD,
+	IORING_OP_URING_CMD_FIXED,
 
 	/* this goes last, obviously */
 	IORING_OP_LAST,
-- 
2.25.1
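
[Not part of the patch] A rough, untested userspace sketch of the intended
flow, for readers who want it spelled out: the buffer is registered once up
front, and the SQE then carries only the buf_index slot instead of a length.
io_uring_register_buffers(), io_uring_get_sqe() and io_uring_submit() are
stock liburing calls; the 'op' value, the device fd, and the assumption that
the io_uring_cmd_sqe header (opcode/fd) overlays the regular struct
io_uring_sqe header are placeholders not defined by the hunks above.

#include <liburing.h>
#include <errno.h>
#include <string.h>
#include <sys/uio.h>

static int submit_cmd_fixed(struct io_uring *ring, int dev_fd,
			    void *buf, size_t len)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct io_uring_sqe *sqe;
	struct io_uring_cmd_sqe *csqe;
	int ret;

	/* Pin the buffer once; it becomes slot 0 of the registered set. */
	ret = io_uring_register_buffers(ring, &iov, 1);
	if (ret)
		return ret;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EBUSY;
	memset(sqe, 0, sizeof(*sqe));

	/* Regular SQE header view: opcode and target file (assumed layout). */
	sqe->opcode = IORING_OP_URING_CMD_FIXED;
	sqe->fd = dev_fd;

	/* Command view of the same 64-byte slot, per the uapi hunk above. */
	csqe = (struct io_uring_cmd_sqe *)sqe;
	csqe->op = 0x12;	/* driver-private command, placeholder */
	csqe->buf_index = 0;	/* registered-buffer slot, replaces len */
	csqe->user_data = 0x1;

	return io_uring_submit(ring);
}

One consequence of the union in the uapi change: a fixed-buffer command has
no per-SQE len field, so the transfer size presumably has to travel in the
driver-specific pdu area.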