Add the IORING_OP_URING_CMD_FIXED opcode, which performs the uring-cmd
operation with previously registered (fixed) buffers.

Signed-off-by: Kanchan Joshi <joshi.k@xxxxxxxxxxx>
Signed-off-by: Anuj Gupta <anuj20.g@xxxxxxxxxxx>
---
 fs/io_uring.c                 | 29 ++++++++++++++++++++++++++++-
 include/linux/io_uring.h      |  1 +
 include/uapi/linux/io_uring.h |  1 +
 3 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index ead0cbae8416..6a1dcea0f538 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1107,6 +1107,9 @@ static const struct io_op_def io_op_defs[] = {
 	[IORING_OP_URING_CMD] = {
 		.needs_file = 1,
 	},
+	[IORING_OP_URING_CMD_FIXED] = {
+		.needs_file = 1,
+	},
 };
 
 /* requests with any of those set should undergo io_disarm_next() */
@@ -4148,16 +4151,25 @@ EXPORT_SYMBOL_GPL(io_uring_cmd_done);
 static int io_uring_cmd_prep(struct io_kiocb *req,
 			     const struct io_uring_sqe *sqe)
 {
+	struct io_ring_ctx *ctx = req->ctx;
 	struct io_uring_cmd *ioucmd = &req->uring_cmd;
 
 	if (!req->file->f_op->async_cmd || !(req->ctx->flags & IORING_SETUP_SQE128))
 		return -EOPNOTSUPP;
 	if (req->ctx->flags & IORING_SETUP_IOPOLL)
 		return -EOPNOTSUPP;
+	if (req->opcode == IORING_OP_URING_CMD_FIXED) {
+		req->imu = NULL;
+		io_req_set_rsrc_node(req, ctx);
+		req->buf_index = READ_ONCE(sqe->buf_index);
+		ioucmd->flags = IO_URING_F_UCMD_FIXEDBUFS;
+	} else {
+		ioucmd->flags = 0;
+	}
+
 	ioucmd->cmd = (void *) &sqe->cmd;
 	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
 	ioucmd->cmd_len = READ_ONCE(sqe->cmd_len);
-	ioucmd->flags = 0;
 	return 0;
 }
 
@@ -4167,6 +4179,19 @@ static int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
 	int ret;
 	struct io_uring_cmd *ioucmd = &req->uring_cmd;
 
+	if (req->opcode == IORING_OP_URING_CMD_FIXED) {
+		u32 index, buf_index = req->buf_index;
+		struct io_ring_ctx *ctx = req->ctx;
+		struct io_mapped_ubuf *imu = req->imu;
+
+		if (likely(!imu)) {
+			if (unlikely(buf_index >= ctx->nr_user_bufs))
+				return -EFAULT;
+			index = array_index_nospec(buf_index, ctx->nr_user_bufs);
+			imu = READ_ONCE(ctx->user_bufs[index]);
+			req->imu = imu;
+		}
+	}
 	ioucmd->flags |= issue_flags;
 	ret = file->f_op->async_cmd(ioucmd);
 	/* queued async, consumer will call io_uring_cmd_done() when complete */
@@ -6656,6 +6681,7 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	case IORING_OP_LINKAT:
 		return io_linkat_prep(req, sqe);
 	case IORING_OP_URING_CMD:
+	case IORING_OP_URING_CMD_FIXED:
 		return io_uring_cmd_prep(req, sqe);
 	}
 
@@ -6941,6 +6967,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 		ret = io_linkat(req, issue_flags);
 		break;
 	case IORING_OP_URING_CMD:
+	case IORING_OP_URING_CMD_FIXED:
 		ret = io_uring_cmd(req, issue_flags);
 		break;
 	default:
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 1888a5ea7dbe..abad6175739e 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -8,6 +8,7 @@ enum io_uring_cmd_flags {
 	IO_URING_F_COMPLETE_DEFER	= 1,
 	IO_URING_F_UNLOCKED		= 2,
+	IO_URING_F_UCMD_FIXEDBUFS	= 4,
 	/* int's last bit, sign checks are usually faster than a bit test */
 	IO_URING_F_NONBLOCK		= INT_MIN,
 };
 
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 9bf1d6c0ed7f..ee84be4b6be8 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -155,6 +155,7 @@ enum {
 	IORING_OP_SYMLINKAT,
 	IORING_OP_LINKAT,
 	IORING_OP_URING_CMD,
+	IORING_OP_URING_CMD_FIXED,
 
 	/* this goes last, obviously */
 	IORING_OP_LAST,
-- 
2.25.1
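
For illustration only, a rough sketch of how userspace might drive the new
opcode. It assumes the extended 128-byte SQE layout used by this series
(the sqe->cmd_op, sqe->cmd_len and inline sqe->cmd fields that
io_uring_cmd_prep() reads above), a uapi header carrying
IORING_OP_URING_CMD_FIXED, and liburing only for ring setup and submission;
submit_fixed_cmd() and the zeroed cmd_op are placeholders, not anything
defined by this patch.

/*
 * Sketch, not part of the patch: submit one IORING_OP_URING_CMD_FIXED
 * request whose command payload lives in the big SQE and whose data
 * buffer is the registered buffer selected by buf_index.
 */
#include <errno.h>
#include <string.h>
#include <liburing.h>

static int submit_fixed_cmd(struct io_uring *ring, int fd,
			    const void *cmd, size_t cmd_len,
			    unsigned int buf_index)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -EAGAIN;

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_URING_CMD_FIXED;
	sqe->fd = fd;
	/* index into the table set up by io_uring_register_buffers() */
	sqe->buf_index = buf_index;
	/* driver-specific command; the cmd_op value here is a placeholder */
	sqe->cmd_op = 0;
	sqe->cmd_len = cmd_len;
	/* caller must ensure the payload fits in the 128-byte SQE */
	memcpy((void *)&sqe->cmd, cmd, cmd_len);

	ret = io_uring_submit(ring);
	if (ret < 0)
		return ret;
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return ret;
}

The ring has to be created with IORING_SETUP_SQE128 (io_uring_cmd_prep()
returns -EOPNOTSUPP otherwise), and buf_index must refer to a buffer
registered beforehand, e.g. via io_uring_register_buffers(); on the issue
side the index is validated against ctx->nr_user_bufs and the request fails
with -EFAULT if it is out of range.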