Support readv/writev with fixed buffers, and introduce IOSQE_FIXED_BUFFER,
consistent with fixed files.

Signed-off-by: Bijan Mottahedeh <bijan.mottahedeh@xxxxxxxxxx>
---
 fs/io_uring.c                 | 59 ++++++++++++++++++++++++++++++++++++++++---
 include/uapi/linux/io_uring.h |  3 +++
 2 files changed, 58 insertions(+), 4 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6020fd2..12c4144 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -625,6 +625,7 @@ enum {
 	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
 	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
 	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,
+	REQ_F_FIXED_BUFFER_BIT	= IOSQE_FIXED_BUFFER_BIT,
 
 	REQ_F_FAIL_LINK_BIT,
 	REQ_F_INFLIGHT_BIT,
@@ -681,8 +682,12 @@ enum {
 	REQ_F_WORK_INITIALIZED	= BIT(REQ_F_WORK_INITIALIZED_BIT),
 	/* linked timeout is active, i.e. prepared by link's head */
 	REQ_F_LTIMEOUT_ACTIVE	= BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
+	/* ctx owns buffer */
+	REQ_F_FIXED_BUFFER	= BIT(REQ_F_FIXED_BUFFER_BIT),
 };
 
+#define REQ_F_FIXED_RSRC	(REQ_F_FIXED_FILE | REQ_F_FIXED_BUFFER)
+
 struct async_poll {
 	struct io_poll_iocb	poll;
 	struct io_poll_iocb	*double_poll;
@@ -3191,6 +3196,46 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
 	return __io_iov_buffer_select(req, iov, needs_lock);
 }
 
+static ssize_t io_import_iovec_fixed(int rw, struct io_kiocb *req, void *buf,
+				     unsigned segs, unsigned fast_segs,
+				     struct iovec **iovec,
+				     struct iov_iter *iter)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_mapped_ubuf *imu;
+	struct iovec *iov;
+	u16 index, buf_index;
+	ssize_t ret;
+	unsigned long seg;
+
+	if (unlikely(!ctx->buf_data))
+		return -EFAULT;
+
+	ret = import_iovec(rw, buf, segs, fast_segs, iovec, iter);
+	if (ret < 0)
+		return ret;
+
+	iov = (struct iovec *)iter->iov;
+
+	for (seg = 0; seg < iter->nr_segs; seg++) {
+		buf_index = *(u16 *)(&iov[seg].iov_base);
+		if (unlikely(buf_index < 0 || buf_index >= ctx->nr_user_bufs))
+			return -EFAULT;
+
+		index = array_index_nospec(buf_index, ctx->nr_user_bufs);
+		imu = io_buf_from_index(ctx, index);
+		if (!imu->ubuf || !imu->len)
+			return -EFAULT;
+		if (iov[seg].iov_len > imu->len)
+			return -EFAULT;
+
+		iov[seg].iov_base = (void *)imu->ubuf;
+		ret += iov[seg].iov_len;
+	}
+
+	return ret;
+}
+
 static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 			       struct iovec **iovec, struct iov_iter *iter,
 			       bool needs_lock)
@@ -3201,6 +3246,12 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 	u8 opcode;
 
 	opcode = req->opcode;
+
+	if ((opcode == IORING_OP_READV || opcode == IORING_OP_WRITEV) &&
+	    req->flags & REQ_F_FIXED_BUFFER)
+		return (io_import_iovec_fixed(rw, req, buf, sqe_len,
+					      UIO_FASTIOV, iovec, iter));
+
 	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
 		*iovec = NULL;
 		return io_import_fixed(req, rw, iter);
@@ -5692,7 +5743,7 @@ static int io_timeout_remove_prep(struct io_kiocb *req,
 {
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
-	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
+	if (unlikely(req->flags & (REQ_F_FIXED_RSRC | REQ_F_BUFFER_SELECT)))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->timeout_flags)
 		return -EINVAL;
@@ -5867,7 +5918,7 @@ static int io_async_cancel_prep(struct io_kiocb *req,
 {
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
-	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
+	if (unlikely(req->flags & (REQ_F_FIXED_RSRC | REQ_F_BUFFER_SELECT)))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
 		return -EINVAL;
@@ -5889,7 +5940,7 @@ static int io_rsrc_update_prep(struct io_kiocb *req,
 {
 	if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
 		return -EINVAL;
-	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
+	if (unlikely(req->flags & (REQ_F_FIXED_RSRC | REQ_F_BUFFER_SELECT)))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->rw_flags)
 		return -EINVAL;
@@ -6740,7 +6791,7 @@ static inline bool io_check_restriction(struct io_ring_ctx *ctx,
 
 #define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK|	\
 				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
-				IOSQE_BUFFER_SELECT)
+				IOSQE_BUFFER_SELECT | IOSQE_FIXED_BUFFER)
 
 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		       const struct io_uring_sqe *sqe,
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 17682b5..41da59c 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -70,6 +70,7 @@ enum {
 	IOSQE_IO_HARDLINK_BIT,
 	IOSQE_ASYNC_BIT,
 	IOSQE_BUFFER_SELECT_BIT,
+	IOSQE_FIXED_BUFFER_BIT,
 };
 
 /*
@@ -87,6 +88,8 @@ enum {
 #define IOSQE_ASYNC		(1U << IOSQE_ASYNC_BIT)
 /* select buffer from sqe->buf_group */
 #define IOSQE_BUFFER_SELECT	(1U << IOSQE_BUFFER_SELECT_BIT)
+/* use fixed buffer set */
+#define IOSQE_FIXED_BUFFER	(1U << IOSQE_FIXED_BUFFER_BIT)
 
 /*
  * io_uring_setup() flags
-- 
1.8.3.1
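
For reference, a minimal userspace sketch of the intended usage follows. It is
not part of the patch and is untested; it assumes this patch is applied, that
liburing is available, and that IOSQE_FIXED_BUFFER is visible to userspace via
the updated uapi header. The file path, buffer size, and variable names are
illustrative only. Buffers are registered as usual, and each iovec of the
readv then carries a registered-buffer index in iov_base and a transfer length
in iov_len, with IOSQE_FIXED_BUFFER set on the sqe so io_import_iovec_fixed()
resolves the indices against the registered buffer table.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <liburing.h>

#define BUF_SIZE 4096

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec reg[2], vecs[2];
	int fd, i;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* Register two fixed buffers; they get indices 0 and 1. */
	for (i = 0; i < 2; i++) {
		reg[i].iov_base = malloc(BUF_SIZE);
		reg[i].iov_len = BUF_SIZE;
	}
	if (io_uring_register_buffers(&ring, reg, 2) < 0)
		return 1;

	fd = open("testfile", O_RDONLY);	/* illustrative path */
	if (fd < 0)
		return 1;

	/*
	 * With IOSQE_FIXED_BUFFER, iov_base carries the fixed-buffer
	 * index (the kernel reads it as a u16) and iov_len the number
	 * of bytes to transfer into that registered buffer.
	 */
	for (i = 0; i < 2; i++) {
		vecs[i].iov_base = (void *)(unsigned long)i;
		vecs[i].iov_len = BUF_SIZE;
	}

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_readv(sqe, fd, vecs, 2, 0);
	sqe->flags |= IOSQE_FIXED_BUFFER;	/* flag added by this patch */

	io_uring_submit(&ring);
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("readv: %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}

Note that each iov_len must not exceed the registered length of the buffer it
indexes, per the imu->len check in io_import_iovec_fixed() above.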