Apply fixed_rsrc functionality for fixed buffers support.

Store the registered buffers (struct io_mapped_ubuf) in fixed_rsrc_data
tables, mirroring the fixed file implementation, and tear them down
through a fixed_rsrc_ref_node so that unregistration quiesces
outstanding references before the pages are unpinned.

Signed-off-by: Bijan Mottahedeh <bijan.mottahedeh@xxxxxxxxxx>
---
 fs/io_uring.c | 221 ++++++++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 183 insertions(+), 38 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 416c350..2f02e11 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -104,6 +104,14 @@
 #define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
 				 IORING_REGISTER_LAST + IORING_OP_LAST)
 
+/*
+ * Shift of 7 is 128 entries, or exactly one page on 64-bit archs
+ */
+#define IORING_BUF_TABLE_SHIFT	7	/* struct io_mapped_ubuf */
+#define IORING_MAX_BUFS_TABLE	(1U << IORING_BUF_TABLE_SHIFT)
+#define IORING_BUF_TABLE_MASK	(IORING_MAX_BUFS_TABLE - 1)
+#define IORING_MAX_FIXED_BUFS	UIO_MAXIOV
+
 struct io_uring {
 	u32 head ____cacheline_aligned_in_smp;
 	u32 tail ____cacheline_aligned_in_smp;
@@ -202,11 +210,15 @@ struct io_rsrc_put {
 	union {
 		void *rsrc;
 		struct file *file;
+		struct io_mapped_ubuf *buf;
 	};
 };
 
 struct fixed_rsrc_table {
-	struct file **files;
+	union {
+		struct file **files;
+		struct io_mapped_ubuf *bufs;
+	};
 };
 
 struct fixed_rsrc_ref_node {
@@ -333,8 +345,8 @@ struct io_ring_ctx {
 	unsigned nr_user_files;
 
 	/* if used, fixed mapped user buffers */
+	struct fixed_rsrc_data *buf_data;
 	unsigned nr_user_bufs;
-	struct io_mapped_ubuf *user_bufs;
 
 	struct user_struct *user;
 
@@ -1015,6 +1027,8 @@ static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
 			struct io_ring_ctx *ctx);
 static void init_fixed_file_ref_node(struct io_ring_ctx *ctx,
 			struct fixed_rsrc_ref_node *ref_node);
+static void init_fixed_buf_ref_node(struct io_ring_ctx *ctx,
+			struct fixed_rsrc_ref_node *ref_node);
 
 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
 			     struct io_comp_state *cs);
@@ -2988,6 +3002,15 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 		io_rw_done(kiocb, ret);
 }
 
+static inline struct io_mapped_ubuf *io_buf_from_index(struct io_ring_ctx *ctx,
+							int index)
+{
+	struct fixed_rsrc_table *table;
+
+	table = &ctx->buf_data->table[index >> IORING_BUF_TABLE_SHIFT];
+	return &table->bufs[index & IORING_BUF_TABLE_MASK];
+}
+
 static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
 			       struct iov_iter *iter)
 {
@@ -3001,7 +3024,7 @@ static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
 	if (unlikely(buf_index >= ctx->nr_user_bufs))
 		return -EFAULT;
 	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
-	imu = &ctx->user_bufs[index];
+	imu = io_buf_from_index(ctx, index);
 	buf_addr = req->rw.addr;
 
 	/* overflow */
@@ -6086,7 +6109,7 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 	printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
 			req->opcode);
-	return-EINVAL;
+	return -EINVAL;
 }
 
 static int io_req_defer_prep(struct io_kiocb *req,
@@ -8391,28 +8414,66 @@ static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
 	return pages;
 }
 
-static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
+static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf *imu)
 {
-	int i, j;
+	unsigned int i;
 
-	if (!ctx->user_bufs)
-		return -ENXIO;
+	for (i = 0; i < imu->nr_bvecs; i++)
+		unpin_user_page(imu->bvec[i].bv_page);
 
-	for (i = 0; i < ctx->nr_user_bufs; i++) {
-		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
+	if (imu->acct_pages)
+		io_unaccount_mem(ctx, imu->acct_pages, ACCT_PINNED);
+	kvfree(imu->bvec);
+	imu->nr_bvecs = 0;
+}
 
-		for (j = 0; j < imu->nr_bvecs; j++)
-			unpin_user_page(imu->bvec[j].bv_page);
+static void io_buffers_unmap(struct io_ring_ctx *ctx)
+{
+	unsigned int i;
+	struct io_mapped_ubuf *imu;
 
-		if (imu->acct_pages)
-			io_unaccount_mem(ctx, imu->acct_pages, ACCT_PINNED);
-		kvfree(imu->bvec);
-		imu->nr_bvecs = 0;
+	for (i = 0; i < ctx->nr_user_bufs; i++) {
+		imu = io_buf_from_index(ctx, i);
+		io_buffer_unmap(ctx, imu);
 	}
+}
 
-	kfree(ctx->user_bufs);
-	ctx->user_bufs = NULL;
+static void io_buffers_map_free(struct io_ring_ctx *ctx)
+{
+	struct fixed_rsrc_data *data = ctx->buf_data;
+	unsigned int nr_tables, i;
+
+	if (!data)
+		return;
+
+	nr_tables = DIV_ROUND_UP(ctx->nr_user_bufs, IORING_MAX_BUFS_TABLE);
+	for (i = 0; i < nr_tables; i++)
+		kfree(data->table[i].bufs);
+	free_fixed_rsrc_data(data);
+	ctx->buf_data = NULL;
 	ctx->nr_user_bufs = 0;
+}
+
+static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
+{
+	struct fixed_rsrc_data *data = ctx->buf_data;
+	struct fixed_rsrc_ref_node *backup_node;
+	int ret;
+
+	if (!data)
+		return -ENXIO;
+	backup_node = alloc_fixed_rsrc_ref_node(ctx);
+	if (!backup_node)
+		return -ENOMEM;
+	init_fixed_buf_ref_node(ctx, backup_node);
+
+	ret = io_rsrc_ref_quiesce(data, ctx, backup_node);
+	if (ret)
+		return ret;
+
+	io_buffers_unmap(ctx);
+	io_buffers_map_free(ctx);
 	return 0;
 }
 
@@ -8465,7 +8526,9 @@ static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
 
 	/* check previously registered pages */
 	for (i = 0; i < ctx->nr_user_bufs; i++) {
-		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
+		struct io_mapped_ubuf *imu;
+
+		imu = io_buf_from_index(ctx, i);
 
 		for (j = 0; j < imu->nr_bvecs; j++) {
 			if (!PageCompound(imu->bvec[j].bv_page))
@@ -8600,19 +8663,66 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 	return ret;
 }
 
-static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
+static void io_free_buf_tables(struct fixed_rsrc_data *buf_data,
+			       unsigned int nr_tables)
 {
-	if (ctx->user_bufs)
-		return -EBUSY;
-	if (!nr_args || nr_args > UIO_MAXIOV)
-		return -EINVAL;
+	int i;
 
-	ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
-					GFP_KERNEL);
-	if (!ctx->user_bufs)
-		return -ENOMEM;
+	for (i = 0; i < nr_tables; i++) {
+		struct fixed_rsrc_table *table = &buf_data->table[i];
 
-	return 0;
+		kfree(table->bufs);
+	}
+}
+
+static int io_alloc_buf_tables(struct fixed_rsrc_data *buf_data,
+			       unsigned int nr_tables, unsigned int nr_bufs)
+{
+	int i;
+
+	for (i = 0; i < nr_tables; i++) {
+		struct fixed_rsrc_table *table = &buf_data->table[i];
+		unsigned int this_bufs;
+
+		this_bufs = min(nr_bufs, IORING_MAX_BUFS_TABLE);
+		table->bufs = kcalloc(this_bufs, sizeof(struct io_mapped_ubuf),
+				      GFP_KERNEL);
+		if (!table->bufs)
+			break;
+		nr_bufs -= this_bufs;
+	}
+
+	if (i == nr_tables)
+		return 0;
+
+	io_free_buf_tables(buf_data, i);
+	return 1;
+}
+
+static struct fixed_rsrc_data *io_buffers_map_alloc(struct io_ring_ctx *ctx,
+						    unsigned int nr_args)
+{
+	unsigned int nr_tables;
+	struct fixed_rsrc_data *buf_data;
+
+	buf_data = alloc_fixed_rsrc_data(ctx);
+	if (!buf_data)
+		return NULL;
+
+	nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_BUFS_TABLE);
+	buf_data->table = kcalloc(nr_tables, sizeof(*buf_data->table),
+				  GFP_KERNEL);
+	if (!buf_data->table)
+		goto out;
+
+	if (io_alloc_buf_tables(buf_data, nr_tables, nr_args))
+		goto out;
+
+	return buf_data;
+out:
+	free_fixed_rsrc_data(buf_data);
+	ctx->buf_data = NULL;
+	return NULL;
 }
 
 static int io_buffer_validate(struct iovec *iov)
@@ -8632,39 +8742,73 @@ static int io_buffer_validate(struct iovec *iov)
 	return 0;
 }
 
+static void io_ring_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
+{
+	io_buffer_unmap(ctx, prsrc->buf);
+}
+
+static void init_fixed_buf_ref_node(struct io_ring_ctx *ctx,
+				    struct fixed_rsrc_ref_node *ref_node)
+{
+	ref_node->rsrc_data = ctx->buf_data;
+	ref_node->rsrc_put = io_ring_buf_put;
+}
+
 static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
 				   unsigned int nr_args)
 {
 	int i, ret;
 	struct iovec iov;
 	struct page *last_hpage = NULL;
+	struct fixed_rsrc_ref_node *ref_node;
+	struct fixed_rsrc_data *buf_data;
 
-	ret = io_buffers_map_alloc(ctx, nr_args);
-	if (ret)
-		return ret;
+	if (ctx->buf_data)
+		return -EBUSY;
+	if (!nr_args || nr_args > IORING_MAX_FIXED_BUFS)
+		return -EINVAL;
 
-	for (i = 0; i < nr_args; i++) {
-		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
+	buf_data = io_buffers_map_alloc(ctx, nr_args);
+	if (!buf_data)
+		return -ENOMEM;
+	ctx->buf_data = buf_data;
+
+	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
+		struct io_mapped_ubuf *imu;
 
 		ret = io_copy_iov(ctx, &iov, arg, i);
 		if (ret)
			break;
 
+		/* allow sparse sets */
+		if (!iov.iov_base && !iov.iov_len)
+			continue;
+
 		ret = io_buffer_validate(&iov);
 		if (ret)
 			break;
 
+		imu = io_buf_from_index(ctx, i);
+
 		ret = io_sqe_buffer_register(ctx, &iov, imu, &last_hpage);
 		if (ret)
 			break;
+	}
 
-		ctx->nr_user_bufs++;
+	if (ret) {
+		io_sqe_buffers_unregister(ctx);
+		return ret;
 	}
 
-	if (ret)
+	ref_node = alloc_fixed_rsrc_ref_node(ctx);
+	if (!ref_node) {
 		io_sqe_buffers_unregister(ctx);
+		return -ENOMEM;
+	}
+	init_fixed_buf_ref_node(ctx, ref_node);
 
-	return ret;
+	io_sqe_rsrc_set_node(ctx, buf_data, ref_node);
+	return 0;
 }
 
 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
@@ -9508,7 +9652,7 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
 	}
 	seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
 	for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
-		struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
+		struct io_mapped_ubuf *buf = io_buf_from_index(ctx, i);
 
 		seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
 						(unsigned int) buf->len);
@@ -10025,6 +10169,7 @@ static bool io_register_op_must_quiesce(int op)
 	switch (op) {
 	case IORING_UNREGISTER_FILES:
 	case IORING_REGISTER_FILES_UPDATE:
+	case IORING_UNREGISTER_BUFFERS:
 	case IORING_REGISTER_PROBE:
 	case IORING_REGISTER_PERSONALITY:
 	case IORING_UNREGISTER_PERSONALITY:
-- 
1.8.3.1
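
For reviewers, a minimal userspace sketch of the path this patch reworks on the
kernel side. It uses liburing and is not part of the patch; the file name and
buffer size are arbitrary. io_uring_register_buffers() ends up in
io_sqe_buffers_register(), READ_FIXED resolves the buffer through
io_import_fixed()/io_buf_from_index(), and io_uring_unregister_buffers() goes
through the new quiesce path in io_sqe_buffers_unregister():

/* Example only -- not part of this patch. Registers one fixed buffer,
 * reads into it with IORING_OP_READ_FIXED, then unregisters. */
#include <liburing.h>
#include <sys/uio.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov;
	int fd, ret;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;

	/* Pinned once at registration; stored in the per-ctx buffer tables. */
	iov.iov_len = 4096;
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base || io_uring_register_buffers(&ring, &iov, 1))
		return 1;

	fd = open("/etc/hostname", O_RDONLY);	/* arbitrary test file */
	if (fd < 0)
		return 1;

	/* buf_index 0 selects the registered buffer; no per-I/O pinning. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, iov.iov_len, 0, 0);
	io_uring_submit(&ring);

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret && cqe->res >= 0)
		printf("read %d bytes\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);

	/* Unregister waits for outstanding refs via the rsrc quiesce path. */
	io_uring_unregister_buffers(&ring);
	io_uring_queue_exit(&ring);
	return 0;
}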