Re: [PATCH 3/6] io_uring: add support for kernel registered bvecs

On Mon, Feb 03, 2025 at 07:45:14AM -0800, Keith Busch wrote:
> From: Keith Busch <kbusch@xxxxxxxxxx>
> 
> Provide an interface for the kernel to leverage the existing
> pre-registered buffers that io_uring provides. User space can reference
> these later to achieve zero-copy IO.
> 
> User space must register an empty fixed buffer table with io_uring in
> order for the kernel to make use of it.
> 
> Signed-off-by: Keith Busch <kbusch@xxxxxxxxxx>
> ---
>  include/linux/io_uring.h       |   1 +
>  include/linux/io_uring_types.h |   3 +
>  io_uring/rsrc.c                | 114 +++++++++++++++++++++++++++++++--
>  io_uring/rsrc.h                |   1 +
>  4 files changed, 114 insertions(+), 5 deletions(-)
> 
> diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
> index 85fe4e6b275c7..b5637a2aae340 100644
> --- a/include/linux/io_uring.h
> +++ b/include/linux/io_uring.h
> @@ -5,6 +5,7 @@
>  #include <linux/sched.h>
>  #include <linux/xarray.h>
>  #include <uapi/linux/io_uring.h>
> +#include <linux/blk-mq.h>
>  
>  #if defined(CONFIG_IO_URING)
>  void __io_uring_cancel(bool cancel_all);
> diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
> index 623d8e798a11a..7e5a5a70c35f2 100644
> --- a/include/linux/io_uring_types.h
> +++ b/include/linux/io_uring_types.h
> @@ -695,4 +695,7 @@ static inline bool io_ctx_cqe32(struct io_ring_ctx *ctx)
>  	return ctx->flags & IORING_SETUP_CQE32;
>  }
>  
> +int io_buffer_register_bvec(struct io_ring_ctx *ctx, const struct request *rq, unsigned int tag);
> +void io_buffer_unregister_bvec(struct io_ring_ctx *ctx, unsigned int tag);
> +
>  #endif
> diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
> index 4d0e1c06c8bc6..8c4c374abcc10 100644
> --- a/io_uring/rsrc.c
> +++ b/io_uring/rsrc.c
> @@ -111,7 +111,10 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
>  		if (!refcount_dec_and_test(&imu->refs))
>  			return;
>  		for (i = 0; i < imu->nr_bvecs; i++)
> -			unpin_user_page(imu->bvec[i].bv_page);
> +			if (node->type == IORING_RSRC_KBUF)
> +				put_page(imu->bvec[i].bv_page);
> +			else
> +				unpin_user_page(imu->bvec[i].bv_page);
>  		if (imu->acct_pages)
>  			io_unaccount_mem(ctx, imu->acct_pages);
>  		kvfree(imu);
> @@ -240,6 +243,13 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
>  		struct io_rsrc_node *node;
>  		u64 tag = 0;
>  
> +		i = array_index_nospec(up->offset + done, ctx->buf_table.nr);
> +		node = io_rsrc_node_lookup(&ctx->buf_table, i);
> +		if (node && node->type != IORING_RSRC_BUFFER) {
> +			err = -EBUSY;
> +			break;
> +		}
> +
>  		uvec = u64_to_user_ptr(user_data);
>  		iov = iovec_from_user(uvec, 1, 1, &fast_iov, ctx->compat);
>  		if (IS_ERR(iov)) {
> @@ -258,6 +268,7 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
>  			err = PTR_ERR(node);
>  			break;
>  		}
> +
>  		if (tag) {
>  			if (!node) {
>  				err = -EINVAL;
> @@ -265,7 +276,6 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
>  			}
>  			node->tag = tag;
>  		}
> -		i = array_index_nospec(up->offset + done, ctx->buf_table.nr);
>  		io_reset_rsrc_node(ctx, &ctx->buf_table, i);
>  		ctx->buf_table.nodes[i] = node;
>  		if (ctx->compat)
> @@ -453,6 +463,7 @@ void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
>  			fput(io_slot_file(node));
>  		break;
>  	case IORING_RSRC_BUFFER:
> +	case IORING_RSRC_KBUF:
>  		if (node->buf)
>  			io_buffer_unmap(ctx, node);
>  		break;
> @@ -860,6 +871,92 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
>  	return ret;
>  }
>  
> +static struct io_rsrc_node *io_buffer_alloc_node(struct io_ring_ctx *ctx,
> +						 unsigned int nr_bvecs,
> +						 unsigned int len)
> +{
> +	struct io_mapped_ubuf *imu;
> +	struct io_rsrc_node *node;
> +
> +	node = io_rsrc_node_alloc(IORING_RSRC_KBUF);
> +	if (!node)
> +		return NULL;
> +
> +	imu = kvmalloc(struct_size(imu, bvec, nr_bvecs), GFP_KERNEL);
> +	if (!imu) {
> +		io_put_rsrc_node(ctx, node);
> +		return NULL;
> +	}
> +
> +	imu->ubuf = 0;
> +	imu->len = len;
> +	imu->acct_pages = 0;
> +	imu->nr_bvecs = nr_bvecs;
> +	refcount_set(&imu->refs, 1);
> +
> +	node->buf = imu;
> +	return node;
> +}
> +
> +int io_buffer_register_bvec(struct io_ring_ctx *ctx, const struct request *rq,
> +			    unsigned int index)
> +{
> +	struct io_rsrc_data *data = &ctx->buf_table;
> +	u16 nr_bvecs = blk_rq_nr_phys_segments(rq);
> +	struct req_iterator rq_iter;
> +	struct io_rsrc_node *node;
> +	struct bio_vec bv;
> +	int i = 0;
> +
> +	lockdep_assert_held(&ctx->uring_lock);
> +
> +	if (WARN_ON_ONCE(!data->nr))
> +		return -EINVAL;
> +	if (WARN_ON_ONCE(index >= data->nr))
> +		return -EINVAL;
> +
> +	node = data->nodes[index];
> +	if (WARN_ON_ONCE(node))
> +		return -EBUSY;
> +
> +	node = io_buffer_alloc_node(ctx, nr_bvecs, blk_rq_bytes(rq));
> +	if (!node)
> +		return -ENOMEM;
> +
> +	rq_for_each_bvec(bv, rq, rq_iter) {
> +		get_page(bv.bv_page);
> +		node->buf->bvec[i].bv_page = bv.bv_page;
> +		node->buf->bvec[i].bv_len = bv.bv_len;
> +		node->buf->bvec[i].bv_offset = bv.bv_offset;
> +		i++;

In this patchset the ublk request buffer may span multiple uring OPs, so
there is no way to prevent a buggy application from completing the IO
command and the ublk request before the io_uring read/write OP that uses
the buffer/pages has completed.

That is presumably why a page reference is taken here, but it means the
bvec page lifetime is no longer aligned with the request lifetime from the
block layer's point of view.
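
To spell out the ordering, a driver using these helpers would look roughly
like the sketch below (hypothetical ublk-style code, not from this series;
both calls need ctx->uring_lock held, per the lockdep asserts):

#include <linux/blk-mq.h>
#include <linux/io_uring_types.h>

/* takes a get_page() reference on every bvec page of @rq */
static int ublk_zc_start(struct io_ring_ctx *ctx, struct request *rq,
			 unsigned int index)
{
	return io_buffer_register_bvec(ctx, rq, index);
}

static void ublk_zc_finish(struct io_ring_ctx *ctx, struct request *rq,
			   unsigned int index)
{
	/*
	 * This drops the buf_table reference, but the pages are only put
	 * once the last in-flight fixed OP using @index releases the imu.
	 * If a buggy application completes the ublk command (and hence @rq)
	 * before that point, the block layer sees the request finished
	 * while io_uring still holds references to its bvec pages.
	 */
	io_buffer_unregister_bvec(ctx, index);
	blk_mq_end_request(rq, BLK_STS_OK);
}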

I am not sure this approach is safe:

1) For current block storage drivers, ownership of all of a request's bvec
pages is transferred back to the upper layer (FS, application, ...) when
the request is completed, but that no longer holds for ublk zero copy with
this patchset.

2) BIO_PAGE_PINNED may not be set on the bio, so the upper layer may assume
that the bvec pages can be reused or reclaimed as soon as this ublk bio
completes.
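
For comparison, on the submitter side the usual convention is that page
references are only dropped at bio completion when the block layer itself
pinned the pages. Roughly (a sketch of the common direct-IO pattern, not
exact code from any driver):

#include <linux/bio.h>

static void example_dio_end_io(struct bio *bio)
{
	/*
	 * bio_release_pages() only unpins pages when BIO_PAGE_PINNED is
	 * set, i.e. when they were pinned via iov_iter_extract_pages().
	 * Without that flag nothing here holds the pages, so the submitter
	 * assumes it can reuse or reclaim them as soon as the bio completes
	 * -- the assumption that ublk zero copy would now violate.
	 */
	bio_release_pages(bio, false);
	bio_put(bio);
}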



Thanks,
Ming




