[PATCH 3/3] io_uring: add IORING_REGISTER_COPY_BUFFERS method

Buffers can be registered with io_uring, which allows skipping the
repeated pin_pages and unpin/unref of pages for each O_DIRECT operation.
This reduces the overhead of O_DIRECT IO.

However, registering buffers can take some time. Normally this isn't an
issue, as it's done at initialization time (and hence less critical), but
for cases where rings are created and destroyed as part of an IO thread
pool, registering the same buffers for multiple rings becomes a more
time-sensitive proposition. As an example, let's say an application has
an IO memory pool of 500G. Initial registration takes:

Got 500 huge pages (each 1024MB)
Registered 500 pages in 409 msec

or about 0.4 seconds. Going higher, registering 900 1GB huge pages
takes:

Registered 900 pages in 738 msec

which is, as expected, fully linear scaling.

Rather than have each ring pin/map/register the same buffer pool,
provide an io_uring_register(2) opcode to simply duplicate the buffers
that are registered with another ring. Adding the same 900GB of
registered buffers to the target ring can then be accomplished in:

Copied 900 pages in 17 usec

While the timing differs a bit from run to run, this provides roughly a
25,000-40,000x speedup for this use case.
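
For illustration only (not part of this patch), a freshly created ring
could be seeded from an existing one roughly like the sketch below,
assuming a kernel with this patch applied. The opcode value and struct
are duplicated locally in case the installed uapi headers don't carry
them yet, and copy_registered_buffers() is just a made-up helper name:

  #include <string.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/types.h>

  #ifndef IORING_REGISTER_COPY_BUFFERS
  #define IORING_REGISTER_COPY_BUFFERS 30
  struct io_uring_copy_buffers {
      __u32 src_fd;
      __u32 pad[7];
  };
  #endif

  /*
   * Duplicate the buffers registered with src_ring_fd into dst_ring_fd.
   * Returns 0 on success, or -1 with errno set by syscall(2), e.g.
   * EBUSY if the destination ring already has buffers registered.
   */
  static int copy_registered_buffers(int dst_ring_fd, int src_ring_fd)
  {
      struct io_uring_copy_buffers buf;

      memset(&buf, 0, sizeof(buf));   /* pad[] must be zero */
      buf.src_fd = src_ring_fd;

      /* nr_args must be 1, arg points at the struct above */
      return syscall(__NR_io_uring_register, dst_ring_fd,
                     IORING_REGISTER_COPY_BUFFERS, &buf, 1);
  }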

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 include/uapi/linux/io_uring.h |  8 ++++
 io_uring/register.c           |  6 +++
 io_uring/rsrc.c               | 86 +++++++++++++++++++++++++++++++++++
 io_uring/rsrc.h               |  1 +
 4 files changed, 101 insertions(+)

diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index a275f91d2ac0..7b15216a3d7f 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -609,6 +609,9 @@ enum io_uring_register_op {
 
 	IORING_REGISTER_CLOCK			= 29,
 
+	/* copy registered buffers from source ring to current ring */
+	IORING_REGISTER_COPY_BUFFERS		= 30,
+
 	/* this goes last */
 	IORING_REGISTER_LAST,
 
@@ -694,6 +697,11 @@ struct io_uring_clock_register {
 	__u32	__resv[3];
 };
 
+struct io_uring_copy_buffers {
+	__u32	src_fd;
+	__u32	pad[7];
+};
+
 struct io_uring_buf {
 	__u64	addr;
 	__u32	len;
diff --git a/io_uring/register.c b/io_uring/register.c
index 57cb85c42526..c8670de33343 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -542,6 +542,12 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			break;
 		ret = io_register_clock(ctx, arg);
 		break;
+	case IORING_REGISTER_COPY_BUFFERS:
+		ret = -EINVAL;
+		if (!arg || nr_args != 1)
+			break;
+		ret = io_register_copy_buffers(ctx, arg);
+		break;
 	default:
 		ret = -EINVAL;
 		break;
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 28f98de3c304..457492c6a329 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -1137,3 +1137,89 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
 
 	return 0;
 }
+
+/*
+ * Return with both ctx and src_ctx locked, locking the lowest valued ctx
+ * first to prevent deadlocks for the same operation with ctxs switched.
+ */
+static int lock_src_ctx(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx)
+{
+	if (ctx == src_ctx) {
+		return -EINVAL;
+	} else if (ctx > src_ctx) {
+		mutex_unlock(&ctx->uring_lock);
+		mutex_lock(&src_ctx->uring_lock);
+		mutex_lock(&ctx->uring_lock);
+	} else {
+		mutex_lock(&src_ctx->uring_lock);
+	}
+
+	return 0;
+}
+
+static int io_copy_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx)
+{
+	struct io_rsrc_data *data;
+	int i, ret, nbufs;
+
+	nbufs = src_ctx->nr_user_bufs;
+	if (!nbufs)
+		return -ENXIO;
+	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_BUFFER, NULL, nbufs, &data);
+	if (ret)
+		return ret;
+	ret = io_buffers_map_alloc(ctx, nbufs);
+	if (ret) {
+		io_rsrc_data_free(data);
+		return ret;
+	}
+
+	for (i = 0; i < nbufs; i++) {
+		struct io_mapped_ubuf *src = src_ctx->user_bufs[i];
+
+		refcount_inc(&src->refs);
+		ctx->user_bufs[i] = src;
+	}
+	ctx->buf_data = data;
+	ctx->nr_user_bufs = nbufs;
+	return 0;
+}
+
+/*
+ * Copy the registered buffers from the source ring, whose file descriptor
+ * is passed in src_fd, to the current ring. This is identical to registering
+ * the buffers with ctx directly, except faster as the mappings already exist.
+ *
+ * Since the memory is already accounted once, don't account it again.
+ */
+int io_register_copy_buffers(struct io_ring_ctx *ctx, void __user *arg)
+{
+	struct io_uring_copy_buffers buf;
+	struct io_ring_ctx *src_ctx;
+	struct fd f;
+	int ret;
+
+	if (ctx->user_bufs || ctx->nr_user_bufs)
+		return -EBUSY;
+	if (copy_from_user(&buf, arg, sizeof(buf)))
+		return -EFAULT;
+	if (memchr_inv(buf.pad, 0, sizeof(buf.pad)))
+		return -EINVAL;
+
+	f = fdget(buf.src_fd);
+	if (!f.file)
+		return -EBADF;
+	if (!io_is_uring_fops(f.file)) {
+		fdput(f);
+		return -EBADF;
+	}
+
+	src_ctx = f.file->private_data;
+	ret = lock_src_ctx(ctx, src_ctx);
+	if (!ret) {
+		ret = io_copy_buffers(ctx, src_ctx);
+		mutex_unlock(&src_ctx->uring_lock);
+	}
+	fdput(f);
+	return ret;
+}
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 98a253172c27..93546ab337a6 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -68,6 +68,7 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
 			   struct io_mapped_ubuf *imu,
 			   u64 buf_addr, size_t len);
 
+int io_register_copy_buffers(struct io_ring_ctx *ctx, void __user *arg);
 void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
 int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
 int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
-- 
2.45.2




