On 2/18/25 22:42, Keith Busch wrote:
> From: Keith Busch <kbusch@xxxxxxxxxx>
>
> Similar to the fixed file path, requests may depend on a previous one
> to set up an index, so we need to allow linking them. The prep callback
> happens too soon for linked commands, so the lookup needs to be deferred
> to the issue path. Change the prep callbacks to just set the buf_index
> and let generic io_uring code handle the fixed buffer node setup, just
> like it already does for fixed files.
>
> Signed-off-by: Keith Busch <kbusch@xxxxxxxxxx>
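
To spell out what that means in opcode terms, a rough sketch (foo_prep_fixed()
is a made-up example for illustration, not code from the patch):

static int foo_prep_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	/*
	 * Prep only records the index, same as it would for a fixed file;
	 * the buf_table lookup is deferred to the issue path, by which
	 * point any linked request that populates the slot has completed.
	 */
	req->buf_index = READ_ONCE(sqe->buf_index);
	return 0;
}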
It wasn't great before, and it'd be even harder to follow if we shove
the lookup into the issue path like that. It also adds overhead to the
common path, and it's not very flexible, e.g. for the notification
problem and for what we'll need out of it for other features.

We're better off removing the lookup vs import split, like below.
Here is a branch, let's do it on top:
https://github.com/isilence/linux.git regbuf-import
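
For opcode code the end result is a single helper call at issue time,
roughly like this (foo_issue() and struct foo are made up for
illustration; the real conversions are in the hunks below):

static int foo_issue(struct io_kiocb *req, unsigned int issue_flags)
{
	struct foo *f = io_kiocb_to_cmd(req, struct foo);
	struct iov_iter iter;
	int ret;

	/* looks up req->buf_index (or reuses a cached node) and imports */
	ret = io_import_reg_buf(req, &iter, f->addr, f->len,
				ITER_SOURCE, issue_flags);
	if (unlikely(ret))
		return ret;
	/* ... do the actual transfer with &iter ... */
	return 0;
}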
diff --git a/io_uring/net.c b/io_uring/net.c
index ce0a39972cce..322cf023233a 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -1360,24 +1360,10 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
int ret;
if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
- struct io_ring_ctx *ctx = req->ctx;
- struct io_rsrc_node *node;
-
- ret = -EFAULT;
- io_ring_submit_lock(ctx, issue_flags);
- node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
- if (node) {
- io_req_assign_buf_node(sr->notif, node);
- ret = 0;
- }
- io_ring_submit_unlock(ctx, issue_flags);
-
- if (unlikely(ret))
- return ret;
-
- ret = io_import_fixed(ITER_SOURCE, &kmsg->msg.msg_iter,
- node->buf, (u64)(uintptr_t)sr->buf,
- sr->len);
+ sr->notif->buf_index = req->buf_index;
+ ret = io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter,
+ (u64)(uintptr_t)sr->buf, sr->len,
+ ITER_SOURCE, issue_flags);
if (unlikely(ret))
return ret;
kmsg->msg.sg_from_iter = io_sg_from_iter;
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index af39b69eb4fd..a4557ed14bea 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -860,9 +860,9 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
return ret;
}
-int io_import_fixed(int ddir, struct iov_iter *iter,
- struct io_mapped_ubuf *imu,
- u64 buf_addr, size_t len)
+static int io_import_fixed_imu(int ddir, struct iov_iter *iter,
+ struct io_mapped_ubuf *imu,
+ u64 buf_addr, size_t len)
{
u64 buf_end;
size_t offset;
@@ -919,6 +919,35 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
return 0;
}
+static inline struct io_rsrc_node *io_find_buf_node(struct io_kiocb *req,
+ unsigned issue_flags)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_rsrc_node *node;
+
+ if (req->buf_node)
+ return req->buf_node;
+
+ io_ring_submit_lock(ctx, issue_flags);
+ node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
+ if (node)
+ io_req_assign_buf_node(req, node);
+ io_ring_submit_unlock(ctx, issue_flags);
+ return node;
+}
+
+int io_import_reg_buf(struct io_kiocb *req, struct iov_iter *iter,
+ u64 buf_addr, size_t len, int ddir,
+ unsigned issue_flags)
+{
+ struct io_rsrc_node *node;
+
+ node = io_find_buf_node(req, issue_flags);
+ if (!node)
+ return -EFAULT;
+ return io_import_fixed_imu(ddir, iter, node->buf, buf_addr, len);
+}
+
/* Lock two rings at once. The rings must be different! */
static void lock_two_rings(struct io_ring_ctx *ctx1, struct io_ring_ctx *ctx2)
{
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index a6d883c62b22..ce199eb0ac9f 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -50,9 +50,9 @@ void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node);
void io_rsrc_data_free(struct io_ring_ctx *ctx, struct io_rsrc_data *data);
int io_rsrc_data_alloc(struct io_rsrc_data *data, unsigned nr);
-int io_import_fixed(int ddir, struct iov_iter *iter,
- struct io_mapped_ubuf *imu,
- u64 buf_addr, size_t len);
+int io_import_reg_buf(struct io_kiocb *req, struct iov_iter *iter,
+ u64 buf_addr, size_t len, int ddir,
+ unsigned issue_flags);
int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg);
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 16f12f94943f..31a1f15f5f01 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -354,8 +354,6 @@ static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe
int ddir)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
- struct io_ring_ctx *ctx = req->ctx;
- struct io_rsrc_node *node;
struct io_async_rw *io;
int ret;
@@ -363,13 +361,8 @@ static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe
if (unlikely(ret))
return ret;
- node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
- if (!node)
- return -EFAULT;
- io_req_assign_buf_node(req, node);
-
io = req->async_data;
- ret = io_import_fixed(ddir, &io->iter, node->buf, rw->addr, rw->len);
+ ret = io_import_reg_buf(req, &io->iter, rw->addr, rw->len, ddir, 0);
iov_iter_save_state(&io->iter, &io->iter_state);
return ret;
}
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index cf5e9822e49a..80046e755211 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -199,21 +199,9 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (ioucmd->flags & ~IORING_URING_CMD_MASK)
return -EINVAL;
- if (ioucmd->flags & IORING_URING_CMD_FIXED) {
- struct io_ring_ctx *ctx = req->ctx;
- struct io_rsrc_node *node;
- u16 index = READ_ONCE(sqe->buf_index);
-
- node = io_rsrc_node_lookup(&ctx->buf_table, index);
- if (unlikely(!node))
- return -EFAULT;
- /*
- * Pi node upfront, prior to io_uring_cmd_import_fixed()
- * being called. This prevents destruction of the mapped buffer
- * we'll need at actual import time.
- */
- io_req_assign_buf_node(req, node);
- }
+ if (ioucmd->flags & IORING_URING_CMD_FIXED)
+ req->buf_index = READ_ONCE(sqe->buf_index);
+
ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
return io_uring_cmd_prep_setup(req, sqe);
@@ -261,13 +249,8 @@ int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
unsigned int issue_flags)
{
struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
- struct io_rsrc_node *node = req->buf_node;
-
- /* Must have had rsrc_node assigned at prep time */
- if (node)
- return io_import_fixed(rw, iter, node->buf, ubuf, len);
- return -EFAULT;
+ return io_import_reg_buf(req, iter, ubuf, len, rw, issue_flags);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
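
On the driver side, io_uring_cmd_import_fixed() is now where both the
node lookup and the import happen, so it has to be called from the issue
path with the issue_flags passed through. A rough ->uring_cmd() sketch
with made-up names (foo_uring_cmd(), struct foo_cmd):

static int foo_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	const struct foo_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
	struct iov_iter iter;
	int ret;

	/* node lookup and import both happen here, at issue time */
	ret = io_uring_cmd_import_fixed(cmd->addr, cmd->len, ITER_DEST,
					&iter, ioucmd, issue_flags);
	if (ret)
		return ret;
	/* ... hand &iter off to the actual I/O ... */
	return 0;
}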
--
Pavel Begunkov