io_send_zc() guards its call to io_send_zc_import() with if (!done_io)
in an attempt to avoid calling it redundantly on the same req. However,
if the initial non-blocking issue returns -EAGAIN, done_io (which
counts the bytes already transferred) will stay 0, so the subsequent
issue unnecessarily re-imports the buffer.

Add an explicit flag "imported" to io_sr_msg to track whether its
buffer has already been imported. Clear the flag in io_send_zc_prep().
In io_send_zc(), call io_send_zc_import() and set the flag if it is
unset.

Signed-off-by: Caleb Sander Mateos <csander@xxxxxxxxxxxxxxx>
Fixes: 54cdcca05abd ("io_uring/net: switch io_send() and io_send_zc() to using io_async_msghdr")
---
 io_uring/net.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/io_uring/net.c b/io_uring/net.c
index 6d13d378358b..a29893d567b8 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -74,10 +74,11 @@ struct io_sr_msg {
 	unsigned			nr_multishot_loops;
 	u16				flags;
 	/* initialised and used only by !msg send variants */
 	u16				buf_group;
 	bool				retry;
+	bool				imported; /* only for io_send_zc */
 	void __user			*msg_control;
 	/* used only for send zerocopy */
 	struct io_kiocb			*notif;
 };
 
@@ -1222,10 +1223,11 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_kiocb *notif;
 
 	zc->done_io = 0;
 	zc->retry = false;
+	zc->imported = false;
 	req->flags |= REQ_F_POLL_NO_LAZY;
 
 	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
 		return -EINVAL;
 	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
@@ -1369,11 +1371,12 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (!(req->flags & REQ_F_POLLED) &&
 	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
 		return -EAGAIN;
 
-	if (!zc->done_io) {
+	if (!zc->imported) {
+		zc->imported = true;
 		ret = io_send_zc_import(req, issue_flags);
 		if (unlikely(ret))
 			return ret;
 	}
 
-- 
2.45.2
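
For reference, the difference between the two guards can be modeled
with a short userspace sketch. This is purely illustrative: struct
sr_msg, import_buffer(), issue_old()/issue_new(), and the imports
counter are hypothetical stand-ins for io_sr_msg, io_send_zc_import(),
and io_send_zc(), not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the io_sr_msg fields io_send_zc() consults. */
struct sr_msg {
	unsigned done_io;	/* bytes already transferred */
	bool imported;		/* the flag this patch adds */
};

static int imports;	/* how many times the buffer was imported */

static void import_buffer(void)
{
	imports++;	/* models io_send_zc_import() */
}

/* Old guard: keyed off bytes transferred. */
static void issue_old(struct sr_msg *zc)
{
	if (!zc->done_io)
		import_buffer();
	/* the send returns -EAGAIN here, so done_io stays 0 */
}

/* New guard: keyed off a dedicated flag. */
static void issue_new(struct sr_msg *zc)
{
	if (!zc->imported) {
		zc->imported = true;
		import_buffer();
	}
}

int main(void)
{
	struct sr_msg zc = { 0 };

	issue_old(&zc);	/* initial non-blocking issue hits -EAGAIN */
	issue_old(&zc);	/* retry re-imports: done_io is still 0 */
	printf("old guard: %d imports\n", imports);	/* prints 2 */

	zc = (struct sr_msg){ 0 };
	imports = 0;
	issue_new(&zc);	/* first issue imports and sets the flag */
	issue_new(&zc);	/* retry sees imported == true and skips */
	printf("new guard: %d imports\n", imports);	/* prints 1 */
	return 0;
}

With the flag, a second issue after -EAGAIN goes straight to the send
instead of repeating the import.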