After copying a send/recv msg header in io_setup_async_msg(), fix up the
self-referential fields (msg_name and the iter's iov pointer) right away
instead of delaying it until the request is reissued. Keeping the fixup
in one place makes it easier to follow.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 32 +++++++++++++------------------
 1 file changed, 13 insertions(+), 19 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 365a583033c5..02811c90f711 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4487,6 +4487,10 @@ static int io_setup_async_msg(struct io_kiocb *req,
 	async_msg = req->async_data;
 	req->flags |= REQ_F_NEED_CLEANUP;
 	memcpy(async_msg, kmsg, sizeof(*kmsg));
+	async_msg->msg.msg_name = &async_msg->addr;
+	/* if iov is not set, the iterator uses fast_iov */
+	if (!async_msg->iov)
+		async_msg->msg.msg_iter.iov = async_msg->fast_iov;
 	return -EAGAIN;
 }
 
@@ -4537,14 +4541,8 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
 	if (unlikely(!sock))
 		return ret;
 
-	if (req->async_data) {
-		kmsg = req->async_data;
-		kmsg->msg.msg_name = &kmsg->addr;
-		/* if iov is set, it's allocated already */
-		if (!kmsg->iov)
-			kmsg->iov = kmsg->fast_iov;
-		kmsg->msg.msg_iter.iov = kmsg->iov;
-	} else {
+	kmsg = req->async_data;
+	if (!kmsg) {
 		ret = io_sendmsg_copy_hdr(req, &iomsg);
 		if (ret)
 			return ret;
@@ -4563,7 +4561,8 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
 	if (ret == -ERESTARTSYS)
 		ret = -EINTR;
 
-	if (kmsg->iov != kmsg->fast_iov)
+	/* fast path, check for non-NULL to avoid a function call */
+	if (kmsg->iov)
 		kfree(kmsg->iov);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
 	if (ret < 0)
@@ -4765,14 +4764,8 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
 	if (unlikely(!sock))
 		return ret;
 
-	if (req->async_data) {
-		kmsg = req->async_data;
-		kmsg->msg.msg_name = &kmsg->addr;
-		/* if iov is set, it's allocated already */
-		if (!kmsg->iov)
-			kmsg->iov = kmsg->fast_iov;
-		kmsg->msg.msg_iter.iov = kmsg->iov;
-	} else {
+	kmsg = req->async_data;
+	if (!kmsg) {
 		ret = io_recvmsg_copy_hdr(req, &iomsg);
 		if (ret)
 			return ret;
@@ -4784,7 +4777,7 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
 		if (IS_ERR(kbuf))
 			return PTR_ERR(kbuf);
 		kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
-		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
+		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
 				1, req->sr_msg.len);
 	}
 
@@ -4803,7 +4796,8 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		cflags = io_put_recv_kbuf(req);
 
-	if (kmsg->iov != kmsg->fast_iov)
+	/* fast path, check for non-NULL to avoid a function call */
+	if (kmsg->iov)
 		kfree(kmsg->iov);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
 	if (ret < 0)
--
2.24.0
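
For context on why the copy needs fixing up at all: the async msg header
keeps pointers into its own embedded storage (addr, fast_iov), so a plain
memcpy() of the struct leaves those pointers aimed at the source copy. The
sketch below is a minimal, self-contained model of that pattern using
hypothetical stand-in types (struct hdr, copy_and_fixup()); it is not the
kernel code, only an illustration of what the lines added after memcpy()
in io_setup_async_msg() accomplish.

#include <assert.h>
#include <stdio.h>
#include <string.h>

/*
 * Hypothetical stand-in for io_async_msghdr: a struct whose pointer
 * fields may reference its own embedded storage (addr, fast_iov).
 */
struct hdr {
	char addr[16];		/* embedded peer-address storage */
	char fast_iov[32];	/* embedded "fast" iovec storage */
	char *heap_iov;		/* NULL means fast_iov is in use */
	char *msg_name;		/* should point at this struct's addr */
	char *iter_iov;		/* points at heap_iov or this struct's fast_iov */
};

/*
 * Mirrors what the patch does right after memcpy(): re-point the
 * self-referential fields at the copy's own storage. A heap-allocated
 * iov needs no fixup because both copies legitimately share it; only
 * the embedded fast_iov case does.
 */
static void copy_and_fixup(struct hdr *dst, const struct hdr *src)
{
	memcpy(dst, src, sizeof(*dst));
	dst->msg_name = dst->addr;
	/* if no heap iov was allocated, the iterator uses fast_iov */
	if (!dst->heap_iov)
		dst->iter_iov = dst->fast_iov;
}

int main(void)
{
	struct hdr src = { 0 }, dst;

	strcpy(src.addr, "peer");
	src.msg_name = src.addr;	/* self-referential pointers ... */
	src.iter_iov = src.fast_iov;	/* ... into src's own storage */

	copy_and_fixup(&dst, &src);

	/* after the fixup, dst no longer aliases src's embedded storage */
	assert(dst.msg_name == dst.addr);
	assert(dst.iter_iov == dst.fast_iov);
	printf("copied msg_name: %s\n", dst.msg_name);
	return 0;
}

In the kernel patch the same idea is just the three lines after memcpy(),
which is why doing it there, rather than re-deriving the pointers on the
retry path in io_sendmsg()/io_recvmsg(), keeps the logic in one place.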