This is a note to let you know that I've just added the patch titled

    io_uring/net: move receive multishot out of the generic msghdr path

to the 6.1-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     io_uring-net-move-receive-multishot-out-of-the-gener.patch
and it can be found in the queue-6.1 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.


commit a2cd534b7a8e57e4769f8a4b7ac1b162c01884df
Author: Jens Axboe <axboe@xxxxxxxxx>
Date:   Tue Feb 27 11:09:20 2024 -0700

    io_uring/net: move receive multishot out of the generic msghdr path

    [ Upstream commit c55978024d123d43808ab393a0a4ce3ce8568150 ]

    Move the actual user_msghdr / compat_msghdr into the send and receive
    sides, respectively, so we can move the uaddr receive handling into
    its own handler, and ditto the multishot with buffer selection logic.

    Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
    Stable-dep-of: 8ede3db5061b ("io_uring/net: fix overflow check in io_recvmsg_mshot_prep()")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/io_uring/net.c b/io_uring/net.c
index c770961079749..b273914ed99f0 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -183,46 +183,26 @@ static int io_setup_async_msg(struct io_kiocb *req,
 	return -EAGAIN;
 }
 
-static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
-{
-	int hdr;
-
-	if (iomsg->namelen < 0)
-		return true;
-	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
-			       iomsg->namelen, &hdr))
-		return true;
-	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
-		return true;
-
-	return false;
-}
-
 #ifdef CONFIG_COMPAT
-static int __io_compat_msg_copy_hdr(struct io_kiocb *req,
-				    struct io_async_msghdr *iomsg,
-				    struct sockaddr __user **addr, int ddir)
+static int io_compat_msg_copy_hdr(struct io_kiocb *req,
+				  struct io_async_msghdr *iomsg,
+				  struct compat_msghdr *msg, int ddir)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct compat_msghdr msg;
 	struct compat_iovec __user *uiov;
 	int ret;
 
-	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
+	if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
 		return -EFAULT;
 
-	ret = __get_compat_msghdr(&iomsg->msg, &msg, addr);
-	if (ret)
-		return ret;
-
-	uiov = compat_ptr(msg.msg_iov);
+	uiov = compat_ptr(msg->msg_iov);
 	if (req->flags & REQ_F_BUFFER_SELECT) {
 		compat_ssize_t clen;
 
 		iomsg->free_iov = NULL;
-		if (msg.msg_iovlen == 0) {
+		if (msg->msg_iovlen == 0) {
 			sr->len = 0;
-		} else if (msg.msg_iovlen > 1) {
+		} else if (msg->msg_iovlen > 1) {
 			return -EINVAL;
 		} else {
 			if (!access_ok(uiov, sizeof(*uiov)))
@@ -234,18 +214,11 @@ static int __io_compat_msg_copy_hdr(struct io_kiocb *req,
 			sr->len = clen;
 		}
 
-		if (ddir == ITER_DEST && req->flags & REQ_F_APOLL_MULTISHOT) {
-			iomsg->namelen = msg.msg_namelen;
-			iomsg->controllen = msg.msg_controllen;
-			if (io_recvmsg_multishot_overflow(iomsg))
-				return -EOVERFLOW;
-		}
-
 		return 0;
 	}
 
 	iomsg->free_iov = iomsg->fast_iov;
-	ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg.msg_iovlen,
+	ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
 				UIO_FASTIOV, &iomsg->free_iov,
 				&iomsg->msg.msg_iter, true);
 	if (unlikely(ret < 0))
@@ -255,47 +228,35 @@ static int __io_compat_msg_copy_hdr(struct io_kiocb *req,
 }
 #endif
 
-static int __io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
-			     struct sockaddr __user **addr, int ddir)
+static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
+			   struct user_msghdr *msg, int ddir)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct user_msghdr msg;
 	int ret;
 
-	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
+	if (copy_from_user(msg, sr->umsg, sizeof(*sr->umsg)))
 		return -EFAULT;
 
-	ret = __copy_msghdr(&iomsg->msg, &msg, addr);
-	if (ret)
-		return ret;
-
 	if (req->flags & REQ_F_BUFFER_SELECT) {
-		if (msg.msg_iovlen == 0) {
+		if (msg->msg_iovlen == 0) {
 			sr->len = iomsg->fast_iov[0].iov_len = 0;
 			iomsg->fast_iov[0].iov_base = NULL;
 			iomsg->free_iov = NULL;
-		} else if (msg.msg_iovlen > 1) {
+		} else if (msg->msg_iovlen > 1) {
 			return -EINVAL;
 		} else {
-			if (copy_from_user(iomsg->fast_iov, msg.msg_iov,
-					   sizeof(*msg.msg_iov)))
+			if (copy_from_user(iomsg->fast_iov, msg->msg_iov,
+					   sizeof(*msg->msg_iov)))
 				return -EFAULT;
 			sr->len = iomsg->fast_iov[0].iov_len;
 			iomsg->free_iov = NULL;
 		}
 
-		if (ddir == ITER_DEST && req->flags & REQ_F_APOLL_MULTISHOT) {
-			iomsg->namelen = msg.msg_namelen;
-			iomsg->controllen = msg.msg_controllen;
-			if (io_recvmsg_multishot_overflow(iomsg))
-				return -EOVERFLOW;
-		}
-
 		return 0;
 	}
 
 	iomsg->free_iov = iomsg->fast_iov;
-	ret = __import_iovec(ddir, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
+	ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV,
 				&iomsg->free_iov, &iomsg->msg.msg_iter, false);
 	if (unlikely(ret < 0))
 		return ret;
@@ -303,30 +264,34 @@ static int __io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg
 	return 0;
 }
 
-static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
-			   struct sockaddr __user **addr, int ddir)
+static int io_sendmsg_copy_hdr(struct io_kiocb *req,
+			       struct io_async_msghdr *iomsg)
 {
+	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+	struct user_msghdr msg;
+	int ret;
+
 	iomsg->msg.msg_name = &iomsg->addr;
 	iomsg->msg.msg_iter.nr_segs = 0;
 
 #ifdef CONFIG_COMPAT
-	if (req->ctx->compat)
-		return __io_compat_msg_copy_hdr(req, iomsg, addr, ddir);
-#endif
+	if (unlikely(req->ctx->compat)) {
+		struct compat_msghdr cmsg;
 
-	return __io_msg_copy_hdr(req, iomsg, addr, ddir);
-}
+		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
+		if (unlikely(ret))
+			return ret;
 
-static int io_sendmsg_copy_hdr(struct io_kiocb *req,
-			       struct io_async_msghdr *iomsg)
-{
-	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	int ret;
+		return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
+	}
+#endif
 
-	ret = io_msg_copy_hdr(req, iomsg, NULL, ITER_SOURCE);
-	if (ret)
+	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
+	if (unlikely(ret))
 		return ret;
 
+	ret = __copy_msghdr(&iomsg->msg, &msg, NULL);
+
 	/* save msg_control as sys_sendmsg() overwrites it */
 	sr->msg_control = iomsg->msg.msg_control_user;
 	return ret;
@@ -549,10 +514,66 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
 	return IOU_OK;
 }
 
+static int io_recvmsg_mshot_prep(struct io_kiocb *req,
+				 struct io_async_msghdr *iomsg,
+				 size_t namelen, size_t controllen)
+{
+	if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
+	    (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
+		int hdr;
+
+		if (unlikely(namelen < 0))
+			return -EOVERFLOW;
+		if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
+				       namelen, &hdr))
+			return -EOVERFLOW;
+		if (check_add_overflow(hdr, (int)controllen, &hdr))
+			return -EOVERFLOW;
+
+		iomsg->namelen = namelen;
+		iomsg->controllen = controllen;
+		return 0;
+	}
+
+	return 0;
+}
+
 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
 			       struct io_async_msghdr *iomsg)
 {
-	return io_msg_copy_hdr(req, iomsg, &iomsg->uaddr, ITER_DEST);
+	struct user_msghdr msg;
+	int ret;
+
+	iomsg->msg.msg_name = &iomsg->addr;
+	iomsg->msg.msg_iter.nr_segs = 0;
+
+#ifdef CONFIG_COMPAT
+	if (unlikely(req->ctx->compat)) {
+		struct compat_msghdr cmsg;
+
+		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
+		if (unlikely(ret))
+			return ret;
+
+		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
+		if (unlikely(ret))
+			return ret;
+
+		return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
+						cmsg.msg_controllen);
+	}
+#endif
+
+	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
+	if (unlikely(ret))
+		return ret;
+
+	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
+	if (unlikely(ret))
+		return ret;
+
+	return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
+					msg.msg_controllen);
 }
 
 int io_recvmsg_prep_async(struct io_kiocb *req)