Some of these code paths assume that a force_nonblock == true issue has not
been prepped, but that's not true if we did the prep earlier as part of link
setup. Check if we already have an async context allocated before setting up
a new one.

Additionally, io_req_map_io() is just for reads and writes, not for generic
use. Move it into the read/write prep path and rename it to io_req_map_rw()
to make that clear. Ditto for io_setup_async_io(), which then becomes
io_setup_async_rw().

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
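Note for readers outside the tree: the snippet below is a minimal, self-contained
userspace sketch of the guard this patch adds, i.e. reuse an already-allocated
async context instead of allocating a second one. The struct and function names
are made up for illustration; this is not the io_uring code itself.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for struct io_async_ctx / struct io_kiocb. */
struct async_ctx {
        int dummy;                      /* stands in for the saved sqe/iovec state */
};

struct request {
        struct async_ctx *io;           /* NULL until async prep has run */
};

/* Mirrors the early return added to io_setup_async_rw(): allocate once, reuse after. */
static int setup_async_ctx(struct request *req)
{
        if (req->io)                    /* already prepped, e.g. as part of link setup */
                return 0;

        req->io = malloc(sizeof(*req->io));
        return req->io ? 0 : -1;        /* would be -ENOMEM in the kernel */
}

int main(void)
{
        struct request req = { .io = NULL };
        struct async_ctx *first;

        setup_async_ctx(&req);          /* first call allocates the context */
        first = req.io;
        setup_async_ctx(&req);          /* second call is a no-op, context is reused */
        printf("context reused: %s\n", first == req.io ? "yes" : "no");
        free(req.io);
        return 0;
}
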
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0e01cdc8a120..476825606204 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1701,7 +1701,7 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
 	return ret;
 }
 
-static void io_req_map_io(struct io_kiocb *req, ssize_t io_size,
+static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
 			  struct iovec *iovec, struct iovec *fast_iov,
 			  struct iov_iter *iter)
 {
@@ -1715,13 +1715,16 @@ static void io_req_map_io(struct io_kiocb *req, ssize_t io_size,
 	}
 }
 
-static int io_setup_async_io(struct io_kiocb *req, ssize_t io_size,
+static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
 			     struct iovec *iovec, struct iovec *fast_iov,
 			     struct iov_iter *iter)
 {
+	if (req->io)
+		return 0;
+
 	req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
 	if (req->io) {
-		io_req_map_io(req, io_size, iovec, fast_iov, iter);
+		io_req_map_rw(req, io_size, iovec, fast_iov, iter);
 		memcpy(&req->io->sqe, req->sqe, sizeof(req->io->sqe));
 		req->sqe = &req->io->sqe;
 		return 0;
@@ -1806,7 +1809,7 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
 			kiocb_done(kiocb, ret2, nxt, req->in_async);
 		} else {
 copy_iov:
-			ret = io_setup_async_io(req, io_size, iovec,
+			ret = io_setup_async_rw(req, io_size, iovec,
 						inline_vecs, &iter);
 			if (ret)
 				goto out_free;
@@ -1900,7 +1903,7 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
 			kiocb_done(kiocb, ret2, nxt, req->in_async);
 		} else {
 copy_iov:
-			ret = io_setup_async_io(req, io_size, iovec,
+			ret = io_setup_async_rw(req, io_size, iovec,
 						inline_vecs, &iter);
 			if (ret)
 				goto out_free;
@@ -2077,6 +2080,8 @@ static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
 		ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
 		if (force_nonblock && ret == -EAGAIN) {
+			if (req->io)
+				return -EAGAIN;
 			copy = kmalloc(sizeof(*copy), GFP_KERNEL);
 			if (!copy) {
 				ret = -ENOMEM;
@@ -2165,6 +2170,8 @@ static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		ret = __sys_recvmsg_sock(sock, &kmsg->msg, msg, kmsg->uaddr,
 						flags);
 		if (force_nonblock && ret == -EAGAIN) {
+			if (req->io)
+				return -EAGAIN;
 			copy = kmalloc(sizeof(*copy), GFP_KERNEL);
 			if (!copy) {
 				ret = -ENOMEM;
@@ -2272,6 +2279,8 @@ static int io_connect(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	ret = __sys_connect_file(req->file, &io->connect.address, addr_len,
 					file_flags);
 	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
+		if (req->io)
+			return -EAGAIN;
 		io = kmalloc(sizeof(*io), GFP_KERNEL);
 		if (!io) {
 			ret = -ENOMEM;
@@ -2871,10 +2880,14 @@ static int io_req_defer_prep(struct io_kiocb *req, struct io_async_ctx *io)
 	case IORING_OP_READV:
 	case IORING_OP_READ_FIXED:
 		ret = io_read_prep(req, &iovec, &iter, true);
+		if (!ret)
+			io_req_map_rw(req, ret, iovec, inline_vecs, &iter);
 		break;
 	case IORING_OP_WRITEV:
 	case IORING_OP_WRITE_FIXED:
 		ret = io_write_prep(req, &iovec, &iter, true);
+		if (!ret)
+			io_req_map_rw(req, ret, iovec, inline_vecs, &iter);
 		break;
 	case IORING_OP_SENDMSG:
 		ret = io_sendmsg_prep(req, io);
@@ -2894,12 +2907,10 @@ static int io_req_defer_prep(struct io_kiocb *req, struct io_async_ctx *io)
 		return 0;
 	}
 
-	if (ret < 0)
-		return ret;
+	if (!ret)
+		req->io = io;
 
-	req->io = io;
-	io_req_map_io(req, ret, iovec, inline_vecs, &iter);
-	return 0;
+	return ret;
 }
 
 static int io_req_defer(struct io_kiocb *req)
-- 
Jens Axboe