If REQ_F_POLL_FIRST is set and we haven't polled for this request before,
go straight to checking poll status before attempting a data transfer.

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 fs/io_uring.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index eb5f77bde98d..3ae18604ed59 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5315,6 +5315,9 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 		kmsg = &iomsg;
 	}
 
+	if ((req->flags & (REQ_F_POLLED | REQ_F_POLL_FIRST)) == REQ_F_POLL_FIRST)
+		return io_setup_async_msg(req, kmsg);
+
 	flags = req->sr_msg.msg_flags;
 	if (issue_flags & IO_URING_F_NONBLOCK)
 		flags |= MSG_DONTWAIT;
@@ -5357,6 +5360,9 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags)
 	int min_ret = 0;
 	int ret;
 
+	if ((req->flags & (REQ_F_POLLED | REQ_F_POLL_FIRST)) == REQ_F_POLL_FIRST)
+		return -EAGAIN;
+
 	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
 		return -ENOTSOCK;
@@ -5547,6 +5553,9 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 		kmsg = &iomsg;
 	}
 
+	if ((req->flags & (REQ_F_POLLED | REQ_F_POLL_FIRST)) == REQ_F_POLL_FIRST)
+		return io_setup_async_msg(req, kmsg);
+
 	if (req->flags & REQ_F_BUFFER_SELECT) {
 		kbuf = io_recv_buffer_select(req, issue_flags);
 		if (IS_ERR(kbuf))
@@ -5604,6 +5613,9 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 	int ret, min_ret = 0;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
+	if ((req->flags & (REQ_F_POLLED | REQ_F_POLL_FIRST)) == REQ_F_POLL_FIRST)
+		return -EAGAIN;
+
 	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
 		return -ENOTSOCK;
-- 
2.35.1
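
For context, a minimal userspace sketch of how a submission could opt into this
poll-first behavior is below. It assumes the userspace-visible counterpart of
REQ_F_POLL_FIRST is the IORING_RECVSEND_POLL_FIRST flag set in the SQE's ioprio
field, and that liburing is available; it is an illustration only, not part of
the patch.

/*
 * Illustrative sketch, not part of this patch: assumes the userspace flag
 * corresponding to REQ_F_POLL_FIRST is IORING_RECVSEND_POLL_FIRST, passed
 * via the SQE's ioprio field, and that liburing is installed.
 */
#include <liburing.h>

static void queue_poll_first_recv(struct io_uring *ring, int sockfd,
				  void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_recv(sqe, sockfd, buf, len, 0);
	/* Arm poll before attempting the actual receive */
	sqe->ioprio |= IORING_RECVSEND_POLL_FIRST;
	io_uring_submit(ring);
}

With the flag set, the request goes straight to poll and the receive is only
attempted once the socket signals readiness, rather than issuing a speculative
recv that would most likely return -EAGAIN on an idle socket.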