From: Jens Axboe <axboe@xxxxxxxxx>

[ Upstream commit 4464853277d0ccdb9914608dd1332f0fa2f9846f ]

Pass in EPOLL_URING_WAKE when signaling eventfd or doing poll related
wakeups, so that we can check for a circular event dependency between
eventfd and epoll. If this flag is set when our wakeup handlers are
called, then we know we have a dependency that needs to terminate
multishot requests.

eventfd and epoll are the only such possible dependencies.

Cc: stable@xxxxxxxxxxxxxxx # 6.0
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
 io_uring/io_uring.c | 27 ++++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 9a01188ff45a..d855e668f37c 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1629,13 +1629,15 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
	 * wake as many waiters as we need to.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
-		wake_up_all(&ctx->cq_wait);
+		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
+			  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
	if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	if (io_should_trigger_evfd(ctx))
-		eventfd_signal(ctx->cq_ev_fd, 1);
+		eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE);
	if (waitqueue_active(&ctx->poll_wait))
-		wake_up_interruptible(&ctx->poll_wait);
+		__wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0,
+			  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
 }
 
 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
@@ -1645,12 +1647,14 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (waitqueue_active(&ctx->cq_wait))
-			wake_up_all(&ctx->cq_wait);
+			__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
+				  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
	}
	if (io_should_trigger_evfd(ctx))
-		eventfd_signal(ctx->cq_ev_fd, 1);
+		eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE);
	if (waitqueue_active(&ctx->poll_wait))
-		wake_up_interruptible(&ctx->poll_wait);
+		__wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0,
+			  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
 }
 
 /* Returns true if there are no backlogged entries after the flush */
@@ -5636,8 +5640,17 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
	if (mask && !(mask & poll->events))
		return 0;
 
-	if (io_poll_get_ownership(req))
+	if (io_poll_get_ownership(req)) {
+		/*
+		 * If we trigger a multishot poll off our own wakeup path,
+		 * disable multishot as there is a circular dependency between
+		 * CQ posting and triggering the event.
+		 */
+		if (mask & EPOLL_URING_WAKE)
+			poll->events |= EPOLLONESHOT;
+
		__io_poll_execute(req, mask);
+	}
	return 1;
 }
-- 
2.39.0
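
For readers tracing the mechanism: __wake_up() carries the poll mask in
its opaque "key" argument, and eventfd_signal_mask() does the same for
the eventfd wait queue, so a wakeup handler on the receiving side can
recover the mask with key_to_poll() and test for EPOLL_URING_WAKE,
exactly as io_poll_wake() does in the last hunk above. A minimal sketch
of such a handler follows; example_wake() is a hypothetical callback
for illustration only and is not part of this patch.

#include <linux/eventpoll.h>
#include <linux/poll.h>
#include <linux/printk.h>
#include <linux/wait.h>

/* Hypothetical wait-queue callback, not from this patch. */
static int example_wake(struct wait_queue_entry *wait, unsigned mode,
			int sync, void *key)
{
	__poll_t mask = key_to_poll(key);

	/*
	 * EPOLL_URING_WAKE means the wakeup originated from io_uring's
	 * own CQ posting; a multishot consumer must not blindly rearm
	 * off it, or CQE posting and the wakeup would feed each other.
	 */
	if (mask & EPOLL_URING_WAKE)
		pr_debug("wakeup came from io_uring CQ posting\n");

	return 1;
}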
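
From userspace, the dependency being broken looks like the sketch
below, written against liburing (the setup is illustrative and assumes
only stock liburing calls: io_uring_register_eventfd(),
io_uring_prep_poll_multishot(), io_uring_prep_nop()). The ring's
registered eventfd is itself polled multishot through the ring, so
every CQE posting signals the very fd that would rearm the poll. With
this patch, the poll completion triggered by the ring's own wakeup
should come back with IORING_CQE_F_MORE clear, i.e. multishot is
terminated instead of looping:

/* Illustrative sketch, not part of the patch. Build with -luring. */
#include <poll.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int efd;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;

	efd = eventfd(0, 0);
	if (efd < 0 || io_uring_register_eventfd(&ring, efd))
		return 1;

	/* multishot poll on the very eventfd that CQE posting signals */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_multishot(sqe, efd, POLLIN);
	io_uring_submit(&ring);

	/* post any CQE: it signals efd, which wakes the poll request */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	io_uring_submit(&ring);

	io_uring_wait_cqe(&ring, &cqe);		/* nop completion */
	io_uring_cqe_seen(&ring, cqe);

	io_uring_wait_cqe(&ring, &cqe);		/* poll completion */
	/* with the fix in place, IORING_CQE_F_MORE should be clear */
	printf("poll res=%d more=%u\n", cqe->res,
	       cqe->flags & IORING_CQE_F_MORE);
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}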