Hi,

Can you queue up this one for 5.10 stable? It has an extra hunk that's
needed for 5.10. 5.15/16/17 will be a direct port of the 5.18-rc patch.

commit e677edbcabee849bfdd43f1602bccbecf736a646
Author: Jens Axboe <axboe@xxxxxxxxx>
Date:   Fri Apr 8 11:08:58 2022 -0600

    io_uring: fix race between timeout flush and removal

    commit e677edbcabee849bfdd43f1602bccbecf736a646 upstream.

    io_flush_timeouts() assumes the timeout isn't in progress of triggering
    or being removed/canceled, so it unconditionally removes it from the
    timeout list and attempts to cancel it.

    Leave it on the list and let the normal timeout cancelation take care
    of it.

    Cc: stable@xxxxxxxxxxxxxxx # 5.5+
    Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5959b0359524..bc6bf17566f8 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1556,6 +1556,7 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
 
 static void io_flush_timeouts(struct io_ring_ctx *ctx)
 {
+	struct io_kiocb *req, *tmp;
 	u32 seq;
 
 	if (list_empty(&ctx->timeout_list))
@@ -1563,10 +1564,8 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 
 	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
 
-	do {
+	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
 		u32 events_needed, events_got;
-		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
-						struct io_kiocb, timeout.list);
 
 		if (io_is_timeout_noseq(req))
 			break;
@@ -1583,9 +1582,8 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 		if (events_got < events_needed)
 			break;
 
-		list_del_init(&req->timeout.list);
 		io_kill_timeout(req, 0);
-	} while (!list_empty(&ctx->timeout_list));
+	}
 
 	ctx->cq_last_tm_flush = seq;
 }
@@ -5639,6 +5637,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	else
 		data->mode = HRTIMER_MODE_REL;
 
+	INIT_LIST_HEAD(&req->timeout.list);
 	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
 	return 0;
 }
@@ -6282,12 +6281,12 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	if (!list_empty(&req->link_list)) {
 		prev = list_entry(req->link_list.prev, struct io_kiocb,
 				  link_list);
-		if (refcount_inc_not_zero(&prev->refs))
-			list_del_init(&req->link_list);
-		else
+		list_del_init(&req->link_list);
+		if (!refcount_inc_not_zero(&prev->refs))
 			prev = NULL;
 	}
 
+	list_del(&req->timeout.list);
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
 	if (prev) {

-- 
Jens Axboe
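
For anyone reviewing the backport without the kernel tree at hand, here is a
minimal standalone sketch of the safe-iteration idiom the patch switches
io_flush_timeouts() to. The tiny list helpers, the timeout_req type, and the
sample data below are made up for illustration; only list_for_each_entry_safe()
in <linux/list.h> (which uses the same lookahead) comes from the patch itself.
This shows the iteration pattern only, not the completion_lock/hrtimer race
the fix addresses.

/*
 * Standalone sketch (not kernel code): iterate a doubly linked list while
 * possibly unlinking the current entry, using a lookahead pointer so the
 * deletion cannot break the walk.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timeout_req {
	unsigned int seq;
	struct list_head list;
};

int main(void)
{
	struct list_head timeouts;
	struct timeout_req reqs[4];
	struct list_head *pos, *tmp;

	list_init(&timeouts);
	for (unsigned int i = 0; i < 4; i++) {
		reqs[i].seq = i;
		list_add_tail(&reqs[i].list, &timeouts);
	}

	/*
	 * "Safe" iteration: read the next pointer (tmp) before the current
	 * entry may be unlinked.  A plain list_first_entry()/do-while walk,
	 * as in the old io_flush_timeouts(), has no such lookahead and
	 * assumes the entry is still on the list.
	 */
	for (pos = timeouts.next, tmp = pos->next; pos != &timeouts;
	     pos = tmp, tmp = pos->next) {
		struct timeout_req *req = container_of(pos, struct timeout_req, list);

		if (req->seq % 2 == 0) {	/* pretend this timeout fired */
			list_del(pos);
			printf("cancelled timeout %u\n", req->seq);
		}
	}
	return 0;
}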