If CONFIG_PREEMPT_NONE is set and the task_work chains are long, we
could be running into issues blocking others for too long. Add a
reschedule check in handle_tw_list(), and flush the ctx if we need to
reschedule.

Cc: stable@xxxxxxxxxxxxxxx # 5.10+
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 io_uring/io_uring.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index bb8a532b051e..bc8845de2829 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1179,10 +1179,16 @@ static unsigned int handle_tw_list(struct llist_node *node,
 			/* if not contended, grab and improve batching */
 			*locked = mutex_trylock(&(*ctx)->uring_lock);
 			percpu_ref_get(&(*ctx)->refs);
-		}
+		} else if (!*locked)
+			*locked = mutex_trylock(&(*ctx)->uring_lock);
 		req->io_task_work.func(req, locked);
 		node = next;
 		count++;
+		if (unlikely(need_resched())) {
+			ctx_flush_and_put(*ctx, locked);
+			*ctx = NULL;
+			cond_resched();
+		}
 	}
 
 	return count;
-- 
2.39.0
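
For context (not part of the patch): a minimal, self-contained userspace
sketch of the same pattern, i.e. a long work loop that flushes its batched
per-context state and yields the CPU at a reschedule point. All names here
(work_item, flush_ctx, handle_list) are hypothetical stand-ins for the
io_uring internals, and the count-based check is only a userspace stand-in
for need_resched().

/* Hypothetical illustration only -- not kernel code. */
#include <sched.h>
#include <stddef.h>

struct work_item {
	struct work_item *next;
	void (*func)(struct work_item *);
	void *ctx;	/* per-item context, batched across consecutive items */
};

/* Stand-in for ctx_flush_and_put(): drop whatever was batched for *ctx. */
static void flush_ctx(void **ctx)
{
	*ctx = NULL;
}

unsigned int handle_list(struct work_item *node)
{
	void *ctx = NULL;
	unsigned int count = 0;

	while (node) {
		struct work_item *next = node->next;

		if (node->ctx != ctx) {
			flush_ctx(&ctx);
			ctx = node->ctx;	/* start batching for the new context */
		}
		node->func(node);
		node = next;
		count++;

		/*
		 * Reschedule point: on a long chain, don't hog the CPU.
		 * Flush batched state first so nothing is held across
		 * the yield, mirroring ctx_flush_and_put() in the patch.
		 */
		if (!(count & 31)) {	/* userspace stand-in for need_resched() */
			flush_ctx(&ctx);
			sched_yield();
		}
	}

	flush_ctx(&ctx);
	return count;
}

The point mirrored from the patch is that the flush happens before the
yield, so no per-context state is held while other tasks get to run.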