On 8/17/20 3:44 AM, gregkh@xxxxxxxxxxxxxxxxxxx wrote:
> 
> The patch below does not apply to the 5.8-stable tree.
> If someone wants it applied there, or to any other stable or longterm
> tree, then please email the backport, including the original git commit
> id to <stable@xxxxxxxxxxxxxxx>.

Here's a 5.8 version.

-- 
Jens Axboe
>From 4805555b82f6eb78903ee2841ebae13707d9da13 Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@xxxxxxxxx>
Date: Tue, 11 Aug 2020 08:04:14 -0600
Subject: [PATCH] io_uring: hold 'ctx' reference around task_work queue +
 execute

We're holding the request reference, but we need to go one higher
to ensure that the ctx remains valid after the request has finished.
If the ring is closed with pending task_work inflight, and the
given io_kiocb finishes sync during issue, then we need a reference
to the ring itself around the task_work execution cycle.

Cc: stable@xxxxxxxxxxxxxxx # v5.7+
Reported-by: syzbot+9b260fc33297966f5a8e@xxxxxxxxxxxxxxxxxxxxxxxxx
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 fs/io_uring.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3da73e58759e..c7aefd3da135 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4141,6 +4141,8 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	tsk = req->task;
 	req->result = mask;
 	init_task_work(&req->task_work, func);
+	percpu_ref_get(&req->ctx->refs);
+
 	/*
 	 * If this fails, then the task is exiting. When a task exits, the
 	 * work gets canceled, so just cancel this request as well instead
@@ -4225,6 +4227,7 @@ static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
 static void io_poll_task_func(struct callback_head *cb)
 {
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+	struct io_ring_ctx *ctx = req->ctx;
 	struct io_kiocb *nxt = NULL;
 
 	io_poll_task_handler(req, &nxt);
@@ -4235,6 +4238,7 @@ static void io_poll_task_func(struct callback_head *cb)
 		__io_queue_sqe(nxt, NULL);
 		mutex_unlock(&ctx->uring_lock);
 	}
+	percpu_ref_put(&ctx->refs);
 }
 
 static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
@@ -4349,6 +4353,7 @@ static void io_async_task_func(struct callback_head *cb)
 
 	if (io_poll_rewait(req, &apoll->poll)) {
 		spin_unlock_irq(&ctx->completion_lock);
+		percpu_ref_put(&ctx->refs);
 		return;
 	}
 
@@ -4387,6 +4392,7 @@ static void io_async_task_func(struct callback_head *cb)
 		req_set_fail_links(req);
 		io_double_put_req(req);
 	}
+	percpu_ref_put(&ctx->refs);
 }
 
 static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
-- 
2.28.0
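
For anyone reading along: the fix is the classic "pin the shared context
across deferred work" pattern. Take an extra reference on the ring ctx
before queueing the task_work, drop it after the callback has run, so the
ctx cannot be torn down while a callback is still in flight. A rough
userspace analogy follows (plain C11 atomics and pthreads instead of
percpu_ref and task_work; every name in it, ring_ctx, ctx_get, ctx_put,
task_work_fn, is invented for illustration and is not kernel API):

	/*
	 * Userspace sketch of the lifetime pattern the patch applies:
	 * grab a reference on the shared context *before* scheduling
	 * deferred work, release it only after the work has executed.
	 */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct ring_ctx {
		atomic_int refs;	/* stands in for percpu_ref ctx->refs */
	};

	static struct ring_ctx *ctx_alloc(void)
	{
		struct ring_ctx *ctx = malloc(sizeof(*ctx));

		atomic_init(&ctx->refs, 1);	/* owner's reference */
		return ctx;
	}

	static void ctx_get(struct ring_ctx *ctx)
	{
		atomic_fetch_add(&ctx->refs, 1);
	}

	static void ctx_put(struct ring_ctx *ctx)
	{
		/* whichever thread drops the last reference frees the ctx */
		if (atomic_fetch_sub(&ctx->refs, 1) == 1) {
			printf("ctx freed\n");
			free(ctx);
		}
	}

	/* deferred work, runs on another thread like task_work on the task */
	static void *task_work_fn(void *arg)
	{
		struct ring_ctx *ctx = arg;

		printf("task_work runs, ctx still valid\n");
		ctx_put(ctx);		/* mirrors percpu_ref_put(&ctx->refs) */
		return NULL;
	}

	int main(void)
	{
		struct ring_ctx *ctx = ctx_alloc();
		pthread_t worker;

		ctx_get(ctx);		/* mirrors percpu_ref_get(&req->ctx->refs) */
		pthread_create(&worker, NULL, task_work_fn, ctx);

		ctx_put(ctx);		/* "ring closed": owner drops its reference */
		pthread_join(worker, NULL);
		return 0;
	}

The point is that the free happens at the last ctx_put(), on whichever
thread that lands, so the callback can never touch freed memory even if
the owner has already dropped its own reference.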