req->iopoll() is not necessarily called by a task that submitted a
request. Because of that, it's dangerous to grab_env() and punt async on
-EAGAIN, potentially grabbing another task's mm and corrupting its
memory. Do the resubmission from the submitter task context instead.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 fs/io_uring.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index bb0dfc450db5..595d2bbb31b1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -884,6 +884,8 @@ enum io_mem_account {
 	ACCT_PINNED,
 };
 
+static void io_complete_rw_common(struct kiocb *kiocb, long res);
+static bool io_rw_reissue(struct io_kiocb *req, long res);
 static void io_wq_submit_work(struct io_wq_work **workptr);
 static void io_cqring_fill_event(struct io_kiocb *req, long res);
 static void io_put_req(struct io_kiocb *req);
@@ -1756,8 +1758,11 @@ static void io_iopoll_queue(struct list_head *again)
 	do {
 		req = list_first_entry(again, struct io_kiocb, list);
 		list_del(&req->list);
-		refcount_inc(&req->refs);
-		io_queue_async_work(req);
+
+		if (!io_rw_reissue(req, -EAGAIN)) {
+			io_complete_rw_common(&req->rw.kiocb, -EAGAIN);
+			io_put_req(req);
+		}
 	} while (!list_empty(again));
 }
 
@@ -1930,6 +1935,8 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
 		 */
 		if (!(++iters & 7)) {
 			mutex_unlock(&ctx->uring_lock);
+			if (current->task_works)
+				task_work_run();
 			mutex_lock(&ctx->uring_lock);
 		}
 
@@ -2288,6 +2295,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		kiocb->ki_complete = io_complete_rw_iopoll;
 		req->result = 0;
 		req->iopoll_completed = 0;
+		io_get_req_task(req);
 	} else {
 		if (kiocb->ki_flags & IOCB_HIPRI)
 			return -EINVAL;
-- 
2.24.0
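
Editorial note: the patch forward-declares io_rw_reissue() but its body
is outside these hunks. A minimal sketch of what such a reissue helper
could look like, assuming io_kiocb carries a 'struct callback_head
task_work' and a 'task' pointer populated by io_get_req_task(), and
assuming hypothetical helpers io_sq_thread_acquire_mm() and
io_resubmit_prep(); this is not the code from the series itself:

/*
 * Hypothetical sketch (not part of this patch): bounce the resubmission
 * into the context of the task that originally submitted the request,
 * so any mm grabbed for the async punt is the submitter's, not ours.
 */
static void io_rw_resubmit(struct callback_head *cb)
{
	/* assumes io_kiocb embeds 'struct callback_head task_work' */
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
	struct io_ring_ctx *ctx = req->ctx;
	int err;

	/* hypothetical helper; safe here since we run as the submitter */
	err = io_sq_thread_acquire_mm(ctx, req);

	if (io_resubmit_prep(req, err)) {	/* hypothetical helper */
		refcount_inc(&req->refs);
		io_queue_async_work(req);
	}
}

static bool io_rw_reissue(struct io_kiocb *req, long res)
{
	/* req->task was grabbed via io_get_req_task() at prep time */
	struct task_struct *tsk = req->task;
	int ret;

	if (res != -EAGAIN)
		return false;

	init_task_work(&req->task_work, io_rw_resubmit);
	/* notify=true kicks the target task so the work runs promptly */
	ret = task_work_add(tsk, &req->task_work, true);
	return ret == 0;
}

This also explains the two other hunks: io_prep_rw() now takes a task
reference for IOPOLL requests so there is a valid target for
task_work_add(), and io_iopoll_check() periodically runs
task_work_run() so a task polling in that loop still executes the
queued resubmissions.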