On 26/02/2021 11:49, Pavel Begunkov wrote:
> Request submission and iopolling might happen from different syscalls,
> so the iovec backing a request may already have been freed by userspace.
>
> Catch -EAGAIN passed during submission but through ki_complete, i.e.
> io_complete_rw_iopoll(), and try to set up an async context there,
> similarly to what we do in io_complete_rw().
>
> Because io_iopoll_req_issued() happens after, just leave it be until
> iopoll reaps the request and reissues it, or potentially sees that async
> setup failed and posts a CQE with an error.

Let's wait a week with that. It's fine for 5.12, but it might get racy
after merging.

>
> Cc: <stable@xxxxxxxxxxxxxxx> # 5.9+
> Reported-by: Abaci Robot <abaci@xxxxxxxxxxxxxxxxx>
> Reported-by: Xiaoguang Wang <xiaoguang.wang@xxxxxxxxxxxxxxxxx>
> Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
> ---
>
> Jens, the assumption that -EAGAIN comes only when we haven't yet gone
> async is on you.
>
>  fs/io_uring.c | 18 ++++++++++++++----
>  1 file changed, 14 insertions(+), 4 deletions(-)
>
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 5c8e24274acf..9fa8ff227f75 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -2610,8 +2610,11 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
>  		list_del(&req->inflight_entry);
>
>  		if (READ_ONCE(req->result) == -EAGAIN) {
> +			bool reissue = req->async_data ||
> +				!io_op_defs[req->opcode].needs_async_data;
> +
>  			req->iopoll_completed = 0;
> -			if (io_rw_reissue(req))
> +			if (reissue && io_rw_reissue(req))
>  				continue;
>  		}
>
> @@ -2794,9 +2797,9 @@ static void kiocb_end_write(struct io_kiocb *req)
>  	file_end_write(req->file);
>  }
>
> -#ifdef CONFIG_BLOCK
>  static bool io_resubmit_prep(struct io_kiocb *req)
>  {
> +#ifdef CONFIG_BLOCK
>  	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
>  	int rw, ret;
>  	struct iov_iter iter;
> @@ -2826,8 +2829,9 @@ static bool io_resubmit_prep(struct io_kiocb *req)
>  	if (ret < 0)
>  		return false;
>  	return !io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
> -}
>  #endif
> +	return false;
> +}
>
>  static bool io_rw_reissue(struct io_kiocb *req)
>  {
> @@ -2892,8 +2896,14 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
>  	if (kiocb->ki_flags & IOCB_WRITE)
>  		kiocb_end_write(req);
>
> -	if (res != -EAGAIN && res != req->result)
> +	if (res == -EAGAIN) {
> +		if (percpu_ref_is_dying(&req->ctx->refs))
> +			res = -EFAULT;
> +		else if (!(req->flags & REQ_F_NOWAIT) && !io_wq_current_is_worker())
> +			io_resubmit_prep(req);
> +	} else if (res != req->result) {
>  		req_set_fail_links(req);
> +	}
>
>  	WRITE_ONCE(req->result, res);
>  	/* order with io_poll_complete() checking ->result */

-- 
Pavel Begunkov
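
For context, below is a minimal userspace sketch of the scenario the
commit message describes. It is illustrative only and not part of the
patch: the file name "testfile", the buffer size, and the omitted error
handling are all assumptions. With IORING_SETUP_IOPOLL, submission and
reaping happen in separate syscalls, so the on-stack iovec here is gone
by the time the kernel would try to re-import it on a late -EAGAIN:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/uio.h>
#include <liburing.h>

static char buf[4096];

static void submit_read(struct io_uring *ring, int fd)
{
	/* on-stack iovec: valid only for the duration of this call */
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
	io_uring_submit(ring);
	/* iov goes out of scope here; the kernel must not re-read it */
}

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	int fd = open("testfile", O_RDONLY | O_DIRECT);

	io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
	submit_read(&ring, fd);
	/* separate syscall reaps; iopoll may only now see -EAGAIN */
	io_uring_wait_cqe(&ring, &cqe);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}

This is why the patch calls io_resubmit_prep() already at ki_complete
time: the iovec gets copied into the request's async context while it is
still valid, instead of being re-imported from userspace when iopoll
later reissues the request.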