On 08/04/2021 19:28, Pavel Begunkov wrote: > WARNING: at fs/io_uring.c:8578 io_ring_exit_work.cold+0x0/0x18 > > As reissuing is now passed back by REQ_F_REISSUE and kiocb_done() > internally uses __io_complete_rw(), it may stop after setting the flag > so leaving a dangling request. > > There are tricky edge cases, e.g. reading beyond file boundary, so > the easiest way is to hand code reissue in kiocb_done() as > __io_complete_rw() was doing for us before. fwiw, was using this fixed up version for 5.13 diff --git a/fs/io_uring.c b/fs/io_uring.c index 959df7666d45..a1de599dce55 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2743,6 +2743,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret, { struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); struct io_async_rw *io = req->async_data; + bool check_reissue = kiocb->ki_complete == io_complete_rw; /* add previously done IO, if any */ if (io && io->bytes_done > 0) { @@ -2758,6 +2759,22 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret, __io_complete_rw(req, ret, 0, issue_flags); else io_rw_done(kiocb, ret); + + if (check_reissue && req->flags & REQ_F_REISSUE) { + req->flags &= ~REQ_F_REISSUE; + + if (io_resubmit_prep(req)) { + req_ref_get(req); + io_queue_async_work(req); + } else { + int cflags = 0; + + req_set_fail_links(req); + if (req->flags & REQ_F_BUFFER_SELECTED) + cflags = io_put_rw_kbuf(req); + __io_req_complete(req, issue_flags, ret, cflags); + } + } } static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)