If an iocb is split into multiple bios we can't poll for all of them. So
don't bother to even try to poll in that case.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 fs/block_dev.c | 37 ++++++++++++++-----------------------
 1 file changed, 14 insertions(+), 23 deletions(-)

diff --git a/fs/block_dev.c b/fs/block_dev.c
index 363015fcffdb..ea5b4617ff86 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -373,7 +373,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 	struct blk_plug plug;
 	struct blkdev_dio *dio;
 	struct bio *bio;
-	bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
+	bool is_poll = (iocb->ki_flags & IOCB_HIPRI), do_poll = false;
 	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
 	loff_t pos = iocb->ki_pos;
 	blk_qc_t qc = BLK_QC_T_NONE;
@@ -433,22 +433,9 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 		pos += bio->bi_iter.bi_size;
 
 		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
-		if (!nr_pages) {
-			bool polled = false;
-
-			if (iocb->ki_flags & IOCB_HIPRI) {
-				bio_set_polled(bio, iocb);
-				polled = true;
-			}
-
-			qc = submit_bio(bio);
-
-			if (polled)
-				WRITE_ONCE(iocb->ki_cookie, qc);
-			break;
-		}
-
-		if (!dio->multi_bio) {
+		if (dio->multi_bio) {
+			atomic_inc(&dio->ref);
+		} else if (nr_pages) {
 			/*
 			 * AIO needs an extra reference to ensure the dio
 			 * structure which is embedded into the first bio
@@ -458,11 +445,16 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 			bio_get(bio);
 			dio->multi_bio = true;
 			atomic_set(&dio->ref, 2);
-		} else {
-			atomic_inc(&dio->ref);
+		} else if (is_poll) {
+			bio_set_polled(bio, iocb);
+			do_poll = true;
+		}
+		qc = submit_bio(bio);
+		if (!nr_pages) {
+			if (do_poll)
+				WRITE_ONCE(iocb->ki_cookie, qc);
+			break;
 		}
-
-		submit_bio(bio);
 		bio = bio_alloc(GFP_KERNEL, nr_pages);
 	}
 
@@ -477,8 +469,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 		if (!READ_ONCE(dio->waiter))
 			break;
 
-		if (!(iocb->ki_flags & IOCB_HIPRI) ||
-		    !blk_poll(bdev_get_queue(bdev), qc, true))
+		if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, true))
 			blk_io_schedule();
 	}
 	__set_current_state(TASK_RUNNING);
-- 
2.30.1
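
For reference, a minimal stand-alone sketch of the decision this patch ends
up making in __blkdev_direct_IO(): poll only when the caller asked for it
via IOCB_HIPRI and the whole request fit in a single bio. This is plain
user-space C, not kernel code; the IOCB_HIPRI value and the should_poll()
helper are illustrative only and do not exist in the kernel.

/* sketch.c: model of the "don't poll multi-bio I/O" decision */
#include <stdbool.h>
#include <stdio.h>

#define IOCB_HIPRI	(1 << 3)	/* illustrative value, not from fs.h */

/*
 * A multi-bio dio produces more than one completion cookie, so there is
 * no single qc to poll on; only single-bio HIPRI requests get polled.
 */
static bool should_poll(unsigned int ki_flags, unsigned int nr_bios)
{
	bool is_poll = ki_flags & IOCB_HIPRI;

	return is_poll && nr_bios == 1;
}

int main(void)
{
	printf("HIPRI, 1 bio  -> poll=%d\n", should_poll(IOCB_HIPRI, 1)); /* 1 */
	printf("HIPRI, 3 bios -> poll=%d\n", should_poll(IOCB_HIPRI, 3)); /* 0 */
	printf("no HIPRI      -> poll=%d\n", should_poll(0, 1));          /* 0 */
	return 0;
}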