A synchronous readpage lets us report the actual errno instead of
ineffectively setting PageError.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 fs/iomap/buffered-io.c | 64 +++++++++++++++++++++++++-----------------
 1 file changed, 38 insertions(+), 26 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 13b56d656337..aec95996bd4b 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -146,9 +146,6 @@ void iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
 	unsigned long flags;
 	unsigned int i;
 
-	if (PageError(page))
-		return;
-
 	if (!iop) {
 		SetPageUptodate(page);
 		return;
@@ -167,32 +164,41 @@ void iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
 	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
 }
 
-static void
-iomap_read_page_end_io(struct bio_vec *bvec, int error)
+struct iomap_sync_end {
+	blk_status_t status;
+	struct completion done;
+};
+
+static void iomap_read_page_end_io(struct bio_vec *bvec,
+		struct iomap_sync_end *end, bool error)
 {
 	struct page *page = bvec->bv_page;
 	struct iomap_page *iop = to_iomap_page(page);
 
-	if (unlikely(error)) {
-		ClearPageUptodate(page);
-		SetPageError(page);
-	} else {
+	if (!error)
 		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
-	}
 
-	if (!iop || atomic_dec_and_test(&iop->read_count))
-		unlock_page(page);
+	if (!iop || atomic_dec_and_test(&iop->read_count)) {
+		if (end)
+			complete(&end->done);
+		else
+			unlock_page(page);
+	}
 }
 
 static void
 iomap_read_end_io(struct bio *bio)
 {
-	int error = blk_status_to_errno(bio->bi_status);
+	struct iomap_sync_end *end = bio->bi_private;
 	struct bio_vec *bvec;
 	struct bvec_iter_all iter_all;
 
+	/* Capture the first error */
+	if (end && end->status == BLK_STS_OK)
+		end->status = bio->bi_status;
+
 	bio_for_each_segment_all(bvec, bio, iter_all)
-		iomap_read_page_end_io(bvec, error);
+		iomap_read_page_end_io(bvec, end, bio->bi_status != BLK_STS_OK);
 	bio_put(bio);
 }
 
@@ -201,6 +207,7 @@ struct iomap_readpage_ctx {
 	bool			cur_page_in_bio;
 	struct bio		*bio;
 	struct readahead_control *rac;
+	struct iomap_sync_end	*end;
 };
 
 static void
@@ -307,6 +314,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 			ctx->bio->bi_opf |= REQ_RAHEAD;
 		ctx->bio->bi_iter.bi_sector = sector;
 		bio_set_dev(ctx->bio, iomap->bdev);
+		ctx->bio->bi_private = ctx->end;
 		ctx->bio->bi_end_io = iomap_read_end_io;
 	}
 
@@ -324,22 +332,25 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 int
 iomap_readpage(struct page *page, const struct iomap_ops *ops)
 {
-	struct iomap_readpage_ctx ctx = { .cur_page = page };
+	struct iomap_sync_end end;
+	struct iomap_readpage_ctx ctx = { .cur_page = page, .end = &end, };
 	struct inode *inode = page->mapping->host;
 	unsigned poff;
 	loff_t ret;
 
 	trace_iomap_readpage(page->mapping->host, 1);
 
+	end.status = BLK_STS_OK;
+	init_completion(&end.done);
+
 	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
 		ret = iomap_apply(inode, page_offset(page) + poff,
 				PAGE_SIZE - poff, 0, ops, &ctx,
 				iomap_readpage_actor);
-		if (ret <= 0) {
-			WARN_ON_ONCE(ret == 0);
-			SetPageError(page);
+		if (WARN_ON_ONCE(ret == 0))
+			ret = -EIO;
+		if (ret < 0)
 			break;
-		}
 	}
 
 	if (ctx.bio) {
@@ -347,15 +358,16 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops)
 		WARN_ON_ONCE(!ctx.cur_page_in_bio);
 	} else {
 		WARN_ON_ONCE(ctx.cur_page_in_bio);
-		unlock_page(page);
+		complete(&end.done);
 	}
 
-	/*
-	 * Just like mpage_readahead and block_read_full_page we always
-	 * return 0 and just mark the page as PageError on errors. This
-	 * should be cleaned up all through the stack eventually.
-	 */
-	return 0;
+	wait_for_completion(&end.done);
+	if (ret >= 0)
+		ret = blk_status_to_errno(end.status);
+	if (ret == 0)
+		return AOP_UPDATED_PAGE;
+	unlock_page(page);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(iomap_readpage);
-- 
2.28.0
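
For illustration only (not part of the patch): a sketch of how a caller
might consume the new return convention, assuming ->readpage is backed by
the now-synchronous iomap_readpage() and assuming the AOP_UPDATED_PAGE
semantics from earlier in this series (on success the page comes back
Uptodate and still locked). example_read_one_page() is a made-up helper,
not existing kernel code.

static int example_read_one_page(struct address_space *mapping,
		struct file *file, struct page *page)
{
	/* The page is locked on entry, as ->readpage expects. */
	int ret = mapping->a_ops->readpage(file, page);

	if (ret == AOP_UPDATED_PAGE) {
		/* Success: the page is Uptodate and still locked. */
		unlock_page(page);
		return 0;
	}
	/*
	 * Failure: iomap_readpage() has already unlocked the page,
	 * and ret is the real errno (e.g. -EIO) rather than the old
	 * "return 0 and set PageError" convention.
	 */
	return ret;
}

Because iomap_read_end_io() records only the first error into
iomap_sync_end, a page assembled from multiple bios still reports a
single coherent errno to the caller.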