From: Zhang Yi <yi.zhang@xxxxxxxxxx>

Now we only allocate an ifs when writing back dirty folios in
iomap_writepage_map() if i_blocks_per_folio() is larger than one, so a
buffered write that covers an entire folio does not attach an ifs until
that folio starts writing back. If we partially truncate that folio
before writeback, iomap_invalidate_folio() can't clear the
corresponding blocks' dirty bits as expected. Fix this by advancing the
ifs allocation to __iomap_write_begin().

Signed-off-by: Zhang Yi <yi.zhang@xxxxxxxxxx>
---
 fs/iomap/buffered-io.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 64c4808fab31..ec17bf8d62e9 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -686,6 +686,12 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 	size_t from = offset_in_folio(folio, pos), to = from + len;
 	size_t poff, plen;
 
+	if (nr_blocks > 1) {
+		ifs = ifs_alloc(iter->inode, folio, iter->flags);
+		if ((iter->flags & IOMAP_NOWAIT) && !ifs)
+			return -EAGAIN;
+	}
+
 	/*
 	 * If the write or zeroing completely overlaps the current folio, then
 	 * entire folio will be dirtied so there is no need for
@@ -697,10 +703,6 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 	    pos + len >= folio_pos(folio) + folio_size(folio))
 		return 0;
 
-	ifs = ifs_alloc(iter->inode, folio, iter->flags);
-	if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
-		return -EAGAIN;
-
 	if (folio_test_uptodate(folio))
 		return 0;
 	folio_clear_error(folio);
@@ -1913,7 +1915,12 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	WARN_ON_ONCE(end_pos <= pos);
 
 	if (i_blocks_per_folio(inode, folio) > 1) {
-		if (!ifs) {
+		/*
+		 * This should not happen since we always allocate an ifs in
+		 * iomap_folio_mkwrite_iter() and in __iomap_write_begin()
+		 * when there is more than one block per folio.
+		 */
+		if (WARN_ON_ONCE(!ifs)) {
 			ifs = ifs_alloc(inode, folio, 0);
 			iomap_set_range_dirty(folio, 0, end_pos - pos);
 		}
-- 
2.39.2
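
For reference, a minimal user-space sketch of the scenario described in
the commit message; it is not part of the original posting. It assumes a
filesystem whose block size is smaller than the folio size (e.g. 1k
blocks with 4k pages) mounted at /mnt, and the file name and sizes are
only illustrative. The buffered write dirties a whole folio (so, before
this fix, no ifs is attached), and the partial truncate that follows is
what iomap_invalidate_folio() then has to handle:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd;

	/* Hypothetical test file on a small-block filesystem. */
	fd = open("/mnt/testfile", O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (fd < 0)
		return 1;

	/* Buffered write covering an entire folio: dirties every block. */
	memset(buf, 0xaa, sizeof(buf));
	if (pwrite(fd, buf, sizeof(buf), 0) != (ssize_t)sizeof(buf))
		return 1;

	/* Partial truncate inside that folio, then force writeback. */
	if (ftruncate(fd, 1024) < 0)
		return 1;
	fsync(fd);

	close(fd);
	return 0;
}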