On Mon, Nov 01, 2021 at 08:39:16PM +0000, Matthew Wilcox (Oracle) wrote:
> Keep iomap_invalidatepage around as a wrapper for use in address_space
> operations.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>

Looks ok,
Reviewed-by: Darrick J. Wong <djwong@xxxxxxxxxx>

--D

> ---
>  fs/iomap/buffered-io.c | 20 ++++++++++++--------
>  include/linux/iomap.h  |  1 +
>  2 files changed, 13 insertions(+), 8 deletions(-)
> 
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index a6b64a1ad468..e9a60520e769 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -468,23 +468,27 @@ iomap_releasepage(struct page *page, gfp_t gfp_mask)
>  }
>  EXPORT_SYMBOL_GPL(iomap_releasepage);
>  
> -void
> -iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
> +void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
>  {
> -	struct folio *folio = page_folio(page);
> -
> -	trace_iomap_invalidatepage(page->mapping->host, offset, len);
> +	trace_iomap_invalidatepage(folio->mapping->host, offset, len);
>  
>  	/*
>  	 * If we're invalidating the entire page, clear the dirty state from it
>  	 * and release it to avoid unnecessary buildup of the LRU.
>  	 */
> -	if (offset == 0 && len == PAGE_SIZE) {
> -		WARN_ON_ONCE(PageWriteback(page));
> -		cancel_dirty_page(page);
> +	if (offset == 0 && len == folio_size(folio)) {
> +		WARN_ON_ONCE(folio_test_writeback(folio));
> +		folio_cancel_dirty(folio);
>  		iomap_page_release(folio);
>  	}
>  }
> +EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
> +
> +void iomap_invalidatepage(struct page *page, unsigned int offset,
> +		unsigned int len)
> +{
> +	iomap_invalidate_folio(page_folio(page), offset, len);
> +}
>  EXPORT_SYMBOL_GPL(iomap_invalidatepage);
>  
>  #ifdef CONFIG_MIGRATION
> diff --git a/include/linux/iomap.h b/include/linux/iomap.h
> index 63f4ea4dac9b..91de58ca09fc 100644
> --- a/include/linux/iomap.h
> +++ b/include/linux/iomap.h
> @@ -225,6 +225,7 @@ void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
>  int iomap_is_partially_uptodate(struct page *page, unsigned long from,
>  		unsigned long count);
>  int iomap_releasepage(struct page *page, gfp_t gfp_mask);
> +void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
>  void iomap_invalidatepage(struct page *page, unsigned int offset,
>  		unsigned int len);
>  #ifdef CONFIG_MIGRATION
> -- 
> 2.33.0
> 
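
(Context note, not part of the patch: a minimal sketch of how a filesystem
might wire these exported helpers into its address_space_operations, keeping
the page-based iomap_invalidatepage wrapper for ->invalidatepage until that
method itself is converted to folios.  The example_aops name and the elided
methods are hypothetical; only the two iomap hooks come from the code above.)

	/* Hypothetical aops table; only the iomap helpers are real symbols. */
	static const struct address_space_operations example_aops = {
		.releasepage		= iomap_releasepage,
		/* page-based wrapper kept by this patch */
		.invalidatepage		= iomap_invalidatepage,
		/* ... readpage, writepages, etc. omitted ... */
	};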