Add a folio_unmap_invalidate() helper, which unmaps and invalidates a
given folio. The caller must already have locked the folio. Use this
new helper in invalidate_inode_pages2_range(), rather than duplicate
the code there.

In preparation for using this elsewhere as well, have it take a gfp_t
mask rather than assume GFP_KERNEL is the right choice. This bubbles
back to invalidate_complete_folio2() as well.

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 include/linux/pagemap.h |  2 ++
 mm/truncate.c           | 33 ++++++++++++++++++++-------------
 2 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 8afacb7520d4..d55bf995bd9e 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -34,6 +34,8 @@ int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
 void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);
 int filemap_invalidate_pages(struct address_space *mapping, loff_t pos,
		loff_t end, bool nowait);
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+		gfp_t gfp);
 
 int write_inode_now(struct inode *, int sync);
 int filemap_fdatawrite(struct address_space *);
diff --git a/mm/truncate.c b/mm/truncate.c
index 0668cd340a46..5663c3f1d548 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -547,12 +547,12 @@ EXPORT_SYMBOL(invalidate_mapping_pages);
  * sitting in the folio_add_lru() caches.
  */
 static int invalidate_complete_folio2(struct address_space *mapping,
-					struct folio *folio)
+					struct folio *folio, gfp_t gfp_mask)
 {
 	if (folio->mapping != mapping)
 		return 0;
 
-	if (!filemap_release_folio(folio, GFP_KERNEL))
+	if (!filemap_release_folio(folio, gfp_mask))
 		return 0;
 
 	spin_lock(&mapping->host->i_lock);
@@ -584,6 +584,23 @@ static int folio_launder(struct address_space *mapping, struct folio *folio)
 	return mapping->a_ops->launder_folio(folio);
 }
 
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+		gfp_t gfp)
+{
+	int ret;
+
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+
+	if (folio_mapped(folio))
+		unmap_mapping_folio(folio);
+	BUG_ON(folio_mapped(folio));
+
+	ret = folio_launder(mapping, folio);
+	if (!ret && !invalidate_complete_folio2(mapping, folio, gfp))
+		return -EBUSY;
+	return ret;
+}
+
 /**
  * invalidate_inode_pages2_range - remove range of pages from an address_space
  * @mapping: the address_space
@@ -641,18 +658,8 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 				folio_unlock(folio);
 				continue;
 			}
-			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
 			folio_wait_writeback(folio);
-
-			if (folio_mapped(folio))
-				unmap_mapping_folio(folio);
-			BUG_ON(folio_mapped(folio));
-
-			ret2 = folio_launder(mapping, folio);
-			if (ret2 == 0) {
-				if (!invalidate_complete_folio2(mapping, folio))
-					ret2 = -EBUSY;
-			}
+			ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
 			if (ret2 < 0)
 				ret = ret2;
 			folio_unlock(folio);
-- 
2.45.2
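
For illustration, a minimal sketch of how a caller outside mm/truncate.c
might use the new helper once it is declared in pagemap.h, and why the
gfp parameter matters. The wrapper name drop_locked_folio_nowait() and
the choice of GFP_NOWAIT are assumptions for this sketch, not part of
the patch:

/*
 * Illustrative only: a hypothetical caller that wants to drop a locked
 * folio without sleeping in reclaim. Everything here other than
 * folio_unmap_invalidate() itself is an assumption for this sketch.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>

static int drop_locked_folio_nowait(struct address_space *mapping,
				    struct folio *folio)
{
	/* The helper requires the folio to already be locked */
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	/*
	 * The gfp mask propagates through invalidate_complete_folio2()
	 * down to filemap_release_folio(), so with GFP_NOWAIT buffer
	 * freeing cannot sleep. Returns 0 on success, -EBUSY if the
	 * folio could not be invalidated, or a negative error from
	 * ->launder_folio().
	 */
	return folio_unmap_invalidate(mapping, folio, GFP_NOWAIT);
}

This is the point of taking a gfp mask rather than hardcoding
GFP_KERNEL: a nonblocking context can pass something stricter without
the helper making that decision on its behalf.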