Use folios throughout.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 fs/nilfs2/page.c | 60 ++++++++++++++++++++++++------------------------
 1 file changed, 30 insertions(+), 30 deletions(-)

diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index a8e88cc38e16..3267e96c256c 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -294,57 +294,57 @@ int nilfs_copy_dirty_pages(struct address_space *dmap,
 void nilfs_copy_back_pages(struct address_space *dmap,
 			   struct address_space *smap)
 {
-	struct pagevec pvec;
+	struct folio_batch fbatch;
 	unsigned int i, n;
-	pgoff_t index = 0;
+	pgoff_t start = 0;
 
-	pagevec_init(&pvec);
+	folio_batch_init(&fbatch);
 repeat:
-	n = pagevec_lookup(&pvec, smap, &index);
+	n = filemap_get_folios(smap, &start, ~0UL, &fbatch);
 	if (!n)
 		return;
 
-	for (i = 0; i < pagevec_count(&pvec); i++) {
-		struct page *page = pvec.pages[i], *dpage;
-		pgoff_t offset = page->index;
-
-		lock_page(page);
-		dpage = find_lock_page(dmap, offset);
-		if (dpage) {
-			/* overwrite existing page in the destination cache */
-			WARN_ON(PageDirty(dpage));
-			nilfs_copy_page(dpage, page, 0);
-			unlock_page(dpage);
-			put_page(dpage);
-			/* Do we not need to remove page from smap here? */
+	for (i = 0; i < folio_batch_count(&fbatch); i++) {
+		struct folio *folio = fbatch.folios[i], *dfolio;
+		pgoff_t index = folio->index;
+
+		folio_lock(folio);
+		dfolio = filemap_lock_folio(dmap, index);
+		if (dfolio) {
+			/* overwrite existing folio in the destination cache */
+			WARN_ON(folio_test_dirty(dfolio));
+			nilfs_copy_page(&dfolio->page, &folio->page, 0);
+			folio_unlock(dfolio);
+			folio_put(dfolio);
+			/* Do we not need to remove folio from smap here? */
 		} else {
-			struct page *p;
+			struct folio *f;
 
-			/* move the page to the destination cache */
+			/* move the folio to the destination cache */
 			xa_lock_irq(&smap->i_pages);
-			p = __xa_erase(&smap->i_pages, offset);
-			WARN_ON(page != p);
+			f = __xa_erase(&smap->i_pages, index);
+			WARN_ON(folio != f);
 			smap->nrpages--;
 			xa_unlock_irq(&smap->i_pages);
 
 			xa_lock_irq(&dmap->i_pages);
-			p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);
-			if (unlikely(p)) {
+			f = __xa_store(&dmap->i_pages, index, folio, GFP_NOFS);
+			if (unlikely(f)) {
 				/* Probably -ENOMEM */
-				page->mapping = NULL;
-				put_page(page);
+				folio->mapping = NULL;
+				folio_put(folio);
 			} else {
-				page->mapping = dmap;
+				folio->mapping = dmap;
 				dmap->nrpages++;
-				if (PageDirty(page))
-					__xa_set_mark(&dmap->i_pages, offset,
+				if (folio_test_dirty(folio))
+					__xa_set_mark(&dmap->i_pages, index,
 						      PAGECACHE_TAG_DIRTY);
 			}
 			xa_unlock_irq(&dmap->i_pages);
 		}
-		unlock_page(page);
+		folio_unlock(folio);
 	}
-	pagevec_release(&pvec);
+	folio_batch_release(&fbatch);
 
 	cond_resched();
 	goto repeat;
-- 
2.35.1
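
For anyone following the pagevec -> folio_batch conversion, the loop above reduces to a generic pattern for walking every folio in an address_space. Below is a minimal, self-contained sketch of that pattern. It is not part of the patch: the function name example_walk_mapping is hypothetical, and the body only uses the generic helpers the patch switches to (filemap_get_folios(), folio_batch_init/count/release, folio_lock/unlock).

/*
 * Sketch only: walk every folio currently in 'mapping'.
 * example_walk_mapping() is a made-up name for illustration.
 */
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

static void example_walk_mapping(struct address_space *mapping)
{
	struct folio_batch fbatch;
	unsigned int i;
	pgoff_t start = 0;

	folio_batch_init(&fbatch);
	/* filemap_get_folios() advances 'start' past each batch it fills */
	while (filemap_get_folios(mapping, &start, ~0UL, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			/* ... operate on the locked folio here ... */
			folio_unlock(folio);
		}
		/* drop the references filemap_get_folios() took */
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

Two points worth noting about the pattern: filemap_get_folios() updates 'start' itself, so the caller no longer maintains a separate index variable the way pagevec_lookup() required; and it takes a reference on each folio it returns, which is why folio_batch_release() after each batch is mandatory and why the patch can safely keep its repeat loop.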