shmem_swapin_page() only brings in order-0 pages, which are folios
by definition.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 arch/arm64/include/asm/pgtable.h |   6 +-
 include/linux/pgtable.h         |   2 +-
 mm/shmem.c                      | 113 ++++++++++++++++++--------------------
 3 files changed, 56 insertions(+), 65 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index dff2b483ea50..27cb6a355fb0 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -964,10 +964,10 @@ static inline void arch_swap_invalidate_area(int type)
 }
 
 #define __HAVE_ARCH_SWAP_RESTORE
-static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
+static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
 {
-	if (system_supports_mte() && mte_restore_tags(entry, page))
-		set_bit(PG_mte_tagged, &page->flags);
+	if (system_supports_mte() && mte_restore_tags(entry, &folio->page))
+		set_bit(PG_mte_tagged, &folio->flags);
 }
 
 #endif /* CONFIG_ARM64_MTE */
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index f4f4077b97aa..a1c44b015463 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -738,7 +738,7 @@ static inline void arch_swap_invalidate_area(int type)
 #endif
 
 #ifndef __HAVE_ARCH_SWAP_RESTORE
-static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
+static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
 {
 }
 #endif
diff --git a/mm/shmem.c b/mm/shmem.c
index 236641c346e8..b0bdfb8d4c15 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -134,8 +134,8 @@ static unsigned long shmem_default_max_inodes(void)
 }
 #endif
 
-static int shmem_swapin_page(struct inode *inode, pgoff_t index,
-			     struct page **pagep, enum sgp_type sgp,
+static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
+			     struct folio **foliop, enum sgp_type sgp,
 			     gfp_t gfp, struct vm_area_struct *vma,
 			     vm_fault_t *fault_type);
 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
@@ -1158,69 +1158,63 @@ static void shmem_evict_inode(struct inode *inode)
 }
 
 static int shmem_find_swap_entries(struct address_space *mapping,
-				   pgoff_t start, unsigned int nr_entries,
-				   struct page **entries, pgoff_t *indices,
-				   unsigned int type)
+				   pgoff_t start, struct folio_batch *fbatch,
+				   pgoff_t *indices, unsigned int type)
 {
 	XA_STATE(xas, &mapping->i_pages, start);
-	struct page *page;
+	struct folio *folio;
 	swp_entry_t entry;
-	unsigned int ret = 0;
 
-	if (!nr_entries)
-		return 0;
-
 	rcu_read_lock();
-	xas_for_each(&xas, page, ULONG_MAX) {
-		if (xas_retry(&xas, page))
+	xas_for_each(&xas, folio, ULONG_MAX) {
+		if (xas_retry(&xas, folio))
 			continue;
 
-		if (!xa_is_value(page))
+		if (!xa_is_value(folio))
 			continue;
 
-		entry = radix_to_swp_entry(page);
+		entry = radix_to_swp_entry(folio);
 		if (swp_type(entry) != type)
 			continue;
 
-		indices[ret] = xas.xa_index;
-		entries[ret] = page;
+		indices[folio_batch_count(fbatch)] = xas.xa_index;
+		if (!folio_batch_add(fbatch, folio))
+			break;
 
 		if (need_resched()) {
 			xas_pause(&xas);
 			cond_resched_rcu();
 		}
-		if (++ret == nr_entries)
-			break;
 	}
 	rcu_read_unlock();
 
-	return ret;
+	return xas.xa_index;
 }
 
 /*
  * Move the swapped pages for an inode to page cache. Returns the count
  * of pages swapped in, or the error in case of failure.
  */
-static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
-				    pgoff_t *indices)
+static int shmem_unuse_swap_entries(struct inode *inode,
+		struct folio_batch *fbatch, pgoff_t *indices)
 {
 	int i = 0;
 	int ret = 0;
 	int error = 0;
 	struct address_space *mapping = inode->i_mapping;
 
-	for (i = 0; i < pvec.nr; i++) {
-		struct page *page = pvec.pages[i];
+	for (i = 0; i < folio_batch_count(fbatch); i++) {
+		struct folio *folio = fbatch->folios[i];
 
-		if (!xa_is_value(page))
+		if (!xa_is_value(folio))
 			continue;
-		error = shmem_swapin_page(inode, indices[i],
-					  &page, SGP_CACHE,
+		error = shmem_swapin_folio(inode, indices[i],
+					  &folio, SGP_CACHE,
 					  mapping_gfp_mask(mapping),
 					  NULL, NULL);
 		if (error == 0) {
-			unlock_page(page);
-			put_page(page);
+			folio_unlock(folio);
+			folio_put(folio);
 			ret++;
 		}
 		if (error == -ENOMEM)
@@ -1237,26 +1232,23 @@ static int shmem_unuse_inode(struct inode *inode, unsigned int type)
 {
 	struct address_space *mapping = inode->i_mapping;
 	pgoff_t start = 0;
-	struct pagevec pvec;
+	struct folio_batch fbatch;
 	pgoff_t indices[PAGEVEC_SIZE];
 	int ret = 0;
 
-	pagevec_init(&pvec);
 	do {
-		unsigned int nr_entries = PAGEVEC_SIZE;
-
-		pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
-						  pvec.pages, indices, type);
-		if (pvec.nr == 0) {
+		folio_batch_init(&fbatch);
+		shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
+		if (folio_batch_count(&fbatch) == 0) {
 			ret = 0;
 			break;
 		}
 
-		ret = shmem_unuse_swap_entries(inode, pvec, indices);
+		ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
 		if (ret < 0)
 			break;
 
-		start = indices[pvec.nr - 1];
+		start = indices[folio_batch_count(&fbatch) - 1];
 	} while (true);
 
 	return ret;
@@ -1686,22 +1678,22 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
  * Returns 0 and the page in pagep if success. On failure, returns the
  * error code and NULL in *pagep.
  */
-static int shmem_swapin_page(struct inode *inode, pgoff_t index,
-			     struct page **pagep, enum sgp_type sgp,
+static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
+			     struct folio **foliop, enum sgp_type sgp,
 			     gfp_t gfp, struct vm_area_struct *vma,
 			     vm_fault_t *fault_type)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
-	struct page *page = NULL;
-	struct folio *folio;
+	struct page *page;
+	struct folio *folio = NULL;
 	swp_entry_t swap;
 	int error;
 
-	VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
-	swap = radix_to_swp_entry(*pagep);
-	*pagep = NULL;
+	VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
+	swap = radix_to_swp_entry(*foliop);
+	*foliop = NULL;
 
 	/* Look it up and read it in.. */
 	page = lookup_swap_cache(swap, NULL, 0);
@@ -1719,27 +1711,28 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
 			goto failed;
 		}
 	}
+	folio = page_folio(page);
 
 	/* We have to do this with page locked to prevent races */
-	lock_page(page);
-	if (!PageSwapCache(page) || page_private(page) != swap.val ||
+	folio_lock(folio);
+	if (!folio_test_swapcache(folio) ||
+	    folio_swap_entry(folio).val != swap.val ||
 	    !shmem_confirm_swap(mapping, index, swap)) {
 		error = -EEXIST;
 		goto unlock;
 	}
-	if (!PageUptodate(page)) {
+	if (!folio_test_uptodate(folio)) {
 		error = -EIO;
 		goto failed;
 	}
-	wait_on_page_writeback(page);
+	folio_wait_writeback(folio);
 
 	/*
 	 * Some architectures may have to restore extra metadata to the
-	 * physical page after reading from swap.
+	 * folio after reading from swap.
 	 */
-	arch_swap_restore(swap, page);
+	arch_swap_restore(swap, folio);
 
-	folio = page_folio(page);
 	if (shmem_should_replace_folio(folio, gfp)) {
 		error = shmem_replace_page(&page, gfp, info, index);
 		if (error)
@@ -1758,21 +1751,21 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
 	spin_unlock_irq(&info->lock);
 
 	if (sgp == SGP_WRITE)
-		mark_page_accessed(page);
+		folio_mark_accessed(folio);
 
-	delete_from_swap_cache(page);
-	set_page_dirty(page);
+	delete_from_swap_cache(&folio->page);
+	folio_mark_dirty(folio);
 	swap_free(swap);
 
-	*pagep = page;
+	*foliop = folio;
 	return 0;
 failed:
 	if (!shmem_confirm_swap(mapping, index, swap))
 		error = -EEXIST;
 unlock:
-	if (page) {
-		unlock_page(page);
-		put_page(page);
+	if (folio) {
+		folio_unlock(folio);
+		folio_put(folio);
 	}
 
 	return error;
@@ -1826,13 +1819,12 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	}
 
 	if (xa_is_value(folio)) {
-		struct page *page = &folio->page;
-		error = shmem_swapin_page(inode, index, &page,
+		error = shmem_swapin_folio(inode, index, &folio,
 					  sgp, gfp, vma, fault_type);
 		if (error == -EEXIST)
 			goto repeat;
 
-		*pagep = page;
+		*pagep = &folio->page;
 		return error;
 	}
 
-- 
2.34.1
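For reference, the unuse path above replaces an open-coded pagevec fill
with a folio_batch.  The loop in shmem_unuse_inode() is an instance of a
general fill/drain pattern; a minimal sketch follows, where
find_entries() and process_entries() are hypothetical stand-ins for
shmem_find_swap_entries() and shmem_unuse_swap_entries():

	#include <linux/pagevec.h>	/* struct folio_batch */

	static int drain_all_entries(struct address_space *mapping)
	{
		struct folio_batch fbatch;
		pgoff_t indices[PAGEVEC_SIZE];
		pgoff_t start = 0;
		int ret;

		do {
			/* Reset the batch; it holds up to PAGEVEC_SIZE folios. */
			folio_batch_init(&fbatch);
			find_entries(mapping, start, &fbatch, indices);
			if (folio_batch_count(&fbatch) == 0)
				return 0;

			ret = process_entries(mapping, &fbatch, indices);
			if (ret < 0)
				return ret;

			/*
			 * Resume from the last index found; entries already
			 * brought into the page cache are no longer value
			 * entries, so they will not be found again.
			 */
			start = indices[folio_batch_count(&fbatch) - 1];
		} while (true);
	}

Note that folio_batch_add() returns the number of slots left in the
batch, so the search helper stops as soon as the batch is full and the
caller picks up from the last recorded index on its next iteration.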
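The swapin path also shows the transition idiom used while neighbouring
APIs still take struct page: look the page up, convert it once with
page_folio(), and use folio operations from then on.  For the order-0
pages handled here, the folio is just a retyped view of the same memory.
A minimal sketch (page_folio_example() is hypothetical, not shmem code):

	#include <linux/mm.h>
	#include <linux/pagemap.h>	/* folio_lock() and friends */

	static void page_folio_example(struct page *page)
	{
		/* Pure type conversion: no reference count change. */
		struct folio *folio = page_folio(page);

		folio_lock(folio);		/* replaces lock_page(page) */
		folio_mark_dirty(folio);	/* replaces set_page_dirty(page) */
		folio_unlock(folio);		/* replaces unlock_page(page) */

		/*
		 * Callees not yet converted still take a struct page; pass
		 * &folio->page, as the patch does for delete_from_swap_cache()
		 * and shmem_replace_page().
		 */
	}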