The patch titled
     Subject: mm/shmem: convert shmem_add_to_page_cache to take a folio
has been added to the -mm mm-unstable branch.  Its filename is
     mm-shmem-convert-shmem_add_to_page_cache-to-take-a-folio.patch

This patch should soon appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when
    testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>
Subject: mm/shmem: convert shmem_add_to_page_cache to take a folio

Shrinks shmem_add_to_page_cache() by 16 bytes.  All the callers grow,
but this is temporary as they will all be converted to folios soon.

Link: https://lkml.kernel.org/r/20220429192329.3034378-17-willy@xxxxxxxxxxxxx
Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/shmem.c |   59 +++++++++++++++++++++++++++------------------------
 1 file changed, 32 insertions(+), 27 deletions(-)

--- a/mm/shmem.c~mm-shmem-convert-shmem_add_to_page_cache-to-take-a-folio
+++ a/mm/shmem.c
@@ -696,36 +696,35 @@ static unsigned long shmem_unused_huge_s
 /*
  * Like add_to_page_cache_locked, but error if expected item has gone.
  */
-static int shmem_add_to_page_cache(struct page *page,
+static int shmem_add_to_page_cache(struct folio *folio,
 				   struct address_space *mapping,
 				   pgoff_t index, void *expected, gfp_t gfp,
 				   struct mm_struct *charge_mm)
 {
-	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
-	unsigned long nr = compound_nr(page);
+	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
+	long nr = folio_nr_pages(folio);
 	int error;
 
-	VM_BUG_ON_PAGE(PageTail(page), page);
-	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
-	VM_BUG_ON(expected && PageTransHuge(page));
-
-	page_ref_add(page, nr);
-	page->mapping = mapping;
-	page->index = index;
+	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
+	VM_BUG_ON(expected && folio_test_large(folio));
+
+	folio_ref_add(folio, nr);
+	folio->mapping = mapping;
+	folio->index = index;
 
-	if (!PageSwapCache(page)) {
-		error = mem_cgroup_charge(page_folio(page), charge_mm, gfp);
+	if (!folio_test_swapcache(folio)) {
+		error = mem_cgroup_charge(folio, charge_mm, gfp);
 		if (error) {
-			if (PageTransHuge(page)) {
+			if (folio_test_large(folio)) {
 				count_vm_event(THP_FILE_FALLBACK);
 				count_vm_event(THP_FILE_FALLBACK_CHARGE);
 			}
 			goto error;
 		}
 	}
-	cgroup_throttle_swaprate(page, gfp);
+	folio_throttle_swaprate(folio, gfp);
 
 	do {
 		xas_lock_irq(&xas);
@@ -737,16 +736,16 @@ static int shmem_add_to_page_cache(struc
 			xas_set_err(&xas, -EEXIST);
 			goto unlock;
 		}
-		xas_store(&xas, page);
+		xas_store(&xas, folio);
 		if (xas_error(&xas))
 			goto unlock;
-		if (PageTransHuge(page)) {
+		if (folio_test_large(folio)) {
 			count_vm_event(THP_FILE_ALLOC);
-			__mod_lruvec_page_state(page, NR_SHMEM_THPS, nr);
+			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
 		}
 		mapping->nrpages += nr;
-		__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
-		__mod_lruvec_page_state(page, NR_SHMEM, nr);
+		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
+		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
 unlock:
 		xas_unlock_irq(&xas);
 	} while (xas_nomem(&xas, gfp));
@@ -758,8 +757,8 @@ unlock:
 
 	return 0;
 error:
-	page->mapping = NULL;
-	page_ref_sub(page, nr);
+	folio->mapping = NULL;
+	folio_ref_sub(folio, nr);
 	return error;
 }
 
@@ -1691,7 +1690,8 @@ static int shmem_swapin_page(struct inod
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
-	struct page *page;
+	struct page *page = NULL;
+	struct folio *folio;
 	swp_entry_t swap;
 	int error;
 
@@ -1741,7 +1741,8 @@ static int shmem_swapin_page(struct inod
 		goto failed;
 	}
 
-	error = shmem_add_to_page_cache(page, mapping, index,
+	folio = page_folio(page);
+	error = shmem_add_to_page_cache(folio, mapping, index,
 					swp_to_radix_entry(swap), gfp,
 					charge_mm);
 	if (error)
@@ -1792,6 +1793,7 @@ static int shmem_getpage_gfp(struct inod
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct shmem_sb_info *sbinfo;
 	struct mm_struct *charge_mm;
+	struct folio *folio;
 	struct page *page;
 	pgoff_t hindex = index;
 	gfp_t huge_gfp;
@@ -1906,7 +1908,8 @@ alloc_nohuge:
 	if (sgp == SGP_WRITE)
 		__SetPageReferenced(page);
 
-	error = shmem_add_to_page_cache(page, mapping, hindex,
+	folio = page_folio(page);
+	error = shmem_add_to_page_cache(folio, mapping, hindex,
 					NULL, gfp & GFP_RECLAIM_MASK,
 					charge_mm);
 	if (error)
@@ -2328,6 +2331,7 @@ int shmem_mfill_atomic_pte(struct mm_str
 	gfp_t gfp = mapping_gfp_mask(mapping);
 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
 	void *page_kaddr;
+	struct folio *folio;
 	struct page *page;
 	int ret;
 	pgoff_t max_off;
@@ -2386,7 +2390,8 @@ int shmem_mfill_atomic_pte(struct mm_str
 	if (unlikely(pgoff >= max_off))
 		goto out_release;
 
-	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
+	folio = page_folio(page);
+	ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
 				      gfp & GFP_RECLAIM_MASK, dst_mm);
 	if (ret)
 		goto out_release;
_
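The caller-side change is mechanical, and the temporary growth the
changelog mentions is one wrapper line per caller.  A minimal sketch of
the interim idiom (the function below is hypothetical and not part of
the patch; page_folio(), GFP_RECLAIM_MASK and the new
shmem_add_to_page_cache() signature are as in the diff above):

/*
 * Hypothetical example for illustration only: a caller that still
 * holds a struct page wraps it with page_folio() before calling the
 * folio-taking shmem_add_to_page_cache().
 */
static int shmem_example_add(struct page *page,
			     struct address_space *mapping,
			     pgoff_t index, gfp_t gfp,
			     struct mm_struct *charge_mm)
{
	struct folio *folio = page_folio(page);

	return shmem_add_to_page_cache(folio, mapping, index, NULL,
				       gfp & GFP_RECLAIM_MASK, charge_mm);
}

Since page_folio() merely canonicalises to the head page, the wrapper
line disappears once each caller is converted to hold a folio natively.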

Patches currently in -mm which might be from willy@xxxxxxxxxxxxx are

shmem-convert-shmem_alloc_hugepage-to-use-vma_alloc_folio.patch
mm-huge_memory-convert-do_huge_pmd_anonymous_page-to-use-vma_alloc_folio.patch
mm-remove-alloc_pages_vma.patch
vmscan-use-folio_mapped-in-shrink_page_list.patch
vmscan-convert-the-writeback-handling-in-shrink_page_list-to-folios.patch
swap-turn-get_swap_page-into-folio_alloc_swap.patch
swap-convert-add_to_swap-to-take-a-folio.patch
vmscan-convert-dirty-page-handling-to-folios.patch
vmscan-convert-page-buffer-handling-to-use-folios.patch
vmscan-convert-lazy-freeing-to-folios.patch
vmscan-move-initialisation-of-mapping-down.patch
vmscan-convert-the-activate_locked-portion-of-shrink_page_list-to-folios.patch
vmscan-remove-remaining-uses-of-page-in-shrink_page_list.patch
mm-shmem-use-a-folio-in-shmem_unused_huge_shrink.patch
mm-swap-add-folio_throttle_swaprate.patch
mm-shmem-convert-shmem_add_to_page_cache-to-take-a-folio.patch
mm-shmem-turn-shmem_should_replace_page-into-shmem_should_replace_folio.patch
mm-shmem-turn-shmem_alloc_page-into-shmem_alloc_folio.patch
mm-shmem-convert-shmem_alloc_and_acct_page-to-use-a-folio.patch
mm-shmem-convert-shmem_getpage_gfp-to-use-a-folio.patch
mm-shmem-convert-shmem_swapin_page-to-shmem_swapin_folio.patch
vmcore-convert-copy_oldmem_page-to-take-an-iov_iter.patch
vmcore-convert-__read_vmcore-to-use-an-iov_iter.patch
vmcore-convert-read_from_oldmem-to-take-an-iov_iter.patch