To support large folio swapin for shmem in the following patches, add
large folio allocation for the new replacement folio in
shmem_replace_folio(), and update the statistics using the number of
pages in the folio.

Signed-off-by: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
---
 mm/shmem.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 174d8ae25b9b..eefdf5c61c04 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1889,7 +1889,7 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
 	 */
 	gfp &= ~GFP_CONSTRAINT_MASK;
 	VM_BUG_ON_FOLIO(folio_test_large(old), old);
-	new = shmem_alloc_folio(gfp, 0, info, index);
+	new = shmem_alloc_folio(gfp, folio_order(old), info, index);
 	if (!new)
 		return -ENOMEM;
 
@@ -1910,11 +1910,13 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
 	xa_lock_irq(&swap_mapping->i_pages);
 	error = shmem_replace_entry(swap_mapping, swap_index, old, new);
 	if (!error) {
+		int nr_pages = folio_nr_pages(old);
+
 		mem_cgroup_migrate(old, new);
-		__lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
-		__lruvec_stat_mod_folio(new, NR_SHMEM, 1);
-		__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
-		__lruvec_stat_mod_folio(old, NR_SHMEM, -1);
+		__lruvec_stat_mod_folio(new, NR_FILE_PAGES, nr_pages);
+		__lruvec_stat_mod_folio(new, NR_SHMEM, nr_pages);
+		__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -nr_pages);
+		__lruvec_stat_mod_folio(old, NR_SHMEM, -nr_pages);
 	}
 	xa_unlock_irq(&swap_mapping->i_pages);
 
-- 
2.39.3
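
For context: folio_nr_pages() of an order-N folio is 1 << N, so the
hard-coded +/-1 updates removed above would leave NR_FILE_PAGES and
NR_SHMEM unbalanced whenever the replaced folio is large. Below is a
minimal userspace sketch of the accounting invariant the patch
preserves; the struct and helper names only mimic the kernel's folio
API and are hypothetical stand-ins, not kernel code:

	#include <assert.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the kernel's folio accounting. */
	struct folio  { unsigned int order; };  /* order-N folio = 2^N pages */
	struct lruvec { long nr_file_pages; long nr_shmem; };

	static unsigned long folio_nr_pages(const struct folio *folio)
	{
		return 1UL << folio->order;  /* same relation the kernel uses */
	}

	/*
	 * Models the stat movement in shmem_replace_folio() after the
	 * patch: debit the old folio's lruvec and credit the new one by
	 * the page count of the folio, instead of a hard-coded +/-1.
	 */
	static void replace_stats(struct lruvec *old_vec, struct lruvec *new_vec,
				  const struct folio *old)
	{
		long nr_pages = (long)folio_nr_pages(old);

		new_vec->nr_file_pages += nr_pages;
		new_vec->nr_shmem      += nr_pages;
		old_vec->nr_file_pages -= nr_pages;
		old_vec->nr_shmem      -= nr_pages;
	}

	int main(void)
	{
		struct folio  old  = { .order = 4 };  /* 16-page (64K) folio */
		struct lruvec from = { .nr_file_pages = 16, .nr_shmem = 16 };
		struct lruvec to   = { 0, 0 };

		replace_stats(&from, &to, &old);

		/*
		 * All 16 pages of residency moved; nothing was created or
		 * lost. With the old +/-1, 'from' would still claim 15
		 * pages and 'to' only 1 after replacing an order-4 folio.
		 */
		assert(from.nr_file_pages == 0 && from.nr_shmem == 0);
		assert(to.nr_file_pages == 16 && to.nr_shmem == 16);
		printf("moved %lu pages of shmem accounting\n",
		       folio_nr_pages(&old));
		return 0;
	}

The same reasoning motivates the allocation change: the replacement
must be allocated at folio_order(old) so the new folio actually has
the nr_pages the stat updates now account for.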