The patch titled
     Subject: userfaultfd: convert mfill_atomic() to use a folio
has been added to the -mm mm-unstable branch.  Its filename is
     userfaultfd-convert-mfill_atomic-to-use-a-folio.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/userfaultfd-convert-mfill_atomic-to-use-a-folio.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: ZhangPeng <zhangpeng362@xxxxxxxxxx>
Subject: userfaultfd: convert mfill_atomic() to use a folio
Date: Mon, 10 Apr 2023 21:39:32 +0800

Convert mfill_atomic_pte_copy(), shmem_mfill_atomic_pte() and
mfill_atomic_pte() to take in a folio pointer.  Convert mfill_atomic() to
use a folio.  Convert page_kaddr to kaddr in mfill_atomic().

Link: https://lkml.kernel.org/r/20230410133932.32288-7-zhangpeng362@xxxxxxxxxx
Signed-off-by: ZhangPeng <zhangpeng362@xxxxxxxxxx>
Reviewed-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Cc: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Muchun Song <muchun.song@xxxxxxxxx>
Cc: Nanyong Sun <sunnanyong@xxxxxxxxxx>
Cc: Sidhartha Kumar <sidhartha.kumar@xxxxxxxxxx>
Cc: Vishal Moola (Oracle) <vishal.moola@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/shmem_fs.h |    4 +--
 mm/shmem.c               |   16 +++++++-------
 mm/userfaultfd.c         |   40 ++++++++++++++++++-------------------
 3 files changed, 30 insertions(+), 30 deletions(-)
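For readers following the folio conversion series: the heart of the change
below is that the copy/retry path now maps and flushes through the folio
API rather than through the head page.  A minimal, hypothetical sketch of
that copy step follows -- uffd_fill_from_user() is a made-up name for
illustration and is not part of the patch:

	#include <linux/highmem.h>	/* kmap_local_folio(), kunmap_local() */
	#include <linux/uaccess.h>	/* copy_from_user() */
	#include <linux/cacheflush.h>	/* flush_dcache_folio() */

	/*
	 * Hypothetical helper: copy one page of userspace data into an
	 * order-0 folio, mirroring the -ENOENT retry path of
	 * mfill_atomic() after this patch.
	 */
	static int uffd_fill_from_user(struct folio *folio,
				       unsigned long src_addr)
	{
		/* kmap_local_folio(folio, 0) replaces kmap_local_page(page) */
		void *kaddr = kmap_local_folio(folio, 0);
		unsigned long left = copy_from_user(kaddr,
					(const void __user *)src_addr,
					PAGE_SIZE);

		kunmap_local(kaddr);
		if (left)
			return -EFAULT;
		/* flush_dcache_folio() replaces flush_dcache_page() */
		flush_dcache_folio(folio);
		return 0;
	}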
--- a/include/linux/shmem_fs.h~userfaultfd-convert-mfill_atomic-to-use-a-folio
+++ a/include/linux/shmem_fs.h
@@ -165,10 +165,10 @@ extern int shmem_mfill_atomic_pte(pmd_t
 				  unsigned long dst_addr,
 				  unsigned long src_addr,
 				  uffd_flags_t flags,
-				  struct page **pagep);
+				  struct folio **foliop);
 #else /* !CONFIG_SHMEM */
 #define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \
-			       src_addr, flags, pagep) ({ BUG(); 0; })
+			       src_addr, flags, foliop) ({ BUG(); 0; })
 #endif /* CONFIG_SHMEM */
 #endif /* CONFIG_USERFAULTFD */
--- a/mm/shmem.c~userfaultfd-convert-mfill_atomic-to-use-a-folio
+++ a/mm/shmem.c
@@ -2548,7 +2548,7 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pm
 			   unsigned long dst_addr,
 			   unsigned long src_addr,
 			   uffd_flags_t flags,
-			   struct page **pagep)
+			   struct folio **foliop)
 {
 	struct inode *inode = file_inode(dst_vma->vm_file);
 	struct shmem_inode_info *info = SHMEM_I(inode);
@@ -2566,14 +2566,14 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pm
 		 * and now we find ourselves with -ENOMEM. Release the page, to
 		 * avoid a BUG_ON in our caller.
 		 */
-		if (unlikely(*pagep)) {
-			put_page(*pagep);
-			*pagep = NULL;
+		if (unlikely(*foliop)) {
+			folio_put(*foliop);
+			*foliop = NULL;
 		}
 		return -ENOMEM;
 	}
 
-	if (!*pagep) {
+	if (!*foliop) {
 		ret = -ENOMEM;
 		folio = shmem_alloc_folio(gfp, info, pgoff);
 		if (!folio)
@@ -2605,7 +2605,7 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pm
 
 			/* fallback to copy_from_user outside mmap_lock */
 			if (unlikely(ret)) {
-				*pagep = &folio->page;
+				*foliop = folio;
 				ret = -ENOENT;
 				/* don't free the page */
 				goto out_unacct_blocks;
@@ -2616,9 +2616,9 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pm
 			clear_user_highpage(&folio->page, dst_addr);
 		}
 	} else {
-		folio = page_folio(*pagep);
+		folio = *foliop;
 		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
-		*pagep = NULL;
+		*foliop = NULL;
 	}
 
 	VM_BUG_ON(folio_test_locked(folio));
--- a/mm/userfaultfd.c~userfaultfd-convert-mfill_atomic-to-use-a-folio
+++ a/mm/userfaultfd.c
@@ -133,13 +133,13 @@ static int mfill_atomic_pte_copy(pmd_t *
 				 unsigned long dst_addr,
 				 unsigned long src_addr,
 				 uffd_flags_t flags,
-				 struct page **pagep)
+				 struct folio **foliop)
 {
 	void *kaddr;
 	int ret;
 	struct folio *folio;
 
-	if (!*pagep) {
+	if (!*foliop) {
 		ret = -ENOMEM;
 		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
 					dst_addr, false);
@@ -171,15 +171,15 @@ static int mfill_atomic_pte_copy(pmd_t *
 		/* fallback to copy_from_user outside mmap_lock */
 		if (unlikely(ret)) {
 			ret = -ENOENT;
-			*pagep = &folio->page;
+			*foliop = folio;
 			/* don't free the page */
 			goto out;
 		}
 
 		flush_dcache_folio(folio);
 	} else {
-		folio = page_folio(*pagep);
-		*pagep = NULL;
+		folio = *foliop;
+		*foliop = NULL;
 	}
 
 	/*
@@ -470,7 +470,7 @@ static __always_inline ssize_t mfill_ato
 					    unsigned long dst_addr,
 					    unsigned long src_addr,
 					    uffd_flags_t flags,
-					    struct page **pagep)
+					    struct folio **foliop)
 {
 	ssize_t err;
 
@@ -493,14 +493,14 @@ static __always_inline ssize_t mfill_ato
 		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
 			err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
 						    dst_addr, src_addr,
-						    flags, pagep);
+						    flags, foliop);
 		else
 			err = mfill_atomic_pte_zeropage(dst_pmd,
 						 dst_vma, dst_addr);
 	} else {
 		err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
 					     dst_addr, src_addr,
-					     flags, pagep);
+					     flags, foliop);
 	}
 
 	return err;
@@ -518,7 +518,7 @@ static __always_inline ssize_t mfill_ato
 	pmd_t *dst_pmd;
 	unsigned long src_addr, dst_addr;
 	long copied;
-	struct page *page;
+	struct folio *folio;
 
 	/*
 	 * Sanitize the command parameters:
@@ -533,7 +533,7 @@ static __always_inline ssize_t mfill_ato
 	src_addr = src_start;
 	dst_addr = dst_start;
 	copied = 0;
-	page = NULL;
+	folio = NULL;
 retry:
 	mmap_read_lock(dst_mm);
 
@@ -629,28 +629,28 @@ retry:
 		BUG_ON(pmd_trans_huge(*dst_pmd));
 
 		err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
-				       src_addr, flags, &page);
+				       src_addr, flags, &folio);
 		cond_resched();
 
 		if (unlikely(err == -ENOENT)) {
-			void *page_kaddr;
+			void *kaddr;
 
 			mmap_read_unlock(dst_mm);
-			BUG_ON(!page);
+			BUG_ON(!folio);
 
-			page_kaddr = kmap_local_page(page);
-			err = copy_from_user(page_kaddr,
+			kaddr = kmap_local_folio(folio, 0);
+			err = copy_from_user(kaddr,
 					     (const void __user *) src_addr,
 					     PAGE_SIZE);
-			kunmap_local(page_kaddr);
+			kunmap_local(kaddr);
 			if (unlikely(err)) {
 				err = -EFAULT;
 				goto out;
 			}
-			flush_dcache_page(page);
+			flush_dcache_folio(folio);
 			goto retry;
 		} else
-			BUG_ON(page);
+			BUG_ON(folio);
 
 		if (!err) {
 			dst_addr += PAGE_SIZE;
@@ -667,8 +667,8 @@ retry:
 out_unlock:
 	mmap_read_unlock(dst_mm);
 out:
-	if (page)
-		put_page(page);
+	if (folio)
+		folio_put(folio);
 	BUG_ON(copied < 0);
 	BUG_ON(err > 0);
 	BUG_ON(!copied && !err);
_
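The pattern worth noting in the hunks above is the ownership handoff: on
-ENOENT the per-pte helper parks its freshly allocated folio in *foliop so
mfill_atomic() can fill it outside mmap_lock and pass it back in on retry,
where it is now consumed directly instead of via page_folio().  A
condensed, hypothetical sketch of that shape -- pte_helper_sketch(),
new_folio and need_fallback are illustrative names, not from the patch:

	#include <linux/mm.h>	/* struct folio */

	/*
	 * Hypothetical condensation of the -ENOENT handoff after this
	 * patch; the real logic lives in mfill_atomic_pte_copy() and
	 * shmem_mfill_atomic_pte().
	 */
	static int pte_helper_sketch(struct folio **foliop,
				     struct folio *new_folio,
				     bool need_fallback)
	{
		struct folio *folio;

		if (!*foliop) {
			folio = new_folio;
			if (need_fallback) {
				/* was: *pagep = &folio->page */
				*foliop = folio;
				/* caller copies, then retries */
				return -ENOENT;
			}
		} else {
			/* was: folio = page_folio(*pagep) */
			folio = *foliop;
			*foliop = NULL;
		}
		/* ... install folio into the destination page table ... */
		return 0;
	}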
Patches currently in -mm which might be from zhangpeng362@xxxxxxxxxx are

mm-madvise-use-vma_lookup-instead-of-find_vma.patch
userfaultfd-convert-mfill_atomic_pte_copy-to-use-a-folio.patch
userfaultfd-use-kmap_local_page-in-copy_huge_page_from_user.patch
userfaultfd-convert-copy_huge_page_from_user-to-copy_folio_from_user.patch
userfaultfd-convert-mfill_atomic_hugetlb-to-use-a-folio.patch
mm-convert-copy_user_huge_page-to-copy_user_large_folio.patch
userfaultfd-convert-mfill_atomic-to-use-a-folio.patch