From: Axel Rasmussen <axelrasmussen@xxxxxxxxxx>
Subject: userfaultfd/shmem: modify shmem_mfill_atomic_pte to use install_pte()

In a previous commit, we added the mfill_atomic_install_pte() helper.
This helper does the job of setting up PTEs for an existing page, to map
it into a given VMA.  It deals with both the anon and shmem cases, as
well as the shared and private cases.

In other words, shmem_mfill_atomic_pte() duplicates a case
mfill_atomic_install_pte() already handles.  So, expose it, and let
shmem_mfill_atomic_pte() use it directly, to reduce code duplication.

This requires that we refactor shmem_mfill_atomic_pte() a bit: instead
of doing accounting (shmem_recalc_inode() et al) part-way through the
PTE setup, do it afterward.  This frees mfill_atomic_install_pte() from
having to care about this accounting, and means we don't need to e.g.
shmem_uncharge() in the error path.

A side effect is that this switches shmem_mfill_atomic_pte() to use
lru_cache_add_inactive_or_unevictable() instead of just lru_cache_add().
This wrapper does some extra accounting in an exceptional case, if
appropriate, so it's actually the more correct thing to use.

Link: https://lkml.kernel.org/r/20210503180737.2487560-7-axelrasmussen@xxxxxxxxxx
Signed-off-by: Axel Rasmussen <axelrasmussen@xxxxxxxxxx>
Reviewed-by: Peter Xu <peterx@xxxxxxxxxx>
Acked-by: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Alexander Viro <viro@xxxxxxxxxxxxxxxxxx>
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: Brian Geffon <bgeffon@xxxxxxxxxx>
Cc: "Dr . David Alan Gilbert" <dgilbert@xxxxxxxxxx>
Cc: Jerome Glisse <jglisse@xxxxxxxxxx>
Cc: Joe Perches <joe@xxxxxxxxxxx>
Cc: Kirill A. Shutemov <kirill@xxxxxxxxxxxxx>
Cc: Lokesh Gidra <lokeshgidra@xxxxxxxxxx>
Cc: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Cc: Mike Rapoport <rppt@xxxxxxxxxxxxxxxxxx>
Cc: Mina Almasry <almasrymina@xxxxxxxxxx>
Cc: Oliver Upton <oupton@xxxxxxxxxx>
Cc: Shaohua Li <shli@xxxxxx>
Cc: Shuah Khan <shuah@xxxxxxxxxx>
Cc: Stephen Rothwell <sfr@xxxxxxxxxxxxxxxx>
Cc: Wang Qing <wangqing@xxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/userfaultfd_k.h |    5 ++
 mm/shmem.c                    |   58 ++++++--------------------------
 mm/userfaultfd.c              |   17 +++------
 3 files changed, 23 insertions(+), 57 deletions(-)

--- a/include/linux/userfaultfd_k.h~userfaultfd-shmem-modify-shmem_mfill_atomic_pte-to-use-install_pte
+++ a/include/linux/userfaultfd_k.h
@@ -53,6 +53,11 @@ enum mcopy_atomic_mode {
 	MCOPY_ATOMIC_CONTINUE,
 };
 
+extern int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+				    struct vm_area_struct *dst_vma,
+				    unsigned long dst_addr, struct page *page,
+				    bool newly_allocated, bool wp_copy);
+
 extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
 			    unsigned long src_start, unsigned long len,
 			    bool *mmap_changing, __u64 mode);
--- a/mm/shmem.c~userfaultfd-shmem-modify-shmem_mfill_atomic_pte-to-use-install_pte
+++ a/mm/shmem.c
@@ -2376,14 +2376,11 @@ int shmem_mfill_atomic_pte(struct mm_str
 	struct address_space *mapping = inode->i_mapping;
 	gfp_t gfp = mapping_gfp_mask(mapping);
 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
-	spinlock_t *ptl;
 	void *page_kaddr;
 	struct page *page;
-	pte_t _dst_pte, *dst_pte;
 	int ret;
 	pgoff_t max_off;
 
-	ret = -ENOMEM;
 	if (!shmem_inode_acct_block(inode, 1)) {
 		/*
 		 * We may have got a page, returned -ENOENT triggering a retry,
@@ -2394,10 +2391,11 @@ int shmem_mfill_atomic_pte(struct mm_str
 			put_page(*pagep);
 			*pagep = NULL;
 		}
-		goto out;
+		return -ENOMEM;
 	}
 
 	if (!*pagep) {
+		ret = -ENOMEM;
 		page = shmem_alloc_page(gfp, info, pgoff);
 		if (!page)
 			goto out_unacct_blocks;
@@ -2412,9 +2410,9 @@ int shmem_mfill_atomic_pte(struct mm_str
 		/* fallback to copy_from_user outside mmap_lock */
 		if (unlikely(ret)) {
 			*pagep = page;
-			shmem_inode_unacct_blocks(inode, 1);
+			ret = -ENOENT;
 			/* don't free the page */
-			return -ENOENT;
+			goto out_unacct_blocks;
 		}
 	} else {		/* ZEROPAGE */
 		clear_highpage(page);
@@ -2440,32 +2438,10 @@ int shmem_mfill_atomic_pte(struct mm_str
 	if (ret)
 		goto out_release;
 
-	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
-	if (dst_vma->vm_flags & VM_WRITE)
-		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
-	else {
-		/*
-		 * We don't set the pte dirty if the vma has no
-		 * VM_WRITE permission, so mark the page dirty or it
-		 * could be freed from under us. We could do it
-		 * unconditionally before unlock_page(), but doing it
-		 * only if VM_WRITE is not set is faster.
-		 */
-		set_page_dirty(page);
-	}
-
-	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
-
-	ret = -EFAULT;
-	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
-	if (unlikely(pgoff >= max_off))
-		goto out_release_unlock;
-
-	ret = -EEXIST;
-	if (!pte_none(*dst_pte))
-		goto out_release_unlock;
-
-	lru_cache_add(page);
+	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+				       page, true, false);
+	if (ret)
+		goto out_delete_from_cache;
 
 	spin_lock_irq(&info->lock);
 	info->alloced++;
@@ -2473,27 +2449,17 @@ int shmem_mfill_atomic_pte(struct mm_str
 	shmem_recalc_inode(inode);
 	spin_unlock_irq(&info->lock);
 
-	inc_mm_counter(dst_mm, mm_counter_file(page));
-	page_add_file_rmap(page, false);
-	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
-
-	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(dst_vma, dst_addr, dst_pte);
-	pte_unmap_unlock(dst_pte, ptl);
+	SetPageDirty(page);
 	unlock_page(page);
-	ret = 0;
-out:
-	return ret;
-out_release_unlock:
-	pte_unmap_unlock(dst_pte, ptl);
-	ClearPageDirty(page);
+	return 0;
+out_delete_from_cache:
 	delete_from_page_cache(page);
 out_release:
 	unlock_page(page);
 	put_page(page);
 out_unacct_blocks:
 	shmem_inode_unacct_blocks(inode, 1);
-	goto out;
+	return ret;
 }
 #endif /* CONFIG_USERFAULTFD */
 
--- a/mm/userfaultfd.c~userfaultfd-shmem-modify-shmem_mfill_atomic_pte-to-use-install_pte
+++ a/mm/userfaultfd.c
@@ -51,18 +51,13 @@ struct vm_area_struct *find_dst_vma(stru
 /*
  * Install PTEs, to map dst_addr (within dst_vma) to page.
  *
- * This function handles MCOPY_ATOMIC_CONTINUE (which is always file-backed),
- * whether or not dst_vma is VM_SHARED.  It also handles the more general
- * MCOPY_ATOMIC_NORMAL case, when dst_vma is *not* VM_SHARED (it may be file
- * backed, or not).
- *
- * Note that MCOPY_ATOMIC_NORMAL for a VM_SHARED dst_vma is handled by
- * shmem_mcopy_atomic_pte instead.
+ * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
+ * and anon, and for both shared and private VMAs.
  */
-static int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
-				    struct vm_area_struct *dst_vma,
-				    unsigned long dst_addr, struct page *page,
-				    bool newly_allocated, bool wp_copy)
+int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+			     struct vm_area_struct *dst_vma,
+			     unsigned long dst_addr, struct page *page,
+			     bool newly_allocated, bool wp_copy)
 {
 	int ret;
 	pte_t _dst_pte, *dst_pte;
_
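
As an aside for readers following the control-flow change: below is a
minimal user-space C sketch of the unwind ordering this patch gives
shmem_mfill_atomic_pte().  It is illustrative only, not kernel code;
every name is a stand-in for the kernel helpers named above
(mfill_atomic_install_pte(), shmem_inode_acct_block(), and so on), and
the -17 return value merely mimics -EEXIST.  The point it demonstrates
is the design choice in the refactor: accounting state that is updated
only after the install step has succeeded never needs to be rolled back
on any error path.

/*
 * Illustrative sketch only -- NOT kernel code.  Models the unwind
 * ordering the patch establishes: the inode accounting update happens
 * only after the PTE-install step succeeds, so the error paths need
 * no shmem_uncharge()-style rollback of partial accounting.
 */
#include <stdio.h>

/* stand-in for mfill_atomic_install_pte(); -17 mimics -EEXIST */
static int install_pte_stub(int fail)
{
	return fail ? -17 : 0;
}

static int mfill_atomic_pte_sketch(int fail_install)
{
	int ret;

	puts("acct block");            /* shmem_inode_acct_block()  */
	puts("alloc+fill page");       /* shmem_alloc_page() etc.   */
	puts("add to page cache");     /* shmem_add_to_page_cache() */

	ret = install_pte_stub(fail_install);
	if (ret)
		goto out_delete_from_cache;

	/* only now bump the accounting; no error path can reach it,
	 * so there is nothing to undo below */
	puts("info->alloced++, shmem_recalc_inode()");
	return 0;

out_delete_from_cache:
	puts("delete from page cache");
	puts("unlock/put page");       /* out_release:       */
	puts("unacct block");          /* out_unacct_blocks: */
	return ret;
}

int main(void)
{
	printf("success -> %d\n", mfill_atomic_pte_sketch(0));
	printf("failure -> %d\n", mfill_atomic_pte_sketch(1));
	return 0;
}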