The patch titled
     Subject: mm: move follow_phys to arch/x86/mm/pat/memtype.c
has been added to the -mm mm-unstable branch.  Its filename is
     mm-move-follow_phys-to-arch-x86-mm-pat-memtypec.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-move-follow_phys-to-arch-x86-mm-pat-memtypec.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Christoph Hellwig <hch@xxxxxx>
Subject: mm: move follow_phys to arch/x86/mm/pat/memtype.c
Date: Mon, 25 Mar 2024 07:45:42 +0800

follow_phys is only used by two callers in arch/x86/mm/pat/memtype.c.
Move it there and hardcode the two arguments that get the same values
passed by both callers.

Link: https://lkml.kernel.org/r/20240324234542.2038726-4-hch@xxxxxx
Signed-off-by: Christoph Hellwig <hch@xxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Fei Li <fei1.li@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 arch/x86/mm/pat/memtype.c |   22 ++++++++++++++++++++--
 include/linux/mm.h        |    2 --
 mm/memory.c               |   28 ----------------------------
 3 files changed, 20 insertions(+), 32 deletions(-)

--- a/arch/x86/mm/pat/memtype.c~mm-move-follow_phys-to-arch-x86-mm-pat-memtypec
+++ a/arch/x86/mm/pat/memtype.c
@@ -947,6 +947,24 @@ static void free_pfn_range(u64 paddr, un
 	memtype_free(paddr, paddr + size);
 }
 
+static int follow_phys(struct vm_area_struct *vma, unsigned long *prot,
+		resource_size_t *phys)
+{
+	pte_t *ptep, pte;
+	spinlock_t *ptl;
+
+	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+		return -EINVAL;
+
+	if (follow_pte(vma->vm_mm, vma->vm_start, &ptep, &ptl))
+		return -EINVAL;
+	pte = ptep_get(ptep);
+	*prot = pgprot_val(pte_pgprot(pte));
+	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
+	pte_unmap_unlock(ptep, ptl);
+	return 0;
+}
+
 /*
  * track_pfn_copy is called when vma that is covering the pfnmap gets
  * copied through copy_page_range().
@@ -966,7 +984,7 @@ int track_pfn_copy(struct vm_area_struct
 		 * reserve the whole chunk covered by vma. We need the
 		 * starting address and protection from pte.
 		 */
-		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
+		if (follow_phys(vma, &prot, &paddr)) {
 			WARN_ON_ONCE(1);
 			return -EINVAL;
 		}
@@ -1053,7 +1071,7 @@ void untrack_pfn(struct vm_area_struct *
 	/* free the chunk starting from pfn or the whole chunk */
 	paddr = (resource_size_t)pfn << PAGE_SHIFT;
 	if (!paddr && !size) {
-		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
+		if (follow_phys(vma, &prot, &paddr)) {
 			WARN_ON_ONCE(1);
 			return;
 		}
--- a/include/linux/mm.h~mm-move-follow_phys-to-arch-x86-mm-pat-memtypec
+++ a/include/linux/mm.h
@@ -2424,8 +2424,6 @@ int
 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
 int follow_pte(struct mm_struct *mm, unsigned long address,
 	       pte_t **ptepp, spinlock_t **ptlp);
-int follow_phys(struct vm_area_struct *vma, unsigned long address,
-		unsigned int flags, unsigned long *prot, resource_size_t *phys);
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 			void *buf, int len, int write);
 
--- a/mm/memory.c~mm-move-follow_phys-to-arch-x86-mm-pat-memtypec
+++ a/mm/memory.c
@@ -5928,34 +5928,6 @@ out:
 EXPORT_SYMBOL_GPL(follow_pte);
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
-int follow_phys(struct vm_area_struct *vma,
-		unsigned long address, unsigned int flags,
-		unsigned long *prot, resource_size_t *phys)
-{
-	int ret = -EINVAL;
-	pte_t *ptep, pte;
-	spinlock_t *ptl;
-
-	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-		goto out;
-
-	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
-		goto out;
-	pte = ptep_get(ptep);
-
-	if ((flags & FOLL_WRITE) && !pte_write(pte))
-		goto unlock;
-
-	*prot = pgprot_val(pte_pgprot(pte));
-	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
-
-	ret = 0;
-unlock:
-	pte_unmap_unlock(ptep, ptl);
-out:
-	return ret;
-}
-
 /**
  * generic_access_phys - generic implementation for iomem mmap access
  * @vma: the vma to access
_

Patches currently in -mm which might be from hch@xxxxxx are

virt-acrn-stop-using-follow_pfn.patch
mm-remove-follow_pfn.patch
mm-move-follow_phys-to-arch-x86-mm-pat-memtypec.patch