follow_phys is only used by two callers in arch/x86/mm/pat/memtype.c.
Move it there and hardcode the two arguments that get the same values
passed by both callers: the address is always vma->vm_start and the
flags are always 0, which also makes the FOLL_WRITE check dead code.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 arch/x86/mm/pat/memtype.c | 23 +++++++++++++++++++++--
 include/linux/mm.h        |  2 --
 mm/memory.c               | 28 ----------------------------
 3 files changed, 21 insertions(+), 32 deletions(-)

diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index 0d72183b5dd028..c64f07b0fc2099 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -947,6 +947,25 @@ static void free_pfn_range(u64 paddr, unsigned long size)
 	memtype_free(paddr, paddr + size);
 }
 
+static int follow_phys(struct vm_area_struct *vma, unsigned long *prot,
+		resource_size_t *phys)
+{
+	pte_t *ptep, pte;
+	spinlock_t *ptl;
+
+	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+		return -EINVAL;
+
+	if (follow_pte(vma->vm_mm, vma->vm_start, &ptep, &ptl))
+		return -EINVAL;
+
+	pte = ptep_get(ptep);
+	*prot = pgprot_val(pte_pgprot(pte));
+	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
+	pte_unmap_unlock(ptep, ptl);
+	return 0;
+}
+
 /*
  * track_pfn_copy is called when vma that is covering the pfnmap gets
  * copied through copy_page_range().
@@ -966,7 +985,7 @@ int track_pfn_copy(struct vm_area_struct *vma)
 		 * reserve the whole chunk covered by vma. We need the
 		 * starting address and protection from pte.
 		 */
-		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
+		if (follow_phys(vma, &prot, &paddr)) {
 			WARN_ON_ONCE(1);
 			return -EINVAL;
 		}
@@ -1053,7 +1072,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 	/* free the chunk starting from pfn or the whole chunk */
 	paddr = (resource_size_t)pfn << PAGE_SHIFT;
 	if (!paddr && !size) {
-		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
+		if (follow_phys(vma, &prot, &paddr)) {
 			WARN_ON_ONCE(1);
 			return;
 		}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9cd2c69f913601..51cfc8267da755 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2393,8 +2393,6 @@ int copy_page_range(struct vm_area_struct *dst_vma,
 		    struct vm_area_struct *src_vma);
 int follow_pte(struct mm_struct *mm, unsigned long address,
 	       pte_t **ptepp, spinlock_t **ptlp);
-int follow_phys(struct vm_area_struct *vma, unsigned long address,
-		unsigned int flags, unsigned long *prot, resource_size_t *phys);
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 			void *buf, int len, int write);
 
diff --git a/mm/memory.c b/mm/memory.c
index 33559720f0aab9..86c9eb92b5e03c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5926,34 +5926,6 @@ int follow_pte(struct mm_struct *mm, unsigned long address,
 EXPORT_SYMBOL_GPL(follow_pte);
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
-int follow_phys(struct vm_area_struct *vma,
-		unsigned long address, unsigned int flags,
-		unsigned long *prot, resource_size_t *phys)
-{
-	int ret = -EINVAL;
-	pte_t *ptep, pte;
-	spinlock_t *ptl;
-
-	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-		goto out;
-
-	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
-		goto out;
-	pte = ptep_get(ptep);
-
-	if ((flags & FOLL_WRITE) && !pte_write(pte))
-		goto unlock;
-
-	*prot = pgprot_val(pte_pgprot(pte));
-	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
-
-	ret = 0;
-unlock:
-	pte_unmap_unlock(ptep, ptl);
-out:
-	return ret;
-}
-
 /**
  * generic_access_phys - generic implementation for iomem mmap access
  * @vma: the vma to access
-- 
2.39.2