The patch titled
     Subject: mm: convert insert_pfn() to vm_fault_t
has been added to the -mm tree.  Its filename is
     mm-convert-insert_pfn-to-vm_fault_t.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-convert-insert_pfn-to-vm_fault_t.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-convert-insert_pfn-to-vm_fault_t.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Subject: mm: convert insert_pfn() to vm_fault_t

All callers convert its errno into a vm_fault_t, so convert it to return a
vm_fault_t directly.
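For illustration, this is roughly the caller-side errno translation that goes
away, sketched from the vmf_insert_pfn_prot() hunk below; after the
conversion the caller simply returns insert_pfn()'s vm_fault_t:

    before:
        err = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, false);
        if (err == -ENOMEM)
            return VM_FAULT_OOM;
        if (err < 0 && err != -EBUSY)
            return VM_FAULT_SIGBUS;
        return VM_FAULT_NOPAGE;

    after:
        return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, false);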
Link: http://lkml.kernel.org/r/20180828145728.11873-11-willy@xxxxxxxxxxxxx
Signed-off-by: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Nicolas Pitre <nicolas.pitre@xxxxxxxxxx>
Cc: Souptick Joarder <jrdr.linux@xxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/memory.c |   24 +++++-------------------
 1 file changed, 5 insertions(+), 19 deletions(-)

--- a/mm/memory.c~mm-convert-insert_pfn-to-vm_fault_t
+++ a/mm/memory.c
@@ -1767,19 +1767,16 @@ int vm_insert_page(struct vm_area_struct
 }
 EXPORT_SYMBOL(vm_insert_page);
 
-static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                         pfn_t pfn, pgprot_t prot, bool mkwrite)
 {
         struct mm_struct *mm = vma->vm_mm;
-        int retval;
         pte_t *pte, entry;
         spinlock_t *ptl;
 
-        retval = -ENOMEM;
         pte = get_locked_pte(mm, addr, &ptl);
         if (!pte)
-                goto out;
-        retval = -EBUSY;
+                return VM_FAULT_OOM;
         if (!pte_none(*pte)) {
                 if (mkwrite) {
                         /*
@@ -1812,11 +1809,9 @@ out_mkwrite:
         set_pte_at(mm, addr, pte, entry);
         update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
 
-        retval = 0;
 out_unlock:
         pte_unmap_unlock(pte, ptl);
-out:
-        return retval;
+        return VM_FAULT_NOPAGE;
 }
 
 /**
@@ -1840,8 +1835,6 @@ out:
 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
                         unsigned long pfn, pgprot_t pgprot)
 {
-        int err;
-
         /*
          * Technically, architectures with pte_special can avoid all these
          * restrictions (same for remap_pfn_range).  However we would like
@@ -1862,15 +1855,8 @@ vm_fault_t vmf_insert_pfn_prot(struct vm
 
         track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
 
-        err = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
+        return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
                         false);
-
-        if (err == -ENOMEM)
-                return VM_FAULT_OOM;
-        if (err < 0 && err != -EBUSY)
-                return VM_FAULT_SIGBUS;
-
-        return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL(vmf_insert_pfn_prot);
 
@@ -1950,7 +1936,7 @@ static vm_fault_t __vm_insert_mixed(stru
                 page = pfn_to_page(pfn_t_to_pfn(pfn));
                 err = insert_page(vma, addr, page, pgprot);
         } else {
-                err = insert_pfn(vma, addr, pfn, pgprot, mkwrite);
+                return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
         }
 
         if (err == -ENOMEM)
_

Patches currently in -mm which might be from willy@xxxxxxxxxxxxx are

cramfs-convert-to-use-vmf_insert_mixed.patch
mm-remove-vm_insert_mixed.patch
mm-introduce-vmf_insert_pfn_prot.patch
x86-convert-vdso-to-use-vm_fault_t.patch
mm-make-vm_insert_pfn_prot-static.patch
mm-remove-references-to-vm_insert_pfn.patch
mm-remove-vm_insert_pfn.patch
mm-inline-vm_insert_pfn_prot-into-caller.patch
mm-convert-__vm_insert_mixed-to-vm_fault_t.patch
mm-convert-insert_pfn-to-vm_fault_t.patch