In do_anonymous_page(), we have separate cases for the zero page vs
allocating new anonymous pages. However, once the pte entry has been
computed, the rest of the handling (mapping and locking the page table,
checking that we didn't lose a race with another page fault handler,
etc.) is identical between the two cases. This change reduces the code
duplication between the two cases.

Signed-off-by: Michel Lespinasse <michel@xxxxxxxxxxxxxx>
---
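A rough sketch of the merged flow, for review purposes only (simplified
pseudo-C with the OOM and error paths elided, so this is an outline of
the patched function rather than its literal code):

	if (the zero page can be used) {
		/* page stays NULL */
		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
					      vma->vm_page_prot));
	} else {
		/* Allocate our own private page. */
		page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
		entry = mk_pte(page, vma->vm_page_prot);
	}

	/* Tail now shared by both cases, previously duplicated: */
	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
				       &vmf->ptl);
	/* The pte_none() recheck, check_stable_address_space() and the
	 * userfaultfd_missing() check happen once, under the PT lock;
	 * rmap/LRU setup is done only when page != NULL. */
	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
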
 mm/memory.c | 85 +++++++++++++++++++++++------------------------------
 1 file changed, 37 insertions(+), 48 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 59ff65cb3ab4..217c31c616f4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3495,7 +3495,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *page;
+	struct page *page = NULL;
 	vm_fault_t ret = 0;
 	pte_t entry;
 
@@ -3525,77 +3525,66 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 			!mm_forbids_zeropage(vma->vm_mm)) {
 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
 						vma->vm_page_prot));
-		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-				vmf->address, &vmf->ptl);
-		if (!pte_none(*vmf->pte)) {
-			update_mmu_tlb(vma, vmf->address, vmf->pte);
-			goto unlock;
-		}
-		ret = check_stable_address_space(vma->vm_mm);
-		if (ret)
-			goto unlock;
-		/* Deliver the page fault to userland, check inside PT lock */
-		if (userfaultfd_missing(vma)) {
-			pte_unmap_unlock(vmf->pte, vmf->ptl);
-			return handle_userfault(vmf, VM_UFFD_MISSING);
-		}
-		goto setpte;
+	} else {
+		/* Allocate our own private page. */
+		if (unlikely(anon_vma_prepare(vma)))
+			goto oom;
+		page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
+		if (!page)
+			goto oom;
+
+		if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
+			goto oom_free_page;
+		cgroup_throttle_swaprate(page, GFP_KERNEL);
+
+		/*
+		 * The memory barrier inside __SetPageUptodate makes sure that
+		 * preceding stores to the page contents become visible before
+		 * the set_pte_at() write.
+		 */
+		__SetPageUptodate(page);
+
+		entry = mk_pte(page, vma->vm_page_prot);
+		if (vma->vm_flags & VM_WRITE)
+			entry = pte_mkwrite(pte_mkdirty(entry));
 	}
 
-	/* Allocate our own private page. */
-	if (unlikely(anon_vma_prepare(vma)))
-		goto oom;
-	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
-	if (!page)
-		goto oom;
-
-	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
-		goto oom_free_page;
-	cgroup_throttle_swaprate(page, GFP_KERNEL);
-
-	/*
-	 * The memory barrier inside __SetPageUptodate makes sure that
-	 * preceding stores to the page contents become visible before
-	 * the set_pte_at() write.
-	 */
-	__SetPageUptodate(page);
-
-	entry = mk_pte(page, vma->vm_page_prot);
-	if (vma->vm_flags & VM_WRITE)
-		entry = pte_mkwrite(pte_mkdirty(entry));
-
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
 			&vmf->ptl);
 	if (!pte_none(*vmf->pte)) {
 		update_mmu_tlb(vma, vmf->address, vmf->pte);
-		goto release;
+		goto unlock;
 	}
 
 	ret = check_stable_address_space(vma->vm_mm);
 	if (ret)
-		goto release;
+		goto unlock;
 
 	/* Deliver the page fault to userland, check inside PT lock */
 	if (userfaultfd_missing(vma)) {
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
-		put_page(page);
+		if (page)
+			put_page(page);
 		return handle_userfault(vmf, VM_UFFD_MISSING);
 	}
 
-	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
-	page_add_new_anon_rmap(page, vma, vmf->address, false);
-	lru_cache_add_inactive_or_unevictable(page, vma);
-setpte:
+	if (page) {
+		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
+		page_add_new_anon_rmap(page, vma, vmf->address, false);
+		lru_cache_add_inactive_or_unevictable(page, vma);
+	}
+
 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, vmf->address, vmf->pte);
+	pte_unmap_unlock(vmf->pte, vmf->ptl);
+	return 0;
 unlock:
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
+	if (page)
+		put_page(page);
 	return ret;
-release:
-	put_page(page);
-	goto unlock;
 oom_free_page:
 	put_page(page);
 oom:
-- 
2.20.1