On 08/21, Peter Xu wrote:
>
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -2927,50 +2927,25 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
>  	 * not dirty accountable.
>  	 */
>  	if (PageAnon(vmf->page)) {
> -		int total_map_swapcount;
> -		if (PageKsm(vmf->page) && (PageSwapCache(vmf->page) ||
> -					   page_count(vmf->page) != 1))
> +		struct page *page = vmf->page;
> +
> +		/* PageKsm() doesn't necessarily raise the page refcount */
> +		if (PageKsm(page) || page_count(page) != 1)
> +			goto copy;
> +		if (!trylock_page(page))
> +			goto copy;
> +		if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
> +			unlock_page(page);
>  			goto copy;
> -		if (!trylock_page(vmf->page)) {
> -			get_page(vmf->page);
> -			pte_unmap_unlock(vmf->pte, vmf->ptl);
> -			lock_page(vmf->page);
> -			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
> -					vmf->address, &vmf->ptl);
> -			if (!pte_same(*vmf->pte, vmf->orig_pte)) {
> -				update_mmu_tlb(vma, vmf->address, vmf->pte);
> -				unlock_page(vmf->page);
> -				pte_unmap_unlock(vmf->pte, vmf->ptl);
> -				put_page(vmf->page);
> -				return 0;
> -			}
> -			put_page(vmf->page);
> -		}
> -		if (PageKsm(vmf->page)) {
> -			bool reused = reuse_ksm_page(vmf->page, vmf->vma,
> -						     vmf->address);
> -			unlock_page(vmf->page);
> -			if (!reused)
> -				goto copy;
> -			wp_page_reuse(vmf);
> -			return VM_FAULT_WRITE;
> -		}
> -		if (reuse_swap_page(vmf->page, &total_map_swapcount)) {

It seems that nobody else calls reuse_swap_page() with total_map_swapcount != NULL.

Oleg.
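
P.S. If that is the case, the "int *" argument could probably go away in a
follow-up cleanup, something along the lines of the sketch below. Completely
untested and written from memory, just to illustrate the point; iirc the
remaining callers already pass NULL and would simply lose that argument.

	/* hypothetical follow-up on top of this series, not part of it */
	--- a/include/linux/swap.h
	+++ b/include/linux/swap.h
	-extern bool reuse_swap_page(struct page *, int *);
	+extern bool reuse_swap_page(struct page *);

	--- a/mm/swapfile.c
	+++ b/mm/swapfile.c
	-bool reuse_swap_page(struct page *page, int *total_map_swapcount)
	+bool reuse_swap_page(struct page *page)
	 {
		...
	-	if (total_map_swapcount)
	-		*total_map_swapcount = total_mapcount + total_swapcount;
		...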