In handle_pte_fault(), allow speculative execution to proceed.

Use pte_spinlock() to validate the mmap sequence count when locking
the page table.

If speculative execution proceeds through do_wp_page(), ensure that we
end up in the wp_page_reuse() or wp_page_copy() paths, rather than
wp_pfn_shared() or wp_page_shared() (both unreachable as we only handle
anon vmas so far) or handle_userfault() (needs an explicit abort to
handle non-speculatively).

Signed-off-by: Michel Lespinasse <michel@xxxxxxxxxxxxxx>
---
 mm/memory.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index cb66585f5145..c3cd29d3acc6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3120,6 +3120,7 @@ static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 
+	VM_BUG_ON(vmf->flags & FAULT_FLAG_SPECULATIVE);
 	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
 		vm_fault_t ret;
 
@@ -3140,6 +3141,8 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	vm_fault_t ret = VM_FAULT_WRITE;
 
+	VM_BUG_ON(vmf->flags & FAULT_FLAG_SPECULATIVE);
+
 	get_page(vmf->page);
 
 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
@@ -3193,6 +3196,8 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 
 	if (userfaultfd_pte_wp(vma, *vmf->pte)) {
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
+		if (vmf->flags & FAULT_FLAG_SPECULATIVE)
+			return VM_FAULT_RETRY;
 		return handle_userfault(vmf, VM_UFFD_WP);
 	}
 
@@ -4383,13 +4388,8 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
 		return do_numa_page(vmf);
 
-	if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
-		pte_unmap(vmf->pte);
+	if (!pte_spinlock(vmf))
 		return VM_FAULT_RETRY;
-	}
-
-	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
-	spin_lock(vmf->ptl);
 	entry = vmf->orig_pte;
 	if (unlikely(!pte_same(*vmf->pte, entry))) {
 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
-- 
2.20.1
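
For reference, the handle_pte_fault() hunk relies on pte_spinlock() only
succeeding while the mmap sequence count sampled at the start of the
speculative fault is still valid. A minimal sketch of that behaviour is
below; the vmf->seq field and the mmap_seq_read_check() helper are
assumed to come from earlier patches in the series and are illustrative
names, not part of this patch:

/*
 * Sketch only: take the page table lock for vmf. When handling the
 * fault speculatively, fail (return false) if the mm's sequence count
 * no longer matches the value sampled when the speculative walk began.
 * mmap_seq_read_check() and vmf->seq are assumed helpers/fields.
 */
static bool pte_spinlock(struct vm_fault *vmf)
{
	bool ret = false;

	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);

	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
		/* Non-speculative path: plain spin_lock, always succeeds. */
		spin_lock(vmf->ptl);
		return true;
	}

	/*
	 * Disable irqs so the page table page cannot be freed from under
	 * us while we look at it (same idea as the fast GUP path).
	 */
	local_irq_disable();
	if (!mmap_seq_read_check(vmf->vma->vm_mm, vmf->seq))
		goto out;
	if (!spin_trylock(vmf->ptl))
		goto out;
	/* Re-check after taking the lock: a writer may have raced in. */
	if (!mmap_seq_read_check(vmf->vma->vm_mm, vmf->seq)) {
		spin_unlock(vmf->ptl);
		goto out;
	}
	ret = true;
out:
	local_irq_enable();
	return ret;
}

A false return propagates out of handle_pte_fault() as VM_FAULT_RETRY,
so the fault is retried under mmap_lock rather than completed
speculatively.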