In the speculative case, we want to avoid direct pmd checks (which
would require some extra synchronization to be safe), and rely on
pte_map_lock which will both lock the page table and verify that the
pmd has not changed from its initial value.

Signed-off-by: Michel Lespinasse <michel@xxxxxxxxxxxxxx>
---
 mm/memory.c | 28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 7139004c624d..13e2aaf900e5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3915,23 +3915,25 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 		return ret;
 	}
 
-	if (pmd_none(*vmf->pmd)) {
-		if (PageTransCompound(page)) {
-			ret = do_set_pmd(vmf, page);
-			if (ret != VM_FAULT_FALLBACK)
-				return ret;
+	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
+		if (pmd_none(*vmf->pmd)) {
+			if (PageTransCompound(page)) {
+				ret = do_set_pmd(vmf, page);
+				if (ret != VM_FAULT_FALLBACK)
+					return ret;
+			}
+
+			if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
+				return VM_FAULT_OOM;
 		}
 
-		if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
-			return VM_FAULT_OOM;
+		/* See comment in __handle_mm_fault() */
+		if (pmd_devmap_trans_unstable(vmf->pmd))
+			return 0;
 	}
 
-	/* See comment in __handle_mm_fault() */
-	if (pmd_devmap_trans_unstable(vmf->pmd))
-		return 0;
-
-	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-				       vmf->address, &vmf->ptl);
+	if (!pte_map_lock(vmf))
+		return VM_FAULT_RETRY;
 	ret = 0;
 	/* Re-check under ptl */
 	if (likely(pte_none(*vmf->pte)))
--
2.20.1
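
For reference, here is a minimal sketch of the contract finish_fault()
relies on from pte_map_lock() above. This is not the helper introduced
by this series; the trylock strategy and the vmf->orig_pmd field used
for the pmd re-check are assumptions made purely for illustration:

#include <linux/mm.h>

/*
 * Illustrative sketch only -- not the implementation from this series.
 * Assumptions: FAULT_FLAG_SPECULATIVE marks speculative faults (as in
 * the hunk above), vmf->orig_pmd holds the pmd value sampled when the
 * speculative walk started, and the pte lock is taken with a trylock so
 * the fault can fall back to the classic path instead of sleeping.
 */
static bool pte_map_lock(struct vm_fault *vmf)
{
	struct mm_struct *mm = vmf->vma->vm_mm;

	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
		/* Classic path: behaves like pte_offset_map_lock(). */
		vmf->pte = pte_offset_map_lock(mm, vmf->pmd,
					       vmf->address, &vmf->ptl);
		return true;
	}

	/*
	 * Speculative path: map the pte, take the pte lock without
	 * sleeping, then verify the pmd still holds its initial value.
	 * Any change means the page table may have been torn down or
	 * replaced, so the caller must retry the fault.
	 */
	vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
	vmf->ptl = pte_lockptr(mm, vmf->pmd);
	if (!spin_trylock(vmf->ptl)) {
		pte_unmap(vmf->pte);
		return false;
	}
	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) {
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return false;
	}
	return true;
}

With a contract along those lines, the VM_FAULT_RETRY return added to
finish_fault() simply pushes the fault back to the non-speculative path
whenever the pmd-stability check cannot be satisfied.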