If pte_map_lock() failed to lock the pte, or if the VMA is no longer
valid, the fault entry's fields should not be set, so that the caller
won't try to unlock it.

Signed-off-by: Laurent Dufour <ldufour@xxxxxxxxxxxxxxxxxx>
---
 mm/memory.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index f05288797c60..75d24e74c4ff 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2275,6 +2275,8 @@ static bool pte_spinlock(struct vm_fault *vmf)
 static bool pte_map_lock(struct vm_fault *vmf)
 {
 	bool ret = false;
+	pte_t *pte;
+	spinlock_t *ptl;
 
 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
 		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
@@ -2299,18 +2301,20 @@ static bool pte_map_lock(struct vm_fault *vmf)
 	 * to invalidate TLB but this CPU has irq disabled.
 	 * Since we are in a speculative patch, accept it could fail
 	 */
-	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
-	vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
-	if (unlikely(!spin_trylock(vmf->ptl))) {
-		pte_unmap(vmf->pte);
+	ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
+	pte = pte_offset_map(vmf->pmd, vmf->address);
+	if (unlikely(!spin_trylock(ptl))) {
+		pte_unmap(pte);
 		goto out;
 	}
 
 	if (vma_has_changed(vmf->vma, vmf->sequence)) {
-		pte_unmap_unlock(vmf->pte, vmf->ptl);
+		pte_unmap_unlock(pte, ptl);
 		goto out;
 	}
 
+	vmf->pte = pte;
+	vmf->ptl = ptl;
 	ret = true;
 out:
 	local_irq_enable();
-- 
2.7.4
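
For reference, the invariant after this change is that vmf->pte and
vmf->ptl are only valid when pte_map_lock() returns true. A minimal
sketch of the expected caller pattern (the function name
example_fault_path and the exact return value are illustrative, not
taken from this series):

/*
 * Illustrative only: the fault handler must not touch vmf->pte or
 * vmf->ptl unless pte_map_lock() succeeded.  On failure nothing was
 * published into vmf, so there is nothing to unmap or unlock.
 */
static int example_fault_path(struct vm_fault *vmf)
{
	if (!pte_map_lock(vmf))
		return VM_FAULT_RETRY;	/* speculative path lost the race */

	/* ... examine or update the pte while holding vmf->ptl ... */

	pte_unmap_unlock(vmf->pte, vmf->ptl);
	return 0;
}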