On Tue, Apr 16, 2019 at 03:44:57PM +0200, Laurent Dufour wrote:
> When handling a page fault without holding the mmap_sem, the fetch of
> the pte lock pointer and the locking have to be done while ensuring
> that the VMA is not modified behind our back.
> 
> So move the fetch and locking operations into a dedicated function.
> 
> Signed-off-by: Laurent Dufour <ldufour@xxxxxxxxxxxxx>

Reviewed-by: Jérôme Glisse <jglisse@xxxxxxxxxx>

> ---
>  mm/memory.c | 15 +++++++++++----
>  1 file changed, 11 insertions(+), 4 deletions(-)
> 
> diff --git a/mm/memory.c b/mm/memory.c
> index fc3698d13cb5..221ccdf34991 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -2073,6 +2073,13 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
>  }
>  EXPORT_SYMBOL_GPL(apply_to_page_range);
>  
> +static inline bool pte_spinlock(struct vm_fault *vmf)
> +{
> +	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
> +	spin_lock(vmf->ptl);
> +	return true;
> +}
> +
>  static inline bool pte_map_lock(struct vm_fault *vmf)
>  {
>  	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
> @@ -3656,8 +3663,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>  	 * validation through pte_unmap_same(). It's of NUMA type but
>  	 * the pfn may be screwed if the read is non atomic.
>  	 */
> -	vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
> -	spin_lock(vmf->ptl);
> +	if (!pte_spinlock(vmf))
> +		return VM_FAULT_RETRY;
>  	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
>  		pte_unmap_unlock(vmf->pte, vmf->ptl);
>  		goto out;
> @@ -3850,8 +3857,8 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
>  	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
>  		return do_numa_page(vmf);
>  
> -	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
> -	spin_lock(vmf->ptl);
> +	if (!pte_spinlock(vmf))
> +		return VM_FAULT_RETRY;
>  	entry = vmf->orig_pte;
>  	if (unlikely(!pte_same(*vmf->pte, entry)))
>  		goto unlock;
> -- 
> 2.21.0
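
For readers following the series: the reason the callers now check the
return value of pte_spinlock() is that a later patch can make it fail
when the fault is handled speculatively, i.e. without mmap_sem held. A
minimal sketch of what such an extension might look like, assuming a
FAULT_FLAG_SPECULATIVE fault flag and a vma_has_changed() helper
(neither is defined by this patch; both are assumptions here):

	static inline bool pte_spinlock(struct vm_fault *vmf)
	{
		/* Regular path: mmap_sem is held, a plain spin_lock is safe. */
		if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
			vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
			spin_lock(vmf->ptl);
			return true;
		}

		/*
		 * Speculative path: the VMA may be modified under us, so
		 * only trylock the pte lock and revalidate the VMA around
		 * it. Returning false makes the caller bail out with
		 * VM_FAULT_RETRY and fall back to the classic fault path.
		 */
		if (vma_has_changed(vmf))
			return false;

		vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
		if (unlikely(!spin_trylock(vmf->ptl)))
			return false;

		if (vma_has_changed(vmf)) {
			spin_unlock(vmf->ptl);
			return false;
		}

		return true;
	}

With something like the above in place, the two callers converted here
simply retry through the regular, mmap_sem-protected path whenever the
speculative walk loses a race.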