Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@xxxxxxxxxxxxx>
---
mm/mremap.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/mm/mremap.c b/mm/mremap.c
index 109560977944..9effca76bf17 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -258,7 +258,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
* We don't have to worry about the ordering of src and dst
* ptlocks because exclusive mmap_lock prevents deadlock.
*/
- old_ptl = pmd_lock(vma->vm_mm, old_pmd);
+ old_ptl = pmd_lock(mm, old_pmd);
new_ptl = pmd_lockptr(mm, new_pmd);
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -270,11 +270,11 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
VM_BUG_ON(!pmd_none(*new_pmd));
pmd_populate(mm, new_pmd, (pgtable_t)pmd_page_vaddr(pmd));
- flush_pte_tlb_pwc_range(vma, old_addr, old_addr + PMD_SIZE);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
spin_unlock(old_ptl);
+ flush_pte_tlb_pwc_range(vma, old_addr, old_addr + PMD_SIZE);
return true;
}
#else
@@ -305,7 +305,7 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
* We don't have to worry about the ordering of src and dst
* ptlocks because exclusive mmap_lock prevents deadlock.
*/
- old_ptl = pud_lock(vma->vm_mm, old_pud);
+ old_ptl = pud_lock(mm, old_pud);
new_ptl = pud_lockptr(mm, new_pud);
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -317,11 +317,11 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
VM_BUG_ON(!pud_none(*new_pud));
pud_populate(mm, new_pud, (pmd_t *)pud_page_vaddr(pud));
- flush_pte_tlb_pwc_range(vma, old_addr, old_addr + PUD_SIZE);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
spin_unlock(old_ptl);
+ flush_pte_tlb_pwc_range(vma, old_addr, old_addr + PUD_SIZE);
return true;
}
#else
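
For reference, the resulting ordering in move_normal_pmd() looks roughly
like the sketch below once this patch is applied (move_normal_pud() is
identical modulo pud_*/PUD_SIZE). This is paraphrased from the hunk
context rather than copied from the tree, and flush_pte_tlb_pwc_range()
is the helper introduced earlier in this series:

static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *old_ptl, *new_ptl;
	pmd_t pmd;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Move the page-table page from the old slot to the new one. */
	pmd = *old_pmd;
	pmd_clear(old_pmd);
	VM_BUG_ON(!pmd_none(*new_pmd));
	pmd_populate(mm, new_pmd, (pgtable_t)pmd_page_vaddr(pmd));

	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	/* TLB and PWC flush now happens after the ptlocks are dropped. */
	flush_pte_tlb_pwc_range(vma, old_addr, old_addr + PMD_SIZE);

	return true;
}

The exclusive mmap_lock should still keep racing faults out of the old
range while the flush runs, so deferring the flush past spin_unlock()
mainly shortens the ptlock hold time; the pmd_lock(vma->vm_mm, ...) to
pmd_lock(mm, ...) change is just a cleanup to use the existing local.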