There's no modification to anon_vma interval tree. We only need to serialize against exclusive rmap walker who wants to catch all ptes the page is mapped with. Shared lock is enough for that. Suggested-by: Davidlohr Bueso <dbueso@xxxxxxx> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx> --- mm/mremap.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mm/mremap.c b/mm/mremap.c index c855922497a3..1e35ba664406 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -123,7 +123,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, } if (vma->anon_vma) { anon_vma = vma->anon_vma; - anon_vma_lock_write(anon_vma); + anon_vma_lock_read(anon_vma); } } @@ -154,7 +154,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, pte_unmap(new_pte - 1); pte_unmap_unlock(old_pte - 1, old_ptl); if (anon_vma) - anon_vma_unlock_write(anon_vma); + anon_vma_unlock_read(anon_vma); if (mapping) mutex_unlock(&mapping->i_mmap_mutex); } @@ -199,12 +199,12 @@ unsigned long move_page_tables(struct vm_area_struct *vma, vma); /* See comment in move_ptes() */ if (need_rmap_locks) - anon_vma_lock_write(vma->anon_vma); + anon_vma_lock_read(vma->anon_vma); err = move_huge_pmd(vma, new_vma, old_addr, new_addr, old_end, old_pmd, new_pmd); if (need_rmap_locks) - anon_vma_unlock_write(vma->anon_vma); + anon_vma_unlock_read(vma->anon_vma); } if (err > 0) { need_flush = true; -- 2.1.1 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@xxxxxxxxx"> email@xxxxxxxxx </a>