From: "Liam R. Howlett" <Liam.Howlett@xxxxxxxxxx> Instead of moving (or leaving) the vma iterator pointing at the previous vma, leave it pointing at the insert location. Pointing the vma iterator at the insert location allows for a cleaner walk of the vma tree for MAP_FIXED and the no expansion cases. The vma_prev() call in the case of merging the previous vma is equivalent to vma_iter_prev_range(), since the vma iterator will be pointing to the location just before the previous vma. This change needs to export abort_munmap_vmas() from mm/vma. Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@xxxxxxxxxx> --- mm/mmap.c | 40 +++++++++++++++++++++++----------------- mm/vma.c | 16 ---------------- mm/vma.h | 16 ++++++++++++++++ 3 files changed, 39 insertions(+), 33 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index ac348ae933ba..08cf9199f314 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1401,21 +1401,23 @@ unsigned long mmap_region(struct file *file, unsigned long addr, /* Prepare to unmap any existing mapping in the area */ error = vms_gather_munmap_vmas(&vms, &mas_detach); if (error) - return error; + goto gather_failed; /* Remove any existing mappings from the vma tree */ - if (vma_iter_clear_gfp(&vmi, addr, end, GFP_KERNEL)) - return -ENOMEM; + error = vma_iter_clear_gfp(&vmi, addr, end, GFP_KERNEL); + if (error) + goto clear_tree_failed; /* Unmap any existing mapping in the area */ vms_complete_munmap_vmas(&vms, &mas_detach); next = vms.next; prev = vms.prev; - vma_prev(&vmi); vma = NULL; } else { next = vma_next(&vmi); prev = vma_prev(&vmi); + if (prev) + vma_iter_next_range(&vmi); } /* @@ -1428,11 +1430,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr, vm_flags |= VM_ACCOUNT; } - if (vm_flags & VM_SPECIAL) { - if (prev) - vma_iter_next_range(&vmi); + if (vm_flags & VM_SPECIAL) goto cannot_expand; - } /* Attempt to expand an old mapping */ /* Check next */ @@ -1453,19 +1452,21 @@ unsigned long mmap_region(struct file *file, unsigned long addr, merge_start = prev->vm_start; vma = prev; vm_pgoff = prev->vm_pgoff; - } else if (prev) { - vma_iter_next_range(&vmi); + vma_prev(&vmi); /* Equivalent to going to the previous range */ } - /* Actually expand, if possible */ - if (vma && - !vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) { - khugepaged_enter_vma(vma, vm_flags); - goto expanded; + if (vma) { + /* Actually expand, if possible */ + if (!vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) { + khugepaged_enter_vma(vma, vm_flags); + goto expanded; + } + + /* If the expand fails, then reposition the vma iterator */ + if (unlikely(vma == prev)) + vma_iter_set(&vmi, addr); } - if (vma == prev) - vma_iter_set(&vmi, addr); cannot_expand: /* @@ -1624,6 +1625,11 @@ unsigned long mmap_region(struct file *file, unsigned long addr, unacct_error: if (charged) vm_unacct_memory(charged); + +clear_tree_failed: + if (vms.vma_count) + abort_munmap_vmas(&mas_detach); +gather_failed: validate_mm(mm); return error; } diff --git a/mm/vma.c b/mm/vma.c index fc425eb34bf7..5e0ed5d63877 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -646,22 +646,6 @@ void vma_complete(struct vma_prepare *vp, uprobe_mmap(vp->insert); } -/* - * abort_munmap_vmas - Undo any munmap work and free resources - * - * Reattach any detached vmas and free up the maple tree used to track the vmas. 
 mm/mmap.c | 40 +++++++++++++++++++++++-----------------
 mm/vma.c  | 16 ----------------
 mm/vma.h  | 16 ++++++++++++++++
 3 files changed, 39 insertions(+), 33 deletions(-)

diff --git a/mm/mmap.c b/mm/mmap.c
index ac348ae933ba..08cf9199f314 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1401,21 +1401,23 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		/* Prepare to unmap any existing mapping in the area */
 		error = vms_gather_munmap_vmas(&vms, &mas_detach);
 		if (error)
-			return error;
+			goto gather_failed;
 
 		/* Remove any existing mappings from the vma tree */
-		if (vma_iter_clear_gfp(&vmi, addr, end, GFP_KERNEL))
-			return -ENOMEM;
+		error = vma_iter_clear_gfp(&vmi, addr, end, GFP_KERNEL);
+		if (error)
+			goto clear_tree_failed;
 
 		/* Unmap any existing mapping in the area */
 		vms_complete_munmap_vmas(&vms, &mas_detach);
 		next = vms.next;
 		prev = vms.prev;
-		vma_prev(&vmi);
 		vma = NULL;
 	} else {
 		next = vma_next(&vmi);
 		prev = vma_prev(&vmi);
+		if (prev)
+			vma_iter_next_range(&vmi);
 	}
 
 	/*
@@ -1428,11 +1430,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		vm_flags |= VM_ACCOUNT;
 	}
 
-	if (vm_flags & VM_SPECIAL) {
-		if (prev)
-			vma_iter_next_range(&vmi);
+	if (vm_flags & VM_SPECIAL)
 		goto cannot_expand;
-	}
 
 	/* Attempt to expand an old mapping */
 	/* Check next */
@@ -1453,19 +1452,21 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		merge_start = prev->vm_start;
 		vma = prev;
 		vm_pgoff = prev->vm_pgoff;
-	} else if (prev) {
-		vma_iter_next_range(&vmi);
+		vma_prev(&vmi); /* Equivalent to going to the previous range */
 	}
 
-	/* Actually expand, if possible */
-	if (vma &&
-	    !vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) {
-		khugepaged_enter_vma(vma, vm_flags);
-		goto expanded;
+	if (vma) {
+		/* Actually expand, if possible */
+		if (!vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) {
+			khugepaged_enter_vma(vma, vm_flags);
+			goto expanded;
+		}
+
+		/* If the expand fails, then reposition the vma iterator */
+		if (unlikely(vma == prev))
+			vma_iter_set(&vmi, addr);
 	}
 
-	if (vma == prev)
-		vma_iter_set(&vmi, addr);
 cannot_expand:
 
 	/*
@@ -1624,6 +1625,11 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 unacct_error:
 	if (charged)
 		vm_unacct_memory(charged);
+
+clear_tree_failed:
+	if (vms.vma_count)
+		abort_munmap_vmas(&mas_detach);
+gather_failed:
 	validate_mm(mm);
 	return error;
 }
diff --git a/mm/vma.c b/mm/vma.c
index fc425eb34bf7..5e0ed5d63877 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -646,22 +646,6 @@ void vma_complete(struct vma_prepare *vp,
 	uprobe_mmap(vp->insert);
 }
 
-/*
- * abort_munmap_vmas - Undo any munmap work and free resources
- *
- * Reattach any detached vmas and free up the maple tree used to track the vmas.
- */
-static inline void abort_munmap_vmas(struct ma_state *mas_detach)
-{
-	struct vm_area_struct *vma;
-
-	mas_set(mas_detach, 0);
-	mas_for_each(mas_detach, vma, ULONG_MAX)
-		vma_mark_detached(vma, false);
-
-	__mt_destroy(mas_detach->tree);
-}
-
 /*
  * vms_complete_munmap_vmas() - Finish the munmap() operation
  * @vms: The vma munmap struct
diff --git a/mm/vma.h b/mm/vma.h
index 0e214bbf443e..c85fc7c888a8 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -116,6 +116,22 @@ int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
 		struct ma_state *mas_detach);
 
+/*
+ * abort_munmap_vmas - Undo any munmap work and free resources
+ *
+ * Reattach any detached vmas and free up the maple tree used to track the vmas.
+ */
+static inline void abort_munmap_vmas(struct ma_state *mas_detach)
+{
+	struct vm_area_struct *vma;
+
+	mas_set(mas_detach, 0);
+	mas_for_each(mas_detach, vma, ULONG_MAX)
+		vma_mark_detached(vma, false);
+
+	__mt_destroy(mas_detach->tree);
+}
+
 int
 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		    struct mm_struct *mm, unsigned long start,
-- 
2.43.0