From: "Liam R. Howlett" <Liam.Howlett@xxxxxxxxxx> Extract all necessary operations that need to be completed after the vma maple tree is updated from a munmap() operation. Extracting this makes the later patch in the series easier to understand. Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@xxxxxxxxxx> Reviewed-by: Suren Baghdasaryan <surenb@xxxxxxxxxx> --- mm/vma.c | 80 ++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 55 insertions(+), 25 deletions(-) diff --git a/mm/vma.c b/mm/vma.c index 58ecd447670d..3a2098464b8f 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -684,6 +684,58 @@ static inline void abort_munmap_vmas(struct ma_state *mas_detach) __mt_destroy(mas_detach->tree); } +/* + * vmi_complete_munmap_vmas() - Finish the munmap() operation + * @vmi: The vma iterator + * @vma: The first vma to be munmapped + * @mm: The mm struct + * @start: The start address + * @end: The end address + * @unlock: Unlock the mm or not + * @mas_detach: them maple state of the detached vma maple tree + * @locked_vm: The locked_vm count in the detached vmas + * + * This function updates the mm_struct, unmaps the region, frees the resources + * used for the munmap() and may downgrade the lock - if requested. Everything + * needed to be done once the vma maple tree is updated. + */ +static void +vmi_complete_munmap_vmas(struct vma_iterator *vmi, struct vm_area_struct *vma, + struct mm_struct *mm, unsigned long start, unsigned long end, + bool unlock, struct ma_state *mas_detach, + unsigned long locked_vm) +{ + struct vm_area_struct *prev, *next; + int count; + + count = mas_detach->index + 1; + mm->map_count -= count; + mm->locked_vm -= locked_vm; + if (unlock) + mmap_write_downgrade(mm); + + prev = vma_iter_prev_range(vmi); + next = vma_next(vmi); + if (next) + vma_iter_prev_range(vmi); + + /* + * We can free page tables without write-locking mmap_lock because VMAs + * were isolated before we downgraded mmap_lock. + */ + mas_set(mas_detach, 1); + unmap_region(mm, mas_detach, vma, prev, next, start, end, count, + !unlock); + /* Statistics and freeing VMAs */ + mas_set(mas_detach, 0); + remove_mt(mm, mas_detach); + validate_mm(mm); + if (unlock) + mmap_read_unlock(mm); + + __mt_destroy(mas_detach->tree); +} + /* * do_vmi_align_munmap() - munmap the aligned region from @start to @end. * @vmi: The vma iterator @@ -703,7 +755,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, struct mm_struct *mm, unsigned long start, unsigned long end, struct list_head *uf, bool unlock) { - struct vm_area_struct *prev, *next = NULL; + struct vm_area_struct *next = NULL; struct maple_tree mt_detach; int count = 0; int error = -ENOMEM; @@ -818,31 +870,9 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, goto clear_tree_failed; /* Point of no return */ - mm->locked_vm -= locked_vm; - mm->map_count -= count; - if (unlock) - mmap_write_downgrade(mm); - - prev = vma_iter_prev_range(vmi); - next = vma_next(vmi); - if (next) - vma_iter_prev_range(vmi); - - /* - * We can free page tables without write-locking mmap_lock because VMAs - * were isolated before we downgraded mmap_lock. 
- */ - mas_set(&mas_detach, 1); - unmap_region(mm, &mas_detach, vma, prev, next, start, end, count, - !unlock); - /* Statistics and freeing VMAs */ - mas_set(&mas_detach, 0); - remove_mt(mm, &mas_detach); - validate_mm(mm); - if (unlock) - mmap_read_unlock(mm); + vmi_complete_munmap_vmas(vmi, vma, mm, start, end, unlock, &mas_detach, + locked_vm); - __mt_destroy(&mt_detach); return 0; modify_vma_failed: -- 2.43.0
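
[Editor's note: the following is an illustrative sketch, not part of the patch.]
For readers following the series from userspace, here is a minimal C program
showing the operation whose completion path is being extracted. It uses only
the standard mmap()/munmap() API. Unmapping the middle of a single anonymous
mapping forces the kernel to split the VMA, detach the middle range, and then
run the completion steps (accounting, unmap_region(), freeing) that this patch
moves into vmi_complete_munmap_vmas().

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p;

	/* One 4-page anonymous mapping: a single VMA in the kernel. */
	p = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/* Unmap the middle two pages; the single VMA becomes two. */
	if (munmap(p + page, 2 * page)) {
		perror("munmap");
		return EXIT_FAILURE;
	}

	/* The first and last pages are still mapped and usable. */
	p[0] = 'a';
	p[3 * page] = 'b';
	printf("split ok: %c %c\n", p[0], p[3 * page]);

	munmap(p, page);
	munmap(p + 3 * page, page);
	return EXIT_SUCCESS;
}

Comparing /proc/<pid>/maps before and after the partial munmap() shows one
region become two, which is the split-and-detach case handled above.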