From: "Liam R. Howlett" <Liam.Howlett@xxxxxxxxxx> Splitting can be more efficient when the order is not of concern. Change do_vmi_align_munmap() to reduce walking of the tree during split operations. move_vma() must also be altered to remove the dependency of keeping the original VMA as the active part of the split. Transition to using vma iterator to look up the prev and/or next vma after munmap. Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx> --- mm/mmap.c | 18 ++---------------- mm/mremap.c | 27 ++++++++++++++++----------- 2 files changed, 18 insertions(+), 27 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index af611f805776..e0e70b9bc391 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2388,21 +2388,9 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, for_each_vma_range(*vmi, next, end) { /* Does it split the end? */ if (next->vm_end > end) { - struct vm_area_struct *split; - - error = __split_vma(vmi, next, end, 1); + error = __split_vma(vmi, next, end, 0); if (error) goto end_split_failed; - - split = vma_prev(vmi); - error = munmap_sidetree(split, &mas_detach); - if (error) - goto munmap_sidetree_failed; - - count++; - if (vma == next) - vma = split; - break; } error = munmap_sidetree(next, &mas_detach); if (error) @@ -2415,9 +2403,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, #endif } - if (!next) - next = vma_next(vmi); - + next = vma_next(vmi); if (unlikely(uf)) { /* * If userfaultfd_unmap_prep returns an error the vmas diff --git a/mm/mremap.c b/mm/mremap.c index cbafea7fe895..00b39f0121bc 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -580,11 +580,12 @@ static unsigned long move_vma(struct vm_area_struct *vma, unsigned long vm_flags = vma->vm_flags; unsigned long new_pgoff; unsigned long moved_len; - unsigned long excess = 0; + unsigned long account_start = 0; + unsigned long account_end = 0; unsigned long hiwater_vm; - int split = 0; int err = 0; bool need_rmap_locks; + VMA_ITERATOR(vmi, mm, old_addr); /* * We'd prefer to avoid failure later on in do_munmap: @@ -662,10 +663,10 @@ static unsigned long move_vma(struct vm_area_struct *vma, /* Conceal VM_ACCOUNT so old reservation is not undone */ if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) { vma->vm_flags &= ~VM_ACCOUNT; - excess = vma->vm_end - vma->vm_start - old_len; - if (old_addr > vma->vm_start && - old_addr + old_len < vma->vm_end) - split = 1; + if (vma->vm_start < old_addr) + account_start = vma->vm_start; + if (vma->vm_end > old_addr + old_len) + account_end = vma->vm_end; } /* @@ -700,11 +701,11 @@ static unsigned long move_vma(struct vm_area_struct *vma, return new_addr; } - if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) { + if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) { /* OOM: unable to split vma, just get accounts right */ if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) vm_acct_memory(old_len >> PAGE_SHIFT); - excess = 0; + account_start = account_end = 0; } if (vm_flags & VM_LOCKED) { @@ -715,10 +716,14 @@ static unsigned long move_vma(struct vm_area_struct *vma, mm->hiwater_vm = hiwater_vm; /* Restore VM_ACCOUNT if one or two pieces of vma left */ - if (excess) { + if (account_start) { + vma = vma_prev(&vmi); + vma->vm_flags |= VM_ACCOUNT; + } + + if (account_end) { + vma = vma_next(&vmi); vma->vm_flags |= VM_ACCOUNT; - if (split) - find_vma(mm, vma->vm_end)->vm_flags |= VM_ACCOUNT; } return new_addr; -- 2.35.1