From: "Liam R. Howlett" <Liam.Howlett@xxxxxxxxxx> Only write to the maple tree if we are not inserting or the insert isn't going to overwrite the area to clear. This avoids spanning writes and node coealescing when unnecessary. The change requires a custom search for the linked list addition to find the correct VMA for the prev link. Link: https://lkml.kernel.org/r/20220504011345.662299-3-Liam.Howlett@xxxxxxxxxx Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx> Cc: Catalin Marinas <catalin.marinas@xxxxxxx> Cc: David Howells <dhowells@xxxxxxxxxx> Cc: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx> Cc: SeongJae Park <sj@xxxxxxxxxx> Cc: Vlastimil Babka <vbabka@xxxxxxx> Cc: Will Deacon <will@xxxxxxxxxx> Cc: Davidlohr Bueso <dave@xxxxxxxxxxxx> Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx> --- mm/mmap.c | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index da480836f9de..7d6b5bdbdd1d 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -613,11 +613,11 @@ static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma, * mm's list and the mm tree. It has already been inserted into the interval tree. */ static void __insert_vm_struct(struct mm_struct *mm, struct ma_state *mas, - struct vm_area_struct *vma) + struct vm_area_struct *vma, unsigned long location) { struct vm_area_struct *prev; - mas_set(mas, vma->vm_start); + mas_set(mas, location); prev = mas_prev(mas, 0); vma_mas_store(vma, mas); __vma_link_list(mm, vma, prev); @@ -647,6 +647,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, int remove_next = 0; MA_STATE(mas, &mm->mm_mt, 0, 0); struct vm_area_struct *exporter = NULL, *importer = NULL; + unsigned long ll_prev = vma->vm_start; /* linked list prev. */ if (next && !insert) { if (end >= next->vm_end) { @@ -778,15 +779,27 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, } if (start != vma->vm_start) { - if (vma->vm_start < start) + if ((vma->vm_start < start) && + (!insert || (insert->vm_end != start))) { vma_mas_szero(&mas, vma->vm_start, start); - vma_changed = true; + VM_WARN_ON(insert && insert->vm_start > vma->vm_start); + } else { + vma_changed = true; + } vma->vm_start = start; } if (end != vma->vm_end) { - if (vma->vm_end > end) - vma_mas_szero(&mas, end, vma->vm_end); - vma_changed = true; + if (vma->vm_end > end) { + if (!insert || (insert->vm_start != end)) { + vma_mas_szero(&mas, end, vma->vm_end); + VM_WARN_ON(insert && + insert->vm_end < vma->vm_end); + } else if (insert->vm_start == end) { + ll_prev = vma->vm_end; + } + } else { + vma_changed = true; + } vma->vm_end = end; if (!next) mm->highest_vm_end = vm_end_gap(vma); @@ -821,7 +834,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, * us to insert it before dropping the locks * (it may either follow vma or precede it). */ - __insert_vm_struct(mm, &mas, insert); + __insert_vm_struct(mm, &mas, insert, ll_prev); } if (anon_vma) { @@ -908,6 +921,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, if (insert && file) uprobe_mmap(insert); + mas_destroy(&mas); validate_mm(mm); return 0; } -- 2.35.1