The quilt patch titled
     Subject: mm: pass vma iterator through to __vma_adjust()
has been removed from the -mm tree.  Its filename was
     mm-pass-vma-iterator-through-to-__vma_adjust.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: "Liam R. Howlett" <Liam.Howlett@xxxxxxxxxx>
Subject: mm: pass vma iterator through to __vma_adjust()
Date: Fri, 20 Jan 2023 11:26:36 -0500

Pass the iterator through to be used in __vma_adjust().  The state of the
iterator needs to be correct for the operation that will occur, so make
the adjustments.

Link: https://lkml.kernel.org/r/20230120162650.984577-36-Liam.Howlett@xxxxxxxxxx
Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/mmap.c |   22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

--- a/mm/mmap.c~mm-pass-vma-iterator-through-to-__vma_adjust
+++ a/mm/mmap.c
@@ -525,6 +525,10 @@ inline int vma_expand(struct vma_iterato
 		vma_interval_tree_remove(vma, root);
 	}
 
+	/* VMA iterator points to previous, so set to start if necessary */
+	if (vma_iter_addr(vmi) != start)
+		vma_iter_set(vmi, start);
+
 	vma->vm_start = start;
 	vma->vm_end = end;
 	vma->vm_pgoff = pgoff;
@@ -2164,13 +2168,13 @@ static void unmap_region(struct mm_struc
 /*
  * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
  * has already been checked or doesn't make sense to fail.
+ * VMA Iterator will point to the end VMA.
  */
 int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		unsigned long addr, int new_below)
 {
 	struct vm_area_struct *new;
 	int err;
-	unsigned long end = vma->vm_end;
 
 	validate_mm_mt(vma->vm_mm);
 
@@ -2206,14 +2210,17 @@ int __split_vma(struct vma_iterator *vmi
 		new->vm_ops->open(new);
 
 	if (new_below)
-		err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
-			((addr - new->vm_start) >> PAGE_SHIFT), new);
+		err = __vma_adjust(vmi, vma, addr, vma->vm_end,
+			vma->vm_pgoff + ((addr - new->vm_start) >> PAGE_SHIFT),
+			new, NULL);
 	else
-		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
+		err = __vma_adjust(vmi, vma, vma->vm_start, addr, vma->vm_pgoff,
+			new, NULL);
 
 	/* Success. */
 	if (!err) {
-		vma_iter_set(vmi, end);
+		if (new_below)
+			vma_next(vmi);
 		return 0;
 	}
 
@@ -2308,8 +2315,7 @@ do_vmi_align_munmap(struct vma_iterator
 			if (error)
 				goto start_split_failed;
 
-			vma_iter_set(vmi, start);
-			vma = vma_find(vmi, end);
+			vma = vma_iter_load(vmi);
 		}
 
 		prev = vma_prev(vmi);
@@ -2329,7 +2335,6 @@ do_vmi_align_munmap(struct vma_iterator
 			if (error)
 				goto end_split_failed;
 
-			vma_iter_set(vmi, end);
 			split = vma_prev(vmi);
 			error = munmap_sidetree(split, &mas_detach);
 			if (error)
@@ -2573,6 +2578,7 @@ cannot_expand:
 		goto unacct_error;
 	}
 
+	vma_iter_set(&vmi, addr);
 	vma->vm_start = addr;
 	vma->vm_end = end;
 	vma->vm_flags = vm_flags;
_

Patches currently in -mm which might be from Liam.Howlett@xxxxxxxxxx are