On Wed, May 04, 2022 at 01:14:07AM +0000, Liam Howlett wrote:
...
> @@ -2488,13 +2380,20 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
>  		    struct mm_struct *mm, unsigned long start,
>  		    unsigned long end, struct list_head *uf, bool downgrade)
>  {
> -	struct vm_area_struct *prev, *last;
> +	struct vm_area_struct *prev, *next = NULL;
> +	struct maple_tree mt_detach;
> +	int count = 0;
>  	int error = -ENOMEM;
> -	/* we have start < vma->vm_end */
> +	MA_STATE(mas_detach, &mt_detach, start, end - 1);
> +	mt_init_flags(&mt_detach, MM_MT_FLAGS);
> +	mt_set_external_lock(&mt_detach, &mm->mmap_lock);
> 
>  	if (mas_preallocate(mas, vma, GFP_KERNEL))
>  		return -ENOMEM;
> 
> +	if (mas_preallocate(&mas_detach, vma, GFP_KERNEL))

This one was reported as leaking as well.

unreferenced object 0xffff0802d49b5500 (size 256):
  comm "trinity-c22", pid 107245, jiffies 4295674711 (age 816.980s)
  hex dump (first 32 bytes):
    01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
  backtrace:
    kmem_cache_alloc
    mas_alloc_nodes
    mas_preallocate
    do_mas_align_munmap.constprop.0
    do_mas_align_munmap at mm/mmap.c:2384
    do_mas_munmap
    __vm_munmap
    __arm64_sys_munmap
    invoke_syscall
    el0_svc_common.constprop.0
    do_el0_svc
    el0_svc
    el0t_64_sync_handler
    el0t_64_sync

> +		return -ENOMEM;
> +
>  	mas->last = end - 1;
>  	/*
>  	 * If we need to split any vma, do it now to save pain later.
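
Not tested here, but since the leak is the preallocation for mas_detach
itself, one possible shape of a fix (a sketch only, assuming those nodes
are simply never released on some paths) would be to pair that
mas_preallocate() with mas_destroy() wherever the preallocation is not
consumed, roughly:

	if (mas_preallocate(&mas_detach, vma, GFP_KERNEL)) {
		/* Give back the nodes already preallocated for @mas above. */
		mas_destroy(mas);
		return -ENOMEM;
	}
	...
	/*
	 * Before do_mas_align_munmap() returns, release whatever is still
	 * preallocated on @mas_detach (on the error paths and once the
	 * detached VMAs have been dropped).
	 */
	mas_destroy(&mas_detach);

Whether that is the right place for it in your series I cannot say, so
treat the above purely as an illustration of where the nodes seem to go
missing.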