Export the new do_mas_munmap() interface and use it in place of the old
__do_munmap() interface.

Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
---
 include/linux/mm.h |  4 ++--
 mm/mmap.c          | 16 ++++------------
 mm/mremap.c        |  7 ++++---
 3 files changed, 10 insertions(+), 17 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index dd8abaa433f9..cbc79a9fa911 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2604,8 +2604,8 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr,
 extern unsigned long do_mmap(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot, unsigned long flags,
 	unsigned long pgoff, unsigned long *populate, struct list_head *uf);
-extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
-		       struct list_head *uf, bool downgrade);
+extern int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
+		unsigned long start, size_t len, struct list_head *uf, bool downgrade);
 extern int do_munmap(struct mm_struct *, unsigned long, size_t,
 		     struct list_head *uf);
 extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
diff --git a/mm/mmap.c b/mm/mmap.c
index 3e67fb5eac31..cf4aa715eb63 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2574,13 +2574,6 @@ int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
 	return do_mas_align_munmap(mas, vma, mm, start, end, uf, downgrade);
 }
 
-int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
-		struct list_head *uf, bool downgrade)
-{
-	MA_STATE(mas, &mm->mm_mt, start, start);
-	return do_mas_munmap(&mas, mm, start, len, uf, downgrade);
-}
-
 /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
  * @mm: The mm_struct
  * @start: The start address to munmap
@@ -2590,7 +2583,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	      struct list_head *uf)
 {
-	return __do_munmap(mm, start, len, uf, false);
+	MA_STATE(mas, &mm->mm_mt, start, start);
+	return do_mas_munmap(&mas, mm, start, len, uf, false);
 }
 
 unsigned long mmap_region(struct file *file, unsigned long addr,
@@ -2834,11 +2828,12 @@ static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
 	int ret;
 	struct mm_struct *mm = current->mm;
 	LIST_HEAD(uf);
+	MA_STATE(mas, &mm->mm_mt, start, start);
 
 	if (mmap_write_lock_killable(mm))
 		return -EINTR;
 
-	ret = __do_munmap(mm, start, len, &uf, downgrade);
+	ret = do_mas_munmap(&mas, mm, start, len, &uf, downgrade);
 	/*
 	 * Returning 1 indicates mmap_lock is downgraded.
 	 * But 1 is not legal return value of vm_munmap() and munmap(), reset
@@ -2975,9 +2970,6 @@ static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 	arch_unmap(mm, newbrk, oldbrk);
 
 	if (likely(vma->vm_start >= newbrk)) { // remove entire mapping(s)
-		mas_set(mas, newbrk);
-		if (vma->vm_start != newbrk)
-			mas_reset(mas); // cause a re-walk for the first overlap.
 		ret = do_mas_munmap(mas, mm, newbrk, oldbrk-newbrk, uf, true);
 		goto munmap_full_vma;
 	}
diff --git a/mm/mremap.c b/mm/mremap.c
index 04143755cd1e..d2dba8188be5 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -881,14 +881,15 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	/*
 	 * Always allow a shrinking remap: that just unmaps
 	 * the unnecessary pages..
-	 * __do_munmap does all the needed commit accounting, and
+	 * do_mas_munmap does all the needed commit accounting, and
 	 * downgrades mmap_lock to read if so directed.
 	 */
 	if (old_len >= new_len) {
 		int retval;
+		MA_STATE(mas, &mm->mm_mt, addr + new_len, addr + new_len);
 
-		retval = __do_munmap(mm, addr+new_len, old_len - new_len,
-				  &uf_unmap, true);
+		retval = do_mas_munmap(&mas, mm, addr + new_len,
+				       old_len - new_len, &uf_unmap, true);
 		if (retval < 0 && old_len != new_len) {
 			ret = retval;
 			goto out;
-- 
2.30.2
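
For reference, a minimal sketch of the calling convention the patch converts callers
to, modelled on the __vm_munmap() hunk above (start/len stand in for the caller's
unmap range; this shows the non-downgrade case, and error handling follows the call
sites in the diff):

	int ret;
	struct mm_struct *mm = current->mm;
	LIST_HEAD(uf);
	MA_STATE(mas, &mm->mm_mt, start, start);

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	/* downgrade == false: the caller keeps and releases the write lock */
	ret = do_mas_munmap(&mas, mm, start, len, &uf, false);
	mmap_write_unlock(mm);
	userfaultfd_unmap_complete(mm, &uf);

Passing the ma_state explicitly is what lets a caller that already holds a maple
tree position, such as do_brk_munmap() above, hand it straight to do_mas_munmap()
instead of re-walking from the start address.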