Unmapping vmas which have the VM_HUGETLB | VM_PFNMAP flags set, or
which have uprobes set, needs to be done with write mmap_sem held,
since it may update vm_flags. So it is not safe to deal with these
kinds of special mappings with read mmap_sem; handle them with a
regular do_munmap() call instead.

Michal suggested making this a separate patch for the sake of safety
and bisectability.

Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Signed-off-by: Yang Shi <yang.shi@xxxxxxxxxxxxxxxxx>
---
 mm/mmap.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/mm/mmap.c b/mm/mmap.c
index 2234d5a..06cb83c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2766,6 +2766,16 @@ static inline void munlock_vmas(struct vm_area_struct *vma,
 	}
 }
 
+static inline bool can_zap_with_rlock(struct vm_area_struct *vma)
+{
+	if ((vma->vm_file &&
+	     vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) ||
+	    (vma->vm_flags & (VM_HUGETLB | VM_PFNMAP)))
+		return false;
+
+	return true;
+}
+
 /*
  * Zap pages with read mmap_sem held
  *
@@ -2808,6 +2818,17 @@ static int do_munmap_zap_rlock(struct mm_struct *mm, unsigned long start,
 		goto out;
 	}
 
+	/*
+	 * Unmapping vmas which have the VM_HUGETLB | VM_PFNMAP flags set,
+	 * or which have uprobes set, needs to be done with write mmap_sem
+	 * held since it may update vm_flags. Deal with such mappings via
+	 * the regular do_munmap() call.
+	 */
+	for (vma = start_vma; vma && vma->vm_start < end; vma = vma->vm_next) {
+		if (!can_zap_with_rlock(vma))
+			goto regular_path;
+	}
+
 	/* Handle mlocked vmas */
 	if (mm->locked_vm) {
 		vma = start_vma;
@@ -2828,6 +2849,9 @@ static int do_munmap_zap_rlock(struct mm_struct *mm, unsigned long start,
 
 	return 0;
 
+regular_path:
+	ret = do_munmap(mm, start, len, uf);
+
 out:
 	up_write(&mm->mmap_sem);
 	return ret;
-- 
1.8.3.1
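
As an aside on the eligibility test: below is a minimal, self-contained
userspace sketch of the bit-test pattern can_zap_with_rlock() relies on.
The VM_* values are redefined locally so the example compiles on its own
and are only illustrative stand-ins for the kernel's definitions. The
point of the sketch: a flag-membership test must use '&'; a test written
with '|' would be non-zero for every vma and send everything down the
regular do_munmap() path.

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-ins for the kernel's vm_flags bits. */
	#define VM_PFNMAP  0x00000400UL
	#define VM_HUGETLB 0x00400000UL

	/*
	 * A vma is eligible for the read-locked zap path only if none of
	 * the special bits are set. '&' tests membership in the mask;
	 * 'vm_flags | mask' would always be non-zero.
	 */
	static bool can_zap_with_rlock(unsigned long vm_flags)
	{
		return !(vm_flags & (VM_HUGETLB | VM_PFNMAP));
	}

	int main(void)
	{
		printf("%d\n", can_zap_with_rlock(0));         /* 1: eligible */
		printf("%d\n", can_zap_with_rlock(VM_PFNMAP)); /* 0: needs write lock */
		return 0;
	}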