The quilt patch titled
     Subject: mm: move 'mmap_min_addr' logic from callers into vm_unmapped_area()
has been removed from the -mm tree.  Its filename was
     mm-move-mmap_min_addr-logic-from-callers-into-vm_unmapped_area.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Subject: mm: move 'mmap_min_addr' logic from callers into vm_unmapped_area()
Date: Tue, 18 Apr 2023 17:40:09 -0400

Instead of having callers care about the mmap_min_addr logic for the
lowest valid mapping address (and some of them getting it wrong), just
move the logic into vm_unmapped_area() itself.  One less thing for
various architecture cases (and generic helpers) to worry about.

We should really try to make much more of this be common code, but baby
steps..

Without this, vm_unmapped_area() could return an address below
mmap_min_addr (because some caller forgot about that).  That then causes
the mmap machinery to think it has found a workable address, but then
later security_mmap_addr(addr) is unhappy about it and the mmap()
returns with a nonsensical error (EPERM).

The proper action is to either return ENOMEM (if the virtual address
space is exhausted), or try to find another address (ie do a bottom-up
search for free addresses after the top-down one failed).

See commit 2afc745f3e30 ("mm: ensure get_unmapped_area() returns higher
address than mmap_min_addr"), which fixed this for one call site (the
generic arch_get_unmapped_area_topdown() fallback) but left other cases
alone.

Link: https://lkml.kernel.org/r/20230418214009.1142926-1-Liam.Howlett@xxxxxxxxxx
Signed-off-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
Cc: Russell King <linux@xxxxxxxxxxxxxxx>
Cc: Liam Howlett <liam.howlett@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 arch/s390/mm/hugetlbpage.c |    2 +-
 arch/s390/mm/mmap.c        |    2 +-
 fs/hugetlbfs/inode.c       |    2 +-
 mm/mmap.c                  |   19 +++++++++++++------
 4 files changed, 16 insertions(+), 9 deletions(-)

--- a/arch/s390/mm/hugetlbpage.c~mm-move-mmap_min_addr-logic-from-callers-into-vm_unmapped_area
+++ a/arch/s390/mm/hugetlbpage.c
@@ -273,7 +273,7 @@ static unsigned long hugetlb_get_unmappe
 
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 	info.length = len;
-	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+	info.low_limit = PAGE_SIZE;
 	info.high_limit = current->mm->mmap_base;
 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 	info.align_offset = 0;
--- a/arch/s390/mm/mmap.c~mm-move-mmap_min_addr-logic-from-callers-into-vm_unmapped_area
+++ a/arch/s390/mm/mmap.c
@@ -136,7 +136,7 @@ unsigned long arch_get_unmapped_area_top
 
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 	info.length = len;
-	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+	info.low_limit = PAGE_SIZE;
 	info.high_limit = mm->mmap_base;
 	if (filp || (flags & MAP_SHARED))
 		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
--- a/fs/hugetlbfs/inode.c~mm-move-mmap_min_addr-logic-from-callers-into-vm_unmapped_area
+++ a/fs/hugetlbfs/inode.c
@@ -208,7 +208,7 @@ hugetlb_get_unmapped_area_topdown(struct
 
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 	info.length = len;
-	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+	info.low_limit = PAGE_SIZE;
 	info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 	info.align_offset = 0;
--- a/mm/mmap.c~mm-move-mmap_min_addr-logic-from-callers-into-vm_unmapped_area
+++ a/mm/mmap.c
@@ -1548,7 +1548,8 @@ static inline int accountable_mapping(st
  */
 static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 {
-	unsigned long length, gap, low_limit;
+	unsigned long length, gap;
+	unsigned long low_limit, high_limit;
 	struct vm_area_struct *tmp;
 
 	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
@@ -1559,8 +1560,11 @@ static unsigned long unmapped_area(struc
 		return -ENOMEM;
 
 	low_limit = info->low_limit;
+	if (low_limit < mmap_min_addr)
+		low_limit = mmap_min_addr;
+	high_limit = info->high_limit;
 retry:
-	if (mas_empty_area(&mas, low_limit, info->high_limit - 1, length))
+	if (mas_empty_area(&mas, low_limit, high_limit - 1, length))
 		return -ENOMEM;
 
 	gap = mas.index;
@@ -1596,7 +1600,8 @@ retry:
  */
 static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 {
-	unsigned long length, gap, high_limit, gap_end;
+	unsigned long length, gap, gap_end;
+	unsigned long low_limit, high_limit;
 	struct vm_area_struct *tmp;
 
 	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
@@ -1605,10 +1610,12 @@ static unsigned long unmapped_area_topdo
 	if (length < info->length)
 		return -ENOMEM;
 
+	low_limit = info->low_limit;
+	if (low_limit < mmap_min_addr)
+		low_limit = mmap_min_addr;
 	high_limit = info->high_limit;
 retry:
-	if (mas_empty_area_rev(&mas, info->low_limit, high_limit - 1,
-			       length))
+	if (mas_empty_area_rev(&mas, low_limit, high_limit - 1, length))
 		return -ENOMEM;
 
 	gap = mas.last + 1 - info->length;
@@ -1743,7 +1750,7 @@ generic_get_unmapped_area_topdown(struct
 
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 	info.length = len;
-	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+	info.low_limit = PAGE_SIZE;
 	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
 	info.align_mask = 0;
 	info.align_offset = 0;
_

Patches currently in -mm which might be from torvalds@xxxxxxxxxxxxxxxxxxxx are
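
[Editor's illustration, not part of the patch or the email above: a minimal,
stand-alone user-space sketch of the idea the patch centralizes, assuming a
simplified single-gap model.  The search helper itself clamps its low limit to
the minimum mappable address, so a caller that passes a bare PAGE_SIZE either
gets an address at or above that minimum, or a clean "no space" result it can
turn into ENOMEM or a bottom-up retry, rather than an address a later security
check would reject with EPERM.  The names find_free_area, MOCK_MMAP_MIN_ADDR
and ENOMEM_SENTINEL are hypothetical stand-ins, not kernel interfaces.]

#include <stdio.h>

#define PAGE_SIZE           4096UL
#define MOCK_MMAP_MIN_ADDR  65536UL  /* stand-in for the kernel's mmap_min_addr */
#define ENOMEM_SENTINEL     (~0UL)   /* stand-in for returning -ENOMEM */

/* Toy top-down search over a single free gap [gap_start, gap_end). */
static unsigned long find_free_area(unsigned long low_limit,
				    unsigned long high_limit,
				    unsigned long length,
				    unsigned long gap_start,
				    unsigned long gap_end)
{
	/* The helper clamps, so callers no longer write max(PAGE_SIZE, min_addr). */
	if (low_limit < MOCK_MMAP_MIN_ADDR)
		low_limit = MOCK_MMAP_MIN_ADDR;

	if (gap_start < low_limit)
		gap_start = low_limit;
	if (gap_end > high_limit)
		gap_end = high_limit;
	if (gap_end < gap_start || gap_end - gap_start < length)
		return ENOMEM_SENTINEL;	/* caller can report ENOMEM or retry bottom-up */

	return gap_end - length;	/* top-down: highest address that fits */
}

int main(void)
{
	/* Caller passes a bare PAGE_SIZE, exactly like the converted call sites. */
	unsigned long addr = find_free_area(PAGE_SIZE, 1UL << 30,
					    16 * PAGE_SIZE,
					    0, 32 * PAGE_SIZE);

	if (addr == ENOMEM_SENTINEL)
		printf("no fit above the minimum: report ENOMEM, not EPERM\n");
	else
		printf("found gap at %#lx\n", addr);
	return 0;
}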