We want to stop special-casing hugetlb mappings and make them go through
the generic channels, so teach generic_get_unmapped_area{_topdown}() to
handle them. The main difference is that we set info.align_mask for huge
mappings.

Signed-off-by: Oscar Salvador <osalvador@xxxxxxx>
---
 include/linux/hugetlb.h | 10 ++++++++++
 mm/mmap.c               |  4 ++++
 2 files changed, 14 insertions(+)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 2b3c3a404769..1c7b0b32ff7e 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -1029,9 +1029,19 @@ void hugetlb_unregister_node(struct node *node);
  */
 bool is_raw_hwpoison_page_in_hugepage(struct page *page);
 
+static inline unsigned long huge_page_mask_align(struct file *file)
+{
+	return PAGE_MASK & ~huge_page_mask(hstate_file(file));
+}
+
 #else /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 
+static inline unsigned long huge_page_mask_align(struct file *file)
+{
+	return 0;
+}
+
 static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
 {
 	return NULL;
diff --git a/mm/mmap.c b/mm/mmap.c
index 83b4682ec85c..09131b705e7b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1743,6 +1743,8 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.length = len;
 	info.low_limit = mm->mmap_base;
 	info.high_limit = mmap_end;
+	if (filp && is_file_hugepages(filp))
+		info.align_mask = huge_page_mask_align(filp);
 	return vm_unmapped_area(&info);
 }
 
@@ -1791,6 +1793,8 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 	info.length = len;
 	info.low_limit = PAGE_SIZE;
 	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
+	if (filp && is_file_hugepages(filp))
+		info.align_mask = huge_page_mask_align(filp);
 	addr = vm_unmapped_area(&info);
 
 	/*
-- 
2.45.2
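
As a quick sanity check of what the new mask actually evaluates to, here
is a minimal userspace sketch (not part of the patch). It hard-codes the
usual x86-64 constants, 4 KiB base pages and a 2 MiB hstate, purely for
illustration; in the kernel these come from PAGE_MASK and
huge_page_mask(hstate_file(file)):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))	/* ...fffff000 */
	#define HPAGE_SHIFT	21
	#define HPAGE_MASK	(~((1UL << HPAGE_SHIFT) - 1))	/* ...ffe00000 */

	int main(void)
	{
		/* Mirrors huge_page_mask_align():
		 * PAGE_MASK & ~huge_page_mask(hstate_file(file)) */
		unsigned long align_mask = PAGE_MASK & ~HPAGE_MASK;

		/* Prints 0x1ff000: the page-granular bits below the
		 * hugepage boundary. */
		printf("align_mask = %#lx\n", align_mask);
		return 0;
	}

With info.align_offset left at zero, as in the generic helpers touched
here, vm_unmapped_area() only returns addresses that have these bits
clear, i.e. 2 MiB-aligned ones in this example, which is the alignment
the hugetlb-specific get_unmapped_area paths used to guarantee.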