We want to stop special casing hugetlb mappings and make them go through
generic channels, so teach generic_get_unmapped_area{_topdown} to handle
those. The main difference is that we set info.align_mask for huge mappings.

Signed-off-by: Oscar Salvador <osalvador@xxxxxxx>
---
 include/linux/hugetlb.h | 10 ++++++++++
 mm/mmap.c               |  4 ++++
 2 files changed, 14 insertions(+)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index c9bf68c239a0..0ec14e5e0890 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -1007,9 +1007,19 @@ void hugetlb_unregister_node(struct node *node);
  */
 bool is_raw_hwpoison_page_in_hugepage(struct page *page);
 
+static inline unsigned long huge_page_mask_align(struct file *file)
+{
+	return PAGE_MASK & ~huge_page_mask(hstate_file(file));
+}
+
 #else /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 
+static inline unsigned long huge_page_mask_align(struct file *file)
+{
+	return 0;
+}
+
 static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
 {
 	return NULL;
diff --git a/mm/mmap.c b/mm/mmap.c
index b74d2967cfc0..7b623811d82a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -732,6 +732,8 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.length = len;
 	info.low_limit = mm->mmap_base;
 	info.high_limit = mmap_end;
+	if (filp && is_file_hugepages(filp))
+		info.align_mask = huge_page_mask_align(filp);
 	return vm_unmapped_area(&info);
 }
 
@@ -780,6 +782,8 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 	info.length = len;
 	info.low_limit = PAGE_SIZE;
 	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
+	if (filp && is_file_hugepages(filp))
+		info.align_mask = huge_page_mask_align(filp);
 	addr = vm_unmapped_area(&info);
 
 	/*
-- 
2.45.2
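
As background for the align_mask convention this patch relies on:
vm_unmapped_area() expects info.align_mask to name the address bits that
must be clear in the returned gap, i.e. the bits between the base-page
offset and the desired alignment. Below is a minimal standalone sketch of
the arithmetic behind huge_page_mask_align(), assuming 4 KiB base pages
and a 2 MiB hstate; the EX_* macros and the round-down step are
illustrative simplifications, not the kernel's actual definitions:

#include <stdio.h>

/* Illustrative stand-ins for PAGE_MASK and huge_page_mask(), assuming
 * a 4 KiB base page and a 2 MiB huge page. */
#define EX_PAGE_SIZE	(1UL << 12)		/* 4 KiB */
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))
#define EX_HPAGE_SIZE	(1UL << 21)		/* 2 MiB */
#define EX_HPAGE_MASK	(~(EX_HPAGE_SIZE - 1))	/* huge_page_mask() */

int main(void)
{
	/* Mirrors huge_page_mask_align(): keep only the bits between the
	 * base-page offset and the huge page boundary. */
	unsigned long align_mask = EX_PAGE_MASK & ~EX_HPAGE_MASK;

	/* vm_unmapped_area() adjusts candidate gaps so these bits end up
	 * clear; rounding down here models the topdown case with
	 * align_offset left at 0, as in this patch. */
	unsigned long candidate = 0x7f1234567000UL;	/* page-aligned */
	unsigned long aligned = candidate & ~align_mask;

	printf("align_mask = %#lx\n", align_mask);	/* 0x1ff000 */
	printf("%#lx -> %#lx (2 MiB aligned)\n", candidate, aligned);
	return 0;
}

With a zero mask, as returned by the !CONFIG_HUGETLB_PAGE stub, the
adjustment is a no-op, so non-hugetlb callers through the generic paths
are unaffected.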