Currently the 'hugetlb_cma=' command line argument does not create a CMA
area on ARM64_16K_PAGES and ARM64_64K_PAGES based platforms. Instead, it
just ends up with the following warning message, because
hugetlb_cma_reserve() never gets called for these huge page sizes:

[ 64.255669] hugetlb_cma: the option isn't supported by current arch

This enables CMA area reservation on ARM64_16K_PAGES and ARM64_64K_PAGES
configs by defining a unified arm64_hugetlb_cma_reserve() that is wrapped
in CONFIG_CMA.

Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Cc: Barry Song <song.bao.hua@xxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: linux-arm-kernel@xxxxxxxxxxxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Signed-off-by: Anshuman Khandual <anshuman.khandual@xxxxxxx>
---
Applies on 5.8-rc2. A quick standalone check of the order arithmetic is
appended after the patch.

 arch/arm64/include/asm/hugetlb.h |  8 ++++++++
 arch/arm64/mm/hugetlbpage.c      | 38 ++++++++++++++++++++++++++++++++++++++
 arch/arm64/mm/init.c             |  4 +---
 3 files changed, 47 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 94ba0c5..8eea0e0 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -17,6 +17,14 @@
 extern bool arch_hugetlb_migration_supported(struct hstate *h);
 #endif
 
+#ifdef CONFIG_CMA
+void arm64_hugetlb_cma_reserve(void);
+#else
+static inline void arm64_hugetlb_cma_reserve(void)
+{
+}
+#endif
+
 static inline void arch_clear_hugepage_flags(struct page *page)
 {
 	clear_bit(PG_dcache_clean, &page->flags);
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 0a52ce4..ea7fb48 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -19,6 +19,44 @@
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
 
+/*
+ * HugeTLB Support Matrix
+ *
+ * ---------------------------------------------------
+ * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
+ * ---------------------------------------------------
+ * |     4K    |   64K    |   2M  |    32M   |   1G  |
+ * |    16K    |    2M    |  32M  |     1G   |       |
+ * |    64K    |    2M    | 512M  |    16G   |       |
+ * ---------------------------------------------------
+ */
+
+/*
+ * Reserve CMA areas for the largest supported gigantic
+ * huge page when requested. Any other smaller gigantic
+ * huge pages could still be served from those areas.
+ */
+#ifdef CONFIG_CMA
+void __init arm64_hugetlb_cma_reserve(void)
+{
+	int order;
+
+#ifdef CONFIG_ARM64_4K_PAGES
+	order = PUD_SHIFT - PAGE_SHIFT;
+#else
+	order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
+#endif
+	/*
+	 * HugeTLB CMA reservation is required for gigantic
+	 * huge pages which could not be allocated via the
+	 * page allocator. Just warn if there is any change
+	 * breaking this assumption.
+	 */
+	WARN_ON(order <= MAX_ORDER);
+	hugetlb_cma_reserve(order);
+}
+#endif /* CONFIG_CMA */
+
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 bool arch_hugetlb_migration_supported(struct hstate *h)
 {
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 1e93cfc..fabf8b0 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -425,9 +425,7 @@ void __init bootmem_init(void)
 	 * initialize node_online_map that gets used in hugetlb_cma_reserve()
	 * while allocating required CMA size across online nodes.
	 */
-#ifdef CONFIG_ARM64_4K_PAGES
-	hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
-#endif
+	arm64_hugetlb_cma_reserve();
 
 	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
--
2.7.4
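
Not part of the patch: a minimal userspace sketch that double checks the
order arithmetic used above. The shift values are assumptions hardcoded
from the 5.8-era arm64 pgtable-hwdef.h definitions (PAGE_SHIFT/PMD_SHIFT
of 12/21 for 4K, 14/25 for 16K and 16/29 for 64K pages, PUD_SHIFT of 30
on 4K, and CONT_PMD_SHIFT of 5 on the 16K and 64K configs).

#include <stdio.h>

/* Print the huge page size that a given CMA order corresponds to. */
static void check(const char *config, int order, int page_shift)
{
	/* largest gigantic huge page served from the reserved CMA area */
	unsigned long long size = 1ULL << (order + page_shift);

	printf("%s: order %2d -> %lluM\n", config, order, size >> 20);
}

int main(void)
{
	check("4K  (PUD)",      30 - 12,     12); /* PUD_SHIFT - PAGE_SHIFT */
	check("16K (CONT PMD)", 5 + 25 - 14, 14); /* CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT */
	check("64K (CONT PMD)", 5 + 29 - 16, 16); /* CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT */
	return 0;
}

This prints orders 18, 16 and 18, mapping to 1G, 1G and 16G respectively,
which matches the largest entry per page size in the support matrix above.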