Currently the 'hugetlb_cma=' command line argument does not create a CMA area
on ARM64_16K_PAGES and ARM64_64K_PAGES based platforms. Instead, it just ends
up with the following warning message. This is because hugetlb_cma_reserve()
never gets called for these huge page sizes.

[   64.255669] hugetlb_cma: the option isn't supported by current arch

This enables CMA area reservation on ARM64_16K_PAGES and ARM64_64K_PAGES
configs by defining a unified arm64_hugetlb_cma_reserve() that is wrapped
in CONFIG_CMA.

Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Cc: Barry Song <song.bao.hua@xxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: linux-arm-kernel@xxxxxxxxxxxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Signed-off-by: Anshuman Khandual <anshuman.khandual@xxxxxxx>
---
Applies on 5.8-rc3.

Changes in V2:

- Moved arm64_hugetlb_cma_reserve() stub and declaration near call site

Changes in V1: (https://patchwork.kernel.org/patch/11619839/)

 arch/arm64/mm/hugetlbpage.c | 38 +++++++++++++++++++++++++++++++++++++
 arch/arm64/mm/init.c        | 12 +++++++++---
 2 files changed, 47 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 0a52ce46f020..ea7fb48b8617 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -19,6 +19,44 @@
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
 
+/*
+ * HugeTLB Support Matrix
+ *
+ * ---------------------------------------------------
+ * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
+ * ---------------------------------------------------
+ * |     4K    |   64K    |   2M  |    32M   |   1G  |
+ * |    16K    |    2M    |  32M  |     1G   |       |
+ * |    64K    |    2M    |  512M |    16G   |       |
+ * ---------------------------------------------------
+ */
+
+/*
+ * Reserve CMA areas for the largest supported gigantic
+ * huge page when requested. Any other smaller gigantic
+ * huge pages could still be served from those areas.
+ */
+#ifdef CONFIG_CMA
+void __init arm64_hugetlb_cma_reserve(void)
+{
+	int order;
+
+#ifdef CONFIG_ARM64_4K_PAGES
+	order = PUD_SHIFT - PAGE_SHIFT;
+#else
+	order = CONT_PMD_SHIFT + PMD_SHIFT - PAGE_SHIFT;
+#endif
+	/*
+	 * HugeTLB CMA reservation is required for gigantic
+	 * huge pages which could not be allocated via the
+	 * page allocator. Just warn if there is any change
+	 * breaking this assumption.
+	 */
+	WARN_ON(order <= MAX_ORDER);
+	hugetlb_cma_reserve(order);
+}
+#endif /* CONFIG_CMA */
+
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 bool arch_hugetlb_migration_supported(struct hstate *h)
 {
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 1e93cfc7c47a..8a260ef0cb94 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -406,6 +406,14 @@ void __init arm64_memblock_init(void)
 	dma_contiguous_reserve(arm64_dma32_phys_limit);
 }
 
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
+void arm64_hugetlb_cma_reserve(void);
+#else
+static inline void arm64_hugetlb_cma_reserve(void)
+{
+}
+#endif
+
 void __init bootmem_init(void)
 {
 	unsigned long min, max;
@@ -425,9 +433,7 @@ void __init bootmem_init(void)
 	 * initialize node_online_map that gets used in hugetlb_cma_reserve()
 	 * while allocating required CMA size across online nodes.
 	 */
-#ifdef CONFIG_ARM64_4K_PAGES
-	hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
-#endif
+	arm64_hugetlb_cma_reserve();
 
 	/*
 	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
--
2.20.1
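
A note on the order arithmetic, not part of the patch: the small userspace
sketch below works through the values the patch would compute, assuming the
arm64 shift values found around v5.8 with the default configs (PUD_SHIFT = 30
for 4K pages; PMD_SHIFT = 25/29 and CONT_PMD_SHIFT = 5 for 16K/64K pages).
It only illustrates that the computed orders land on the 1G/1G/16G gigantic
page sizes from the support matrix in the patch.

#include <stdio.h>

int main(void)
{
	/*
	 * Illustrative sketch only: the shift values below are assumed
	 * arm64 defaults, hard-coded here so this builds in userspace.
	 */
	struct {
		const char *config;
		unsigned int page_shift;
		unsigned int order;	/* as computed by the patch */
	} cases[] = {
		/* 4K:  order = PUD_SHIFT(30) - PAGE_SHIFT(12) */
		{ "ARM64_4K_PAGES",  12, 30 - 12 },
		/* 16K: order = CONT_PMD_SHIFT(5) + PMD_SHIFT(25) - PAGE_SHIFT(14) */
		{ "ARM64_16K_PAGES", 14, 5 + 25 - 14 },
		/* 64K: order = CONT_PMD_SHIFT(5) + PMD_SHIFT(29) - PAGE_SHIFT(16) */
		{ "ARM64_64K_PAGES", 16, 5 + 29 - 16 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		unsigned long long size =
			1ULL << (cases[i].order + cases[i].page_shift);

		/* Prints 1G for 4K and 16K pages, 16G for 64K pages */
		printf("%-16s order=%2u gigantic page=%lluG\n",
		       cases[i].config, cases[i].order, size >> 30);
	}
	return 0;
}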