The preparatory work for freeing the vmemmap pages associated with each
HugeTLB page is in place, so this feature can now be enabled for arm64.

Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
 arch/arm64/mm/mmu.c | 5 +++++
 fs/Kconfig          | 2 +-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 5d37e461c41f..967b01ce468d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -23,6 +23,7 @@
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <linux/set_memory.h>
+#include <linux/hugetlb.h>
 
 #include <asm/barrier.h>
 #include <asm/cputype.h>
@@ -1134,6 +1135,10 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	pmd_t *pmdp;
 
 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
+
+	if (is_hugetlb_free_vmemmap_enabled() && !altmap)
+		return vmemmap_populate_basepages(start, end, node, altmap);
+
 	do {
 		next = pmd_addr_end(addr, end);
diff --git a/fs/Kconfig b/fs/Kconfig
index 6ce6fdac00a3..02c2d3bf1cb8 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -242,7 +242,7 @@ config HUGETLB_PAGE
 
 config HUGETLB_PAGE_FREE_VMEMMAP
 	def_bool HUGETLB_PAGE
-	depends on X86_64
+	depends on X86_64 || ARM64
 	depends on SPARSEMEM_VMEMMAP
 
 config MEMFD_CREATE
-- 
2.11.0
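
A short, self-contained illustration of the vmemmap_populate() change for
anyone skimming the series: when the free-vmemmap feature is enabled and no
altmap is supplied, the vmemmap must be mapped with base pages so that parts
of it can later be remapped and freed at page granularity; otherwise the
usual PMD-sized mappings are kept. The sketch below is plain userspace C
with placeholder names (populate_basepages, populate_pmd_mapped, and the
hugetlb_free_vmemmap_enabled bool); it models only the decision added in the
hunk above and is not the kernel implementation.

	#include <stdbool.h>
	#include <stdio.h>

	/* Opaque here; a non-NULL pointer stands for device-backed vmemmap. */
	struct vmem_altmap;

	/* Placeholder for the toggle behind is_hugetlb_free_vmemmap_enabled(). */
	static bool hugetlb_free_vmemmap_enabled = true;

	static void populate_basepages(void)
	{
		puts("map vmemmap with base pages (parts can be freed later)");
	}

	static void populate_pmd_mapped(void)
	{
		puts("map vmemmap with PMD block mappings");
	}

	/* Models the guard added to vmemmap_populate() in the patch above. */
	static void populate_vmemmap(struct vmem_altmap *altmap)
	{
		if (hugetlb_free_vmemmap_enabled && !altmap)
			populate_basepages();
		else
			populate_pmd_mapped();
	}

	int main(void)
	{
		populate_vmemmap(NULL);	/* feature on, no altmap -> base pages */
		return 0;
	}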