From: Chen Zhou <chenzhou10@xxxxxxxxxx>

Introduce the macro CRASH_ALIGN for the alignment of crash kernel
regions, CRASH_ADDR_LOW_MAX for the upper bound of low crash memory,
and CRASH_ADDR_HIGH_MAX for the upper bound of high crash memory, and
use these macros instead of the open-coded values. Besides, to keep
consistent with x86, use CRASH_ALIGN as the lower bound of the crash
kernel reservation.

Signed-off-by: Chen Zhou <chenzhou10@xxxxxxxxxx>
Tested-by: John Donnelly <John.p.donnelly@xxxxxxxxxx>
---
 arch/arm64/include/asm/kexec.h | 6 ++++++
 arch/arm64/mm/init.c           | 4 ++--
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index 00dbcc71aeb2918..b51ceb143cbbdb0 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -25,6 +25,12 @@
 
 #define KEXEC_ARCH KEXEC_ARCH_AARCH64
 
+/* 2M alignment for crash kernel regions */
+#define CRASH_ALIGN		SZ_2M
+
+#define CRASH_ADDR_LOW_MAX	arm64_dma_phys_limit
+#define CRASH_ADDR_HIGH_MAX	MEMBLOCK_ALLOC_ACCESSIBLE
+
 #ifndef __ASSEMBLY__
 
 /**
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 37a81754d9b61f7..2c94ae13b160834 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -75,7 +75,7 @@ phys_addr_t arm64_dma_phys_limit __ro_after_init;
 static void __init reserve_crashkernel(void)
 {
 	unsigned long long crash_base, crash_size;
-	unsigned long long crash_max = arm64_dma_phys_limit;
+	unsigned long long crash_max = CRASH_ADDR_LOW_MAX;
 	int ret;
 
 	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
@@ -91,7 +91,7 @@ static void __init reserve_crashkernel(void)
 	crash_max = crash_base + crash_size;
 
 	/* Current arm64 boot protocol requires 2MB alignment */
-	crash_base = memblock_phys_alloc_range(crash_size, SZ_2M,
+	crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
 					       crash_base, crash_max);
 	if (!crash_base) {
 		pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
-- 
2.25.1
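
P.S. For reviewers: below is a minimal, purely illustrative sketch (not part
of this patch) of how the three macros could combine once CRASH_ALIGN is also
used as the lower bound of the reservation, as the changelog describes. The
function name reserve_crashkernel_sketch() and the exact flow are assumptions
modelled on the x86 behaviour, not the final arm64 implementation.

#include <linux/memblock.h>	/* memblock_phys_alloc_range() */
#include <linux/printk.h>	/* pr_warn() */
#include <asm/kexec.h>		/* CRASH_ALIGN, CRASH_ADDR_LOW_MAX */

static void __init reserve_crashkernel_sketch(unsigned long long crash_base,
					       unsigned long long crash_size)
{
	/* Default search window: [CRASH_ALIGN, CRASH_ADDR_LOW_MAX). */
	unsigned long long crash_max = CRASH_ADDR_LOW_MAX;

	if (crash_base)
		/* User-specified base: allocate exactly at that address. */
		crash_max = crash_base + crash_size;
	else
		/* No base given: start searching from CRASH_ALIGN, as on x86. */
		crash_base = CRASH_ALIGN;

	crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
					       crash_base, crash_max);
	if (!crash_base)
		pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
			crash_size);
}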