Move CRASH_ALIGN to header asm/kexec.h and replace the hard-coded
alignment with macro CRASH_ALIGN in function reserve_crashkernel().

Suggested-by: Dave Young <dyoung@xxxxxxxxxx>
Signed-off-by: Chen Zhou <chenzhou10@xxxxxxxxxx>
Tested-by: John Donnelly <John.p.donnelly@xxxxxxxxxx>
---
 arch/x86/include/asm/kexec.h | 3 +++
 arch/x86/kernel/setup.c      | 5 +----
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 6802c59e8252..8cf9d3fd31c7 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -18,6 +18,9 @@

 # define KEXEC_CONTROL_CODE_MAX_SIZE 2048

+/* 16M alignment for crash kernel regions */
+#define CRASH_ALIGN SZ_16M
+
 #ifndef __ASSEMBLY__

 #include <linux/string.h>
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 84f581c91db4..bf373422dc8a 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -395,9 +395,6 @@ static void __init memblock_x86_reserve_range_setup_data(void)

 #ifdef CONFIG_KEXEC_CORE

-/* 16M alignment for crash kernel regions */
-#define CRASH_ALIGN SZ_16M
-
 /*
  * Keep the crash kernel below this limit.
  *
@@ -515,7 +512,7 @@ static void __init reserve_crashkernel(void)
 	} else {
 		unsigned long long start;

-		start = memblock_phys_alloc_range(crash_size, SZ_1M, crash_base,
+		start = memblock_phys_alloc_range(crash_size, CRASH_ALIGN, crash_base,
 						  crash_base + crash_size);
 		if (start != crash_base) {
 			pr_info("crashkernel reservation failed - memory is in use.\n");
-- 
2.20.1
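
[Editor's note, not part of the patch] For readers unfamiliar with the align argument of memblock_phys_alloc_range(), the sketch below is a standalone userspace illustration (assumed values, not kernel code) of what raising the alignment at the fixed-base call site from SZ_1M to CRASH_ALIGN (SZ_16M) means: any base the allocator returns is rounded up to a multiple of the requested alignment, using the same power-of-two rounding as the kernel's ALIGN() macro.

/*
 * Userspace sketch only: shows how a candidate crash kernel base is
 * rounded when 1M vs. 16M (CRASH_ALIGN) alignment is requested.
 * Constants mirror include/linux/sizes.h; the candidate address is
 * arbitrary and purely illustrative.
 */
#include <stdio.h>
#include <stdint.h>

#define SZ_1M        0x00100000ULL
#define SZ_16M       0x01000000ULL
#define CRASH_ALIGN  SZ_16M

/* Same rounding the kernel's ALIGN() macro performs for power-of-two alignments. */
static uint64_t align_up(uint64_t addr, uint64_t align)
{
	return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
	uint64_t candidate = 0x12345678ULL;	/* hypothetical candidate base */

	printf("1M-aligned  base: %#llx\n",
	       (unsigned long long)align_up(candidate, SZ_1M));
	printf("16M-aligned base: %#llx\n",
	       (unsigned long long)align_up(candidate, CRASH_ALIGN));
	return 0;
}

With the macro moved into asm/kexec.h, both allocation paths in reserve_crashkernel() request the same CRASH_ALIGN alignment instead of one of them hard-coding SZ_1M.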