From: Mike Rapoport <rppt@xxxxxxxxxxxxx>

The checks for the crashkernel command line parameter and for the presence
of CONFIG_ZONE_DMA[32] in mmu::map_mem() are not necessary, because
crashk_res.end would be set by the time map_mem() runs only if
reserve_crashkernel() was called from arm64_memblock_init() and only if
there was a proper crashkernel parameter in the command line.

Leave only the check that crashk_res.end is non-zero to decide whether
crash kernel memory should be mapped with base pages.

Signed-off-by: Mike Rapoport <rppt@xxxxxxxxxxxxx>
---
 arch/arm64/mm/mmu.c | 44 ++++++++++++--------------------------------
 1 file changed, 12 insertions(+), 32 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 83f2f18f7f34..fa23cfa6b772 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -502,21 +502,6 @@ void __init mark_linear_text_alias_ro(void)
 			    PAGE_KERNEL_RO);
 }
 
-static bool crash_mem_map __initdata;
-
-static int __init enable_crash_mem_map(char *arg)
-{
-	/*
-	 * Proper parameter parsing is done by reserve_crashkernel(). We only
-	 * need to know if the linear map has to avoid block mappings so that
-	 * the crashkernel reservations can be unmapped later.
-	 */
-	crash_mem_map = true;
-
-	return 0;
-}
-early_param("crashkernel", enable_crash_mem_map);
-
 static void __init map_mem(pgd_t *pgdp)
 {
 	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
@@ -547,11 +532,9 @@ static void __init map_mem(pgd_t *pgdp)
 	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
 
 #ifdef CONFIG_KEXEC_CORE
-	if (crash_mem_map && !have_zone_dma()) {
-		if (crashk_res.end)
-			memblock_mark_nomap(crashk_res.start,
-					    resource_size(&crashk_res));
-	}
+	if (crashk_res.end)
+		memblock_mark_nomap(crashk_res.start,
+				    resource_size(&crashk_res));
 #endif
 
 	/* map all the memory banks */
@@ -582,20 +565,17 @@ static void __init map_mem(pgd_t *pgdp)
 	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
 
 	/*
-	 * Use page-level mappings here so that we can shrink the region
-	 * in page granularity and put back unused memory to buddy system
-	 * through /sys/kernel/kexec_crash_size interface.
+	 * Use page-level mappings here so that we can protect crash kernel
+	 * memory to allow post-mortem analysis when things go awry.
	 */
 #ifdef CONFIG_KEXEC_CORE
-	if (crash_mem_map && !have_zone_dma()) {
-		if (crashk_res.end) {
-			__map_memblock(pgdp, crashk_res.start,
-				       crashk_res.end + 1,
-				       PAGE_KERNEL,
-				       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
-			memblock_clear_nomap(crashk_res.start,
-					     resource_size(&crashk_res));
-		}
+	if (crashk_res.end) {
+		__map_memblock(pgdp, crashk_res.start,
+			       crashk_res.end + 1,
+			       PAGE_KERNEL,
+			       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
+		memblock_clear_nomap(crashk_res.start,
+				     resource_size(&crashk_res));
 	}
 #endif
 }
-- 
2.35.3
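
For readers outside the arm64 tree, here is a minimal, userspace-only C sketch
of the invariant the commit message relies on: crashk_res.end stays zero unless
reserve_crashkernel() has parsed a valid crashkernel= parameter and reserved a
region, so a single crashk_res.end check in map_mem() is sufficient. This is a
toy model, not kernel code: the struct layout, the fake base address, and the
simplified parameter parsing below are invented for illustration, and only the
identifier names mirror the kernel's.

#include <stdio.h>

/* Simplified stand-in for the kernel's struct resource. */
struct resource {
	unsigned long start;
	unsigned long end;
};

/* Like the kernel's crashk_res: zero-initialized, so .end == 0 by default. */
static struct resource crashk_res;

/*
 * Stand-in for reserve_crashkernel(): only a well-formed "crashkernel=<N>M"
 * parameter results in a non-zero crashk_res.end.
 */
static void reserve_crashkernel(const char *cmdline)
{
	unsigned long size;

	if (!cmdline || sscanf(cmdline, "crashkernel=%luM", &size) != 1 || !size)
		return;	/* missing or malformed parameter: crashk_res stays zeroed */

	crashk_res.start = 0x80000000UL;	/* arbitrary base for the model */
	crashk_res.end = crashk_res.start + size * 1024 * 1024 - 1;
}

/* Stand-in for map_mem(): the single crashk_res.end check decides the mapping. */
static void map_mem(void)
{
	if (crashk_res.end)
		printf("map [%#lx-%#lx] with base pages (no block mappings)\n",
		       crashk_res.start, crashk_res.end);
	else
		printf("no crash kernel reservation: block mappings are fine\n");
}

int main(void)
{
	reserve_crashkernel("crashkernel=256M");	/* try NULL or garbage too */
	map_mem();
	return 0;
}

Running the model with a valid parameter prints the base-page mapping branch;
with NULL or a malformed string, crashk_res.end remains zero and map_mem()
takes the block-mapping branch, which is the whole point of dropping the extra
crash_mem_map and zone checks.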