Soon, the relocation function will share the same page with the EL2
vectors. Add the offset of arm64_relocate_new_kernel within this page,
and also the total size of the relocation code, which will include both
the function and the EL2 vectors.

Signed-off-by: Pavel Tatashin <pasha.tatashin@xxxxxxxxxx>
---
 arch/arm64/include/asm/kexec.h      |  7 +++++++
 arch/arm64/kernel/machine_kexec.c   | 13 ++++---------
 arch/arm64/kernel/relocate_kernel.S | 13 ++++++++-----
 3 files changed, 19 insertions(+), 14 deletions(-)

diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index 189dce24f4cb..8cad34e7a9d9 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -90,6 +90,13 @@ static inline void crash_prepare_suspend(void) {}
 static inline void crash_post_resume(void) {}
 #endif
 
+#if defined(CONFIG_KEXEC_CORE)
+/* The beginning and size of relocation code to stage 2 kernel */
+extern const unsigned long kexec_relocate_code_size;
+extern const unsigned char kexec_relocate_code_start[];
+extern const unsigned long kexec_kern_reloc_offset;
+#endif
+
 /*
  * kern_reloc_arg is passed to kernel relocation function as an argument.
  * head		kimage->head, allows to traverse through relocation segments.
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 5f1211f3aeef..5e7b1f6569c4 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -23,10 +23,6 @@
 
 #include "cpu-reset.h"
 
-/* Global variables for the arm64_relocate_new_kernel routine. */
-extern const unsigned char arm64_relocate_new_kernel[];
-extern const unsigned long arm64_relocate_new_kernel_size;
-
 /**
  * kexec_image_info - For debugging output.
  */
@@ -82,9 +78,8 @@ int machine_kexec_post_load(struct kimage *kimage)
 	if (!kern_reloc_arg)
 		return -ENOMEM;
 
-	memcpy(reloc_code, arm64_relocate_new_kernel,
-	       arm64_relocate_new_kernel_size);
-	kimage->arch.kern_reloc = __pa(reloc_code);
+	memcpy(reloc_code, kexec_relocate_code_start, kexec_relocate_code_size);
+	kimage->arch.kern_reloc = __pa(reloc_code) + kexec_kern_reloc_offset;
 	kimage->arch.kern_reloc_arg = __pa(kern_reloc_arg);
 	kern_reloc_arg->head = kimage->head;
 	kern_reloc_arg->entry_addr = kimage->start;
@@ -190,7 +185,7 @@ void machine_kexec(struct kimage *kimage)
 		"Some CPUs may be stale, kdump will be unreliable.\n");
 
 	/* Flush the reboot_code_buffer in preparation for its execution. */
-	__flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
+	__flush_dcache_area(reboot_code_buffer, kexec_relocate_code_size);
 
 	/*
 	 * Although we've killed off the secondary CPUs, we don't update
@@ -199,7 +194,7 @@
 	 * the offline CPUs. Therefore, we must use the __* variant here.
 	 */
 	__flush_icache_range((uintptr_t)reboot_code_buffer,
-			     arm64_relocate_new_kernel_size);
+			     kexec_relocate_code_size);
 
 	/* Flush the kimage list and its buffers. */
 	kexec_list_flush(kimage);
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index 22ccdcb106d3..3c05220a79ab 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -14,6 +14,8 @@
 #include <asm/page.h>
 #include <asm/sysreg.h>
 
+GLOBAL(kexec_relocate_code_start)
+
 /*
  * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
  *
@@ -86,13 +88,14 @@ ENTRY(arm64_relocate_new_kernel)
 .ltorg
 END(arm64_relocate_new_kernel)
 
-.Lcopy_end:
+.Lkexec_relocate_code_end:
 .org	KEXEC_CONTROL_PAGE_SIZE
 .align 3	/* To keep the 64-bit values below naturally aligned. */
 /*
- * arm64_relocate_new_kernel_size - Number of bytes to copy to the
+ * kexec_relocate_code_size - Number of bytes to copy to the
  * control_code_page.
  */
-.globl arm64_relocate_new_kernel_size
-arm64_relocate_new_kernel_size:
-	.quad	.Lcopy_end - arm64_relocate_new_kernel
+GLOBAL(kexec_relocate_code_size)
+	.quad	.Lkexec_relocate_code_end - kexec_relocate_code_start
+GLOBAL(kexec_kern_reloc_offset)
+	.quad	arm64_relocate_new_kernel - kexec_relocate_code_start
-- 
2.24.0
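
For readers skimming the diff, here is a minimal, illustrative sketch of how the
three newly exported symbols are meant to be used together. It mirrors what the
machine_kexec_post_load() hunk above does; the helper name
stage_kexec_reloc_code() is hypothetical, and the __pa() conversion performed by
the real code is omitted for brevity.

```c
/*
 * Illustrative sketch only -- not part of the patch.
 */
#include <linux/string.h>
#include <asm/kexec.h>

static void *stage_kexec_reloc_code(void *reloc_code)
{
	/*
	 * Copy the whole relocation block: arm64_relocate_new_kernel plus,
	 * soon, the EL2 vectors that will share the same page.
	 */
	memcpy(reloc_code, kexec_relocate_code_start, kexec_relocate_code_size);

	/*
	 * The entry point is no longer the start of the copied block;
	 * arm64_relocate_new_kernel sits kexec_kern_reloc_offset bytes in.
	 */
	return (char *)reloc_code + kexec_kern_reloc_offset;
}
```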