From: David Woodhouse <dwmw@xxxxxxxxxxxx>

This currently calls set_memory_x() from machine_kexec_prepare() just
like the 32-bit version does. That's actually a bit earlier than I'd
like, as it leaves the page RWX for the whole time the image is merely
*loaded*.

Subsequent commits will eliminate all the writes to the page between
the point it's marked executable in machine_kexec_prepare() and the
time that relocate_kernel() is running and has switched to the identmap
%cr3, so that it can be ROX. But that can't happen until it's moved to
the .data section of the kernel, and *that* can't happen until we start
executing the copy instead of executing it in place in the kernel
.text. So break the circular dependency in those commits by letting it
be RWX for now.

Signed-off-by: David Woodhouse <dwmw@xxxxxxxxxxxx>
---
 arch/x86/kernel/machine_kexec_64.c   | 28 +++++++++++++++++++++-------
 arch/x86/kernel/relocate_kernel_64.S |  5 ++++-
 2 files changed, 25 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 9c9ac606893e..3aeb225a0b36 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -156,8 +156,8 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
 	pmd_t *pmd;
 	pte_t *pte;
 
-	vaddr = (unsigned long)relocate_kernel;
-	paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
+	vaddr = (unsigned long)page_address(image->control_code_page) + PAGE_SIZE;
+	paddr = __pa(vaddr);
 	pgd += pgd_index(vaddr);
 	if (!pgd_present(*pgd)) {
 		p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
@@ -296,6 +296,7 @@ static void load_segments(void)
 
 int machine_kexec_prepare(struct kimage *image)
 {
+	void *control_page = page_address(image->control_code_page) + PAGE_SIZE;
 	unsigned long start_pgtable;
 	int result;
 
@@ -307,11 +308,17 @@ int machine_kexec_prepare(struct kimage *image)
 	if (result)
 		return result;
 
+	set_memory_x((unsigned long)control_page, 1);
+
 	return 0;
 }
 
 void machine_kexec_cleanup(struct kimage *image)
 {
+	void *control_page = page_address(image->control_code_page) + PAGE_SIZE;
+
+	set_memory_nx((unsigned long)control_page, 1);
+
 	free_transition_pgtable(image);
 }
 
@@ -321,6 +328,11 @@ void machine_kexec_cleanup(struct kimage *image)
  */
 void machine_kexec(struct kimage *image)
 {
+	unsigned long (*relocate_kernel_ptr)(unsigned long indirection_page,
+					     unsigned long page_list,
+					     unsigned long start_address,
+					     unsigned int preserve_context,
+					     unsigned int host_mem_enc_active);
 	unsigned long page_list[PAGES_NR];
 	unsigned int host_mem_enc_active;
 	int save_ftrace_enabled;
@@ -369,6 +381,8 @@ void machine_kexec(struct kimage *image)
 		page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
 						<< PAGE_SHIFT);
 
+	relocate_kernel_ptr = control_page;
+
 	/*
 	 * The segment registers are funny things, they have both a
 	 * visible and an invisible part. Whenever the visible part is
@@ -388,11 +402,11 @@ void machine_kexec(struct kimage *image)
 	native_gdt_invalidate();
 
 	/* now call it */
-	image->start = relocate_kernel((unsigned long)image->head,
-				       (unsigned long)page_list,
-				       image->start,
-				       image->preserve_context,
-				       host_mem_enc_active);
+	image->start = relocate_kernel_ptr((unsigned long)image->head,
+					   (unsigned long)page_list,
+					   image->start,
+					   image->preserve_context,
+					   host_mem_enc_active);
 
 #ifdef CONFIG_KEXEC_JUMP
 	if (image->preserve_context)
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 92d5dbed3097..70539b1b9545 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -39,6 +39,7 @@
 #define CP_PA_TABLE_PAGE	DATA(0x20)
 #define CP_PA_SWAP_PAGE		DATA(0x28)
 #define CP_PA_BACKUP_PAGES_MAP	DATA(0x30)
+#define CP_VA_CONTROL_PAGE	DATA(0x38)
 
 	.text
 	.align PAGE_SIZE
@@ -99,6 +100,7 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
 	movq	%r9, CP_PA_TABLE_PAGE(%r11)
 	movq	%r10, CP_PA_SWAP_PAGE(%r11)
 	movq	%rdi, CP_PA_BACKUP_PAGES_MAP(%r11)
+	movq	%r11, CP_VA_CONTROL_PAGE(%r11)
 
 	/* Save the preserve_context to %r11 as swap_pages clobbers %rcx. */
 	movq	%rcx, %r11
@@ -235,7 +237,8 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
 	movq	%rax, %cr3
 	lea	PAGE_SIZE(%r8), %rsp
 	call	swap_pages
-	movq	$virtual_mapped, %rax
+	movq	CP_VA_CONTROL_PAGE(%r8), %rax
+	addq	$(virtual_mapped - relocate_kernel), %rax
 	pushq	%rax
 	ANNOTATE_UNRET_SAFE
 	ret
-- 
2.47.0
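As an aside for anyone following the series: the core trick is "copy the
code to another page, then call the copy through a pointer rather than
through the link-time symbol", with addresses of labels inside the copy
computed as base + (label - relocate_kernel). Below is a minimal
userspace sketch of the same pattern, assuming a typical x86-64 Linux
toolchain where a small leaf function compiles to position-independent
code; add_one, blob_size and the mmap()/mprotect() scaffolding are
invented for the illustration, not taken from the kernel side.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* A tiny leaf function with no relocations, so a raw byte copy of it
 * remains valid at another address. */
static int add_one(int x)
{
	return x + 1;
}

int main(void)
{
	size_t blob_size = 64;	/* generous upper bound on the code size */

	/* Stage the copy in a plain read-write page... */
	void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED)
		return 1;

	memcpy(page, (void *)add_one, blob_size);

	/* ...then flip it to read+execute with no write: the "ROX" end
	 * state the commit message is working towards. */
	if (mprotect(page, 4096, PROT_READ | PROT_EXEC))
		return 1;

	/* Call the copy, not the original symbol. */
	int (*copy)(int) = (int (*)(int))page;
	printf("%d\n", copy(41));	/* prints 42, run from the copy */

	munmap(page, 4096);
	return 0;
}

The mprotect() step is the userspace analogue of the ROX goal; the
patch itself keeps the control page RWX for now purely to break the
ordering dependency, and the addq fixup on the virtual_mapped return
address is the same base-plus-offset idea in assembly.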