From: David Woodhouse <dwmw@xxxxxxxxxxxx>

The swap_pages function expects the swap page to be in %r10, but there
was no documentation to that effect. Once upon a time the setup code
used to load its value from a kernel virtual address and save it to an
address which is accessible in the identity-mapped page tables, and
*happened* to use %r10 to do so, with no comment that it was left
there on *purpose* instead of just being a scratch register. Once that
was no longer necessary, %r10 just holds whatever the kernel happened
to leave in it.

Now that the original value passed by the kernel is accessible via
%rip-relative addressing, load directly from there instead of using
%r10 for it. But document the other parameters that the swap_pages
function *does* expect in registers.

Fixes: b3adabae8a96 ("x86/kexec: Drop page_list argument from relocate_kernel()")
Signed-off-by: David Woodhouse <dwmw@xxxxxxxxxxxx>
---
 arch/x86/kernel/relocate_kernel_64.S | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 1a52e4339c1d..0d6fce1e0a32 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -264,6 +264,10 @@ SYM_CODE_END(virtual_mapped)
 	/* Do the copies */
 SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
 	UNWIND_HINT_END_OF_STACK
+	/*
+	 * %rdi indirection page
+	 * %r11 preserve_context
+	 */
 	movq	%rdi, %rcx	/* Put the indirection_page in %rcx */
 	xorl	%edi, %edi
 	xorl	%esi, %esi
@@ -302,7 +306,7 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
 	jz	.Lnoswap
 
 	/* copy source page to swap page */
-	movq	%r10, %rdi
+	movq	kexec_pa_swap_page(%rip), %rdi
 	movl	$512, %ecx
 	rep ; movsq
 
@@ -314,7 +318,7 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
 
 	/* copy swap page to destination page */
 	movq	%rdx, %rdi
-	movq	%r10, %rsi
+	movq	kexec_pa_swap_page(%rip), %rsi
.Lnoswap:
 	movl	$512, %ecx
 	rep ; movsq
-- 
2.47.0
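
As an aside for readers less familiar with the addressing mode in
question, below is a minimal standalone sketch (not part of the patch;
the symbol example_pa_swap_page and the label load_swap_page are made
up for illustration) contrasting the two approaches. A %rip-relative
load fetches the value at a fixed offset from the instruction itself,
so it keeps working wherever the code and its adjacent data have been
copied, whereas the register-based variant silently depends on the
caller having staged the value in %r10:

	.text
	.globl	load_swap_page
load_swap_page:
	/* old approach: trust that the caller left the address in %r10 */
	movq	%r10, %rdi
	/* new approach: load it %rip-relative from a nearby symbol */
	movq	example_pa_swap_page(%rip), %rdi
	ret

example_pa_swap_page:
	.quad	0		/* filled in earlier by setup code */

Keeping the .quad in the same section as the code that reads it mirrors
what relocate_kernel_64.S does with kexec_pa_swap_page, so the data
travels with the code when it is copied out and runs from the
identity-mapped page tables.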