Get the new relocation base address inside adjust_got, and save it so
that the caller doesn't have to figure it out.

Signed-off-by: Arvind Sankar <nivedita@xxxxxxxxxxxx>
---
 arch/x86/boot/compressed/head_64.S | 58 +++++++++++++-----------------
 1 file changed, 24 insertions(+), 34 deletions(-)

diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 1f1f6c8139b3..1464d8d0ec66 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -312,20 +312,7 @@ SYM_CODE_START(startup_64)
 	/*
 	 * paging_prepare() and cleanup_trampoline() below can have GOT
 	 * references. Adjust the table with address we are running at.
-	 *
-	 * Zero RAX for adjust_got: the GOT was not adjusted before;
-	 * there's no adjustment to undo.
 	 */
-	xorq	%rax, %rax
-
-	/*
-	 * Calculate the address the binary is loaded at and use it as
-	 * a GOT adjustment.
-	 */
-	call	1f
-1:	popq	%rdi
-	subq	$1b, %rdi
-
 	call	.Ladjust_got
 
 	/*
@@ -412,21 +399,6 @@ trampoline_return:
 	pushq	$0
 	popfq
 
-	/*
-	 * Previously we've adjusted the GOT with address the binary was
-	 * loaded at. Now we need to re-adjust for relocation address.
-	 *
-	 * Calculate the address the binary is loaded at, so that we can
-	 * undo the previous GOT adjustment.
-	 */
-	call	1f
-1:	popq	%rax
-	subq	$1b, %rax
-
-	/* The new adjustment is the relocation address */
-	movq	%rbx, %rdi
-	call	.Ladjust_got
-
 	/*
 	 * Copy the compressed kernel to the end of our buffer
 	 * where decompression in place becomes safe.
@@ -475,6 +447,12 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 	shrq	$3, %rcx
 	rep	stosq
 
+/*
+ * Previously we've adjusted the GOT with address the binary was
+ * loaded at. Now we need to re-adjust for relocation address.
+ */
+	call	.Ladjust_got
+
 	/*
 	 * Do the extraction, and jump to the new kernel..
 	 */
@@ -497,23 +475,33 @@ SYM_FUNC_END(.Lrelocated)
 /*
  * Adjust the global offset table
  *
- * RAX is the previous adjustment of the table to undo (use 0 if it's the
- * first time we touch GOT).
- * RDI is the new adjustment to apply.
+ * The relocation base address calculation uses RIP-relative addressing, so if
+ * the kernel is being relocated to a new address, this function must be called
+ * after execution has been passed to the new location. We keep track of the
+ * relocation address so that it can be backed out if this function is called
+ * repeatedly.
  */
-.Ladjust_got:
+
+SYM_FUNC_START_LOCAL(.Ladjust_got)
+	/* Get the new relocation base address */
+	leaq	startup_32(%rip), %rax
+	/* Backout the previous relocation address if any */
+	subq	got_relocation_base(%rip), %rax
+	/* Store the relocation base address for future reference */
+	addq	%rax, got_relocation_base(%rip)
+
 	/* Walk through the GOT adding the address to the entries */
 	leaq	_got(%rip), %rdx
 	leaq	_egot(%rip), %rcx
 1:	cmpq	%rcx, %rdx
 	jae	2f
-	subq	%rax, (%rdx)	/* Undo previous adjustment */
-	addq	%rdi, (%rdx)	/* Apply the new adjustment */
+	addq	%rax, (%rdx)	/* Apply the incremental adjustment */
 	addq	$8, %rdx
 	jmp	1b
 2:	ret
+SYM_FUNC_END(.Ladjust_got)
 
 .code32
 /*
@@ -628,6 +616,8 @@ SYM_DATA_START_LOCAL(gdt)
 	.quad	0x0000000000000000	/* TS continued */
 SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)
 
+SYM_DATA_LOCAL(got_relocation_base, .quad 0)
+
 #ifdef CONFIG_EFI_MIXED
 SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0)
 SYM_DATA(efi_is64, .byte 1)
-- 
2.24.1