From: Ard Biesheuvel <ardb@xxxxxxxxxx>

Fix up a couple of occurrences in the x86_64 entry code where we take
the absolute address of a symbol even though RIP-relative addressing
would work just as well. This avoids relocation fixups at boot for
these quantities.

Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
---
 arch/x86/entry/calling.h  |  9 +++++----
 arch/x86/entry/entry_64.S | 12 +++++++-----
 2 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index ea81770629ee..099da5aaf929 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -375,8 +375,8 @@ For 32-bit we have the following conventions - kernel is built with
 .endm

 .macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req
+	GET_PERCPU_BASE \scratch_reg \save_reg
 	rdgsbase \save_reg
-	GET_PERCPU_BASE \scratch_reg
 	wrgsbase \scratch_reg
 .endm

@@ -412,15 +412,16 @@ For 32-bit we have the following conventions - kernel is built with
  * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
  * while running KVM's run loop.
  */
-.macro GET_PERCPU_BASE reg:req
+.macro GET_PERCPU_BASE reg:req scratch:req
 	LOAD_CPU_AND_NODE_SEG_LIMIT \reg
 	andq	$VDSO_CPUNODE_MASK, \reg
-	movq	__per_cpu_offset(, \reg, 8), \reg
+	leaq	__per_cpu_offset(%rip), \scratch
+	movq	(\scratch, \reg, 8), \reg
 .endm

 #else

-.macro GET_PERCPU_BASE reg:req
+.macro GET_PERCPU_BASE reg:req scratch:req
 	movq	pcpu_unit_offsets(%rip), \reg
 .endm

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 1b5be07f8669..6509e12b6329 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1038,7 +1038,8 @@ SYM_CODE_START(error_entry)
 	movl	%ecx, %eax			/* zero extend */
 	cmpq	%rax, RIP+8(%rsp)
 	je	.Lbstep_iret
-	cmpq	$.Lgs_change, RIP+8(%rsp)
+	leaq	.Lgs_change(%rip), %rcx
+	cmpq	%rcx, RIP+8(%rsp)
 	jne	.Lerror_entry_done_lfence

 	/*
@@ -1250,10 +1251,10 @@ SYM_CODE_START(asm_exc_nmi)
 	 * the outer NMI.
 	 */

-	movq	$repeat_nmi, %rdx
+	leaq	repeat_nmi(%rip), %rdx
 	cmpq	8(%rsp), %rdx
 	ja	1f
-	movq	$end_repeat_nmi, %rdx
+	leaq	end_repeat_nmi(%rip), %rdx
 	cmpq	8(%rsp), %rdx
 	ja	nested_nmi_out
 1:
@@ -1307,7 +1308,8 @@ nested_nmi:
 	pushq	%rdx
 	pushfq
 	pushq	$__KERNEL_CS
-	pushq	$repeat_nmi
+	leaq	repeat_nmi(%rip), %rdx
+	pushq	%rdx

 	/* Put stack back */
 	addq	$(6*8), %rsp
@@ -1346,7 +1348,7 @@ first_nmi:
 	addq	$8, (%rsp)	/* Fix up RSP */
 	pushfq			/* RFLAGS */
 	pushq	$__KERNEL_CS	/* CS */
-	pushq	$1f		/* RIP */
+	pushq	1f@GOTPCREL(%rip)	/* RIP */
 	iretq			/* continues at repeat_nmi below */
 	UNWIND_HINT_IRET_REGS
 1:
--
2.46.0.792.g87dc391469-goog
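
P.S. For anyone reviewing who wants a concrete picture of why the
RIP-relative form avoids boot-time fixups, here is a minimal
standalone sketch (not part of the patch; my_symbol is a made-up
label used only for illustration):

	/* Absolute reference: under -mcmodel=kernel the assembler
	 * emits a R_X86_64_32S relocation, which the boot code has
	 * to patch whenever the kernel is loaded at an address other
	 * than the one it was linked at.
	 */
	movq	$my_symbol, %rax

	/* RIP-relative reference: the assembler emits a
	 * R_X86_64_PC32 relocation that the linker resolves to a
	 * fixed offset from the instruction itself, so no runtime
	 * fixup is needed.
	 */
	leaq	my_symbol(%rip), %rax

Both forms leave the address of my_symbol in %rax; only the first
one ends up in the kernel's relocation table.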