The following commit has been merged into the x86/entry branch of tip:

Commit-ID:     4b1f63084d3ebd14c3ef2cd4e8732c25bcd8381d
Gitweb:        https://git.kernel.org/tip/4b1f63084d3ebd14c3ef2cd4e8732c25bcd8381d
Author:        Thomas Gleixner <tglx@xxxxxxxxxxxxx>
AuthorDate:    Mon, 01 Jun 2020 21:33:56 +02:00
Committer:     Thomas Gleixner <tglx@xxxxxxxxxxxxx>
CommitterDate: Mon, 01 Jun 2020 23:31:48 +02:00

x86/xen: Unbreak hypervisor callback on 32bit

The IDTENTRY conversion broke XEN on 32bit:

ld: arch/x86/xen/setup.o: in function `register_callback':
>> arch/x86/xen/setup.c:940: undefined reference to `xen_asm_exc_xen_hypervisor_callback'

The reason is that 32bit does not have the extra indirection of 64bit via
the XEN trampolines and 32bit never emitted an actual IDT entry function
for this.

 - Add and use IDTENTRY_XENCB so the ASM variant emits an entry point only
   for 64 bit.

 - Rename the 32bit ASM function to match the 64bit trampoline function
   name.

Fixup a few comments as well.

Fixes: 66a07b44e765 ("x86/entry: Switch XEN/PV hypercall entry to IDTENTRY")
Reported-by: kbuild test robot <lkp@xxxxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Juergen Gross <jgross@xxxxxxxx>
---
 arch/x86/entry/entry_32.S       |  7 +++++--
 arch/x86/include/asm/idtentry.h | 24 +++++++++++++++++++++++-
 arch/x86/xen/xen-asm_32.S       |  6 +++---
 3 files changed, 31 insertions(+), 6 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 96fa462..2d29f77 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -1167,8 +1167,11 @@ SYM_CODE_END(native_iret)
 #ifdef CONFIG_XEN_PV
 /*
  * See comment in entry_64.S for further explanation
+ *
+ * Note: This is not an actual IDT entry point. It's a XEN specific entry
+ * point and therefore named to match the 64-bit trampoline counterpart.
  */
-SYM_FUNC_START(exc_xen_hypervisor_callback)
+SYM_FUNC_START(xen_asm_exc_xen_hypervisor_callback)
 	/*
 	 * Check to see if we got the event in the critical
 	 * region in xen_iret_direct, after we've reenabled
@@ -1189,7 +1192,7 @@ SYM_FUNC_START(exc_xen_hypervisor_callback)
 	mov	%esp, %eax
 	call	xen_pv_evtchn_do_upcall
 	jmp	handle_exception_return
-SYM_FUNC_END(exc_xen_hypervisor_callback)
+SYM_FUNC_END(xen_asm_exc_xen_hypervisor_callback)
 
 /*
  * Hypervisor uses this for application faults while it executes.
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index f8e2737..d203c54 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -283,6 +283,22 @@ __visible noinstr void func(struct pt_regs *regs)			\
 									\
 static __always_inline void __##func(struct pt_regs *regs)
 
+/**
+ * DECLARE_IDTENTRY_XENCB - Declare functions for XEN HV callback entry point
+ * @vector:	Vector number (ignored for C)
+ * @func:	Function name of the entry point
+ *
+ * Declares three functions:
+ * - The ASM entry point: asm_##func
+ * - The XEN PV trap entry point: xen_##func (maybe unused)
+ * - The C handler called from the ASM entry point
+ *
+ * Maps to DECLARE_IDTENTRY(). Distinct entry point to handle the 32/64-bit
+ * difference
+ */
+#define DECLARE_IDTENTRY_XENCB(vector, func)				\
+	DECLARE_IDTENTRY(vector, func)
+
 #ifdef CONFIG_X86_64
 /**
  * DECLARE_IDTENTRY_IST - Declare functions for IST handling IDT entry points
@@ -432,6 +448,9 @@ __visible noinstr void func(struct pt_regs *regs,			\
 # define DECLARE_IDTENTRY_DF(vector, func)				\
 	idtentry_df vector asm_##func func
 
+# define DECLARE_IDTENTRY_XENCB(vector, func)				\
+	DECLARE_IDTENTRY(vector, func)
+
 #else
 # define DECLARE_IDTENTRY_MCE(vector, func)				\
 	DECLARE_IDTENTRY(vector, func)
@@ -442,6 +461,9 @@ __visible noinstr void func(struct pt_regs *regs,			\
 /* No ASM emitted for DF as this goes through a C shim */
 # define DECLARE_IDTENTRY_DF(vector, func)
 
+/* No ASM emitted for XEN hypervisor callback */
+# define DECLARE_IDTENTRY_XENCB(vector, func)
+
 #endif
 
 /* No ASM code emitted for NMI */
@@ -558,7 +580,7 @@ DECLARE_IDTENTRY_XEN(X86_TRAP_DB,	debug);
 DECLARE_IDTENTRY_DF(X86_TRAP_DF,	exc_double_fault);
 
 #ifdef CONFIG_XEN_PV
-DECLARE_IDTENTRY(X86_TRAP_OTHER,	exc_xen_hypervisor_callback);
+DECLARE_IDTENTRY_XENCB(X86_TRAP_OTHER,	exc_xen_hypervisor_callback);
 #endif
 
 /* Device interrupts common/spurious */
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index d0ff2dc..4757cec 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -113,7 +113,7 @@ iret_restore_end:
 	 * Events are masked, so jumping out of the critical region is
 	 * OK.
 	 */
-	je asm_exc_xen_hypervisor_callback
+	je xen_asm_exc_xen_hypervisor_callback
 
 1:	iret
 xen_iret_end_crit:
@@ -127,7 +127,7 @@ SYM_CODE_END(xen_iret)
 .globl xen_iret_start_crit, xen_iret_end_crit
 
 /*
- * This is called by exc_xen_hypervisor_callback in entry_32.S when it sees
+ * This is called by xen_asm_exc_xen_hypervisor_callback in entry_32.S when it sees
  * that the EIP at the time of interrupt was between
  * xen_iret_start_crit and xen_iret_end_crit.
  *
@@ -144,7 +144,7 @@ SYM_CODE_END(xen_iret)
  * eflags    }
  * cs        }  nested exception info
  * eip       }
- * return address : (into asm_exc_xen_hypervisor_callback)
+ * return address : (into xen_asm_exc_xen_hypervisor_callback)
  *
  * In order to deliver the nested exception properly, we need to discard the
  * nested exception frame such that when we handle the exception, we do it
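For quick reference, a condensed sketch of what the new macro boils down to
in the three build contexts. This is only a restatement of the hunks above,
not the literal header layout; all symbol names are taken from the patch
itself, and the DECLARE_IDTENTRY() expansion (asm_##func, xen_##func and the
C handler) is assumed from the new kernel-doc comment rather than spelled
out here:

	/* C declarations (32 and 64 bit): identical to DECLARE_IDTENTRY() */
	#define DECLARE_IDTENTRY_XENCB(vector, func)		\
		DECLARE_IDTENTRY(vector, func)

	/* 64-bit ASM: the DECLARE_IDTENTRY() ASM variant emits the actual
	 * asm_exc_xen_hypervisor_callback entry point for the vector */
	# define DECLARE_IDTENTRY_XENCB(vector, func)		\
		DECLARE_IDTENTRY(vector, func)

	/* 32-bit ASM: nothing is emitted; the hand-written (and now renamed)
	 * xen_asm_exc_xen_hypervisor_callback in entry_32.S is the entry
	 * point, which is the symbol register_callback() in
	 * arch/x86/xen/setup.c references */
	# define DECLARE_IDTENTRY_XENCB(vector, func)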