This is a note to let you know that I've just added the patch titled

    x86/static_call: Add support for Jcc tail-calls

to the 5.15-stable tree which can be found at:

	http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     x86-static_call-add-support-for-jcc-tail-calls.patch
and it can be found in the queue-5.15 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.


>From stable+bounces-27580-greg=kroah.com@xxxxxxxxxxxxxxx Wed Mar 13 11:43:43 2024
From: Thadeu Lima de Souza Cascardo <cascardo@xxxxxxxxxx>
Date: Wed, 13 Mar 2024 07:42:55 -0300
Subject: x86/static_call: Add support for Jcc tail-calls
To: stable@xxxxxxxxxxxxxxx
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>, Thomas Gleixner <tglx@xxxxxxxxxxxxx>, Ingo Molnar <mingo@xxxxxxxxxx>, kernel-dev@xxxxxxxxxx
Message-ID: <20240313104255.1083365-6-cascardo@xxxxxxxxxx>

From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>

commit 923510c88d2b7d947c4217835fd9ca6bd65cc56c upstream.

Clang likes to create conditional tail calls like:

  0000000000000350 <amd_pmu_add_event>:
  350:       0f 1f 44 00 00          nopl   0x0(%rax,%rax,1)  351: R_X86_64_NONE      __fentry__-0x4
  355:       48 83 bf 20 01 00 00 00 cmpq   $0x0,0x120(%rdi)
  35d:       0f 85 00 00 00 00       jne    363 <amd_pmu_add_event+0x13>     35f: R_X86_64_PLT32     __SCT__amd_pmu_branch_add-0x4
  363:       e9 00 00 00 00          jmp    368 <amd_pmu_add_event+0x18>     364: R_X86_64_PLT32     __x86_return_thunk-0x4

Where 0x35d is a static call site that's turned into a conditional
tail-call using the Jcc class of instructions.

Teach the in-line static call text patching about this.

Notably, since there is no conditional-ret, in that case patch the Jcc
to point at an empty stub function that does the ret -- or the return
thunk when needed.

Reported-by: "Erhard F." <erhard_f@xxxxxxxxxxx>
Change-Id: I99c8fc3f721e5d1c74f06710b38d4bac5230303a
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Reviewed-by: Masami Hiramatsu (Google) <mhiramat@xxxxxxxxxx>
Link: https://lore.kernel.org/r/Y9Kdg9QjHkr9G5b5@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
[cascardo: __static_call_validate didn't have the bool tramp argument]
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/x86/kernel/static_call.c |   50 +++++++++++++++++++++++++++++++++++++++---
 1 file changed, 47 insertions(+), 3 deletions(-)

--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -9,6 +9,7 @@ enum insn_type {
 	NOP = 1,  /* site cond-call */
 	JMP = 2,  /* tramp / site tail-call */
 	RET = 3,  /* tramp / site cond-tail-call */
+	JCC = 4,
 };
 
 /*
@@ -25,12 +26,40 @@ static const u8 xor5rax[] = { 0x2e, 0x2e
 
 static const u8 retinsn[] = { RET_INSN_OPCODE, 0xcc, 0xcc, 0xcc, 0xcc };
 
+static u8 __is_Jcc(u8 *insn) /* Jcc.d32 */
+{
+	u8 ret = 0;
+
+	if (insn[0] == 0x0f) {
+		u8 tmp = insn[1];
+		if ((tmp & 0xf0) == 0x80)
+			ret = tmp;
+	}
+
+	return ret;
+}
+
+extern void __static_call_return(void);
+
+asm (".global __static_call_return\n\t"
+     ".type __static_call_return, @function\n\t"
+     ASM_FUNC_ALIGN "\n\t"
+     "__static_call_return:\n\t"
+     ANNOTATE_NOENDBR
+     ANNOTATE_RETPOLINE_SAFE
+     "ret; int3\n\t"
+     ".size __static_call_return, . - __static_call_return \n\t");
+
 static void __ref __static_call_transform(void *insn, enum insn_type type,
 					  void *func, bool modinit)
 {
 	const void *emulate = NULL;
 	int size = CALL_INSN_SIZE;
 	const void *code;
+	u8 op, buf[6];
+
+	if ((type == JMP || type == RET) && (op = __is_Jcc(insn)))
+		type = JCC;
 
 	switch (type) {
 	case CALL:
@@ -56,6 +85,20 @@ static void __ref __static_call_transfor
 		else
 			code = &retinsn;
 		break;
+
+	case JCC:
+		if (!func) {
+			func = __static_call_return;
+			if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+				func = x86_return_thunk;
+		}
+
+		buf[0] = 0x0f;
+		__text_gen_insn(buf+1, op, insn+1, func, 5);
+		code = buf;
+		size = 6;
+
+		break;
 	}
 
 	if (memcmp(insn, code, size) == 0)
@@ -67,13 +110,14 @@ static void __ref __static_call_transfor
 	text_poke_bp(insn, code, size, emulate);
 }
 
-static void __static_call_validate(void *insn, bool tail)
+static void __static_call_validate(u8 *insn, bool tail)
 {
-	u8 opcode = *(u8 *)insn;
+	u8 opcode = insn[0];
 
 	if (tail) {
 		if (opcode == JMP32_INSN_OPCODE ||
-		    opcode == RET_INSN_OPCODE)
+		    opcode == RET_INSN_OPCODE ||
+		    __is_Jcc(insn))
 			return;
 	} else {
 		if (opcode == CALL_INSN_OPCODE ||


Patches currently in stable-queue which might be from kroah.com@xxxxxxxxxxxxxxx are

queue-5.15/x86-rfds-mitigate-register-file-data-sampling-rfds.patch
queue-5.15/x86-entry_32-add-verw-just-before-userspace-transition.patch
queue-5.15/x86-bugs-add-asm-helpers-for-executing-verw.patch
queue-5.15/x86-asm-differentiate-between-code-and-function-alignment.patch
queue-5.15/x86-alternatives-teach-text_poke_bp-to-patch-jcc.d32-instructions.patch
queue-5.15/kvm-x86-export-rfds_no-and-rfds_clear-to-guests.patch
queue-5.15/x86-asm-add-_asm_rip-macro-for-x86-64-rip-suffix.patch
queue-5.15/x86-entry_64-add-verw-just-before-userspace-transition.patch
queue-5.15/x86-alternatives-introduce-int3_emulate_jcc.patch
queue-5.15/x86-mmio-disable-kvm-mitigation-when-x86_feature_clear_cpu_buf-is-set.patch
queue-5.15/x86-bugs-use-alternative-instead-of-mds_user_clear-static-key.patch
queue-5.15/documentation-hw-vuln-add-documentation-for-rfds.patch
queue-5.15/kvm-vmx-use-bt-jnc-i.e.-eflags.cf-to-select-vmresume-vs.-vmlaunch.patch
queue-5.15/kvm-vmx-move-verw-closer-to-vmentry-for-mds-mitigation.patch
queue-5.15/arch-introduce-config_function_alignment.patch
queue-5.15/x86-static_call-add-support-for-jcc-tail-calls.patch
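
[editor's illustration, not part of the patch or the stable queue]

For readers studying the change outside a kernel tree, here is a minimal,
self-contained user-space sketch of the two ideas the hunks above implement
with __is_Jcc() and __text_gen_insn(): recognizing a Jcc.d32 instruction
(0x0f 0x8<cc> followed by a rel32 displacement) and rewriting that
displacement to point at a new target. The helper names, buffer contents,
and target offset below are invented for illustration only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Jcc.d32 is encoded as 0x0f 0x8<cc> followed by a rel32 displacement. */
static uint8_t is_jcc_d32(const uint8_t *insn)
{
	if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
		return insn[1];	/* the 0x80..0x8f byte identifies the condition */
	return 0;
}

/* Rewrite the rel32 of a 6-byte Jcc.d32 at 'site' so it targets 'dest'. */
static void retarget_jcc_d32(uint8_t *site, const uint8_t *dest)
{
	/* rel32 is relative to the end of the 6-byte instruction */
	int32_t rel = (int32_t)(dest - (site + 6));

	memcpy(site + 2, &rel, sizeof(rel));
}

int main(void)
{
	/* "jne +0" (0f 85 00 00 00 00), like the call site at 0x35d above */
	uint8_t code[32] = { 0x0f, 0x85, 0x00, 0x00, 0x00, 0x00 };
	uint8_t op = is_jcc_d32(code);

	if (op) {
		int32_t rel;

		/* pretend the new branch target sits 16 bytes past the site */
		retarget_jcc_d32(code, code + 16);
		memcpy(&rel, code + 2, sizeof(rel));
		printf("Jcc opcode 0x%02x, new rel32 = %d\n", op, rel);
	}
	return 0;
}

Built with any C compiler (for example "cc -o jcc jcc.c", a hypothetical
file name), it prints the recognized condition opcode and the recomputed
displacement; the kernel patch does the same recognition and rewrite, but
emits the 6-byte replacement through text_poke_bp() and, when the branch
target is NULL, points the Jcc at a ret stub or the return thunk.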