The following commit has been merged into the x86/alternatives branch of tip:

Commit-ID:     d2408e043e7296017420aa5929b3bba4d5e61013
Gitweb:        https://git.kernel.org/tip/d2408e043e7296017420aa5929b3bba4d5e61013
Author:        Borislav Petkov (AMD) <bp@xxxxxxxxx>
AuthorDate:    Fri, 12 May 2023 14:05:11 +02:00
Committer:     Borislav Petkov (AMD) <bp@xxxxxxxxx>
CommitterDate: Fri, 12 May 2023 17:53:18 +02:00

x86/alternative: Optimize returns patching

Instead of decoding each instruction in the return sites range only to
realize that the return site is a jump to the default return thunk -
which is the needed one when X86_FEATURE_RETHUNK is enabled and no
custom return thunk has been selected - lift that check before the loop
and get rid of the per-site loop overhead.

While at it, add comments about what gets patched.

Signed-off-by: Borislav Petkov (AMD) <bp@xxxxxxxxx>
Acked-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Link: https://lore.kernel.org/r/20230512120952.7924-1-bp@xxxxxxxxx
---
 arch/x86/kernel/alternative.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index b78d55f..3bb0a5f 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -693,13 +693,12 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes)
 {
 	int i = 0;
 
+	/* Patch the custom return thunks... */
 	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
-		if (x86_return_thunk == __x86_return_thunk)
-			return -1;
-
 		i = JMP32_INSN_SIZE;
 		__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
 	} else {
+		/* ... or patch them out if not needed. */
 		bytes[i++] = RET_INSN_OPCODE;
 	}
 
@@ -712,6 +711,14 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
 {
 	s32 *s;
 
+	/*
+	 * Do not patch out the default return thunks if those needed are the
+	 * ones generated by the compiler.
+	 */
+	if (cpu_feature_enabled(X86_FEATURE_RETHUNK) &&
+	    (x86_return_thunk == __x86_return_thunk))
+		return;
+
 	for (s = start; s < end; s++) {
 		void *dest = NULL, *addr = (void *)s + *s;
 		struct insn insn;
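
For illustration, below is a minimal, self-contained C sketch of the
pattern this commit applies: a condition that is invariant across all
return sites (RETHUNK enabled, default thunk still selected) is checked
once before the loop rather than rediscovered per site. All names here
(rethunk_enabled, thunk_is_default, patch_site(), apply_returns_sketch())
are hypothetical stand-ins for the kernel code above, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's state and helpers. */
static bool rethunk_enabled = true;   /* cpu_feature_enabled(X86_FEATURE_RETHUNK) */
static bool thunk_is_default = true;  /* x86_return_thunk == __x86_return_thunk   */

static void patch_site(int site)
{
	printf("patching return site %d\n", site);
}

/*
 * Before the commit, the loop visited every site and each one
 * individually concluded "nothing to do". After it, the loop-invariant
 * condition is hoisted, so the common case - the compiler-generated
 * jumps to the default thunk are already the wanted ones - skips the
 * whole walk.
 */
static void apply_returns_sketch(const int *start, const int *end)
{
	/* Invariant across all sites: check once, not per iteration. */
	if (rethunk_enabled && thunk_is_default)
		return;

	for (const int *s = start; s < end; s++)
		patch_site(*s);
}

int main(void)
{
	int sites[] = { 1, 2, 3 };

	apply_returns_sketch(sites, sites + 3);  /* prints nothing: early return */

	thunk_is_default = false;                /* a custom thunk was installed */
	apply_returns_sketch(sites, sites + 3);  /* now patches each site */
	return 0;
}

The early return turns the common case into a single branch instead of
a full decode-and-compare pass over every entry in the return sites
range.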