Patch "x86: Undo return-thunk damage" has been added to the 5.18-stable tree

This is a note to let you know that I've just added the patch titled

    x86: Undo return-thunk damage

to the 5.18-stable tree, which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     x86-undo-return-thunk-damage.patch
and it can be found in the queue-5.18 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


From foo@baz Tue Jul 12 05:03:58 PM CEST 2022
From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Date: Tue, 14 Jun 2022 23:15:37 +0200
Subject: x86: Undo return-thunk damage

From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>

commit 15e67227c49a57837108acfe1c80570e1bd9f962 upstream.

Introduce X86_FEATURE_RETHUNK for those afflicted with needing this.

  [ bp: Do only INT3 padding - simpler. ]

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Signed-off-by: Borislav Petkov <bp@xxxxxxx>
Reviewed-by: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
Signed-off-by: Borislav Petkov <bp@xxxxxxx>
[cascardo: CONFIG_STACK_VALIDATION vs CONFIG_OBJTOOL]
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@xxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/alternative.h       |    1 
 arch/x86/include/asm/cpufeatures.h       |    1 
 arch/x86/include/asm/disabled-features.h |    3 +
 arch/x86/kernel/alternative.c            |   60 +++++++++++++++++++++++++++++++
 arch/x86/kernel/module.c                 |    8 +++-
 arch/x86/kernel/vmlinux.lds.S            |    7 +++
 6 files changed, 78 insertions(+), 2 deletions(-)

--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -76,6 +76,7 @@ extern int alternatives_patched;
 extern void alternative_instructions(void);
 extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
 extern void apply_retpolines(s32 *start, s32 *end);
+extern void apply_returns(s32 *start, s32 *end);
 extern void apply_ibt_endbr(s32 *start, s32 *end);
 
 struct module;
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -299,6 +299,7 @@
 /* FREE!				(11*32+11) */
 #define X86_FEATURE_RETPOLINE		(11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_RETPOLINE_LFENCE	(11*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_RETHUNK		(11*32+14) /* "" Use REturn THUNK */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* AVX VNNI instructions */
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -60,7 +60,8 @@
 # define DISABLE_RETPOLINE	0
 #else
 # define DISABLE_RETPOLINE	((1 << (X86_FEATURE_RETPOLINE & 31)) | \
-				 (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
+				 (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)) | \
+				 (1 << (X86_FEATURE_RETHUNK & 31)))
 #endif
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -115,6 +115,7 @@ static void __init_or_module add_nops(vo
 }
 
 extern s32 __retpoline_sites[], __retpoline_sites_end[];
+extern s32 __return_sites[], __return_sites_end[];
 extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 extern s32 __smp_locks[], __smp_locks_end[];
@@ -507,9 +508,67 @@ void __init_or_module noinline apply_ret
 	}
 }
 
+/*
+ * Rewrite the compiler generated return thunk tail-calls.
+ *
+ * For example, convert:
+ *
+ *   JMP __x86_return_thunk
+ *
+ * into:
+ *
+ *   RET
+ */
+static int patch_return(void *addr, struct insn *insn, u8 *bytes)
+{
+	int i = 0;
+
+	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+		return -1;
+
+	bytes[i++] = RET_INSN_OPCODE;
+
+	for (; i < insn->length;)
+		bytes[i++] = INT3_INSN_OPCODE;
+
+	return i;
+}
+
+void __init_or_module noinline apply_returns(s32 *start, s32 *end)
+{
+	s32 *s;
+
+	for (s = start; s < end; s++) {
+		void *addr = (void *)s + *s;
+		struct insn insn;
+		int len, ret;
+		u8 bytes[16];
+		u8 op1;
+
+		ret = insn_decode_kernel(&insn, addr);
+		if (WARN_ON_ONCE(ret < 0))
+			continue;
+
+		op1 = insn.opcode.bytes[0];
+		if (WARN_ON_ONCE(op1 != JMP32_INSN_OPCODE))
+			continue;
+
+		DPRINTK("return thunk at: %pS (%px) len: %d to: %pS",
+			addr, addr, insn.length,
+			addr + insn.length + insn.immediate.value);
+
+		len = patch_return(addr, &insn, bytes);
+		if (len == insn.length) {
+			DUMP_BYTES(((u8*)addr),  len, "%px: orig: ", addr);
+			DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr);
+			text_poke_early(addr, bytes, len);
+		}
+	}
+}
 #else /* !RETPOLINES || !CONFIG_STACK_VALIDATION */
 
 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
+void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
 
 #endif /* CONFIG_RETPOLINE && CONFIG_STACK_VALIDATION */
 
@@ -860,6 +919,7 @@ void __init alternative_instructions(voi
 	 * those can rewrite the retpoline thunks.
 	 */
 	apply_retpolines(__retpoline_sites, __retpoline_sites_end);
+	apply_returns(__return_sites, __return_sites_end);
 
 	/*
 	 * Then patch alternatives, such that those paravirt calls that are in
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -253,7 +253,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 {
 	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
 		*para = NULL, *orc = NULL, *orc_ip = NULL,
-		*retpolines = NULL, *ibt_endbr = NULL;
+		*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL;
 	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
 	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
@@ -271,6 +271,8 @@ int module_finalize(const Elf_Ehdr *hdr,
 			orc_ip = s;
 		if (!strcmp(".retpoline_sites", secstrings + s->sh_name))
 			retpolines = s;
+		if (!strcmp(".return_sites", secstrings + s->sh_name))
+			returns = s;
 		if (!strcmp(".ibt_endbr_seal", secstrings + s->sh_name))
 			ibt_endbr = s;
 	}
@@ -287,6 +289,10 @@ int module_finalize(const Elf_Ehdr *hdr,
 		void *rseg = (void *)retpolines->sh_addr;
 		apply_retpolines(rseg, rseg + retpolines->sh_size);
 	}
+	if (returns) {
+		void *rseg = (void *)returns->sh_addr;
+		apply_returns(rseg, rseg + returns->sh_size);
+	}
 	if (alt) {
 		/* patch .altinstructions */
 		void *aseg = (void *)alt->sh_addr;
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -283,6 +283,13 @@ SECTIONS
 		*(.retpoline_sites)
 		__retpoline_sites_end = .;
 	}
+
+	. = ALIGN(8);
+	.return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) {
+		__return_sites = .;
+		*(.return_sites)
+		__return_sites_end = .;
+	}
 #endif
 
 #ifdef CONFIG_X86_KERNEL_IBT
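
For anyone reading along, here is a minimal user-space sketch (an illustration only, not part of the patch or of the kernel tree) of the byte rewrite that patch_return() above performs on each entry of .return_sites: the compiler-generated 5-byte "JMP __x86_return_thunk" tail-call is turned into a one-byte RET followed by INT3 padding. The kernel decodes the real instruction with insn_decode_kernel(), leaves the thunk call in place when X86_FEATURE_RETHUNK is set, and writes the bytes with text_poke_early(); all of that is simplified away here, and the site buffer, dummy rel32 and file name are made up for the demo.

/* build: cc -o retsketch retsketch.c && ./retsketch */
#include <stdio.h>
#include <stdint.h>

#define JMP32_INSN_OPCODE 0xE9	/* JMP rel32: 1 opcode byte + 4-byte displacement */
#define RET_INSN_OPCODE   0xC3	/* near return */
#define INT3_INSN_OPCODE  0xCC	/* breakpoint, used as padding */

/*
 * Rewrite one site of 'len' bytes in place; returns the number of bytes
 * written, or -1 if the site does not start with a JMP32.
 */
static int patch_return(uint8_t *site, int len)
{
	int i = 0;

	if (site[0] != JMP32_INSN_OPCODE)
		return -1;

	site[i++] = RET_INSN_OPCODE;
	while (i < len)
		site[i++] = INT3_INSN_OPCODE;

	return i;
}

int main(void)
{
	/* A fake "JMP __x86_return_thunk" site: e9 plus a dummy 32-bit displacement. */
	uint8_t site[5] = { JMP32_INSN_OPCODE, 0x10, 0x20, 0x30, 0x40 };
	int n, i;

	n = patch_return(site, (int)sizeof(site));
	printf("patched %d bytes:", n);
	for (i = 0; i < (int)sizeof(site); i++)
		printf(" %02x", site[i]);
	printf("\n");	/* expected output: "patched 5 bytes: c3 cc cc cc cc" */

	return 0;
}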


Patches currently in stable-queue which might be from peterz@xxxxxxxxxxxxx are

queue-5.18/x86-sev-avoid-using-__x86_return_thunk.patch
queue-5.18/kvm-vmx-prevent-rsb-underflow-before-vmenter.patch
queue-5.18/x86-ftrace-use-alternative-ret-encoding.patch
queue-5.18/objtool-re-add-unwind_hint_-save_restore.patch
queue-5.18/x86-bugs-add-retbleed-ibpb.patch
queue-5.18/x86-bugs-enable-stibp-for-jmp2ret.patch
queue-5.18/x86-retpoline-cleanup-some-ifdefery.patch
queue-5.18/kvm-vmx-flatten-__vmx_vcpu_run.patch
queue-5.18/x86-kvm-vmx-make-noinstr-clean.patch
queue-5.18/x86-retbleed-add-fine-grained-kconfig-knobs.patch
queue-5.18/x86-cpu-amd-add-spectral-chicken.patch
queue-5.18/kvm-vmx-fix-ibrs-handling-after-vmexit.patch
queue-5.18/kvm-vmx-prevent-guest-rsb-poisoning-attacks-with-eibrs.patch
queue-5.18/x86-vsyscall_emu-64-don-t-use-ret-in-vsyscall-emulation.patch
queue-5.18/x86-add-magic-amd-return-thunk.patch
queue-5.18/x86-bugs-keep-a-per-cpu-ia32_spec_ctrl-value.patch
queue-5.18/x86-objtool-create-.return_sites.patch
queue-5.18/x86-kvm-fix-setcc-emulation-for-return-thunks.patch
queue-5.18/x86-retpoline-swizzle-retpoline-thunk.patch
queue-5.18/x86-speculation-fix-firmware-entry-spec_ctrl-handling.patch
queue-5.18/x86-speculation-add-spectre_v2-ibrs-option-to-support-kernel-ibrs.patch
queue-5.18/x86-xen-add-untrain_ret.patch
queue-5.18/x86-undo-return-thunk-damage.patch
queue-5.18/x86-entry-avoid-very-early-ret.patch
queue-5.18/x86-entry-move-push_and_clear_regs-back-into-error_entry.patch
queue-5.18/x86-speculation-fill-rsb-on-vmexit-for-ibrs.patch
queue-5.18/objtool-add-entry-unret-validation.patch
queue-5.18/kvm-vmx-convert-launched-argument-to-flags.patch
queue-5.18/x86-bpf-use-alternative-ret-encoding.patch
queue-5.18/x86-common-stamp-out-the-stepping-madness.patch
queue-5.18/x86-bugs-split-spectre_v2_select_mitigation-and-spectre_v2_user_select_mitigation.patch
queue-5.18/x86-bugs-report-intel-retbleed-vulnerability.patch
queue-5.18/x86-cpufeatures-move-retpoline-flags-to-word-11.patch
queue-5.18/x86-speculation-fix-spec_ctrl-write-on-smt-state-change.patch
queue-5.18/x86-retpoline-use-mfunction-return.patch
queue-5.18/x86-xen-rename-sys-entry-points.patch
queue-5.18/x86-bugs-optimize-spec_ctrl-msr-writes.patch
queue-5.18/x86-bugs-report-amd-retbleed-vulnerability.patch
queue-5.18/x86-static_call-use-alternative-ret-encoding.patch
queue-5.18/x86-speculation-fix-rsb-filling-with-config_retpoline-n.patch
queue-5.18/x86-use-return-thunk-in-asm-code.patch
queue-5.18/intel_idle-disable-ibrs-during-long-idle.patch
queue-5.18/x86-entry-remove-skip_r11rcx.patch
queue-5.18/x86-speculation-use-cached-host-spec_ctrl-value-for-guest-entry-exit.patch
queue-5.18/x86-bugs-add-amd-retbleed-boot-parameter.patch
queue-5.18/x86-entry-add-kernel-ibrs-implementation.patch
queue-5.18/objtool-treat-.text.__x86.-as-noinstr.patch
queue-5.18/objtool-update-retpoline-validation.patch


