linux-next: manual merge of the tip tree with Linus' tree

Hi all,

Today's linux-next merge of the tip tree got conflicts in:

  arch/x86/include/asm/processor.h
  arch/x86/kernel/vmlinux.lds.S
  arch/x86/lib/retpoline.S

between commits:

  fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
  3bbbe97ad83d ("x86/srso: Add a forgotten NOENDBR annotation")

from Linus' tree and commits:

  566ffa3ae964 ("x86/cpu: Fix amd_check_microcode() declaration")
  973ab2d61f33 ("x86/retpoline,kprobes: Fix position of thunk sections with CONFIG_LTO_CLANG")
  029239c5b0e6 ("x86/retpoline,kprobes: Skip optprobe check for indirect jumps with retpolines and IBT")

from the tip tree.

I fixed it up (I think - see below) and can carry the fix as
necessary. This is now fixed as far as linux-next is concerned, but any
non-trivial conflicts should be mentioned to your upstream maintainer
when your tree is submitted for merging.  You may also want to consider
cooperating with the maintainer of the conflicting tree to minimise any
particularly complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc arch/x86/include/asm/processor.h
index 7c67db7c9f53,36d52075fdad..000000000000
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@@ -682,11 -682,11 +682,13 @@@ extern u16 get_llc_id(unsigned int cpu)
  #ifdef CONFIG_CPU_SUP_AMD
  extern u32 amd_get_nodes_per_socket(void);
  extern u32 amd_get_highest_perf(void);
 +extern bool cpu_has_ibpb_brtype_microcode(void);
+ extern void amd_check_microcode(void);
  #else
  static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
  static inline u32 amd_get_highest_perf(void)		{ return 0; }
 +static inline bool cpu_has_ibpb_brtype_microcode(void)	{ return false; }
+ static inline void amd_check_microcode(void)		{ }
  #endif
  
  extern unsigned long arch_align_stack(unsigned long sp);
diff --cc arch/x86/kernel/vmlinux.lds.S
index e76813230192,dd5b0a68cf84..000000000000
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@@ -133,28 -133,12 +133,26 @@@ SECTION
  		KPROBES_TEXT
  		SOFTIRQENTRY_TEXT
  #ifdef CONFIG_RETPOLINE
- 		__indirect_thunk_start = .;
- 		*(.text.__x86.indirect_thunk)
- 		*(.text.__x86.return_thunk)
- 		__indirect_thunk_end = .;
 -		*(.text..__x86.*)
++		*(.text..__x86.indirect_thunk)
++		*(.text..__x86.return_thunk)
  #endif
  		STATIC_CALL_TEXT
  
  		ALIGN_ENTRY_TEXT_BEGIN
 +#ifdef CONFIG_CPU_SRSO
- 		*(.text.__x86.rethunk_untrain)
++		*(.text..__x86.rethunk_untrain)
 +#endif
 +
  		ENTRY_TEXT
 +
 +#ifdef CONFIG_CPU_SRSO
 +		/*
 +		 * See the comment above srso_untrain_ret_alias()'s
 +		 * definition.
 +		 */
 +		. = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
- 		*(.text.__x86.rethunk_safe)
++		*(.text..__x86.rethunk_safe)
 +#endif
  		ALIGN_ENTRY_TEXT_END
  		*(.gnu.warning)
  
diff --cc arch/x86/lib/retpoline.S
index 2cff585f22f2,3bea96341d00..000000000000
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@@ -11,9 -11,8 +11,9 @@@
  #include <asm/unwind_hints.h>
  #include <asm/percpu.h>
  #include <asm/frame.h>
 +#include <asm/nops.h>
  
- 	.section .text.__x86.indirect_thunk
+ 	.section .text..__x86.indirect_thunk
  
  
  .macro POLINE reg
@@@ -132,47 -131,7 +132,47 @@@ SYM_CODE_END(__x86_indirect_jump_thunk_
   */
  #ifdef CONFIG_RETHUNK
  
 +/*
 + * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at
 + * special addresses:
 + *
 + * - srso_untrain_ret_alias() is 2M aligned
 + * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14
 + * and 20 in its virtual address are set (while those bits in the
 + * srso_untrain_ret_alias() function are cleared).
 + *
 + * This guarantees that those two addresses will alias in the branch
 + * target buffer of Zen3/4 generations, leading to any potential
 + * poisoned entries at that BTB slot to get evicted.
 + *
 + * As a result, srso_safe_ret_alias() becomes a safe return.
 + */
 +#ifdef CONFIG_CPU_SRSO
- 	.section .text.__x86.rethunk_untrain
++	.section .text..__x86.rethunk_untrain
 +
 +SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
 +	ANNOTATE_NOENDBR
 +	ASM_NOP2
 +	lfence
 +	jmp __x86_return_thunk
 +SYM_FUNC_END(srso_untrain_ret_alias)
 +__EXPORT_THUNK(srso_untrain_ret_alias)
 +
- 	.section .text.__x86.rethunk_safe
++	.section .text..__x86.rethunk_safe
 +#endif
 +
 +/* Needs a definition for the __x86_return_thunk alternative below. */
 +SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
 +#ifdef CONFIG_CPU_SRSO
 +	add $8, %_ASM_SP
 +	UNWIND_HINT_FUNC
 +#endif
 +	ANNOTATE_UNRET_SAFE
 +	ret
 +	int3
 +SYM_FUNC_END(srso_safe_ret_alias)
 +
- 	.section .text.__x86.return_thunk
+ 	.section .text..__x86.return_thunk
  
  /*
   * Safety details here pertain to the AMD Zen{1,2} microarchitecture:

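For reference, below is a minimal stand-alone sketch (plain user-space C, not kernel code; the 2M-aligned base address is made up for illustration) of the address arithmetic behind the ". = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);" line in the vmlinux.lds.S hunk above, which places srso_safe_ret_alias() so that it aliases with srso_untrain_ret_alias() in the Zen3/4 branch target buffer:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical 2M-aligned address standing in for srso_untrain_ret_alias. */
	uint64_t untrain = 0xffffffff81a00000ULL;

	/*
	 * Same expression as the linker script line: set bits 2, 8, 14 and 20.
	 * Because the base is 2M aligned and all forced bits are below bit 21,
	 * the result stays in the same 2M page, and the two entry points differ
	 * only in those four bits, which is what makes them alias in the BTB.
	 */
	uint64_t safe = untrain | (1ULL << 2) | (1ULL << 8) | (1ULL << 14) | (1ULL << 20);

	printf("srso_untrain_ret_alias (assumed): 0x%llx\n",
	       (unsigned long long)untrain);
	printf("srso_safe_ret_alias placement:    0x%llx\n",
	       (unsigned long long)safe);
	return 0;
}

(In the resolution above, the SRSO sections coming from Linus' tree are spelled with the ".text..__x86." double-dot prefix to match the section renaming done by the LTO_CLANG change from the tip tree.)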