On Sun, Feb 04, 2024 at 11:18:59PM -0800, Pawan Gupta wrote:
>  .popsection
> +
> +/*
> + * Defines the VERW operand that is disguised as entry code so that

"Define..."

> + * it can be referenced with KPTI enabled. This ensures VERW can be

"Ensure..."

But committer can fix those.

> + * used late in exit-to-user path after page tables are switched.
> + */
> +.pushsection .entry.text, "ax"
> +
> +.align L1_CACHE_BYTES, 0xcc
> +SYM_CODE_START_NOALIGN(mds_verw_sel)
> +	UNWIND_HINT_UNDEFINED
> +	ANNOTATE_NOENDBR
> +	.word __KERNEL_DS
> +.align L1_CACHE_BYTES, 0xcc
> +SYM_CODE_END(mds_verw_sel);
> +/* For KVM */
> +EXPORT_SYMBOL_GPL(mds_verw_sel);
> +
> +.popsection
> diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
> index fdf723b6f6d0..2b62cdd8dd12 100644
> --- a/arch/x86/include/asm/cpufeatures.h
> +++ b/arch/x86/include/asm/cpufeatures.h
> @@ -95,7 +95,7 @@
>  #define X86_FEATURE_SYSENTER32		( 3*32+15) /* "" sysenter in IA32 userspace */
>  #define X86_FEATURE_REP_GOOD		( 3*32+16) /* REP microcode works well */
>  #define X86_FEATURE_AMD_LBR_V2		( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
> -/* FREE, was #define X86_FEATURE_LFENCE_RDTSC		( 3*32+18) "" LFENCE synchronizes RDTSC */
> +#define X86_FEATURE_CLEAR_CPU_BUF	( 3*32+18) /* "" Clear CPU buffers using VERW */
>  #define X86_FEATURE_ACC_POWER		( 3*32+19) /* AMD Accumulated Power Mechanism */
>  #define X86_FEATURE_NOPL		( 3*32+20) /* The NOPL (0F 1F) instructions */
>  #define X86_FEATURE_ALWAYS		( 3*32+21) /* "" Always-present feature */
> diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
> index 262e65539f83..ec85dfe67123 100644
> --- a/arch/x86/include/asm/nospec-branch.h
> +++ b/arch/x86/include/asm/nospec-branch.h
> @@ -315,6 +315,21 @@
>  #endif
>  .endm
> 
> +/*
> + * Macros to execute VERW instruction that mitigate transient data sampling
> + * attacks such as MDS. On affected systems a microcode update overloaded VERW
> + * instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
> + *
> + * Note: Only the memory operand variant of VERW clears the CPU buffers.
> + */
> +.macro EXEC_VERW

I think I asked this already:

Why isn't this called simply "VERW"? There's no better name as this is
basically the insn itself...

> +	verw _ASM_RIP(mds_verw_sel)
> +.endm

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette
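
For readers skimming the archive, here is a minimal sketch (not part of the
quoted patch) of how the new X86_FEATURE_CLEAR_CPU_BUF bit and the VERW
operand above could be tied together with ALTERNATIVE, so CPUs that do not
need the mitigation execute nothing; the CLEAR_CPU_BUFFERS wrapper name is
an assumption made for illustration only:

/*
 * Illustrative sketch; the CLEAR_CPU_BUFFERS name is assumed, not taken
 * from the quoted patch. Emit "verw mds_verw_sel" (the same instruction
 * EXEC_VERW expands to) only when X86_FEATURE_CLEAR_CPU_BUF is set,
 * otherwise emit nothing. Assumes <asm/alternative.h>, <asm/asm.h> and
 * <linux/stringify.h> are in scope where the macro is defined.
 */
.macro CLEAR_CPU_BUFFERS
	ALTERNATIVE "", __stringify(verw _ASM_RIP(mds_verw_sel)), X86_FEATURE_CLEAR_CPU_BUF
.endm

An exit-to-user path would then invoke such a wrapper late, after the page
tables are switched, which is exactly why the operand is placed in
.entry.text in the quoted hunk.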