Add a function to set the per-cpu ibs_flags based on whether a
PreventHostIBS window is active or inactive.

The MSR_AMD64_IBSFETCHCTL[IbsFetchEn] and MSR_AMD64_IBSOPCTL[IbsOpEn]
bits need to be cleared before VMRUN is executed for the PreventHostIBS
feature to take effect. The ENABLE and VALID bits of
MSR_AMD64_IBSFETCHCTL live in the same MSR, and the same is true for
MSR_AMD64_IBSOPCTL.

Consider the following scenario:

- The IBS MSR, with the ENABLE bit set and the VALID bit clear, is read.
- While the ENABLE bit is being cleared and the MSR is written back to
  disable IBS, an IBS event can occur that sets the VALID bit.
- The write to the IBS MSR then clears the newly set VALID bit.
- Because this happens inside the CLGI/STGI window (the PreventHostIBS
  window), the NMI is not taken immediately.
- Once VMRUN is issued, it exits with VMEXIT_NMI. As soon as STGI is
  executed, the pending NMI triggers.
- The IBS NMI handler checks the VALID bit to determine whether the NMI
  was generated by IBS.
- Since the VALID bit is now clear, the handler does not recognize that
  an IBS event occurred, and "dazed and confused" unknown NMI messages
  are generated.

amd_prevent_hostibs_window() is added to avoid these messages when the
PreventHostIBS window is active and the PreventHostIBS feature is
enabled for the guest.

Signed-off-by: Manali Shukla <manali.shukla@xxxxxxx>
---
 arch/x86/events/amd/ibs.c         | 64 +++++++++++++++++++++++++++++++
 arch/x86/include/asm/perf_event.h | 20 ++++++++++
 2 files changed, 84 insertions(+)

diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index da3f5ebac4e1..e96a4c9ff4ba 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -30,7 +30,9 @@ static u32 ibs_caps;
 
 #define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
 #define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT
+#define PREVENT_HOSTIBS_WINDOW	BIT(0)
 
+static DEFINE_PER_CPU(unsigned int, ibs_flags);
 
 /*
  * IBS states:
@@ -1035,6 +1037,18 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
 	if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
 		return 1;
 
+	/*
+	 * Catch NMIs generated in an active PreventHostIBS window:
+	 * incoming NMIs from an active PreventHostIBS window might have
+	 * the VALID bit cleared when it is supposed to be set due to
+	 * a race. The reason for the race is ENABLE and VALID bits for
+	 * MSR_AMD64_IBSFETCHCTL and MSR_AMD64_IBSOPCTL being in their
+	 * same respective MSRs. Ignore all such NMIs and treat them as
+	 * handled.
+	 */
+	if (__this_cpu_read(ibs_flags) & PREVENT_HOSTIBS_WINDOW)
+		return 1;
+
 	return 0;
 }
 
@@ -1540,3 +1554,53 @@ static __init int amd_ibs_init(void)
 
 /* Since we need the pci subsystem to init ibs we can't do this earlier: */
 device_initcall(amd_ibs_init);
+
+void amd_prevent_hostibs_window(bool active)
+{
+	if (active)
+		__this_cpu_write(ibs_flags,
+				 __this_cpu_read(ibs_flags) |
+				 PREVENT_HOSTIBS_WINDOW);
+	else
+		__this_cpu_write(ibs_flags,
+				 __this_cpu_read(ibs_flags) &
+				 ~PREVENT_HOSTIBS_WINDOW);
+}
+EXPORT_SYMBOL_GPL(amd_prevent_hostibs_window);
+
+bool amd_disable_ibs_fetch(u64 *ibs_fetch_ctl)
+{
+	*ibs_fetch_ctl = __rdmsr(MSR_AMD64_IBSFETCHCTL);
+	if (!(*ibs_fetch_ctl & IBS_FETCH_ENABLE))
+		return false;
+
+	native_wrmsrl(MSR_AMD64_IBSFETCHCTL,
+		      *ibs_fetch_ctl & ~IBS_FETCH_ENABLE);
+
+	return true;
+}
+EXPORT_SYMBOL(amd_disable_ibs_fetch);
+
+bool amd_disable_ibs_op(u64 *ibs_op_ctl)
+{
+	*ibs_op_ctl = __rdmsr(MSR_AMD64_IBSOPCTL);
+	if (!(*ibs_op_ctl & IBS_OP_ENABLE))
+		return false;
+
+	native_wrmsrl(MSR_AMD64_IBSOPCTL, *ibs_op_ctl & ~IBS_OP_ENABLE);
+
+	return true;
+}
+EXPORT_SYMBOL(amd_disable_ibs_op);
+
+void amd_restore_ibs_fetch(u64 ibs_fetch_ctl)
+{
+	native_wrmsrl(MSR_AMD64_IBSFETCHCTL, ibs_fetch_ctl);
+}
+EXPORT_SYMBOL(amd_restore_ibs_fetch);
+
+void amd_restore_ibs_op(u64 ibs_op_ctl)
+{
+	native_wrmsrl(MSR_AMD64_IBSOPCTL, ibs_op_ctl);
+}
+EXPORT_SYMBOL(amd_restore_ibs_op);
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 5d0f6891ae61..1005505e23b1 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -561,6 +561,26 @@ static inline void intel_pt_handle_vmx(int on)
 }
 #endif
 
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_CPU_SUP_AMD)
+extern void amd_prevent_hostibs_window(bool active);
+extern bool amd_disable_ibs_fetch(u64 *ibs_fetch_ctl);
+extern bool amd_disable_ibs_op(u64 *ibs_op_ctl);
+extern void amd_restore_ibs_fetch(u64 ibs_fetch_ctl);
+extern void amd_restore_ibs_op(u64 ibs_op_ctl);
+#else
+static inline void amd_prevent_hostibs_window(bool active) {}
+static inline bool amd_disable_ibs_fetch(u64 *ibs_fetch_ctl)
+{
+	return false;
+}
+static inline bool amd_disable_ibs_op(u64 *ibs_op_ctl)
+{
+	return false;
+}
+static inline void amd_restore_ibs_fetch(u64 ibs_fetch_ctl) {}
+static inline void amd_restore_ibs_op(u64 ibs_op_ctl) {}
+#endif
+
 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
 extern void amd_pmu_enable_virt(void);
 extern void amd_pmu_disable_virt(void);
-- 
2.34.1
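
For context while reviewing, here is a minimal usage sketch (not part of
this patch) of how an SVM VMRUN path could use the new helpers inside
the CLGI/STGI window. The function example_vmrun_with_prevent_hostibs()
and the __run_guest() stand-in are hypothetical placeholders; clgi() and
stgi() refer to the existing SVM inline wrappers, and the exact ordering
relative to STGI in the actual KVM series may differ.

/*
 * Hypothetical sketch only: wrap the CLGI/STGI (PreventHostIBS) window
 * with the helpers added by this patch.
 */
static void example_vmrun_with_prevent_hostibs(void)
{
	u64 fetch_ctl, op_ctl;
	bool fetch_was_on, op_was_on;

	clgi();			/* GIF=0: host NMIs are held pending */

	/* Disable host IBS and remember whether it was active. */
	fetch_was_on = amd_disable_ibs_fetch(&fetch_ctl);
	op_was_on = amd_disable_ibs_op(&op_ctl);

	/*
	 * Open the PreventHostIBS window so the IBS NMI handler swallows
	 * a racy NMI whose VALID bit was lost while ENABLE was cleared.
	 */
	amd_prevent_hostibs_window(true);

	__run_guest();		/* placeholder for VMRUN ... #VMEXIT */

	/* Restore the saved host IBS state. */
	if (fetch_was_on)
		amd_restore_ibs_fetch(fetch_ctl);
	if (op_was_on)
		amd_restore_ibs_op(op_ctl);

	stgi();			/* GIF=1: a pending NMI fires here */

	/* Close the window once the pending NMI has had a chance to run. */
	amd_prevent_hostibs_window(false);
}

The return values of amd_disable_ibs_fetch()/amd_disable_ibs_op() are
what make the restore side cheap: the MSRs are only written back when
IBS was actually enabled on this CPU.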