sld_update_msr() and sld_state will be used in KVM in a future patch to add virtualization support of split lock detection. Signed-off-by: Xiaoyao Li <xiaoyao.li@xxxxxxxxx> --- arch/x86/include/asm/cpu.h | 12 ++++++++++++ arch/x86/kernel/cpu/intel.c | 13 +++++-------- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index dd17c2da1af5..6c6528b3153e 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -40,12 +40,23 @@ int mwait_usable(const struct cpuinfo_x86 *); unsigned int x86_family(unsigned int sig); unsigned int x86_model(unsigned int sig); unsigned int x86_stepping(unsigned int sig); +enum split_lock_detect_state { + sld_off = 0, + sld_warn, + sld_fatal, +}; + #ifdef CONFIG_CPU_SUP_INTEL +extern enum split_lock_detect_state sld_state __ro_after_init; + extern void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c); extern void switch_to_sld(unsigned long tifn); extern bool handle_user_split_lock(struct pt_regs *regs, long error_code); extern bool handle_guest_split_lock(unsigned long ip); +extern void sld_update_msr(bool on); #else +#define sld_state sld_off + static inline void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) {} static inline void switch_to_sld(unsigned long tifn) {} static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code) @@ -57,5 +68,6 @@ static inline bool handle_guest_split_lock(unsigned long ip) { return false; } +static inline void sld_update_msr(bool on) {} #endif #endif /* _ASM_X86_CPU_H */ diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index bf08d4508ecb..80d1c0c93c08 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -34,18 +34,14 @@ #include <asm/apic.h> #endif -enum split_lock_detect_state { - sld_off = 0, - sld_warn, - sld_fatal, -}; - /* * Default to sld_off because most systems do not support split lock detection * split_lock_setup() will switch this to 
sld_warn on systems that support * split lock detect, unless there is a command line override. */ -static enum split_lock_detect_state sld_state __ro_after_init = sld_off; +enum split_lock_detect_state sld_state __ro_after_init = sld_off; +EXPORT_SYMBOL_GPL(sld_state); + static u64 msr_test_ctrl_cache __ro_after_init; /* @@ -1052,7 +1048,7 @@ static void __init split_lock_setup(void) * is not implemented as one thread could undo the setting of the other * thread immediately after dropping the lock anyway. */ -static void sld_update_msr(bool on) +void sld_update_msr(bool on) { u64 test_ctrl_val = msr_test_ctrl_cache; @@ -1061,6 +1057,7 @@ static void sld_update_msr(bool on) wrmsrl(MSR_TEST_CTRL, test_ctrl_val); } +EXPORT_SYMBOL_GPL(sld_update_msr); static void split_lock_init(void) { -- 2.20.1