From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx> X86 MCE support is kernel builtin. CONFIG_X86_MCE=y or n. The x86 MCE injection framework can be kernel builtin or kernel module. CONFIG_X86_MCE_INJECT=y, m, or n. We don't want KVM to depend on CONFIG_X86_MCE_INJECT=y, allow CONFIG_X86_MCE_INJECT=m for KVM MCE injection. Move the necessary symbols with rename from arch/x86/kernel/cpu/mce/inject.c to arch/x86/kernel/cpu/mce/core.c and export symbols for KVM to inject MCE. Opportunistically add lockdep_assert_held(&mce_inject_mutex) to show that injectm is protected by mce_inject_mutex. Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx> --- arch/x86/include/asm/mce.h | 10 +++++++++ arch/x86/kernel/cpu/mce/core.c | 36 ++++++++++++++++++++++++++++++++ arch/x86/kernel/cpu/mce/inject.c | 26 +++-------------------- 3 files changed, 49 insertions(+), 23 deletions(-) diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 180b1cbfcc4e..459066ecd922 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -265,6 +265,16 @@ int mce_notify_irq(void); DECLARE_PER_CPU(struct mce, injectm); +#ifdef CONFIG_X86_MCE +void mce_inject_lock(void); +void mce_inject_unlock(void); +void mce_inject(struct mce *m); +#else +static inline void mce_inject_lock(void) {} +static inline void mce_inject_unlock(void) {} +static inline void mce_inject(struct mce *m) {} +#endif + /* Disable CMCI/polling for MCA bank claimed by firmware */ extern void mce_disable_bank(int bank); diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 6f35f724cc14..6929c3cad278 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -129,9 +129,45 @@ void mce_setup(struct mce *m) m->ppin = cpu_data(m->extcpu).ppin; m->microcode = boot_cpu_data.microcode; } +EXPORT_SYMBOL_GPL(mce_setup); DEFINE_PER_CPU(struct mce, injectm); EXPORT_PER_CPU_SYMBOL_GPL(injectm); +static DEFINE_MUTEX(mce_inject_mutex); + +void mce_inject_lock(void) 
+{ + mutex_lock(&mce_inject_mutex); +} +EXPORT_SYMBOL_GPL(mce_inject_lock); + +void mce_inject_unlock(void) +{ + mutex_unlock(&mce_inject_mutex); +} +EXPORT_SYMBOL_GPL(mce_inject_unlock); + +/* Update fake mce registers on current CPU. */ +void mce_inject(struct mce *m) +{ + struct mce *i = &per_cpu(injectm, m->extcpu); + + lockdep_assert_held(&mce_inject_mutex); + + /* Make sure no one reads partially written injectm */ + i->finished = 0; + mb(); + m->finished = 0; + /* First set the fields after finished */ + i->extcpu = m->extcpu; + mb(); + /* Now write record in order, finished last (except above) */ + memcpy(i, m, sizeof(struct mce)); + /* Finally activate it */ + mb(); + i->finished = 1; +} +EXPORT_SYMBOL_GPL(mce_inject); void mce_log(struct mce *m) { diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c index 88603a6c0afe..ae3efbeb78bd 100644 --- a/arch/x86/kernel/cpu/mce/inject.c +++ b/arch/x86/kernel/cpu/mce/inject.c @@ -126,25 +126,6 @@ static void setup_inj_struct(struct mce *m) m->microcode = boot_cpu_data.microcode; } -/* Update fake mce registers on current CPU. 
*/ -static void inject_mce(struct mce *m) -{ - struct mce *i = &per_cpu(injectm, m->extcpu); - - /* Make sure no one reads partially written injectm */ - i->finished = 0; - mb(); - m->finished = 0; - /* First set the fields after finished */ - i->extcpu = m->extcpu; - mb(); - /* Now write record in order, finished last (except above) */ - memcpy(i, m, sizeof(struct mce)); - /* Finally activate it */ - mb(); - i->finished = 1; -} - static void raise_poll(struct mce *m) { unsigned long flags; @@ -176,7 +157,6 @@ static void raise_exception(struct mce *m, struct pt_regs *pregs) } static cpumask_var_t mce_inject_cpumask; -static DEFINE_MUTEX(mce_inject_mutex); static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs) { @@ -245,7 +225,7 @@ static void __maybe_unused raise_mce(struct mce *m) { int context = MCJ_CTX(m->inject_flags); - inject_mce(m); + mce_inject(m); if (context == MCJ_CTX_RANDOM) return; @@ -303,9 +283,9 @@ static int mce_inject_raise(struct notifier_block *nb, unsigned long val, if (!m) return NOTIFY_DONE; - mutex_lock(&mce_inject_mutex); + mce_inject_lock(); raise_mce(m); - mutex_unlock(&mce_inject_mutex); + mce_inject_unlock(); return NOTIFY_DONE; } -- 2.25.1