Use the virt callback to disable SVM (and set GIF=1) during an emergency
instead of blindly attempting to disable SVM.  Like the VMX case, if KVM
(or an out-of-tree hypervisor) isn't loaded/active, SVM can't be in use.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 arch/x86/include/asm/reboot.h  |  2 +-
 arch/x86/include/asm/virtext.h |  8 --------
 arch/x86/kernel/reboot.c       |  6 ++----
 arch/x86/kvm/svm/svm.c         | 19 +++++++++++++++++--
 4 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index 1d098a7d329a..dc2b77e6704b 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -25,7 +25,7 @@ void __noreturn machine_real_restart(unsigned int type);
 #define MRR_BIOS	0
 #define MRR_APM		1
 
-#if IS_ENABLED(CONFIG_KVM_INTEL)
+#if IS_ENABLED(CONFIG_KVM_INTEL) || IS_ENABLED(CONFIG_KVM_AMD)
 typedef void (cpu_emergency_virt_cb)(void);
 void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback);
 void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback);
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index 5bc29fab15da..aaed66249ccf 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -133,12 +133,4 @@ static inline void cpu_svm_disable(void)
 	}
 }
 
-/** Makes sure SVM is disabled, if it is supported on the CPU
- */
-static inline void cpu_emergency_svm_disable(void)
-{
-	if (cpu_has_svm(NULL))
-		cpu_svm_disable();
-}
-
 #endif /* _ASM_X86_VIRTEX_H */
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 5fb1fbf14c82..db535542b7ab 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -787,7 +787,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_KVM_INTEL)
+#if IS_ENABLED(CONFIG_KVM_INTEL) || IS_ENABLED(CONFIG_KVM_AMD)
 /* RCU-protected callback to disable virtualization prior to reboot. */
 static cpu_emergency_virt_cb __rcu *cpu_emergency_virt_callback;
 
@@ -821,7 +821,7 @@ int crashing_cpu = -1;
  */
 void cpu_emergency_disable_virtualization(void)
 {
-#if IS_ENABLED(CONFIG_KVM_INTEL)
+#if IS_ENABLED(CONFIG_KVM_INTEL) || IS_ENABLED(CONFIG_KVM_AMD)
 	cpu_emergency_virt_cb *callback;
 
 	rcu_read_lock();
@@ -830,8 +830,6 @@ void cpu_emergency_disable_virtualization(void)
 		callback();
 	rcu_read_unlock();
 #endif
-	/* KVM_AMD doesn't yet utilize the common callback. */
-	cpu_emergency_svm_disable();
 }
 
 #if defined(CONFIG_SMP)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index b43775490074..541dd978a94b 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -38,6 +38,7 @@
 #include <asm/spec-ctrl.h>
 #include <asm/cpu_device_id.h>
 #include <asm/traps.h>
+#include <asm/reboot.h>
 #include <asm/fpu/api.h>
 
 #include <asm/virtext.h>
@@ -565,6 +566,11 @@ void __svm_write_tsc_multiplier(u64 multiplier)
 	preempt_enable();
 }
 
+static void svm_emergency_disable(void)
+{
+	cpu_svm_disable();
+}
+
 static void svm_hardware_disable(void)
 {
 	/* Make sure we clean up behind us */
@@ -5092,6 +5098,13 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
 	.pmu_ops = &amd_pmu_ops,
 };
 
+static void __svm_exit(void)
+{
+	kvm_x86_vendor_exit();
+
+	cpu_emergency_unregister_virt_callback(svm_emergency_disable);
+}
+
 static int __init svm_init(void)
 {
 	int r;
@@ -5105,6 +5118,8 @@ static int __init svm_init(void)
 	if (r)
 		return r;
 
+	cpu_emergency_register_virt_callback(svm_emergency_disable);
+
 	/*
 	 * Common KVM initialization _must_ come last, after this, /dev/kvm is
 	 * exposed to userspace!
@@ -5117,14 +5132,14 @@ static int __init svm_init(void)
 	return 0;
 
 err_kvm_init:
-	kvm_x86_vendor_exit();
+	__svm_exit();
 	return r;
 }
 
 static void __exit svm_exit(void)
 {
 	kvm_exit();
-	kvm_x86_vendor_exit();
+	__svm_exit();
 }
 
 module_init(svm_init)
--
2.40.0.rc1.284.g88254d51c5-goog
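
For context (not part of the patch): svm_emergency_disable() simply calls
cpu_svm_disable(), which is what actually sets GIF=1 and clears EFER.SVME.
Below is a minimal sketch of that logic; the function name
svm_disable_sketch() is made up for illustration and the exact code in
virtext.h may differ.

#include <asm/msr.h>		/* rdmsrl()/wrmsrl() */
#include <asm/msr-index.h>	/* MSR_EFER, EFER_SVME, MSR_VM_HSAVE_PA */

/* Illustrative sketch of the SVM-disable path reached via the callback. */
static inline void svm_disable_sketch(void)
{
	u64 efer;

	/* Zap the host save-area pointer. */
	wrmsrl(MSR_VM_HSAVE_PA, 0);

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME) {
		/*
		 * Force GIF=1 before clearing SVME so INIT/NMI aren't left
		 * blocked if the CPU was interrupted while running a guest.
		 * STGI is guarded by the SVME check because it #UDs when
		 * EFER.SVME=0.
		 */
		asm volatile("stgi" ::: "memory");
	}

	/* Clear EFER.SVME to actually disable SVM. */
	wrmsrl(MSR_EFER, efer & ~EFER_SVME);
}

The point of the series is that this only runs when KVM (or another
registered hypervisor) has actually enabled SVM, via the callback, rather
than being attempted unconditionally in the emergency reboot path.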