Use KVM VMX's reboot/crash callback to do VMXOFF in an emergency instead
of manually and blindly doing VMXOFF. There's no need to attempt VMXOFF
if KVM (or some other out-of-tree hypervisor) isn't loaded/active, i.e.
if the CPU can't possibly be post-VMXON.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 arch/x86/include/asm/virtext.h | 10 ----------
 arch/x86/kernel/reboot.c       | 30 +++++++++---------------------
 arch/x86/kvm/vmx/vmx.c         |  8 +++++---
 3 files changed, 14 insertions(+), 34 deletions(-)

diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index 0acb14806a74..4f61283b1f52 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -70,16 +70,6 @@ static inline void __cpu_emergency_vmxoff(void)
 		cpu_vmxoff();
 }
 
-/** Disable VMX if it is supported and enabled on the current CPU
- */
-static inline void cpu_emergency_vmxoff(void)
-{
-	if (cpu_has_vmx())
-		__cpu_emergency_vmxoff();
-}
-
-
-
 
 /*
  * SVM functions:
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 40ea16ecb3b8..85ea1fdebf4c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -788,13 +788,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
 #endif
 
 #if IS_ENABLED(CONFIG_KVM_INTEL)
-/*
- * This is used to VMCLEAR all VMCSs loaded on the
- * processor. And when loading kvm_intel module, the
- * callback function pointer will be assigned.
- *
- * protected by rcu.
- */
+/* RCU-protected callback to disable virtualization prior to reboot. */
 static cpu_emergency_virt_cb __rcu *cpu_emergency_virt_callback;
 
 void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback)
@@ -815,17 +809,6 @@ void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback)
 	synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(cpu_emergency_unregister_virt_callback);
-
-static inline void cpu_crash_vmclear_loaded_vmcss(void)
-{
-	cpu_emergency_virt_cb *callback;
-
-	rcu_read_lock();
-	callback = rcu_dereference(cpu_emergency_virt_callback);
-	if (callback)
-		callback();
-	rcu_read_unlock();
-}
 #endif
 
 /* This is the CPU performing the emergency shutdown work. */
@@ -841,10 +824,15 @@ void cpu_emergency_disable_virtualization(void)
 	lockdep_assert_irqs_disabled();
 
 #if IS_ENABLED(CONFIG_KVM_INTEL)
-	cpu_crash_vmclear_loaded_vmcss();
-#endif
+	cpu_emergency_virt_cb *callback;
 
-	cpu_emergency_vmxoff();
+	rcu_read_lock();
+	callback = rcu_dereference(cpu_emergency_virt_callback);
+	if (callback)
+		callback();
+	rcu_read_unlock();
+#endif
+	/* KVM_AMD doesn't yet utilize the common callback. */
 	cpu_emergency_svm_disable();
 }
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6e0cc4d03884..6f3ade75a670 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -743,7 +743,7 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
 	return ret;
 }
 
-static void crash_vmclear_local_loaded_vmcss(void)
+static void vmx_emergency_disable(void)
 {
 	int cpu = raw_smp_processor_id();
 	struct loaded_vmcs *v;
@@ -751,6 +751,8 @@ static void crash_vmclear_local_loaded_vmcss(void)
 	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
 			    loaded_vmcss_on_cpu_link)
 		vmcs_clear(v->vmcs);
+
+	__cpu_emergency_vmxoff();
 }
 
 static void __loaded_vmcs_clear(void *arg)
@@ -8526,7 +8528,7 @@ static void __vmx_exit(void)
 {
 	allow_smaller_maxphyaddr = false;
 
-	cpu_emergency_unregister_virt_callback(crash_vmclear_local_loaded_vmcss);
+	cpu_emergency_unregister_virt_callback(vmx_emergency_disable);
 
 	vmx_cleanup_l1d_flush();
 }
@@ -8576,7 +8578,7 @@ static int __init vmx_init(void)
 		pi_init_cpu(cpu);
 	}
 
-	cpu_emergency_register_virt_callback(crash_vmclear_local_loaded_vmcss);
+	cpu_emergency_register_virt_callback(vmx_emergency_disable);
 
 	vmx_check_vmcs12_offsets();
 
-- 
2.39.0.rc0.267.gcb52ba06e7-goog