From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>

KVM/X86 can change those callbacks without worrying about breaking other
archs.

Suggested-by: Sean Christopherson <seanjc@xxxxxxxxxx>
Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
---
 arch/x86/kvm/x86.c | 125 +++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 120 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0b112cd7de58..71e90d0f0da9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11841,6 +11841,124 @@ void kvm_arch_hardware_disable(void)
 	drop_user_return_notifiers();
 }
 
+static cpumask_t cpus_hardware_enabled = CPU_MASK_NONE;
+
+int kvm_arch_post_init_vm(struct kvm *kvm)
+{
+	return kvm_mmu_post_init_vm(kvm);
+}
+
+static int __hardware_enable(void)
+{
+	int cpu = raw_smp_processor_id();
+	int r;
+
+	WARN_ON_ONCE(preemptible());
+
+	if (cpumask_test_cpu(cpu, &cpus_hardware_enabled))
+		return 0;
+	r = kvm_arch_hardware_enable();
+	if (r)
+		pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
+	else
+		cpumask_set_cpu(cpu, &cpus_hardware_enabled);
+	return r;
+}
+
+static void hardware_enable(void *arg)
+{
+	atomic_t *failed = arg;
+
+	if (__hardware_enable())
+		atomic_inc(failed);
+}
+
+static void hardware_disable(void *junk)
+{
+	int cpu = raw_smp_processor_id();
+
+	WARN_ON_ONCE(preemptible());
+
+	if (!cpumask_test_cpu(cpu, &cpus_hardware_enabled))
+		return;
+	cpumask_clear_cpu(cpu, &cpus_hardware_enabled);
+	kvm_arch_hardware_disable();
+}
+
+void kvm_arch_pre_hardware_unsetup(void)
+{
+	on_each_cpu(hardware_disable, NULL, 1);
+}
+
+/*
+ * Called after the VM is otherwise initialized, but just before adding it to
+ * the vm_list.
+ */
+int kvm_arch_add_vm(struct kvm *kvm, int usage_count)
+{
+	atomic_t failed;
+	int r = 0;
+
+	if (usage_count != 1)
+		return 0;
+
+	atomic_set(&failed, 0);
+	on_each_cpu(hardware_enable, &failed, 1);
+
+	if (atomic_read(&failed)) {
+		r = -EBUSY;
+		goto err;
+	}
+
+	r = kvm_arch_post_init_vm(kvm);
+err:
+	if (r && usage_count == 1)
+		on_each_cpu(hardware_disable, NULL, 1);
+	return r;
+}
+
+int kvm_arch_del_vm(int usage_count)
+{
+	if (usage_count)
+		return 0;
+
+	on_each_cpu(hardware_disable, NULL, 1);
+	return 0;
+}
+
+int kvm_arch_online_cpu(unsigned int cpu, int usage_count)
+{
+	int r;
+
+	if (!usage_count)
+		return 0;
+
+	r = kvm_arch_check_processor_compat();
+	if (r)
+		return r;
+	return __hardware_enable();
+}
+
+int kvm_arch_offline_cpu(unsigned int cpu, int usage_count)
+{
+	if (usage_count)
+		hardware_disable(NULL);
+	return 0;
+}
+
+int kvm_arch_reboot(int val)
+{
+	on_each_cpu(hardware_disable, NULL, 1);
+	return NOTIFY_OK;
+}
+
+int kvm_arch_suspend(int usage_count)
+{
+	if (usage_count)
+		hardware_disable(NULL);
+	return 0;
+}
+
 void kvm_arch_resume(int usage_count)
 {
 	struct kvm *kvm;
@@ -11853,6 +11971,8 @@ void kvm_arch_resume(int usage_count)
 	if (!usage_count)
 		return;
 
+	if (kvm_arch_check_processor_compat())
+		return;
 	if (kvm_arch_hardware_enable())
 		return;
@@ -12104,11 +12224,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	return ret;
 }
 
-int kvm_arch_post_init_vm(struct kvm *kvm)
-{
-	return kvm_mmu_post_init_vm(kvm);
-}
-
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
 {
 	vcpu_load(vcpu);
-- 
2.25.1