Move the CPU compatibility checks to pure x86 code, i.e. drop x86's use
of the common kvm_arch_check_processor_compat() arch hook.  x86 is the
only architecture that "needs" to do per-CPU compatibility checks;
moving the logic to x86 will allow dropping the common code, and will
also give x86 more control over when/how the compatibility checks are
performed, e.g. TDX will need to enable hardware (do VMXON) in order to
perform compatibility checks.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
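Note on the pattern (not part of the commit message): the new x86 code
runs the compatibility check on every online CPU via IPI, funneling the
result back through a pointer in a shared struct, and the callback runs
with IRQs disabled on the target CPU.  A minimal standalone sketch of
that pattern, with hypothetical names (compat_check, do_cpu_check,
compat_demo_*) and only the stock kernel SMP API:

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/smp.h>

struct compat_check {
	int *ret;
};

static void do_cpu_check(void *data)
{
	struct compat_check *c = data;

	/*
	 * Runs from the IPI handler on the target CPU, i.e. with IRQs
	 * disabled.  A real check would compare this CPU's features
	 * against the boot CPU's here.
	 */
	*c->ret = 0;
}

static int __init compat_demo_init(void)
{
	struct compat_check c;
	int r = 0, cpu;

	c.ret = &r;

	/* Stabilize the online mask against concurrent CPU hotplug. */
	cpus_read_lock();
	for_each_online_cpu(cpu) {
		/* wait=1: 'r' is valid once the call returns. */
		smp_call_function_single(cpu, do_cpu_check, &c, 1);
		if (r < 0)
			break;
	}
	cpus_read_unlock();

	return r;
}

static void __exit compat_demo_exit(void)
{
}

module_init(compat_demo_init);
module_exit(compat_demo_exit);
MODULE_LICENSE("GPL");
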
 arch/x86/kvm/svm/svm.c |  2 +-
 arch/x86/kvm/vmx/vmx.c |  2 +-
 arch/x86/kvm/x86.c     | 49 ++++++++++++++++++++++++++++++++----------
 3 files changed, 40 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f48d07bfc3d7..368b4db4b240 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5144,7 +5144,7 @@ static int __init svm_init(void)
 	 * Common KVM initialization _must_ come last, after this, /dev/kvm is
 	 * exposed to userspace!
 	 */
-	r = kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
+	r = kvm_init(NULL, sizeof(struct vcpu_svm),
 		     __alignof__(struct vcpu_svm), THIS_MODULE);
 	if (r)
 		goto err_kvm_init;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 81690fce0eb1..26baaccb659a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8562,7 +8562,7 @@ static int __init vmx_init(void)
 	 * Common KVM initialization _must_ come last, after this, /dev/kvm is
 	 * exposed to userspace!
 	 */
-	r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
+	r = kvm_init(NULL, sizeof(struct vcpu_vmx),
 		     __alignof__(struct vcpu_vmx), THIS_MODULE);
 	if (r)
 		goto err_kvm_init;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2b4530a33298..94831f1a1d04 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9271,10 +9271,36 @@ static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
 	kvm_pmu_ops_update(ops->pmu_ops);
 }
 
+struct kvm_cpu_compat_check {
+	struct kvm_x86_init_ops *ops;
+	int *ret;
+};
+
+static int kvm_x86_check_processor_compatibility(struct kvm_x86_init_ops *ops)
+{
+	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+
+	WARN_ON(!irqs_disabled());
+
+	if (__cr4_reserved_bits(cpu_has, c) !=
+	    __cr4_reserved_bits(cpu_has, &boot_cpu_data))
+		return -EIO;
+
+	return ops->check_processor_compatibility();
+}
+
+static void kvm_x86_check_cpu_compat(void *data)
+{
+	struct kvm_cpu_compat_check *c = data;
+
+	*c->ret = kvm_x86_check_processor_compatibility(c->ops);
+}
+
 static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 {
+	struct kvm_cpu_compat_check c;
 	u64 host_pat;
-	int r;
+	int r, cpu;
 
 	if (kvm_x86_ops.hardware_enable) {
 		pr_err("kvm: already loaded vendor module '%s'\n", kvm_x86_ops.name);
@@ -9354,6 +9380,14 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 	if (r != 0)
 		goto out_mmu_exit;
 
+	c.ret = &r;
+	c.ops = ops;
+	for_each_online_cpu(cpu) {
+		smp_call_function_single(cpu, kvm_x86_check_cpu_compat, &c, 1);
+		if (r < 0)
+			goto out_hardware_unsetup;
+	}
+
 	/*
	 * Point of no return!  DO NOT add error paths below this point unless
 	 * absolutely necessary, as most operations from this point forward
@@ -9396,6 +9430,8 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 	kvm_init_msr_list();
 	return 0;
 
+out_hardware_unsetup:
+	ops->runtime_ops->hardware_unsetup();
 out_mmu_exit:
 	kvm_mmu_vendor_module_exit();
 out_free_percpu:
@@ -12002,16 +12038,7 @@ void kvm_arch_hardware_disable(void)
 
 int kvm_arch_check_processor_compat(void *opaque)
 {
-	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
-	struct kvm_x86_init_ops *ops = opaque;
-
-	WARN_ON(!irqs_disabled());
-
-	if (__cr4_reserved_bits(cpu_has, c) !=
-	    __cr4_reserved_bits(cpu_has, &boot_cpu_data))
-		return -EIO;
-
-	return ops->check_processor_compatibility();
+	return 0;
 }
 
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
-- 
2.38.1.431.g37b22c650d-goog