Configure VMX's runtime hooks by modifying vmx_x86_ops directly instead
of using the global kvm_x86_ops.  This sets the stage for waiting until
after ->hardware_setup() to set kvm_x86_ops with the vendor's
implementation.

Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
---
 arch/x86/kvm/vmx/nested.c | 15 ++++++++-------
 arch/x86/kvm/vmx/nested.h |  3 ++-
 arch/x86/kvm/vmx/vmx.c    | 27 ++++++++++++++-------------
 3 files changed, 24 insertions(+), 21 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 7608924ee8c1..ebea848b86e9 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -6157,7 +6157,8 @@ void nested_vmx_hardware_unsetup(void)
 	}
 }
 
-__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
+__init int nested_vmx_hardware_setup(struct kvm_x86_ops *ops,
+				     int (*exit_handlers[])(struct kvm_vcpu *))
 {
 	int i;
 
@@ -6193,12 +6194,12 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
 	exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
 	exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;
 
-	kvm_x86_ops->check_nested_events = vmx_check_nested_events;
-	kvm_x86_ops->get_nested_state = vmx_get_nested_state;
-	kvm_x86_ops->set_nested_state = vmx_set_nested_state;
-	kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages;
-	kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
-	kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;
+	ops->check_nested_events = vmx_check_nested_events;
+	ops->get_nested_state = vmx_get_nested_state;
+	ops->set_nested_state = vmx_set_nested_state;
+	ops->get_vmcs12_pages = nested_get_vmcs12_pages;
+	ops->nested_enable_evmcs = nested_enable_evmcs;
+	ops->nested_get_evmcs_version = nested_get_evmcs_version;
 
 	return 0;
 }
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index fc874d4ead0f..e356871a7cfa 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -20,7 +20,8 @@ void vmx_leave_nested(struct kvm_vcpu *vcpu);
 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
 				bool apicv);
 void nested_vmx_hardware_unsetup(void);
-__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
+__init int nested_vmx_hardware_setup(struct kvm_x86_ops *ops,
+				     int (*exit_handlers[])(struct kvm_vcpu *));
 void nested_vmx_set_vmcs_shadowing_bitmap(void);
 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index e32d3b85ddcb..f3bd438b29bc 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7784,10 +7784,10 @@ static __init int hardware_setup(void)
 	 * using the APIC_ACCESS_ADDR VMCS field.
 	 */
 	if (!flexpriority_enabled)
-		kvm_x86_ops->set_apic_access_page_addr = NULL;
+		vmx_x86_ops.set_apic_access_page_addr = NULL;
 
 	if (!cpu_has_vmx_tpr_shadow())
-		kvm_x86_ops->update_cr8_intercept = NULL;
+		vmx_x86_ops.update_cr8_intercept = NULL;
 
 	if (enable_ept && !cpu_has_vmx_ept_2m_page())
 		kvm_disable_largepages();
@@ -7795,8 +7795,8 @@ static __init int hardware_setup(void)
 #if IS_ENABLED(CONFIG_HYPERV)
 	if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
 	    && enable_ept) {
-		kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb;
-		kvm_x86_ops->tlb_remote_flush_with_range =
+		vmx_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
+		vmx_x86_ops.tlb_remote_flush_with_range =
 			hv_remote_flush_tlb_with_range;
 	}
 #endif
@@ -7811,7 +7811,7 @@ static __init int hardware_setup(void)
 
 	if (!cpu_has_vmx_apicv()) {
 		enable_apicv = 0;
-		kvm_x86_ops->sync_pir_to_irr = NULL;
+		vmx_x86_ops.sync_pir_to_irr = NULL;
 	}
 
 	if (cpu_has_vmx_tsc_scaling()) {
@@ -7835,10 +7835,10 @@ static __init int hardware_setup(void)
 		enable_pml = 0;
 
 	if (!enable_pml) {
-		kvm_x86_ops->slot_enable_log_dirty = NULL;
-		kvm_x86_ops->slot_disable_log_dirty = NULL;
-		kvm_x86_ops->flush_log_dirty = NULL;
-		kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
+		vmx_x86_ops.slot_enable_log_dirty = NULL;
+		vmx_x86_ops.slot_disable_log_dirty = NULL;
+		vmx_x86_ops.flush_log_dirty = NULL;
+		vmx_x86_ops.enable_log_dirty_pt_masked = NULL;
 	}
 
 	if (!cpu_has_vmx_preemption_timer())
@@ -7866,9 +7866,9 @@ static __init int hardware_setup(void)
 	}
 
 	if (!enable_preemption_timer) {
-		kvm_x86_ops->set_hv_timer = NULL;
-		kvm_x86_ops->cancel_hv_timer = NULL;
-		kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit;
+		vmx_x86_ops.set_hv_timer = NULL;
+		vmx_x86_ops.cancel_hv_timer = NULL;
+		vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit;
 	}
 
 	kvm_set_posted_intr_wakeup_handler(wakeup_handler);
@@ -7884,7 +7884,8 @@ static __init int hardware_setup(void)
 		nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
 					   vmx_capability.ept, enable_apicv);
 
-		r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
+		r = nested_vmx_hardware_setup(&vmx_x86_ops,
+					      kvm_vmx_exit_handlers);
 		if (r)
 			return r;
 	}
-- 
2.24.1
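
For readers following along, here is a minimal, compilable sketch of the
pattern this patch sets the stage for: the vendor module configures hooks
on its own static ops struct (the role vmx_x86_ops plays above), and the
global pointer (the role kvm_x86_ops plays) is assigned only after
->hardware_setup() succeeds.  All names below (hv_ops, vendor_ops,
vendor_hardware_setup, ...) are hypothetical stand-ins, not KVM symbols.

/*
 * Illustrative sketch only -- not KVM code.  Models configuring hooks
 * on a vendor-owned static ops struct and publishing it globally late.
 */
#include <stdio.h>

struct hv_ops {
	int  (*hardware_setup)(void);
	void (*sync_pir_to_irr)(void);	/* optional hook */
};

/* Global pointer, set late (stand-in for kvm_x86_ops). */
static struct hv_ops *global_ops;

static void vendor_sync_pir_to_irr(void)
{
	puts("sync_pir_to_irr");
}

static int vendor_hardware_setup(void);

/* Vendor's static ops struct (stand-in for vmx_x86_ops). */
static struct hv_ops vendor_ops = {
	.hardware_setup  = vendor_hardware_setup,
	.sync_pir_to_irr = vendor_sync_pir_to_irr,
};

static int vendor_hardware_setup(void)
{
	int cpu_has_apicv = 0;	/* pretend the feature is unsupported */

	/*
	 * Configure runtime hooks on the vendor struct directly; the
	 * global pointer need not be valid yet, which is what modifying
	 * vmx_x86_ops instead of kvm_x86_ops buys in the patch above.
	 */
	if (!cpu_has_apicv)
		vendor_ops.sync_pir_to_irr = NULL;
	return 0;
}

int main(void)
{
	/* Publish the vendor ops globally only once setup succeeds. */
	if (vendor_ops.hardware_setup())
		return 1;
	global_ops = &vendor_ops;

	if (global_ops->sync_pir_to_irr)
		global_ops->sync_pir_to_irr();
	else
		puts("sync_pir_to_irr hook disabled at setup");
	return 0;
}

With this ordering, a hook NULLed out during hardware setup is never
observable through the global pointer, since nothing can dereference it
before the assignment in main() (->hardware_setup() in the real flow).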