Initialize constant VMCS state in vmx_vcpu_reset() instead of in
vmx_vcpu_create(), which allows for the removal of the open coded "vCPU
load" sequence since ->vcpu_reset() is invoked while the vCPU is properly
loaded (which is the entire point of vCPU reset...).

Deferring initialization is effectively a nop, as it's impossible to
safely access the VMCS between the current call site and its new home:
both the vCPU and the pCPU are put immediately after init_vmcs(), i.e.
the VMCS isn't guaranteed to be loaded.

Note, task preemption is not a problem: vmx_sched_in() _can't_ touch the
VMCS because ->sched_in() is invoked before the vCPU, and thus the VMCS,
is reloaded. I.e. the preemption path also can't consume VMCS state.

Reviewed-by: Reiji Watanabe <reijiw@xxxxxxxxxx>
Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 arch/x86/kvm/vmx/vmx.c | 16 +++++-----------
 1 file changed, 5 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 0f5e97a904e5..26c0e776827c 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4293,10 +4293,6 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
 
 #define VMX_XSS_EXIT_BITMAP 0
 
-/*
- * Noting that the initialization of Guest-state Area of VMCS is in
- * vmx_vcpu_reset().
- */
 static void init_vmcs(struct vcpu_vmx *vmx)
 {
 	if (nested)
@@ -4395,6 +4391,9 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	u32 eax, dummy;
 	u64 cr0;
 
+	if (!init_event)
+		init_vmcs(vmx);
+
 	vmx->rmode.vm86_active = 0;
 	vmx->spec_ctrl = 0;
 
@@ -6782,7 +6781,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vmx_uret_msr *tsx_ctrl;
 	struct vcpu_vmx *vmx;
-	int i, cpu, err;
+	int i, err;
 
 	BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
 	vmx = to_vmx(vcpu);
@@ -6844,12 +6843,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 	vmx->msr_bitmap_mode = 0;
 
 	vmx->loaded_vmcs = &vmx->vmcs01;
-	cpu = get_cpu();
-	vmx_vcpu_load(vcpu, cpu);
-	vcpu->cpu = cpu;
-	init_vmcs(vmx);
-	vmx_vcpu_put(vcpu);
-	put_cpu();
+
 	if (cpu_need_virtualize_apic_accesses(vcpu)) {
 		err = alloc_apic_access_page(vcpu->kvm);
 		if (err)
-- 
2.32.0.93.g670b81a890-goog
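
For reference, a rough sketch of the generic x86 vCPU creation path
(paraphrased from memory of this era's arch/x86/kvm/x86.c, error handling
and unrelated setup elided, not verbatim kernel code) illustrates why
->vcpu_reset() always runs with the vCPU loaded, and thus why init_vmcs()
can safely VMWRITE from vmx_vcpu_reset():

	/*
	 * Sketch only: the common x86 code wraps the reset callback in
	 * vcpu_load()/vcpu_put(), so by the time vmx_vcpu_reset() runs,
	 * the vmcs01 allocated by vmx_create_vcpu() is already loaded on
	 * the current pCPU.
	 */
	int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
	{
		int r;

		/* ... allocations and setup elided ... */

		/* vmx_create_vcpu(): no VMCS access needed anymore */
		r = static_call(kvm_x86_vcpu_create)(vcpu);
		if (r)
			return r;

		vcpu_load(vcpu);		/* loads vmcs01 on this pCPU */
		kvm_vcpu_reset(vcpu, false);	/* ->vcpu_reset() => vmx_vcpu_reset() => init_vmcs() */
		vcpu_put(vcpu);			/* puts the vCPU again */
		return 0;
	}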