Currently, after a VM boots with APICv enabled, it could be deactivated for various reasons (e.g. Hyper-V SynIC). Introduce KVM APICv deactivate bits along with a new variable, struct kvm_arch.apicv_deact_msk, to help keep track of why APICv is deactivated for each VM. Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx> --- arch/x86/include/asm/kvm_host.h | 5 +++++ arch/x86/kvm/svm.c | 3 +++ arch/x86/kvm/vmx/vmx.c | 4 ++++ arch/x86/kvm/x86.c | 22 +++++++++++++++++++++- 4 files changed, 33 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 843799b..1c05363 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -852,6 +852,8 @@ enum kvm_irqchip_mode { KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */ }; +#define APICV_DEACT_BIT_DISABLE 0 + struct kvm_arch { unsigned long n_used_mmu_pages; unsigned long n_requested_mmu_pages; @@ -881,6 +883,7 @@ struct kvm_arch { struct kvm_apic_map *apic_map; bool apic_access_page_done; + unsigned long apicv_deact_msk; gpa_t wall_clock; @@ -1416,6 +1419,8 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception); void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu); +bool kvm_apicv_activated(struct kvm *kvm); +void kvm_apicv_init(struct kvm *kvm, bool enable); int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 7090306..a0caf66 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1985,6 +1985,9 @@ static int avic_vm_init(struct kvm *kvm) hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id); spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags); + /* Enable KVM APICv support */ + kvm_apicv_init(kvm, true); + return 0; free_avic: diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index e4faa00..28b97fb 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6775,6 +6775,10 
@@ static int vmx_vm_init(struct kvm *kvm) break; } } + + /* Enable KVM APICv support */ + kvm_apicv_init(kvm, vmx_get_enable_apicv(kvm)); + return 0; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2341f48..70a70a1 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7201,6 +7201,21 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu) kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu); } +bool kvm_apicv_activated(struct kvm *kvm) +{ + return (READ_ONCE(kvm->arch.apicv_deact_msk) == 0); +} +EXPORT_SYMBOL_GPL(kvm_apicv_activated); + +void kvm_apicv_init(struct kvm *kvm, bool enable) +{ + if (enable) + clear_bit(APICV_DEACT_BIT_DISABLE, &kvm->arch.apicv_deact_msk); + else + set_bit(APICV_DEACT_BIT_DISABLE, &kvm->arch.apicv_deact_msk); +} +EXPORT_SYMBOL_GPL(kvm_apicv_init); + static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id) { struct kvm_vcpu *target = NULL; @@ -9217,13 +9232,15 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) goto fail_free_pio_data; if (irqchip_in_kernel(vcpu->kvm)) { - vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu->kvm); r = kvm_create_lapic(vcpu, lapic_timer_advance_ns); if (r < 0) goto fail_mmu_destroy; } else static_key_slow_inc(&kvm_no_apic_vcpu); + if (irqchip_in_kernel(vcpu->kvm) && kvm_apicv_activated(vcpu->kvm)) + vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu->kvm); + vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, GFP_KERNEL_ACCOUNT); if (!vcpu->arch.mce_banks) { @@ -9322,6 +9339,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm_page_track_init(kvm); kvm_mmu_init_vm(kvm); + /* Default to APICv disable */ + kvm_apicv_init(kvm, false); + if (kvm_x86_ops->vm_init) return kvm_x86_ops->vm_init(kvm); -- 1.8.3.1