Certain types of interrupt cannot be supported by AMD AVIC hardware.
Therefore, there is a need to temporarily deactivate AVIC and fall
back to the legacy interrupt injection mechanism. Since AMD AVIC
requires all vCPUs to be operating in the same mode, introduce a new
interface to request that all vCPUs activate/deactivate APICV at
run-time.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
---
 arch/x86/include/asm/kvm_host.h |  8 ++++++++
 arch/x86/kvm/x86.c              | 36 +++++++++++++++++++++++++++++++++
 2 files changed, 44 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4660ce90de7f..05b5778c769e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -78,6 +78,10 @@
 #define KVM_REQ_HV_STIMER		KVM_ARCH_REQ(22)
 #define KVM_REQ_LOAD_EOI_EXITMAP	KVM_ARCH_REQ(23)
 #define KVM_REQ_GET_VMCS12_PAGES	KVM_ARCH_REQ(24)
+#define KVM_REQ_APICV_ACTIVATE \
+	KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_APICV_DEACTIVATE \
+	KVM_ARCH_REQ_FLAGS(26, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 
 #define CR0_RESERVED_BITS                                               \
 	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
@@ -1536,6 +1540,10 @@ bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
 
 void kvm_make_mclock_inprogress_request(struct kvm *kvm);
 void kvm_make_scan_ioapic_request(struct kvm *kvm);
+void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
+void kvm_vcpu_activate_apicv(struct kvm_vcpu *vcpu);
+void kvm_make_apicv_activate_request(struct kvm *kvm);
+void kvm_make_apicv_deactivate_request(struct kvm *kvm);
 
 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 				     struct kvm_async_pf *work);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 02c8e095a239..e93e2ef923b4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7045,11 +7045,19 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
 	kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
 }
 
+void kvm_vcpu_activate_apicv(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.apicv_active = true;
+	kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_activate_apicv);
+
 void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.apicv_active = false;
 	kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
 }
+EXPORT_SYMBOL_GPL(kvm_vcpu_deactivate_apicv);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
@@ -7541,6 +7549,30 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm)
 	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
 }
 
+void kvm_make_apicv_activate_request(struct kvm *kvm)
+{
+	int i;
+	struct kvm_vcpu *v;
+
+	kvm_for_each_vcpu(i, v, kvm)
+		kvm_clear_request(KVM_REQ_APICV_DEACTIVATE, v);
+
+	kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_ACTIVATE);
+}
+EXPORT_SYMBOL_GPL(kvm_make_apicv_activate_request);
+
+void kvm_make_apicv_deactivate_request(struct kvm *kvm)
+{
+	int i;
+	struct kvm_vcpu *v;
+
+	kvm_for_each_vcpu(i, v, kvm)
+		kvm_clear_request(KVM_REQ_APICV_ACTIVATE, v);
+
+	kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_DEACTIVATE);
+}
+EXPORT_SYMBOL_GPL(kvm_make_apicv_deactivate_request);
+
 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
 	if (!kvm_apic_present(vcpu))
@@ -7727,6 +7759,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		 */
 		if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
 			kvm_hv_process_stimers(vcpu);
+		if (kvm_check_request(KVM_REQ_APICV_ACTIVATE, vcpu))
+			kvm_vcpu_activate_apicv(vcpu);
+		if (kvm_check_request(KVM_REQ_APICV_DEACTIVATE, vcpu))
+			kvm_vcpu_deactivate_apicv(vcpu);
 	}
 
 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
-- 
2.17.1
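
Usage note (not part of the patch): a caller on the SVM side could
toggle AVIC through the new interface roughly as sketched below. The
helper name svm_set_avic_mode() and its trigger condition are
hypothetical; only the two exported request functions come from this
patch.

	/*
	 * Illustrative sketch only, not part of this patch.
	 * svm_set_avic_mode() is a hypothetical caller; the request
	 * helpers it invokes are the ones added by this patch.
	 */
	static void svm_set_avic_mode(struct kvm *kvm, bool activate)
	{
		/*
		 * AVIC requires every vCPU to run in the same APICV
		 * mode, so the mode switch is requested on all vCPUs
		 * at once rather than flipped locally on one vCPU.
		 */
		if (activate)
			kvm_make_apicv_activate_request(kvm);
		else
			kvm_make_apicv_deactivate_request(kvm);
	}

Because both requests are defined with KVM_REQUEST_WAIT |
KVM_REQUEST_NO_WAKEUP, kvm_make_all_cpus_request() should wait until
every running vCPU has acknowledged the kick, while blocked vCPUs are
left asleep and service the request in vcpu_enter_guest() before they
next enter the guest.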