[PATCH 2/5] KVM: x86, apicv: adjust for virtual interrupt delivery

Virtual interrupt delivery removes the need for KVM to inject vAPIC
interrupts manually; the hardware takes care of delivery entirely.
This requires some awareness in the existing interrupt injection path:

  - For a pending interrupt, instead of injecting it directly, we may
    only need to update architecture-specific indicators before
    resuming the guest.

  - A pending interrupt that is masked by the ISR must also be
    considered in that update, since the hardware decides when to
    deliver it. The current has_interrupt and get_interrupt helpers
    only return a vector that is valid from the injection point of
    view.

Three new interfaces are introduced accordingly:
        kvm_apic_get_highest_irr
        kvm_cpu_has_interrupt_apic_vid
        kvm_cpu_get_interrupt_apic_vid
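
Illustrative only, not part of this patch: the VMX wiring for these
hooks comes later in the series. Roughly, a backend that supports
virtual interrupt delivery would return non-zero from its
has_virtual_interrupt_delivery() hook and use update_irq() to push the
highest pending IRR vector into the hardware indicator before VM entry.
The names vmx_update_irq, GUEST_INTR_STATUS and enable_apicv below are
placeholders for this sketch, not interfaces added by this patch:

static void vmx_update_irq(struct kvm_vcpu *vcpu)
{
	int max_irr = kvm_apic_get_highest_irr(vcpu);
	u16 status;

	if (!enable_apicv || max_irr == -1)
		return;

	/*
	 * Instead of injecting the vector through VM_ENTRY_INTR_INFO,
	 * raise RVI (the low byte of the guest interrupt status field)
	 * to the highest pending vector and let the CPU deliver it.
	 */
	status = vmcs_read16(GUEST_INTR_STATUS);
	if ((status & 0xff) < max_irr)
		vmcs_write16(GUEST_INTR_STATUS,
			     (status & 0xff00) | (u16)max_irr);
}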

Signed-off-by: Kevin Tian <kevin.tian@xxxxxxxxx>
Signed-off-by: Jiongxi Li <jiongxi.li@xxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h |    2 +
 arch/x86/kvm/irq.c              |   44 +++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/lapic.c            |   13 +++++++++++
 arch/x86/kvm/lapic.h            |   10 ++++++++
 arch/x86/kvm/svm.c              |    6 +++++
 arch/x86/kvm/vmx.c              |    6 +++++
 arch/x86/kvm/x86.c              |   22 +++++++++++++++++-
 7 files changed, 101 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 09155d6..ef74df5 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -669,6 +669,8 @@ struct kvm_x86_ops {
 	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
 	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
 	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
+	int (*has_virtual_interrupt_delivery)(struct kvm_vcpu *vcpu);
+	void (*update_irq)(struct kvm_vcpu *vcpu);
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*get_tdp_level)(void);
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 7e06ba1..abd3831 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -60,6 +60,29 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
 EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
 
 /*
+ * Check if there is a pending interrupt, without
+ * acking it. This _apic_vid variant is used when the
+ * hardware supports APIC virtualization with virtual
+ * interrupt delivery. In that case KVM does not need
+ * to poll for pending APIC interrupts, so this
+ * interface only polls pending interrupts from
+ * non-APIC sources.
+ */
+int kvm_cpu_has_interrupt_apic_vid(struct kvm_vcpu *v)
+{
+	struct kvm_pic *s;
+
+	if (!irqchip_in_kernel(v->kvm))
+		return v->arch.interrupt.pending;
+
+	if (kvm_apic_accept_pic_intr(v)) {
+		s = pic_irqchip(v->kvm);	/* PIC */
+		return s->output;
+	} else
+		return 0;
+}
+
+/*
  * Read pending interrupt vector and intack.
  */
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
@@ -82,6 +105,27 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
 }
 EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
 
+/*
+ * Read the pending interrupt vector and ack it.
+ * Similar to kvm_cpu_has_interrupt_apic_vid, this only
+ * handles interrupts from non-APIC sources.
+ */
+int kvm_cpu_get_interrupt_apic_vid(struct kvm_vcpu *v)
+{
+	struct kvm_pic *s;
+	int vector = -1;
+
+	if (!irqchip_in_kernel(v->kvm))
+		return v->arch.interrupt.nr;
+
+	if (kvm_apic_accept_pic_intr(v)) {
+		s = pic_irqchip(v->kvm);
+		s->output = 0;		/* PIC */
+		vector = kvm_pic_read_irq(v->kvm);
+	}
+	return vector;
+}
+
 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
 {
 	kvm_inject_apic_timer_irqs(vcpu);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 4a6d3a4..c47f3d3 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1310,6 +1310,8 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 	kvm_lapic_reset(vcpu);
 	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
 
+	if (kvm_x86_ops->has_virtual_interrupt_delivery(vcpu))
+		apic->vid_enabled = true;
 	return 0;
 nomem_free_apic:
 	kfree(apic);
@@ -1333,6 +1335,17 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
 	return highest_irr;
 }
 
+int kvm_apic_get_highest_irr(struct kvm_vcpu *vcpu)
+{
+	struct kvm_lapic *apic = vcpu->arch.apic;
+
+	if (!apic || !apic_enabled(apic))
+		return -1;
+
+	return apic_find_highest_irr(apic);
+}
+EXPORT_SYMBOL_GPL(kvm_apic_get_highest_irr);
+
 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
 {
 	u32 lvt0 = apic_get_reg(vcpu->arch.apic, APIC_LVT0);
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index cd4875e..4e3b435 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -13,6 +13,7 @@ struct kvm_lapic {
 	u32 divide_count;
 	struct kvm_vcpu *vcpu;
 	bool irr_pending;
+	bool vid_enabled;
 	/* Number of bits set in ISR. */
 	s16 isr_count;
 	/* The highest vector set in ISR; if -1 - invalid, must scan ISR. */
@@ -32,6 +33,9 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu);
 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
+int kvm_cpu_has_interrupt_apic_vid(struct kvm_vcpu *v);
+int kvm_cpu_get_interrupt_apic_vid(struct kvm_vcpu *v);
+int kvm_apic_get_highest_irr(struct kvm_vcpu *vcpu);
 void kvm_lapic_reset(struct kvm_vcpu *vcpu);
 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
@@ -72,5 +76,11 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
 	return vcpu->arch.hv_vapic & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE;
 }
 
+static inline bool kvm_apic_vid_enabled(struct kvm_vcpu *vcpu)
+{
+	struct kvm_lapic *apic = vcpu->arch.apic;
+	return apic && apic->vid_enabled;
+}
+
 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
 #endif
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index baead95..65a301a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3580,6 +3580,11 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 }
 
+static int svm_has_virtual_interrupt_delivery(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -4305,6 +4310,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.enable_nmi_window = enable_nmi_window,
 	.enable_irq_window = enable_irq_window,
 	.update_cr8_intercept = update_cr8_intercept,
+	.has_virtual_interrupt_delivery = svm_has_virtual_interrupt_delivery,
 
 	.set_tss_addr = svm_set_tss_addr,
 	.get_tdp_level = get_npt_level,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3d92277..4a26d04 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6080,6 +6080,11 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 	vmcs_write32(TPR_THRESHOLD, irr);
 }
 
+static int vmx_has_virtual_interrupt_delivery(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 {
 	u32 exit_intr_info;
@@ -7339,6 +7344,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.enable_nmi_window = enable_nmi_window,
 	.enable_irq_window = enable_irq_window,
 	.update_cr8_intercept = update_cr8_intercept,
+	.has_virtual_interrupt_delivery = vmx_has_virtual_interrupt_delivery,
 
 	.set_tss_addr = vmx_set_tss_addr,
 	.get_tdp_level = get_ept_level,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 148ed66..47ac609 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5194,6 +5194,13 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
 			vcpu->arch.nmi_injected = true;
 			kvm_x86_ops->set_nmi(vcpu);
 		}
+	} else if (kvm_apic_vid_enabled(vcpu)) {
+		if (kvm_cpu_has_interrupt_apic_vid(vcpu) &&
+		    kvm_x86_ops->interrupt_allowed(vcpu)) {
+			kvm_queue_interrupt(vcpu,
+				kvm_cpu_get_interrupt_apic_vid(vcpu), false);
+			kvm_x86_ops->set_irq(vcpu);
+		}
 	} else if (kvm_cpu_has_interrupt(vcpu)) {
 		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
@@ -5293,16 +5300,27 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	}
 
 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
+		/* update arch-specific hints for virtual interrupt delivery */
+		if (kvm_apic_vid_enabled(vcpu))
+			kvm_x86_ops->update_irq(vcpu);
+
 		inject_pending_event(vcpu);
 
 		/* enable NMI/IRQ window open exits if needed */
 		if (vcpu->arch.nmi_pending)
 			kvm_x86_ops->enable_nmi_window(vcpu);
-		else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+		else if (kvm_apic_vid_enabled(vcpu)) {
+			if (kvm_cpu_has_interrupt_apic_vid(vcpu))
+				kvm_x86_ops->enable_irq_window(vcpu);
+		} else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
 			kvm_x86_ops->enable_irq_window(vcpu);
 
 		if (kvm_lapic_enabled(vcpu)) {
-			update_cr8_intercept(vcpu);
+			/* no TPR threshold update is needed when virtual
+			 * interrupt delivery is enabled
+			 */
+			if (!kvm_apic_vid_enabled(vcpu))
+				update_cr8_intercept(vcpu);
 			kvm_lapic_sync_to_vapic(vcpu);
 		}
 	}
-- 
1.7.1
