Re: [PATCH v2 1/2] KVM: x86: Use vector-hashing to deliver lowest-priority interrupts

On 2015/12/16 9:37, Feng Wu wrote:
Use vector-hashing to deliver lowest-priority interrupts. As an
example, modern Intel CPUs in server platforms use this method to
handle lowest-priority interrupts.

Signed-off-by: Feng Wu <feng.wu@xxxxxxxxx>
---
  arch/x86/kvm/irq_comm.c | 27 ++++++++++++++++++-----
  arch/x86/kvm/lapic.c    | 57 ++++++++++++++++++++++++++++++++++++++++---------
  arch/x86/kvm/lapic.h    |  2 ++
  arch/x86/kvm/x86.c      |  9 ++++++++
  arch/x86/kvm/x86.h      |  1 +
  5 files changed, 81 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 84b96d3..c8c5f61 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -32,6 +32,7 @@
  #include "ioapic.h"

  #include "lapic.h"
+#include "x86.h"

  static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
  			   struct kvm *kvm, int irq_source_id, int level,
@@ -53,8 +54,10 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
  int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
  		struct kvm_lapic_irq *irq, unsigned long *dest_map)
  {
-	int i, r = -1;
+	int i, r = -1, idx = 0;
  	struct kvm_vcpu *vcpu, *lowest = NULL;
+	unsigned long dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+	unsigned int dest_vcpus = 0;

  	if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
  			kvm_lowest_prio_delivery(irq)) {
@@ -65,6 +68,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
  	if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
  		return r;

+	memset(dest_vcpu_bitmap, 0, sizeof(dest_vcpu_bitmap));
+
  	kvm_for_each_vcpu(i, vcpu, kvm) {
  		if (!kvm_apic_present(vcpu))
  			continue;
@@ -78,13 +83,25 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
  				r = 0;
  			r += kvm_apic_set_irq(vcpu, irq, dest_map);
  		} else if (kvm_lapic_enabled(vcpu)) {
-			if (!lowest)
-				lowest = vcpu;
-			else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
-				lowest = vcpu;
+			if (!kvm_vector_hashing_enabled()) {
+				if (!lowest)
+					lowest = vcpu;
+				else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
+					lowest = vcpu;
+			} else {
+				__set_bit(vcpu->vcpu_id, dest_vcpu_bitmap);
+				dest_vcpus++;
+			}
  		}
  	}

+	if (dest_vcpus != 0) {
+		idx = kvm_vector_2_index(irq->vector, dest_vcpus,
+					 dest_vcpu_bitmap, KVM_MAX_VCPUS);
+
+		lowest = kvm_get_vcpu(kvm, idx - 1);
+	}
+
  	if (lowest)
  		r = kvm_apic_set_irq(lowest, irq, dest_map);

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index ecd4ea1..e29001f 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -678,6 +678,22 @@ bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
  	}
  }

+int kvm_vector_2_index(u32 vector, u32 dest_vcpus,
+		       const unsigned long *bitmap, u32 bitmap_size)
+{
+	u32 mod;
+	int i, idx = 0;
+
+	mod = vector % dest_vcpus;
+
+	for (i = 0; i <= mod; i++) {
+		idx = find_next_bit(bitmap, bitmap_size, idx) + 1;
+		BUG_ON(idx > bitmap_size);
+	}
+
+	return idx;
+}
+
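
If I read kvm_vector_2_index() right, it returns a 1-based index: the
(vector % dest_vcpus + 1)-th set bit of the candidate bitmap. A small
standalone sketch of the same selection (illustration only — pick_dest_bit()
and the numbers are made up, not part of the patch; needs <linux/bitops.h>):

	/*
	 * Mirrors kvm_vector_2_index(), but returns the 0-based bit
	 * position instead of a 1-based index.
	 */
	static int pick_dest_bit(u32 vector, unsigned long bitmap, u32 dest_vcpus)
	{
		u32 mod = vector % dest_vcpus;
		int bit;

		for_each_set_bit(bit, &bitmap, BITS_PER_LONG)
			if (mod-- == 0)
				return bit;
		return -1;
	}

	/*
	 * e.g. bitmap = 0xa6 (bits 1, 2, 5, 7 set), vector = 0x31:
	 * 0x31 % 4 = 1, so the second set bit is chosen -> bit 2,
	 * matching kvm_vector_2_index() returning 3 (idx - 1 == 2).
	 */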
  bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
  		struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map)
  {
@@ -731,17 +747,38 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
  		dst = map->logical_map[cid];

  		if (kvm_lowest_prio_delivery(irq)) {
-			int l = -1;
-			for_each_set_bit(i, &bitmap, 16) {
-				if (!dst[i])
-					continue;
-				if (l < 0)
-					l = i;
-				else if (kvm_apic_compare_prio(dst[i]->vcpu, dst[l]->vcpu) < 0)
-					l = i;
+			if (!kvm_vector_hashing_enabled()) {
+				int l = -1;
+				for_each_set_bit(i, &bitmap, 16) {
+					if (!dst[i])
+						continue;
+					if (l < 0)
+						l = i;
+					else if (kvm_apic_compare_prio(dst[i]->vcpu, dst[l]->vcpu) < 0)
+						l = i;
+				}
+				bitmap = (l >= 0) ? 1 << l : 0;
+			} else {
+				int idx = 0;
+				unsigned int dest_vcpus = 0;
+
+				for_each_set_bit(i, &bitmap, 16) {
+					if (!dst[i] && !kvm_lapic_enabled(dst[i]->vcpu)) {

It should be OR (||), not AND (&&): with &&, dst[i]->vcpu is dereferenced
when dst[i] is NULL, and an entry whose LAPIC is disabled is never cleared
from the bitmap.
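Something like this (untested) is what I'd expect:

				for_each_set_bit(i, &bitmap, 16) {
					/* drop candidates that are absent or have a disabled LAPIC */
					if (!dst[i] || !kvm_lapic_enabled(dst[i]->vcpu))
						__clear_bit(i, &bitmap);
				}

(the continue is no longer needed once the bit is simply cleared).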

+						__clear_bit(i, &bitmap);
+						continue;
+					}
+				}
+
+				dest_vcpus = hweight16(bitmap);
+
+				if (dest_vcpus != 0) {
+					idx = kvm_vector_2_index(irq->vector,
+						dest_vcpus, &bitmap, 16);
+
+					bitmap = 0;
+					__set_bit(idx-1, &bitmap);
+				}
  			}
-
-			bitmap = (l >= 0) ? 1 << l : 0;
  		}
  	}

diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index fde8e35d..6890ef0 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -170,4 +170,6 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu);

  bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
  			struct kvm_vcpu **dest_vcpu);
+int kvm_vector_2_index(u32 vector, u32 dest_vcpus,
+		       const unsigned long *bitmap, u32 bitmap_size);
  #endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4a6eff1..fb47730 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -118,6 +118,9 @@ module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
  unsigned int lapic_timer_advance_ns = 0;
  module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);

+bool __read_mostly enable_vector_hashing = 1;
+module_param(enable_vector_hashing, bool, S_IRUGO);
+
  static bool backwards_tsc_observed = false;

  #define KVM_NR_SHARED_MSRS 16
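
A usage note, assuming the usual build where x86.c ends up in kvm.ko: the
new knob would then be set at module load time, e.g.

	modprobe kvm enable_vector_hashing=0

and show up read-only (S_IRUGO) under
/sys/module/kvm/parameters/enable_vector_hashing.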
@@ -8165,6 +8168,12 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
  	return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set);
  }

+bool kvm_vector_hashing_enabled(void)
+{
+	return enable_vector_hashing;
+}
+EXPORT_SYMBOL_GPL(kvm_vector_hashing_enabled);
+
  EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
  EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
  EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index f2afa5f..04bd0f9 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -179,6 +179,7 @@ int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
  int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
  bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
  					  int page_num);
+bool kvm_vector_hashing_enabled(void);

  #define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
  				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \



--
best regards
yang


