[patch 2/2] KVM: x86: add option to advance tscdeadline hrtimer expiration

For the hrtimer which emulates the tscdeadline timer in the guest,
add an option to advance expiration, and busy spin on VM-entry waiting
for the actual expiration time to elapse.

This allows achieving low latencies in cyclictest (or any scenario
that requires strict timing of timer expiration).

Reduces cyclictest average latency by 50%.

Note: this option requires tuning to find the appropriate value
for a particular hardware/guest combination. One method is to measure
the average delay between apic_timer_fn and VM-entry.
Another method is to start with 1000ns and increase the value in, say,
500ns increments until the average cyclictest numbers stop decreasing.
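
For illustration only (not part of this patch), userspace could program
the advance via the new VM ioctl along the lines below. The helper name
set_tscdeadline_advance and the kvm_fd/vm_fd parameters are hypothetical;
only the ioctl, capability and struct come from this patch:

    /*
     * Minimal sketch: program a tscdeadline advance on a VM.
     * kvm_fd is the /dev/kvm fd, vm_fd the fd returned by KVM_CREATE_VM.
     */
    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int set_tscdeadline_advance(int kvm_fd, int vm_fd, __u32 ns)
    {
            struct kvm_tscdeadline_advance adv = {
                    .timer_advance = ns,    /* kernel rejects values above 50000 */
            };

            if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_TSCDEADLINE_ADVANCE) <= 0)
                    return -1;      /* capability not available */

            return ioctl(vm_fd, KVM_SET_TSCDEADLINE_ADVANCE, &adv);
    }

e.g. starting with set_tscdeadline_advance(kvm_fd, vm_fd, 1000) and
re-running cyclictest as described in the tuning note above.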

Signed-off-by: Marcelo Tosatti <mtosatti@xxxxxxxxxx>

Index: kvm/arch/x86/kvm/lapic.c
===================================================================
--- kvm.orig/arch/x86/kvm/lapic.c
+++ kvm/arch/x86/kvm/lapic.c
@@ -33,6 +33,7 @@
 #include <asm/page.h>
 #include <asm/current.h>
 #include <asm/apicdef.h>
+#include <asm/delay.h>
 #include <linux/atomic.h>
 #include <linux/jump_label.h>
 #include "kvm_cache_regs.h"
@@ -1073,6 +1074,7 @@ static void apic_timer_expired(struct kv
 {
 	struct kvm_vcpu *vcpu = apic->vcpu;
 	wait_queue_head_t *q = &vcpu->wq;
+	struct kvm_timer *ktimer = &apic->lapic_timer;
 
 	/*
 	 * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
@@ -1087,11 +1089,59 @@ static void apic_timer_expired(struct kv
 
 	if (waitqueue_active(q))
 		wake_up_interruptible(q);
+
+	if (apic_lvtt_tscdeadline(apic))
+		ktimer->expired_tscdeadline = ktimer->tscdeadline;
+}
+
+static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
+{
+	struct kvm_lapic *apic = vcpu->arch.apic;
+	u32 reg = kvm_apic_get_reg(apic, APIC_LVTT);
+
+	if (kvm_apic_hw_enabled(apic)) {
+		int vec = reg & APIC_VECTOR_MASK;
+
+		if (kvm_x86_ops->test_posted_interrupt)
+			return kvm_x86_ops->test_posted_interrupt(vcpu, vec);
+		else {
+			if (apic_test_vector(vec, apic->regs + APIC_ISR))
+				return true;
+		}
+	}
+	return false;
+}
+
+void wait_lapic_expire(struct kvm_vcpu *vcpu)
+{
+	struct kvm_lapic *apic = vcpu->arch.apic;
+	u64 guest_tsc, tsc_deadline;
+
+	if (!kvm_vcpu_has_lapic(vcpu))
+		return;
+
+	if (!apic_lvtt_tscdeadline(apic))
+		return;
+
+	if (!lapic_timer_int_injected(vcpu))
+		return;
+
+	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
+	guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+
+	while (guest_tsc < tsc_deadline) {
+		int delay = min(tsc_deadline - guest_tsc, 1000ULL);
+
+		ndelay(delay);
+		guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+	}
 }
 
 static void start_apic_timer(struct kvm_lapic *apic)
 {
 	ktime_t now;
+	struct kvm_arch *kvm_arch = &apic->vcpu->kvm->arch;
+
 	atomic_set(&apic->lapic_timer.pending, 0);
 
 	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
@@ -1137,6 +1187,7 @@ static void start_apic_timer(struct kvm_
 		/* lapic timer in tsc deadline mode */
 		u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
 		u64 ns = 0;
+		ktime_t expire;
 		struct kvm_vcpu *vcpu = apic->vcpu;
 		unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
 		unsigned long flags;
@@ -1149,10 +1200,14 @@ static void start_apic_timer(struct kvm_
 		now = apic->lapic_timer.timer.base->get_time();
 		guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
 		if (likely(tscdeadline > guest_tsc)) {
+			u32 advance = kvm_arch->lapic_tscdeadline_advance_ns;
+
 			ns = (tscdeadline - guest_tsc) * 1000000ULL;
 			do_div(ns, this_tsc_khz);
+			expire = ktime_add_ns(now, ns);
+			expire = ktime_sub_ns(expire, advance);
 			hrtimer_start(&apic->lapic_timer.timer,
-				ktime_add_ns(now, ns), HRTIMER_MODE_ABS);
+				      expire, HRTIMER_MODE_ABS);
 		} else
 			apic_timer_expired(apic);
 
Index: kvm/arch/x86/kvm/lapic.h
===================================================================
--- kvm.orig/arch/x86/kvm/lapic.h
+++ kvm/arch/x86/kvm/lapic.h
@@ -14,6 +14,7 @@ struct kvm_timer {
 	u32 timer_mode;
 	u32 timer_mode_mask;
 	u64 tscdeadline;
+	u64 expired_tscdeadline;
 	atomic_t pending;			/* accumulated triggered timers */
 };
 
@@ -170,4 +171,6 @@ static inline bool kvm_apic_has_events(s
 
 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
 
+void wait_lapic_expire(struct kvm_vcpu *vcpu);
+
 #endif
Index: kvm/arch/x86/kvm/x86.c
===================================================================
--- kvm.orig/arch/x86/kvm/x86.c
+++ kvm/arch/x86/kvm/x86.c
@@ -2742,6 +2742,7 @@ int kvm_vm_ioctl_check_extension(struct
 	case KVM_CAP_ASSIGN_DEV_IRQ:
 	case KVM_CAP_PCI_2_3:
 #endif
+	case KVM_CAP_TSCDEADLINE_ADVANCE:
 		r = 1;
 		break;
 	case KVM_CAP_COALESCED_MMIO:
@@ -4079,6 +4080,22 @@ long kvm_arch_vm_ioctl(struct file *filp
 		r = 0;
 		break;
 	}
+	case KVM_SET_TSCDEADLINE_ADVANCE: {
+		struct kvm_tscdeadline_advance adv;
+
+		r = -EFAULT;
+		if (copy_from_user(&adv, argp, sizeof(adv)))
+			goto out;
+
+		/* cap at 50us to avoid spinning for too long */
+		r = -EINVAL;
+		if (adv.timer_advance > 50000)
+			goto out;
+
+		kvm->arch.lapic_tscdeadline_advance_ns = adv.timer_advance;
+		r = 0;
+		break;
+	}
 
 	default:
 		r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
@@ -6311,6 +6328,7 @@ static int vcpu_enter_guest(struct kvm_v
 	}
 
 	trace_kvm_entry(vcpu->vcpu_id);
+	wait_lapic_expire(vcpu);
 	kvm_x86_ops->run(vcpu);
 
 	/*
Index: kvm/arch/x86/include/asm/kvm_host.h
===================================================================
--- kvm.orig/arch/x86/include/asm/kvm_host.h
+++ kvm/arch/x86/include/asm/kvm_host.h
@@ -612,6 +612,8 @@ struct kvm_arch {
 	u64 hv_hypercall;
 	u64 hv_tsc_page;
 
+	u32 lapic_tscdeadline_advance_ns;
+
 	#ifdef CONFIG_KVM_MMU_AUDIT
 	int audit_point;
 	#endif
Index: kvm/arch/x86/include/uapi/asm/kvm.h
===================================================================
--- kvm.orig/arch/x86/include/uapi/asm/kvm.h
+++ kvm/arch/x86/include/uapi/asm/kvm.h
@@ -277,6 +277,11 @@ struct kvm_reinject_control {
 	__u8 reserved[31];
 };
 
+struct kvm_tscdeadline_advance {
+	__u32 timer_advance;
+	__u32 reserved[3];
+};
+
 /* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
 #define KVM_VCPUEVENT_VALID_NMI_PENDING	0x00000001
 #define KVM_VCPUEVENT_VALID_SIPI_VECTOR	0x00000002
Index: kvm/include/uapi/linux/kvm.h
===================================================================
--- kvm.orig/include/uapi/linux/kvm.h
+++ kvm/include/uapi/linux/kvm.h
@@ -753,6 +753,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_PPC_FIXUP_HCALL 103
 #define KVM_CAP_PPC_ENABLE_HCALL 104
 #define KVM_CAP_CHECK_EXTENSION_VM 105
+#define KVM_CAP_TSCDEADLINE_ADVANCE 106
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1053,6 +1054,8 @@ struct kvm_s390_ucas_mapping {
 #define KVM_GET_DEVICE_ATTR	  _IOW(KVMIO,  0xe2, struct kvm_device_attr)
 #define KVM_HAS_DEVICE_ATTR	  _IOW(KVMIO,  0xe3, struct kvm_device_attr)
 
+#define KVM_SET_TSCDEADLINE_ADVANCE  _IOW(KVMIO,  0xe4, struct kvm_tscdeadline_advance)
+
 /*
  * ioctls for vcpu fds
  */

