On 10/12/2014 21:57, Marcelo Tosatti wrote: > For the hrtimer which emulates the tscdeadline timer in the guest, > add an option to advance expiration, and busy spin on VM-entry waiting > for the actual expiration time to elapse. > > This allows achieving low latencies in cyclictest (or any scenario > which requires strict timing regarding timer expiration). > > Reduces cyclictest avg latency by 50%. > > Note: this option requires tuning to find the appropriate value > for a particular hardware/guest combination. One method is to measure the > average delay between apic_timer_fn and VM-entry. > Another method is to start with 1000ns, and increase the value > in say 500ns increments until avg cyclictest numbers stop decreasing. What values are you using in practice for the parameter? > Signed-off-by: Marcelo Tosatti <mtosatti@xxxxxxxxxx> > > Index: kvm/arch/x86/kvm/lapic.c > =================================================================== > --- kvm.orig/arch/x86/kvm/lapic.c > +++ kvm/arch/x86/kvm/lapic.c > @@ -33,6 +33,7 @@ > #include <asm/page.h> > #include <asm/current.h> > #include <asm/apicdef.h> > +#include <asm/delay.h> > #include <linux/atomic.h> > #include <linux/jump_label.h> > #include "kvm_cache_regs.h" > @@ -1073,6 +1074,7 @@ static void apic_timer_expired(struct kv > { > struct kvm_vcpu *vcpu = apic->vcpu; > wait_queue_head_t *q = &vcpu->wq; > + struct kvm_timer *ktimer = &apic->lapic_timer; > > /* > * Note: KVM_REQ_PENDING_TIMER is implicitly checked in > @@ -1087,11 +1089,58 @@ static void apic_timer_expired(struct kv > > if (waitqueue_active(q)) > wake_up_interruptible(q); > + > + if (ktimer->timer_mode_mask == APIC_LVT_TIMER_TSCDEADLINE) > + ktimer->expired_tscdeadline = ktimer->tscdeadline; > +} > + > +static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu) > +{ > + struct kvm_lapic *apic = vcpu->arch.apic; > + u32 reg = kvm_apic_get_reg(apic, APIC_LVTT); > + > + if (kvm_apic_hw_enabled(apic)) { > + int vec = reg & APIC_VECTOR_MASK; > + > + 
if (kvm_x86_ops->test_posted_interrupt) > + return kvm_x86_ops->test_posted_interrupt(vcpu, vec); > + else { > + if (apic_test_vector(vec, apic->regs + APIC_ISR)) > + return true; > + } One branch here is testing IRR, the other is testing ISR. I think testing ISR is right; on APICv, the above test will cause a busy wait during a higher-priority task (or during an interrupt service routine for the timer itself), just because the timer interrupt was delivered. So, on APICv, if the interrupt is in PIR but it has bits 7:4 <= PPR[7:4], you have a problem. :( There is no APICv hook that lets you get a vmexit when the PPR becomes low enough. > + } > + return false; > +} > + > +void wait_lapic_expire(struct kvm_vcpu *vcpu) > +{ > + struct kvm_lapic *apic = vcpu->arch.apic; > + u64 guest_tsc, tsc_deadline; > + > + if (!kvm_vcpu_has_lapic(vcpu)) > + return; > + > + if (!apic_lvtt_tscdeadline(apic)) > + return; This test is wrong, I think. You need to check whether the timer interrupt was a TSC deadline interrupt. Instead, you are checking whether the current mode is TSC-deadline. This can be different if the interrupt could not be delivered immediately after it was received. This is easy to fix: replace the first two tests with "apic->lapic_timer.expired_tscdeadline != 0" and... > + if (!lapic_timer_int_injected(vcpu)) > + return; > + tsc_deadline = apic->lapic_timer.expired_tscdeadline; ... set apic->lapic_timer.expired_tscdeadline to 0 here. But I'm not sure how to solve the above problem with APICv. That's a pity. Knowing what values you use in practice for the parameter would also make it easier to understand the problem. Please report that together with the graphs produced by the unit test you added. 
Paolo > + guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc()); > + > + while (guest_tsc < tsc_deadline) { > + int delay = min(tsc_deadline - guest_tsc, 1000ULL); > + > + ndelay(delay); > + guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc()); > + } > } > > static void start_apic_timer(struct kvm_lapic *apic) > { > ktime_t now; > + > atomic_set(&apic->lapic_timer.pending, 0); > > if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) { > @@ -1137,6 +1186,7 @@ static void start_apic_timer(struct kvm_ > /* lapic timer in tsc deadline mode */ > u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline; > u64 ns = 0; > + ktime_t expire; > struct kvm_vcpu *vcpu = apic->vcpu; > unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz; > unsigned long flags; > @@ -1151,8 +1201,10 @@ static void start_apic_timer(struct kvm_ > if (likely(tscdeadline > guest_tsc)) { > ns = (tscdeadline - guest_tsc) * 1000000ULL; > do_div(ns, this_tsc_khz); > + expire = ktime_add_ns(now, ns); > + expire = ktime_sub_ns(expire, lapic_timer_advance_ns); > hrtimer_start(&apic->lapic_timer.timer, > - ktime_add_ns(now, ns), HRTIMER_MODE_ABS); > + expire, HRTIMER_MODE_ABS); > } else > apic_timer_expired(apic); > > Index: kvm/arch/x86/kvm/lapic.h > =================================================================== > --- kvm.orig/arch/x86/kvm/lapic.h > +++ kvm/arch/x86/kvm/lapic.h > @@ -14,6 +14,7 @@ struct kvm_timer { > u32 timer_mode; > u32 timer_mode_mask; > u64 tscdeadline; > + u64 expired_tscdeadline; > atomic_t pending; /* accumulated triggered timers */ > }; > > @@ -170,4 +171,6 @@ static inline bool kvm_apic_has_events(s > > bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector); > > +void wait_lapic_expire(struct kvm_vcpu *vcpu); > + > #endif > Index: kvm/arch/x86/kvm/x86.c > =================================================================== > --- kvm.orig/arch/x86/kvm/x86.c > +++ kvm/arch/x86/kvm/x86.c > @@ -108,6 +108,10 @@ EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz) 
> static u32 tsc_tolerance_ppm = 250; > module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); > > +/* lapic timer advance (tscdeadline mode only) in nanoseconds */ > +unsigned int lapic_timer_advance_ns = 0; > +module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR); > + > static bool backwards_tsc_observed = false; > > #define KVM_NR_SHARED_MSRS 16 > @@ -6311,6 +6315,7 @@ static int vcpu_enter_guest(struct kvm_v > } > > trace_kvm_entry(vcpu->vcpu_id); > + wait_lapic_expire(vcpu); > kvm_x86_ops->run(vcpu); > > /* > Index: kvm/arch/x86/kvm/x86.h > =================================================================== > --- kvm.orig/arch/x86/kvm/x86.h > +++ kvm/arch/x86/kvm/x86.h > @@ -170,5 +170,7 @@ extern u64 kvm_supported_xcr0(void); > > extern unsigned int min_timer_period_us; > > +extern unsigned int lapic_timer_advance_ns; > + > extern struct static_key kvm_no_apic_vcpu; > #endif > > > -- > To unsubscribe from this list: send the line "unsubscribe kvm" in > the body of a message to majordomo@xxxxxxxxxxxxxxx > More majordomo info at http://vger.kernel.org/majordomo-info.html > -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html