KVM added a global variable to guarantee monotonicity in the guest. It is
necessary because the time between

	1. ktime_get_ts(&timespec);
	2. rdtscll(tsc);

is variable. That is, given a host with stable TSC, suppose that two VCPUs
read the same time via ktime_get_ts() above. The time required to execute
2. is not the same on those two instances executing in different VCPUs
(cache misses, interrupts...).

If the TSC value that is used by the host to interpolate when calculating
the monotonic time is the same value used to calculate the tsc_timestamp
value stored in the pvclock data structure, then this problem disappears.
Monotonicity is then guaranteed by the synchronicity of the host TSCs.

Set the TSC stable pvclock flag in that case, allowing the guest to read
the clock from userspace.

Signed-off-by: Marcelo Tosatti <mtosatti@xxxxxxxxxx>
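To make the failure mode above concrete, here is a minimal standalone
user-space sketch (not part of the patch; pvclock_ns() and all numbers are
hypothetical, and mult/shift scaling is omitted). Each vCPU samples the
same host monotonic time but reads the TSC after a different delay, so a
guest task that reads the clock on one vCPU and then the other can see
time go backwards:

/*
 * Standalone illustration: two vCPUs get the same host monotonic time but
 * sample the TSC after different delays, so their <system_time,
 * tsc_timestamp> pairs disagree and guest-computed time is not monotonic.
 * All values are made up; 1 TSC tick == 1 ns for simplicity.
 */
#include <stdint.h>
#include <stdio.h>

/* pvclock-style interpolation, with mult/shift scaling omitted */
static uint64_t pvclock_ns(uint64_t system_time, uint64_t tsc_timestamp,
			   uint64_t tsc_now)
{
	return system_time + (tsc_now - tsc_timestamp);
}

int main(void)
{
	uint64_t system_time = 1000000;	/* same ktime_get_ts() result */
	uint64_t tsc_vcpu0 = 5010;	/* TSC read shortly afterwards */
	uint64_t tsc_vcpu1 = 5500;	/* TSC read after a long delay */
	uint64_t tsc_now = 6000;	/* later TSC value, same on both */

	uint64_t t0 = pvclock_ns(system_time, tsc_vcpu0, tsc_now);
	uint64_t t1 = pvclock_ns(system_time, tsc_vcpu1, tsc_now);

	/* Reading on vCPU0 and then on vCPU1 appears to go backwards. */
	printf("vcpu0=%llu vcpu1=%llu backwards by %llu\n",
	       (unsigned long long)t0, (unsigned long long)t1,
	       (unsigned long long)(t0 - t1));
	return 0;
}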
Index: vsyscall/arch/x86/kvm/x86.c
===================================================================
--- vsyscall.orig/arch/x86/kvm/x86.c
+++ vsyscall/arch/x86/kvm/x86.c
@@ -46,6 +46,7 @@
 #include <linux/uaccess.h>
 #include <linux/hash.h>
 #include <linux/pci.h>
+#include <linux/pvclock_gtod.h>
 #include <trace/events/kvm.h>
 
 #define CREATE_TRACE_POINTS
@@ -1135,8 +1136,91 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu
 
 EXPORT_SYMBOL_GPL(kvm_write_tsc);
 
+static cycle_t read_tsc(void)
+{
+	cycle_t ret;
+	u64 last;
+
+	/*
+	 * Empirically, a fence (of type that depends on the CPU)
+	 * before rdtsc is enough to ensure that rdtsc is ordered
+	 * with respect to loads. The various CPU manuals are unclear
+	 * as to whether rdtsc can be reordered with later loads,
+	 * but no one has ever seen it happen.
+	 */
+	rdtsc_barrier();
+	ret = (cycle_t)vget_cycles();
+
+	last = pvclock_gtod_data.clock.cycle_last;
+
+	if (likely(ret >= last))
+		return ret;
+
+	/*
+	 * GCC likes to generate cmov here, but this branch is extremely
+	 * predictable (it's just a function of time and the likely is
+	 * very likely) and there's a data dependence, so force GCC
+	 * to generate a branch instead. I don't barrier() because
+	 * we don't actually need a barrier, and if this function
+	 * ever gets inlined it will generate worse code.
+	 */
+	asm volatile ("");
+	return last;
+}
+
+static inline u64 vgettsc(cycle_t *cycle_now)
+{
+	long v;
+	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
+
+	*cycle_now = read_tsc();
+
+	v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
+	return v * gtod->clock.mult;
+}
+
+static int do_monotonic(struct timespec *ts, cycle_t *cycle_now)
+{
+	unsigned long seq;
+	u64 ns;
+	int mode;
+	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
+
+	ts->tv_nsec = 0;
+	do {
+		seq = read_seqcount_begin(&gtod->seq);
+		mode = gtod->clock.vclock_mode;
+		ts->tv_sec = gtod->monotonic_time_sec;
+		ns = gtod->monotonic_time_snsec;
+		ns += vgettsc(cycle_now);
+		ns >>= gtod->clock.shift;
+	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
+	timespec_add_ns(ts, ns);
+
+	return mode;
+}
+
+/* returns true if host is using tsc clocksource */
+static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
+{
+	struct timespec ts;
+
+	/* checked again under seqlock below */
+	if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
+		return false;
+
+	if (do_monotonic(&ts, cycle_now) != VCLOCK_TSC)
+		return false;
+
+	monotonic_to_bootbased(&ts);
+	*kernel_ns = timespec_to_ns(&ts);
+
+	return true;
+}
+
 static void kvm_write_pvtime(struct kvm_vcpu *v, struct page *page,
-			     unsigned int offset_in_page, gpa_t gpa)
+			     unsigned int offset_in_page, gpa_t gpa,
+			     bool host_tsc_clocksource)
 {
 	struct kvm_vcpu_arch *vcpu = &v->arch;
 	void *shared_kaddr;
@@ -1155,6 +1239,10 @@ static void kvm_write_pvtime(struct kvm_
 		vcpu->pvclock_set_guest_stopped_request = false;
 	}
 
+	/* If the host uses TSC clocksource, then it is stable */
+	if (host_tsc_clocksource)
+		pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
+
 	vcpu->hv_clock.flags = pvclock_flags;
 
 	memcpy(shared_kaddr + offset_in_page, &vcpu->hv_clock,
@@ -1172,11 +1260,12 @@ static int kvm_guest_time_update(struct
 	unsigned long this_tsc_khz;
 	s64 kernel_ns, max_kernel_ns;
 	u64 tsc_timestamp;
+	cycle_t cycle_now;
+	u64 host_tsc;
+	bool host_tsc_clocksource;
 
 	/* Keep irq disabled to prevent changes to the clock */
 	local_irq_save(flags);
-	tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, native_read_tsc());
-	kernel_ns = get_kernel_ns();
 	this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
 	if (unlikely(this_tsc_khz == 0)) {
 		local_irq_restore(flags);
@@ -1185,6 +1274,20 @@ static int kvm_guest_time_update(struct
 	}
 
 	/*
+	 * If the host uses TSC clock, then passthrough TSC as stable
+	 * to the guest.
+	 */
+	host_tsc_clocksource = kvm_get_time_and_clockread(&kernel_ns, &cycle_now);
+	if (host_tsc_clocksource)
+		host_tsc = cycle_now;
+	else {
+		host_tsc = native_read_tsc();
+		kernel_ns = get_kernel_ns();
+	}
+
+	tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);
+
+	/*
 	 * We may have to catch up the TSC to match elapsed wall clock
 	 * time for two reasons, even if kvmclock is used.
 	 * 1) CPU could have been running below the maximum TSC rate
@@ -1262,10 +1365,12 @@ static int kvm_guest_time_update(struct
 	 */
 	vcpu->hv_clock.version += 2;
 
-	kvm_write_pvtime(v, vcpu->time_page, vcpu->time_offset, vcpu->time);
+	kvm_write_pvtime(v, vcpu->time_page, vcpu->time_offset, vcpu->time,
+			 host_tsc_clocksource);
 	if (vcpu->uspace_time_page)
 		kvm_write_pvtime(v, vcpu->uspace_time_page,
-				 vcpu->uspace_time_offset, vcpu->uspace_time);
+				 vcpu->uspace_time_offset, vcpu->uspace_time,
+				 host_tsc_clocksource);
 
 	return 0;
 }
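For context (not part of the patch): on the guest side, a reader is
expected to honour the PVCLOCK_TSC_STABLE_BIT written by kvm_write_pvtime()
before trusting a userspace clock read. The sketch below is illustrative
only; the structure layout mirrors the pvclock ABI as published to the
guest, but pvclock_time_info, pvclock_read() and rdtsc_serialized() are
names chosen here, not the actual guest or vDSO implementation.

/*
 * Simplified guest-side sketch: retry on version changes, and only use the
 * TSC-based fast path when the host flagged the clock as TSC-stable.
 */
#include <stdint.h>

#define PVCLOCK_TSC_STABLE_BIT	(1 << 0)

struct pvclock_time_info {		/* mirrors pvclock_vcpu_time_info */
	uint32_t version;
	uint32_t pad0;
	uint64_t tsc_timestamp;
	uint64_t system_time;
	uint32_t tsc_to_system_mul;
	int8_t   tsc_shift;
	uint8_t  flags;
	uint8_t  pad[2];
} __attribute__((packed));

static inline uint64_t rdtsc_serialized(void)
{
	uint32_t lo, hi;
	/* lfence acts as the rdtsc barrier, as in read_tsc() above */
	asm volatile("lfence; rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

/* Returns nanoseconds, or 0 if the clock is not flagged TSC-stable. */
static uint64_t pvclock_read(const volatile struct pvclock_time_info *src)
{
	uint32_t version;
	uint64_t delta, ns;

	do {
		version = src->version;
		asm volatile("" ::: "memory");	/* compiler barrier */

		if (!(src->flags & PVCLOCK_TSC_STABLE_BIT))
			return 0;	/* caller falls back to a syscall */

		delta = rdtsc_serialized() - src->tsc_timestamp;
		if (src->tsc_shift >= 0)
			delta <<= src->tsc_shift;
		else
			delta >>= -src->tsc_shift;
		/* 128-bit scale, as the kernel does, to avoid overflow */
		ns = src->system_time + (uint64_t)
		     (((__uint128_t)delta * src->tsc_to_system_mul) >> 32);

		asm volatile("" ::: "memory");
	} while ((src->version & 1) || version != src->version);

	return ns;
}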