Instead of hunting mysterious stalls/hangs all over the kernel when overflow occurs at pvclock.c:pvclock_get_nsec_offset u64 delta = native_read_tsc() - shadow->tsc_timestamp; and introducing hooks when places of unexpected access are found, pv_clock should be initialized for the calling cpu if an overflow condition is detected. Signed-off-by: Igor Mammedov <imammedo@xxxxxxxxxx> --- arch/x86/kernel/pvclock.c | 18 +++++++++++++++--- 1 files changed, 15 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index 42eb330..b486756 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -41,9 +41,14 @@ void pvclock_set_flags(u8 flags) valid_flags = flags; } -static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow) +static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow, + bool *overflow) { - u64 delta = native_read_tsc() - shadow->tsc_timestamp; + u64 delta; + u64 tsc = native_read_tsc(); + u64 shadow_timestamp = shadow->tsc_timestamp; + *overflow = tsc < shadow_timestamp; + delta = tsc - shadow_timestamp; return pvclock_scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift); } @@ -94,12 +99,19 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) unsigned version; cycle_t ret, offset; u64 last; + bool overflow; do { version = pvclock_get_time_values(&shadow, src); barrier(); - offset = pvclock_get_nsec_offset(&shadow); + offset = pvclock_get_nsec_offset(&shadow, &overflow); ret = shadow.system_timestamp + offset; + if (unlikely(overflow)) { + memset(src, 0, sizeof(*src)); + barrier(); + x86_cpuinit.early_percpu_clock_init(); + continue; + } barrier(); } while (version != src->version); -- 1.7.7.6 -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html