From: Wanpeng Li <wanpeng.li@xxxxxxxxxxx>

There is logic to handle a first-time write every time the pvclock, wall
clock, and steal time shared memory pages are updated. Since the concern
is that the version field cannot be guaranteed to be initialized to an
even number by the guest, this logic really belongs in the paravirtual
feature setup path. This patch fixes it by handling the first-time write
for the pvclock and steal time pages during setup, since those pages are
updated frequently, and leaves the wall clock path as it is, since the
wall clock is rarely updated.

Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: Radim Krčmář <rkrcmar@xxxxxxxxxx>
Cc: Liran Alon <liran.alon@xxxxxxxxxx>
Signed-off-by: Wanpeng Li <wanpeng.li@xxxxxxxxxxx>
---
 arch/x86/kvm/x86.c | 29 ++++++++++++++++++++++-------
 1 file changed, 22 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4552427..19311e0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1833,9 +1833,6 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
 	 */
 	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
 
-	if (guest_hv_clock.version & 1)
-		++guest_hv_clock.version; /* first time write, random junk */
-
 	vcpu->hv_clock.version = guest_hv_clock.version + 1;
 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
 				&vcpu->hv_clock,
@@ -2126,9 +2123,6 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.st.steal.preempted = 0;
 
-	if (vcpu->arch.st.steal.version & 1)
-		vcpu->arch.st.steal.version += 1; /* first time write, random junk */
-
 	vcpu->arch.st.steal.version += 1;
 
 	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
@@ -2256,8 +2250,19 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		     &vcpu->arch.pv_time, data & ~1ULL,
 		     sizeof(struct pvclock_vcpu_time_info)))
 			vcpu->arch.pv_time_enabled = false;
-		else
+		else {
+			struct pvclock_vcpu_time_info guest_hv_clock;
+
 			vcpu->arch.pv_time_enabled = true;
+			if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_time,
+				&guest_hv_clock, sizeof(guest_hv_clock))))
+				break;
+			if (guest_hv_clock.version & 1)
+				++guest_hv_clock.version; /* first time write, random junk */
+			kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_time,
+						&vcpu->arch.hv_clock,
+						sizeof(vcpu->arch.hv_clock.version));
+		}
 
 		break;
 	}
@@ -2283,6 +2288,16 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!(data & KVM_MSR_ENABLED))
 			break;
 
+		if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+			&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
+			break;
+
+		if (vcpu->arch.st.steal.version & 1)
+			vcpu->arch.st.steal.version += 1; /* first time write, random junk */
+
+		kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+			&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+
 		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
 
 		break;
-- 
2.7.4
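
For reference, and not part of the patch itself: the reason the version field
must end up even is that the guest treats it as a sequence counter when it
reads the shared page. The snippet below is only an illustrative sketch of
that read protocol, assuming kernel types, barriers, and the
pvclock_vcpu_time_info layout; the function name is made up and the real
guest reader additionally scales the TSC:

	/*
	 * Illustrative sketch: the guest spins until it sees the same even
	 * version before and after reading the payload.  An odd value means
	 * the host is mid-update (or the field still holds uninitialized
	 * junk), so the data cannot be trusted yet.
	 */
	static u64 read_pvclock_sketch(const struct pvclock_vcpu_time_info *src)
	{
		u32 version;
		u64 system_time;

		do {
			version = src->version;
			rmb();		/* read version before the payload */
			system_time = src->system_time;
			/* ... also read tsc_timestamp/mul/shift and scale the TSC ... */
			rmb();		/* read payload before re-checking */
		} while ((version & 1) || version != src->version);

		return system_time;
	}

Under that protocol, a version field that starts out as odd junk would let the
host's usual increment-write-increment sequence publish an in-flight update
with an even version, which is exactly what the first-time-write fixup
prevents; doing that fixup once, when the guest enables the MSR, is enough.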