When the guest executes HLT, the vCPU transitions from virtual C0 to
virtual C1. Its virtual IA32_APERF and IA32_MPERF MSRs should stop
counting at this point, just as the host's MSRs stop counting when the
host enters C1.

Save a checkpoint of the current hardware MSR values and the host TSC.
Later, if/when the vCPU becomes runnable again, we will start
accumulating C0 cycles from this checkpoint. To avoid complications,
also restore the host MSR values at this point.

Signed-off-by: Mingwei Zhang <mizhang@xxxxxxxxxx>
Co-developed-by: Jim Mattson <jmattson@xxxxxxxxxx>
Signed-off-by: Jim Mattson <jmattson@xxxxxxxxxx>
---
 arch/x86/kvm/x86.c | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ad5351673362c..793f5d2afeb2b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5139,6 +5139,21 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
 }
 
+static void kvm_put_guest_aperfmperf(struct kvm_vcpu *vcpu)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (vcpu->arch.aperfmperf.loaded_while_running) {
+		rdmsrl(MSR_IA32_APERF, vcpu->arch.aperfmperf.guest_aperf);
+		rdmsrl(MSR_IA32_MPERF, vcpu->arch.aperfmperf.guest_mperf);
+		vcpu->arch.aperfmperf.host_tsc = rdtsc();
+		if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+			vcpu->arch.aperfmperf.loaded_while_running = false;
+	}
+	local_irq_restore(flags);
+}
+
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	int idx;
@@ -11363,10 +11378,13 @@ static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
 	 */
 	++vcpu->stat.halt_exits;
 	if (lapic_in_kernel(vcpu)) {
-		if (kvm_vcpu_has_events(vcpu))
+		if (kvm_vcpu_has_events(vcpu)) {
 			vcpu->arch.pv.pv_unhalted = false;
-		else
+		} else {
 			vcpu->arch.mp_state = state;
+			if (guest_can_use(vcpu, X86_FEATURE_APERFMPERF))
+				kvm_put_guest_aperfmperf(vcpu);
+		}
 		return 1;
 	} else {
 		vcpu->run->exit_reason = reason;
-- 
2.47.0.371.ga323438b13-goog
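
For context on why the counters must freeze across HLT: the canonical
consumer of these MSRs derives effective frequency over a sample window
as base_freq * delta_APERF / delta_MPERF. A minimal guest-side sketch
of that standard technique (not code from this series; the function
name effective_freq_khz and the 100us window are made up for
illustration):

static u64 effective_freq_khz(u64 base_khz)
{
	u64 aperf0, mperf0, aperf1, mperf1;

	rdmsrl(MSR_IA32_APERF, aperf0);
	rdmsrl(MSR_IA32_MPERF, mperf0);
	udelay(100);			/* arbitrary sample window */
	rdmsrl(MSR_IA32_APERF, aperf1);
	rdmsrl(MSR_IA32_MPERF, mperf1);

	if (mperf1 == mperf0)		/* no reference ticks; avoid div-by-0 */
		return base_khz;

	return div64_u64(base_khz * (aperf1 - aperf0), mperf1 - mperf0);
}

If the virtual MSRs simply mirrored the hardware MSRs across a halt,
whatever the physical CPU did while the vCPU slept would leak into the
deltas, and the guest would report the host's behavior rather than its
own.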
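
For reference, a sketch of the load-side counterpart implied by "start
accumulating C0 cycles from this checkpoint". This is an assumption
about the rest of the series, not part of this patch: the helper name
kvm_load_guest_aperfmperf is hypothetical, and saving/restoring the
host's own counter values is omitted.

/*
 * Hypothetical counterpart to kvm_put_guest_aperfmperf() (assumed, not
 * in this patch): when a runnable vCPU is loaded, seed the hardware
 * MSRs from the checkpointed guest values so C0 cycles accumulate from
 * where the guest left off. Interrupts are disabled so the MSR writes
 * and the flag update stay consistent with the put path.
 */
static void kvm_load_guest_aperfmperf(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!vcpu->arch.aperfmperf.loaded_while_running &&
	    vcpu->arch.mp_state != KVM_MP_STATE_HALTED) {
		wrmsrl(MSR_IA32_APERF, vcpu->arch.aperfmperf.guest_aperf);
		wrmsrl(MSR_IA32_MPERF, vcpu->arch.aperfmperf.guest_mperf);
		vcpu->arch.aperfmperf.loaded_while_running = true;
	}
	local_irq_restore(flags);
}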