On 15/11/2017 12:43, Liran Alon wrote: > This MSR returns the number of #SMIs that occurred on CPU since > boot. > > It was seen to be used frequently by ESXi guest. > > Patch adds a new vcpu-arch specific var called smi_count to > save the number of #SMIs which occurred on CPU since boot. > It is exposed as a read-only MSR to guest (causing #GP > on wrmsr) in RDMSR/WRMSR emulation code. > MSR_SMI_COUNT is also added to emulated_msrs[] to make sure > user-space can save/restore it for migration purposes. > > Signed-off-by: Liran Alon <liran.alon@xxxxxxxxxx> > Suggested-by: Paolo Bonzini <pbonzini@xxxxxxxxxx> > Reviewed-by: Nikita Leshenko <nikita.leshchenko@xxxxxxxxxx> > Reviewed-by: Bhavesh Davda <bhavesh.davda@xxxxxxxxxx> > Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx> > --- > > Notes: > To make vmsave/vload work for MSR_SMI_COUNT (for migration purposes), > a patch for QEMU is also required. > We have written such a patch and verified that it works. > It will be submitted separately to the QEMU mailing list. 
> > arch/x86/include/asm/kvm_host.h | 1 + > arch/x86/kvm/x86.c | 11 +++++++++++ > 2 files changed, 12 insertions(+) > > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h > index c73e493adf07..089856eeac5d 100644 > --- a/arch/x86/include/asm/kvm_host.h > +++ b/arch/x86/include/asm/kvm_host.h > @@ -504,6 +504,7 @@ struct kvm_vcpu_arch { > int mp_state; > u64 ia32_misc_enable_msr; > u64 smbase; > + u64 smi_count; > bool tpr_access_reporting; > u64 ia32_xss; > > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c > index 21c00594f230..dedf5539f924 100644 > --- a/arch/x86/kvm/x86.c > +++ b/arch/x86/kvm/x86.c > @@ -1036,6 +1036,7 @@ bool kvm_rdpmc(struct kvm_vcpu *vcpu) > MSR_IA32_MCG_CTL, > MSR_IA32_MCG_EXT_CTL, > MSR_IA32_SMBASE, > + MSR_SMI_COUNT, > MSR_PLATFORM_INFO, > MSR_MISC_FEATURES_ENABLES, > }; > @@ -2217,6 +2218,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) > return 1; > vcpu->arch.smbase = data; > break; > + case MSR_SMI_COUNT: > + if (!msr_info->host_initiated) > + return 1; > + vcpu->arch.smi_count = data; > + break; > case MSR_KVM_WALL_CLOCK_NEW: > case MSR_KVM_WALL_CLOCK: > vcpu->kvm->arch.wall_clock = data; > @@ -2491,6 +2497,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) > return 1; > msr_info->data = vcpu->arch.smbase; > break; > + case MSR_SMI_COUNT: > + msr_info->data = vcpu->arch.smi_count; > + break; > case MSR_IA32_PERF_STATUS: > /* TSC increment by tick */ > msr_info->data = 1000ULL; > @@ -6437,6 +6446,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) > kvm_x86_ops->queue_exception(vcpu); > } else if (vcpu->arch.smi_pending && !is_smm(vcpu)) { > vcpu->arch.smi_pending = false; > + ++vcpu->arch.smi_count; > enter_smm(vcpu); > } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) { > --vcpu->arch.nmi_pending; > @@ -7781,6 +7791,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) > vcpu->arch.hflags = 
0; > > vcpu->arch.smi_pending = 0; > + vcpu->arch.smi_count = 0; > atomic_set(&vcpu->arch.nmi_queued, 0); > vcpu->arch.nmi_pending = 0; > vcpu->arch.nmi_injected = false; > Reviewed-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>