On Wed, Sep 26, 2018 at 07:02:56PM +0200, Vitaly Kuznetsov wrote:
> In most common cases VP index of a vcpu matches its vcpu index. Userspace
> is, however, free to set any mapping it wishes and we need to account for
> that when we need to find a vCPU with a particular VP index. To keep search
> algorithms optimal in both cases introduce 'num_mismatched_vp_indexes'
> counter showing how many vCPUs with mismatching VP index we have. In case
> the counter is zero we can assume vp_index == vcpu_idx.
>
> Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
> ---
>  arch/x86/include/asm/kvm_host.h |  3 +++
>  arch/x86/kvm/hyperv.c           | 26 +++++++++++++++++++++++---
>  2 files changed, 26 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 09b2e3e2cf1b..711f79f1b5e6 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -781,6 +781,9 @@ struct kvm_hv {
>          u64 hv_reenlightenment_control;
>          u64 hv_tsc_emulation_control;
>          u64 hv_tsc_emulation_status;
> +
> +        /* How many vCPUs have VP index != vCPU index */
> +        atomic_t num_mismatched_vp_indexes;
>  };
>
>  enum kvm_irqchip_mode {
> diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
> index c8764faf783b..6a19c8e3c432 100644
> --- a/arch/x86/kvm/hyperv.c
> +++ b/arch/x86/kvm/hyperv.c
> @@ -1045,11 +1045,31 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
>          struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
>
>          switch (msr) {
> -        case HV_X64_MSR_VP_INDEX:
> -                if (!host || (u32)data >= KVM_MAX_VCPUS)
> +        case HV_X64_MSR_VP_INDEX: {
> +                struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
> +                int vcpu_idx = kvm_vcpu_get_idx(vcpu);
> +                u32 new_vp_index = (u32)data;
> +
> +                if (!host || new_vp_index >= KVM_MAX_VCPUS)
>                          return 1;
> -                hv_vcpu->vp_index = (u32)data;
> +
> +                if (new_vp_index == hv_vcpu->vp_index)
> +                        return 0;
> +
> +                /*
> +                 * VP index is changing, increment num_mismatched_vp_indexes in
> +                 * case it was equal to vcpu_idx before; on the other hand, if
> +                 * the new VP index matches vcpu_idx num_mismatched_vp_indexes
> +                 * needs to be decremented.

It may be worth mentioning that the initial balance is provided by
kvm_hv_vcpu_postcreate setting vp_index = vcpu_idx.

> +                 */
> +                if (hv_vcpu->vp_index == vcpu_idx)
> +                        atomic_inc(&hv->num_mismatched_vp_indexes);
> +                else if (new_vp_index == vcpu_idx)
> +                        atomic_dec(&hv->num_mismatched_vp_indexes);
> +
> +                hv_vcpu->vp_index = new_vp_index;
>                  break;
> +        }
>          case HV_X64_MSR_VP_ASSIST_PAGE: {
>                  u64 gfn;
>                  unsigned long addr;

Reviewed-by: Roman Kagan <rkagan@xxxxxxxxxxxxx>
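
For readers skimming the series, a minimal sketch (not part of this patch; the
helper name hv_vcpu_by_vpidx is made up for illustration and assumes the
hyperv.c context) of how a VP-index lookup could use the new counter: while
num_mismatched_vp_indexes is zero the VP index can be used directly as the
vcpu index, otherwise the code falls back to a linear scan.

static struct kvm_vcpu *hv_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        struct kvm_vcpu *vcpu;
        int i;

        if (vpidx >= KVM_MAX_VCPUS)
                return NULL;

        /* Fast path: no vCPU has had its VP index remapped by userspace. */
        if (!atomic_read(&hv->num_mismatched_vp_indexes))
                return kvm_get_vcpu(kvm, vpidx);

        /* Slow path: at least one mapping differs, scan all vCPUs. */
        kvm_for_each_vcpu(i, vcpu, kvm)
                if (vcpu->arch.hyperv.vp_index == vpidx)
                        return vcpu;

        return NULL;
}

The fast path relies on the invariant this patch maintains: the counter is
non-zero exactly while at least one vCPU has vp_index != vcpu_idx.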