Re: [PATCH v2 10/15] KVM: x86: hyper-v: Always use to_hv_vcpu() accessor to get to 'struct kvm_vcpu_hv'

On Tue, 2021-01-26 at 14:48 +0100, Vitaly Kuznetsov wrote:


...
>  static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool ex)
>  {
>  	struct kvm *kvm = vcpu->kvm;
> -	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
> +	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(current_vcpu);

You probably mean 'vcpu' here instead of 'current_vcpu'. Today I smoke-tested
the kvm/nested-svm branch, and had this fail on me while testing Windows guests.
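
I suspect the intended line is simply:

	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

which would match how the other call sites in this patch are converted
(just a guess on my side, based on the rest of the series).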


Other than that, Hyper-V seems to work and even survives nested migration
(I had one Windows reboot, but I suspect Windows Update did it).
I'll leave my test running overnight (now with updates disabled) to see if
it is stable.

Best regards,
	Maxim Levitsky


>  	struct hv_tlb_flush_ex flush_ex;
>  	struct hv_tlb_flush flush;
>  	u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
> diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
> index fdb321ba9c3f..be1e3f5d1df6 100644
> --- a/arch/x86/kvm/hyperv.h
> +++ b/arch/x86/kvm/hyperv.h
> @@ -119,7 +119,9 @@ static inline struct kvm_vcpu *hv_stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stim
>  
>  static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
>  {
> -	return !bitmap_empty(vcpu->arch.hyperv.stimer_pending_bitmap,
> +	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
> +
> +	return !bitmap_empty(hv_vcpu->stimer_pending_bitmap,
>  			     HV_SYNIC_STIMER_COUNT);
>  }
>  
> diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
> index 4de7579e206c..4ad2fbbd962a 100644
> --- a/arch/x86/kvm/lapic.h
> +++ b/arch/x86/kvm/lapic.h
> @@ -6,6 +6,8 @@
>  
>  #include <linux/kvm_host.h>
>  
> +#include "hyperv.h"
> +
>  #define KVM_APIC_INIT		0
>  #define KVM_APIC_SIPI		1
>  #define KVM_APIC_LVT_NUM	6
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 9db84508aa0b..443878dd775c 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -6733,12 +6733,14 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  	x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
>  
>  	/* All fields are clean at this point */
> -	if (static_branch_unlikely(&enable_evmcs))
> +	if (static_branch_unlikely(&enable_evmcs)) {
> +		struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
> +
>  		current_evmcs->hv_clean_fields |=
>  			HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
>  
> -	if (static_branch_unlikely(&enable_evmcs))
> -		current_evmcs->hv_vp_id = vcpu->arch.hyperv.vp_index;
> +		current_evmcs->hv_vp_id = hv_vcpu->vp_index;
> +	}
>  
>  	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
>  	if (vmx->host_debugctlmsr)
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 868d2bf8fb95..4c2b1f4260c6 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -8894,8 +8894,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>  			goto out;
>  		}
>  		if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
> +			struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
> +
>  			vcpu->run->exit_reason = KVM_EXIT_HYPERV;
> -			vcpu->run->hyperv = vcpu->arch.hyperv.exit;
> +			vcpu->run->hyperv = hv_vcpu->exit;
>  			r = 0;
>  			goto out;
>  		}