Remove the INVVPID capability checks from vpid_sync_vcpu_single() and
vpid_sync_vcpu_global() now that all callers ensure the INVVPID variant
is supported.  Note, in some cases the guarantee is provided in concert
with hardware_setup(), which enables VPID if and only if at least one of
invvpid_single() or invvpid_global() is supported.

Drop the WARN_ON_ONCE() from vmx_flush_tlb() as vpid_sync_vcpu_single()
will trigger a WARN() on INVVPID failure, i.e. if SINGLE_CONTEXT isn't
supported.

Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
---
 arch/x86/kvm/vmx/ops.h | 6 ++----
 arch/x86/kvm/vmx/vmx.h | 1 -
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/vmx/ops.h b/arch/x86/kvm/vmx/ops.h
index 39122699cfeb..aa1aab52971a 100644
--- a/arch/x86/kvm/vmx/ops.h
+++ b/arch/x86/kvm/vmx/ops.h
@@ -258,14 +258,12 @@ static inline void vpid_sync_vcpu_single(int vpid)
 	if (vpid == 0)
 		return;
 
-	if (cpu_has_vmx_invvpid_single())
-		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
+	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
 }
 
 static inline void vpid_sync_vcpu_global(void)
 {
-	if (cpu_has_vmx_invvpid_global())
-		__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
+	__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
 }
 
 static inline void vpid_sync_context(int vpid)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index d6d67b816ebe..3770ae111e6a 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -537,7 +537,6 @@ static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
 	if (cpu_has_vmx_invvpid_global()) {
 		vpid_sync_vcpu_global();
 	} else {
-		WARN_ON_ONCE(!cpu_has_vmx_invvpid_single());
 		vpid_sync_vcpu_single(vmx->vpid);
 		vpid_sync_vcpu_single(vmx->nested.vpid02);
 	}
-- 
2.24.1
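
For context, the hardware_setup() guarantee referenced in the changelog
boils down to a gate along the following lines.  This is a rough sketch
rather than a verbatim copy of the upstream code; it assumes the
existing enable_vpid knob and the cpu_has_vmx_vpid()/cpu_has_vmx_invvpid()
capability helpers alongside the cpu_has_vmx_invvpid_single()/_global()
helpers visible in the diff above:

	/*
	 * Sketch of the VPID gating in hardware_setup(): VPID stays enabled
	 * only if INVVPID is supported and at least one of the single-context
	 * or all-context variants is available.  Callers such as
	 * vmx_flush_tlb() prefer the global flush and fall back to the
	 * single-context flush, so whichever variant is actually used is
	 * known to be supported, making the removed capability checks
	 * redundant.
	 */
	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
		enable_vpid = 0;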