Fold __vmx_flush_tlb() into its sole caller, vmx_flush_tlb(), now that
all call sites that previously bounced through __vmx_flush_tlb() to
force the INVVPID path instead call vpid_sync_context() directly.

Opportunistically add a comment to explain why INVEPT is necessary when
EPT is enabled, even if VPID is disabled.

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
---
 arch/x86/kvm/vmx/vmx.h | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 6e0ca57cc41c..6204fa5897bb 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -501,23 +501,25 @@ static inline struct vmcs *alloc_vmcs(bool shadow)
 
 u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
 
-static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
+static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 {
+	/*
+	 * INVEPT must be issued when EPT is enabled, irrespective of VPID, as
+	 * the CPU is not required to invalidate GPA->HPA mappings on VM-Entry,
+	 * even if VPID is disabled.  GPA->HPA mappings are associated with the
+	 * root EPT structure and not any particular VPID (INVVPID is also not
+	 * required to invalidate GPA->HPA mappings).
+	 */
 	if (enable_ept) {
 		if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
 			return;
 		ept_sync_context(construct_eptp(vcpu,
						vcpu->arch.mmu->root_hpa));
 	} else {
-		vpid_sync_context(vpid);
+		vpid_sync_context(to_vmx(vcpu)->vpid);
 	}
 }
 
-static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu)
-{
-	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
-}
-
 static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
 {
 	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
-- 
2.24.1
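
Note for readers: the call-site conversion referenced in the changelog
(done before this patch, hence __vmx_flush_tlb() having exactly one
caller left) amounts to replacing a bounce through the helper with a
direct call to the INVVPID helper.  An illustrative sketch, not taken
verbatim from any specific call site:

	/* Before: bounce through __vmx_flush_tlb() to force INVVPID. */
	__vmx_flush_tlb(vcpu, vpid);

	/* After: invoke the INVVPID path directly. */
	vpid_sync_context(vpid);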