----- junaids@xxxxxxxxxx wrote:
> kvm_mmu_invlpg() and kvm_mmu_invpcid_gva() only need to flush the TLB
> entries for the specific guest virtual address, instead of flushing all
> TLB entries associated with the VM.
>
> Signed-off-by: Junaid Shahid <junaids@xxxxxxxxxx>
> ---
>  arch/x86/include/asm/kvm_host.h |  6 ++++++
>  arch/x86/kvm/mmu.c              |  6 +++---
>  arch/x86/kvm/svm.c              |  8 ++++++++
>  arch/x86/kvm/vmx.c              | 24 ++++++++++++++++++++++++
>  4 files changed, 41 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 7b93ea4e16f3..55aaf12c13ea 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -976,6 +976,12 @@ struct kvm_x86_ops {
>
>  	void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
>
> +	/*
> +	 * Flush any TLB entries associated with the given GVA.
> +	 * Does not need to flush GPA->HPA mappings.
> +	 */
> +	void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);
> +
>  	void (*run)(struct kvm_vcpu *vcpu);
>  	int (*handle_exit)(struct kvm_vcpu *vcpu);
>  	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 160dc84c15be..7cfc30409517 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -5166,7 +5166,7 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
>  	if (VALID_PAGE(mmu->prev_root_hpa))
>  		mmu->invlpg(vcpu, gva, mmu->prev_root_hpa);
>
> -	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
> +	kvm_x86_ops->tlb_flush_gva(vcpu, gva);
>  	++vcpu->stat.invlpg;
>  }
>  EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
> @@ -5177,13 +5177,13 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
>
>  	if (pcid == kvm_get_active_pcid(vcpu)) {
>  		mmu->invlpg(vcpu, gva, mmu->root_hpa);
> -		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
> +		kvm_x86_ops->tlb_flush_gva(vcpu, gva);
>  	}
>
>  	if (VALID_PAGE(mmu->prev_root_hpa) &&
>  	    pcid == kvm_get_pcid(vcpu, mmu->prev_cr3)) {
>  		mmu->invlpg(vcpu, gva, mmu->prev_root_hpa);
> -		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
> +		kvm_x86_ops->tlb_flush_gva(vcpu, gva);
>  	}
>
>  	++vcpu->stat.invlpg;
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index fd8b0dc5136f..ffc92aa2ebf5 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -5401,6 +5401,13 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
>  		svm->asid_generation--;
>  }
>
> +static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
> +{
> +	struct vcpu_svm *svm = to_svm(vcpu);
> +
> +	invlpga(gva, svm->vmcb->control.asid);
> +}
> +
>  static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
>  {
>  }
> @@ -7059,6 +7066,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
>  	.set_rflags = svm_set_rflags,
>
>  	.tlb_flush = svm_flush_tlb,
> +	.tlb_flush_gva = svm_flush_tlb_gva,
>
>  	.run = svm_vcpu_run,
>  	.handle_exit = handle_exit,
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index b11ec063564f..45bee828bc97 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -1393,6 +1393,11 @@ static inline bool cpu_has_vmx_invept_global(void)
>  	return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
>  }
>
> +static inline bool cpu_has_vmx_invvpid_addr(void)
> +{
> +	return vmx_capability.vpid & VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT;
> +}
> +
>  static inline bool cpu_has_vmx_invvpid_single(void)
>  {
>  	return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
> @@ -1754,6 +1759,16 @@ static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
>  		__loaded_vmcs_clear, loaded_vmcs, 1);
>  }
>
> +static inline bool vpid_sync_vcpu_addr(int vpid, gva_t addr)
> +{
> +	if (cpu_has_vmx_invvpid_addr()) {
> +		__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
> +		return true;
> +	}
> +
> +	return false;
> +}
> +
>  static inline void vpid_sync_vcpu_single(int vpid)
>  {
>  	if (vpid == 0)
> @@ -4553,6 +4568,14 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
>  	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
>  }
>
> +static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
> +{
> +	int vpid = to_vmx(vcpu)->vpid;
> +
> +	if (!vpid_sync_vcpu_addr(vpid, addr))
> +		vpid_sync_context(vpid);
> +}

If VPID is not supported, or the enable_vpid module parameter is 0, then
vmx_flush_tlb_gva() will do nothing. I think that if !enable_vpid you should
invlpg the addr instead; a rough sketch of what I mean follows below the
quoted patch.

> +
>  static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
>  {
>  	if (enable_ept)
> @@ -12765,6 +12788,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
>  	.set_rflags = vmx_set_rflags,
>
>  	.tlb_flush = vmx_flush_tlb,
> +	.tlb_flush_gva = vmx_flush_tlb_gva,
>
>  	.run = vmx_vcpu_run,
>  	.handle_exit = vmx_handle_exit,
> --
> 2.17.0.441.gb46fe60e1d-goog
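To make the suggestion concrete, something along these lines is roughly what
I have in mind for the !enable_vpid case. This is a completely untested
sketch: the raw host-side invlpg is only for illustration (an existing host
flush helper may be more appropriate), and it reuses enable_vpid, to_vmx(),
vpid_sync_vcpu_addr() and vpid_sync_context() from vmx.c / this patch:

static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
{
	int vpid = to_vmx(vcpu)->vpid;

	if (!enable_vpid) {
		/* No VPID in use: flush just this entry on the host side. */
		asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
		return;
	}

	/* Prefer per-address INVVPID; otherwise fall back to flushing the context. */
	if (!vpid_sync_vcpu_addr(vpid, addr))
		vpid_sync_context(vpid);
}

The point is just that the !enable_vpid case should not be a silent no-op;
whether the fallback ends up being a host invlpg or something else can be
decided separately.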