> On 13 Oct 2018, at 17:53, lantianyu1986@xxxxxxxxx wrote:
>
> From: Lan Tianyu <Tianyu.Lan@xxxxxxxxxxxxx>
>
> This patch is to add wrapper functions for the tlb_remote_flush_with_range
> callback.
>
> Signed-off-by: Lan Tianyu <Tianyu.Lan@xxxxxxxxxxxxx>
> ---
> Change since V3:
>        Remove code of updating "tlbs_dirty"
> Change since V2:
>        Fix comment in the kvm_flush_remote_tlbs_with_range()
> ---
>  arch/x86/kvm/mmu.c | 40 ++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 40 insertions(+)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index c73d9f650de7..ff656d85903a 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -264,6 +264,46 @@ static void mmu_spte_set(u64 *sptep, u64 spte);
>  static union kvm_mmu_page_role
>  kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
>
> +
> +static inline bool kvm_available_flush_tlb_with_range(void)
> +{
> +	return kvm_x86_ops->tlb_remote_flush_with_range;
> +}

Seems that kvm_available_flush_tlb_with_range() is not used in this patch…

> +
> +static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
> +		struct kvm_tlb_range *range)
> +{
> +	int ret = -ENOTSUPP;
> +
> +	if (range && kvm_x86_ops->tlb_remote_flush_with_range)
> +		ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);
> +
> +	if (ret)
> +		kvm_flush_remote_tlbs(kvm);
> +}
> +
> +static void kvm_flush_remote_tlbs_with_list(struct kvm *kvm,
> +		struct list_head *flush_list)
> +{
> +	struct kvm_tlb_range range;
> +
> +	range.flush_list = flush_list;
> +
> +	kvm_flush_remote_tlbs_with_range(kvm, &range);
> +}
> +
> +static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
> +		u64 start_gfn, u64 pages)
> +{
> +	struct kvm_tlb_range range;
> +
> +	range.start_gfn = start_gfn;
> +	range.pages = pages;
> +	range.flush_list = NULL;
> +
> +	kvm_flush_remote_tlbs_with_range(kvm, &range);
> +}
> +
>  void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
>  {
>  	BUG_ON((mmio_mask & mmio_value) != mmio_value);
> --
> 2.14.4
>
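
Presumably a later patch in the series introduces callers of that helper. For context, here is a minimal sketch of how the new wrappers might be wired up: kvm_zap_gfn_range_example() is a hypothetical caller invented for illustration only (it is not from this patch or, as far as I can tell, this series), and it simply checks whether the backend provides tlb_remote_flush_with_range before choosing between a ranged and a full remote flush:

/*
 * Illustrative only: a hypothetical caller that uses
 * kvm_available_flush_tlb_with_range() to decide whether a
 * fine-grained, range-based flush is worthwhile, falling back to a
 * full remote TLB flush otherwise. Not part of this patch.
 */
static void kvm_zap_gfn_range_example(struct kvm *kvm, u64 start_gfn,
		u64 pages)
{
	if (kvm_available_flush_tlb_with_range())
		kvm_flush_remote_tlbs_with_address(kvm, start_gfn, pages);
	else
		kvm_flush_remote_tlbs(kvm);
}

Note that kvm_flush_remote_tlbs_with_range() already falls back to kvm_flush_remote_tlbs() when the callback is absent or fails, so the value of kvm_available_flush_tlb_with_range() to a real caller is presumably in deciding up front whether per-range flushing is even worth attempting.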