----- junaids@xxxxxxxxxx wrote:

> sync_page() calls set_spte() from a loop across a page table. It would
> work better if set_spte() left the TLB flushing to its callers, so that
> sync_page() can aggregate into a single call.
>
> Signed-off-by: Junaid Shahid <junaids@xxxxxxxxxx>
> ---
>  arch/x86/kvm/mmu.c         | 16 ++++++++++++----
>  arch/x86/kvm/paging_tmpl.h | 12 ++++++++----
>  2 files changed, 20 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index f440d43c8d5a..6124493fe918 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -2724,6 +2724,10 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
>  	return true;
>  }
>
> +/* Bits which may be returned by set_spte() */
> +#define SET_SPTE_WRITE_PROTECTED_PT	BIT(0)
> +#define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)
> +
>  static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
>  		    unsigned pte_access, int level,
>  		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
> @@ -2800,7 +2804,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
>  		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
>  			pgprintk("%s: found shadow page for %llx, marking ro\n",
>  				 __func__, gfn);
> -			ret = 1;
> +			ret |= SET_SPTE_WRITE_PROTECTED_PT;
>  			pte_access &= ~ACC_WRITE_MASK;
>  			spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
>  		}
> @@ -2816,7 +2820,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
>
>  set_pte:
>  	if (mmu_spte_update(sptep, spte))
> -		kvm_flush_remote_tlbs(vcpu->kvm);
> +		ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
>  done:
>  	return ret;
>  }
> @@ -2827,6 +2831,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
>  {
>  	int was_rmapped = 0;
>  	int rmap_count;
> +	int set_spte_ret;
>  	int ret = RET_PF_RETRY;
>
>  	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
> @@ -2854,12 +2859,15 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
>  		was_rmapped = 1;
>  	}
>
> -	if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
> -	      true, host_writable)) {
> +	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
> +				speculative, true, host_writable);
> +	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
>  		if (write_fault)
>  			ret = RET_PF_EMULATE;
>  		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
>  	}
> +	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
> +		kvm_flush_remote_tlbs(vcpu->kvm);
>
>  	if (unlikely(is_mmio_spte(*sptep)))
>  		ret = RET_PF_EMULATE;
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index 6288e9d7068e..fc5fadf5b46a 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -968,6 +968,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
>  	int i, nr_present = 0;
>  	bool host_writable;
>  	gpa_t first_pte_gpa;
> +	int set_spte_ret = 0;
>
>  	/* direct kvm_mmu_page can not be unsync. */
>  	BUG_ON(sp->role.direct);
> @@ -1024,12 +1025,15 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
>
>  		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;
>
> -		set_spte(vcpu, &sp->spt[i], pte_access,
> -			 PT_PAGE_TABLE_LEVEL, gfn,
> -			 spte_to_pfn(sp->spt[i]), true, false,
> -			 host_writable);
> +		set_spte_ret |= set_spte(vcpu, &sp->spt[i],
> +					 pte_access, PT_PAGE_TABLE_LEVEL,
> +					 gfn, spte_to_pfn(sp->spt[i]),
> +					 true, false, host_writable);
>  	}
>
> +	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
> +		kvm_flush_remote_tlbs(vcpu->kvm);
> +
>  	return nr_present;
>  }
>
> --
> 2.18.0.rc1.242.g61856ae69a-goog

Reviewed-by: Liran Alon <liran.alon@xxxxxxxxxx>
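
The core of the change is the "return flags, let the caller flush" pattern:
set_spte() reports what happened through a bitmask, and a loop such as
sync_page() ORs those bits together and issues a single remote flush at the
end instead of one per modified spte. Below is a minimal, self-contained
sketch of that pattern; the names (update_entry(), sync_all(), flush_tlbs(),
NEED_FLUSH) are hypothetical stand-ins, not the actual KVM interfaces.

#include <stdio.h>

/* Hypothetical flag: "this update may leave a stale TLB translation". */
#define NEED_FLUSH	(1 << 0)

static int flush_count;

/* Stand-in for kvm_flush_remote_tlbs(); here it just counts calls. */
static void flush_tlbs(void)
{
	flush_count++;
}

/*
 * Stand-in for set_spte(): updates one entry and returns a flag
 * instead of flushing inline.
 */
static int update_entry(unsigned long *entry, unsigned long new_val)
{
	int ret = 0;

	if (*entry != new_val) {
		*entry = new_val;
		ret |= NEED_FLUSH;
	}
	return ret;
}

/* Stand-in for sync_page(): aggregates flags, flushes at most once. */
static void sync_all(unsigned long *entries, int n)
{
	int ret = 0;
	int i;

	for (i = 0; i < n; i++)
		ret |= update_entry(&entries[i], i + 1);

	if (ret & NEED_FLUSH)
		flush_tlbs();
}

int main(void)
{
	unsigned long entries[8] = { 0 };

	sync_all(entries, 8);
	printf("flushes: %d\n", flush_count);	/* prints 1, not 8 */
	return 0;
}

With the pre-patch structure, each of the eight updates above would have
flushed individually. Aggregating the returned bits keeps the flush ordered
before the sync loop returns while cutting the remote flushes to at most one.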