On Tue, Aug 24, 2021, Lai Jiangshan wrote: > From: Lai Jiangshan <laijs@xxxxxxxxxxxxxxxxx> > > In mmu_sync_children(), it can zap the invalid list after remote tlb flushing. > Emptying the invalid list ASAP might help reduce a remote TLB flush > in some cases. > > Signed-off-by: Lai Jiangshan <laijs@xxxxxxxxxxxxxxxxx> > --- > arch/x86/kvm/mmu/mmu.c | 2 +- > 1 file changed, 1 insertion(+), 1 deletion(-) > > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c > index 987953a901d2..a165eb8713bc 100644 > --- a/arch/x86/kvm/mmu/mmu.c > +++ b/arch/x86/kvm/mmu/mmu.c > @@ -2050,7 +2050,7 @@ static bool mmu_sync_children(struct kvm_vcpu *vcpu, > protected |= rmap_write_protect(vcpu, sp->gfn); > > if (protected) { > - kvm_flush_remote_tlbs(vcpu->kvm); > + kvm_mmu_flush_or_zap(vcpu, &invalid_list, true, flush); This can just be kvm_mmu_remote_flush_or_zap(vcpu, &invalid_list, true); since a remote flush always does a local flush too. Related to the tlbs_dirty revert, to avoid overzealous flushing, kvm_sync_page() can pass back a "remote_flush" flag instead of doing the flush itself. Something like the below, or maybe multiplex an 'int' return. 
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index ac260e01e9d8..f61de53de55a 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2041,7 +2041,7 @@ static bool mmu_sync_children(struct kvm_vcpu *vcpu, struct mmu_page_path parents; struct kvm_mmu_pages pages; LIST_HEAD(invalid_list); - bool flush = false; + bool flush = false, remote_flush = false; while (mmu_unsync_walk(parent, &pages)) { bool protected = false; @@ -2050,17 +2050,17 @@ static bool mmu_sync_children(struct kvm_vcpu *vcpu, protected |= rmap_write_protect(vcpu, sp->gfn); if (protected) { - kvm_mmu_flush_or_zap(vcpu, &invalid_list, true, flush); - flush = false; + kvm_mmu_remote_flush_or_zap(vcpu, &invalid_list, true); + remote_flush = flush = false; } for_each_sp(pages, sp, parents, i) { kvm_unlink_unsync_page(vcpu->kvm, sp); - flush |= kvm_sync_page(vcpu, sp, &invalid_list); + flush |= kvm_sync_page(vcpu, sp, &invalid_list, &remote_flush); mmu_pages_clear_parents(&parents); } if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) { - kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); + kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, flush); cond_resched_rwlock_write(&vcpu->kvm->mmu_lock); /* * If @parent is not root, the caller doesn't have @@ -2074,7 +2074,7 @@ static bool mmu_sync_children(struct kvm_vcpu *vcpu, } } - kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); + kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, flush); return true; } > flush = false; > } > > -- > 2.19.1.6.gb485710b >