From: Lai Jiangshan <jiangshan.ljs@xxxxxxxxxxxx>

Sync the spte only when the spte is set, and avoid the indirect call
to mmu->sync_spte() for unset sptes.  Indirect branches are relatively
expensive, especially with retpolines enabled, so checking sp->spt[i]
directly in the callers sidesteps that cost.  Since the callers now
skip unset sptes, the !sp->spt[i] check in FNAME(sync_spte) becomes a
WARN_ON_ONCE().

Signed-off-by: Lai Jiangshan <jiangshan.ljs@xxxxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c         | 4 ++--
 arch/x86/kvm/mmu/paging_tmpl.h | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a8ac37d51287..cd8c38463c97 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1942,7 +1942,7 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		return -1;
 
 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
-		int ret = vcpu->arch.mmu->sync_spte(vcpu, sp, i);
+		int ret = sp->spt[i] ? vcpu->arch.mmu->sync_spte(vcpu, sp, i) : 0;
 
 		if (ret < -1)
 			return -1;
@@ -5764,7 +5764,7 @@ static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu
 	for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) {
 		struct kvm_mmu_page *sp = sptep_to_sp(iterator.sptep);
 
-		if (sp->unsync) {
+		if (sp->unsync && *iterator.sptep) {
 			/*
 			 * Get the gfn beforehand for later flushing.
 			 * Although mmu->sync_spte() doesn't change it, but just
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index e04950015dc4..3373d6705634 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -933,7 +933,7 @@ static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int
 	gpa_t pte_gpa;
 	gfn_t gfn;
 
-	if (!sp->spt[i])
+	if (WARN_ON_ONCE(!sp->spt[i]))
 		return 0;
 
 	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
-- 
2.19.1.6.gb485710b
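
[Not part of the patch: for readers outside KVM, the change is an
instance of the general pattern of guarding an indirect call with a
cheap direct test on the data it would operate on.  A minimal
standalone sketch of that pattern follows; all names are hypothetical
stand-ins (mmu_ops/sync_spte/spt mimic the kernel identifiers but this
is not kernel code):]

#include <stdio.h>

/* Hypothetical stand-ins for vcpu->arch.mmu->sync_spte() and sp->spt[]. */
struct mmu_ops {
	int (*sync_spte)(int i);
};

static int sync_spte(int i)
{
	printf("synced entry %d\n", i);
	return 0;
}

int main(void)
{
	struct mmu_ops mmu = { .sync_spte = sync_spte };
	unsigned long spt[8] = { 0, 0x42, 0, 0, 0x7, 0, 0, 0 };
	int i;

	for (i = 0; i < 8; i++) {
		/*
		 * Test the entry directly before taking the indirect
		 * branch; unset (zero) entries never reach the call.
		 */
		int ret = spt[i] ? mmu.sync_spte(i) : 0;

		if (ret < 0)
			return 1;
	}
	return 0;
}

[The direct load-and-test is cheap and well predicted, while the
indirect call costs a retpoline round trip on mitigated kernels, so
unset entries never pay for the call at all.]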