There are many places where we drop a large spte, each open-coding much
the same logic as drop_large_spte().  To eliminate this duplication,
make drop_large_spte() generically usable: it now takes a flag telling
it whether it must flush the remote TLBs, and it returns true or false
depending on the result of the is_large_pte() check.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@xxxxxxxxxxxxx>
---
 arch/x86/kvm/mmu.c         |   35 +++++++++++++++++------------------
 arch/x86/kvm/paging_tmpl.h |    4 ++--
 2 files changed, 19 insertions(+), 20 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5e761ff..2db12b3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1023,6 +1023,19 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
 		rmap_remove(kvm, sptep);
 }
 
+static bool drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush_now)
+{
+	if (!is_large_pte(*sptep))
+		return false;
+
+	drop_spte(kvm, sptep);
+	--kvm->stat.lpages;
+
+	if (flush_now)
+		kvm_flush_remote_tlbs(kvm);
+	return true;
+}
+
 int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
 			       struct kvm_memory_slot *slot)
 {
@@ -1052,8 +1065,7 @@ int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
 		BUG_ON(!is_large_pte(*spte));
 		pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
 		if (is_writable_pte(*spte)) {
-			drop_spte(kvm, spte);
-			--kvm->stat.lpages;
+			drop_large_spte(kvm, spte, false);
 			spte = NULL;
 			write_protected = 1;
 		}
@@ -1799,15 +1811,6 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
 	mmu_spte_set(sptep, spte);
 }
 
-static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
-{
-	if (is_large_pte(*sptep)) {
-		drop_spte(vcpu->kvm, sptep);
-		--vcpu->kvm->stat.lpages;
-		kvm_flush_remote_tlbs(vcpu->kvm);
-	}
-}
-
 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 				 unsigned direct_access)
 {
@@ -1839,9 +1842,8 @@ static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 	pte = *spte;
 	if (is_shadow_present_pte(pte)) {
 		if (is_last_spte(pte, sp->role.level)) {
-			drop_spte(kvm, spte);
-			if (is_large_pte(pte))
-				--kvm->stat.lpages;
+			if (!drop_large_spte(kvm, spte, false))
+				drop_spte(kvm, spte);
 		} else {
 			child = page_header(pte & PT64_BASE_ADDR_MASK);
 			drop_parent_pte(child, spte);
@@ -3859,11 +3861,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 			    !is_last_spte(pt[i], sp->role.level))
 				continue;
 
-			if (is_large_pte(pt[i])) {
-				drop_spte(kvm, &pt[i]);
-				--kvm->stat.lpages;
+			if (drop_large_spte(kvm, &pt[i], false))
 				continue;
-			}
 
 			/* avoid RMW */
 			if (is_writable_pte(pt[i]))
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 52e9d58..c40f074 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -498,7 +498,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		gfn_t table_gfn;
 
 		clear_sp_write_flooding_count(it.sptep);
-		drop_large_spte(vcpu, it.sptep);
+		drop_large_spte(vcpu->kvm, it.sptep, true);
 
 		sp = NULL;
 		if (!is_shadow_present_pte(*it.sptep)) {
@@ -526,7 +526,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		clear_sp_write_flooding_count(it.sptep);
 		validate_direct_spte(vcpu, it.sptep, direct_access);
 
-		drop_large_spte(vcpu, it.sptep);
+		drop_large_spte(vcpu->kvm, it.sptep, true);
 
 		if (is_shadow_present_pte(*it.sptep))
 			continue;
-- 
1.7.5.4
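
Note for reviewers, not part of the patch: the calling pattern the helper
converges on can be seen in a small standalone sketch.  Every name in it
(struct table, drop_entry, and so on) is an illustrative stand-in invented
for the example, not kernel code; the real helper operates on sptes and
flushes remote TLBs.

/*
 * Standalone sketch of the drop_large_spte() calling pattern.
 * Builds and runs in userspace; names are stand-ins, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

struct table {
	unsigned long entry;
	long large_count;		/* stands in for kvm->stat.lpages */
};

#define LARGE_BIT	(1UL << 7)	/* stands in for the large-pte bit */

static bool is_large(unsigned long e)
{
	return e & LARGE_BIT;
}

static void drop_entry(struct table *t)
{
	t->entry = 0;
}

static void flush(void)
{
	printf("flush\n");	/* stands in for kvm_flush_remote_tlbs() */
}

/* Mirrors the new drop_large_spte(): drop only if large, flush on request. */
static bool drop_large_entry(struct table *t, bool flush_now)
{
	if (!is_large(t->entry))
		return false;

	drop_entry(t);
	--t->large_count;

	if (flush_now)
		flush();
	return true;
}

int main(void)
{
	struct table t = { .entry = LARGE_BIT | 1, .large_count = 1 };

	/* mmu_page_zap_pte()-style call site: fall back when not large. */
	if (!drop_large_entry(&t, false))
		drop_entry(&t);

	/* FNAME(fetch)-style call site: flush the TLBs immediately. */
	t.entry = LARGE_BIT | 1;
	t.large_count = 1;
	drop_large_entry(&t, true);

	printf("large_count = %ld\n", t.large_count);
	return 0;
}

The point of the bool return is that a call site like mmu_page_zap_pte()
can fall back to a plain drop_entry()/drop_spte() when the entry was not
large, instead of re-testing is_large() itself, while write-protect paths
that batch their flush can pass flush_now == false and skip the immediate
TLB flush.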