If getting the gpa for a gva fails, e.g. because the gva isn't mapped in
the guest page tables, don't try to unprotect the invalid gfn.  This is
mostly a performance fix (it avoids unnecessarily taking mmu_lock), as
for_each_gfn_valid_sp_with_gptes() won't explode on garbage input; it's
simply pointless.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 065bb6180988..a5d1f6232f8c 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2739,8 +2739,11 @@ bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa)
 	if (!vcpu->kvm->arch.indirect_shadow_pages)
 		return false;
 
-	if (!vcpu->arch.mmu->root_role.direct)
+	if (!vcpu->arch.mmu->root_role.direct) {
 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
+		if (gpa == INVALID_GPA)
+			return false;
+	}
 
 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
 	if (r) {
@@ -2759,6 +2762,8 @@ static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 		return 0;
 
 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
+	if (gpa == INVALID_GPA)
+		return 0;
 
 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
 
-- 
2.46.0.76.ge559c4bf1a-goog
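
Editor's note, not part of the patch: the sketch below distills the guard
pattern both hunks apply — translate the gva first, and bail out before
ever touching mmu_lock if the translation yields the INVALID_GPA sentinel.
INVALID_GPA and gpa_to_gfn() are taken from the diff above;
translate_gva() and unprotect_gfn() are hypothetical stand-ins for the
real KVM helpers.

	/*
	 * Minimal sketch, assuming the translation helper returns
	 * INVALID_GPA on failure (as kvm_mmu_gva_to_gpa_read/write()
	 * do).  Not KVM code: translate_gva() and unprotect_gfn() are
	 * hypothetical placeholders.
	 */
	static bool unprotect_for_gva(struct kvm_vcpu *vcpu, gva_t gva)
	{
		gpa_t gpa = translate_gva(vcpu, gva);

		/*
		 * Translation failed, e.g. the gva isn't mapped in the
		 * guest page tables.  There is no valid gfn to
		 * unprotect, so return early instead of taking mmu_lock
		 * for nothing.
		 */
		if (gpa == INVALID_GPA)
			return false;

		return unprotect_gfn(vcpu->kvm, gpa_to_gfn(gpa));
	}

The design point is that the early return is purely an optimization:
per the commit message, the unprotect path tolerates garbage gfns, so
skipping it only avoids useless lock traffic.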