We can pass the page fault to the guest directly if the gpte's reserved
bits are set: such a gpte will fault in the guest on access anyway, so
when sync_page() drops the spte it can install shadow_notrap_nonpresent_pte
instead of shadow_trap_nonpresent_pte. Introduce a gpte_invalid flag to
hold the combined present/reserved-bit check.

Signed-off-by: Xiao Guangrong <xiaoguangrong@xxxxxxxxxxxxxx>
---
 arch/x86/kvm/paging_tmpl.h |   10 ++++++----
 1 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 291342d..952357a 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -760,6 +760,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		pt_element_t gpte;
 		gpa_t pte_gpa;
 		gfn_t gfn;
+		bool gpte_invalid;
 
 		if (!is_shadow_present_pte(sp->spt[i]))
 			continue;
@@ -771,12 +772,13 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			return -EINVAL;
 
 		gfn = gpte_to_gfn(gpte);
-		if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)
-		      || gfn != sp->gfns[i] || !is_present_gpte(gpte)
-		      || !(gpte & PT_ACCESSED_MASK)) {
+		gpte_invalid = !is_present_gpte(gpte) ||
+			is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL);
+		if (gpte_invalid || gfn != sp->gfns[i] ||
+		      !(gpte & PT_ACCESSED_MASK)) {
 			u64 nonpresent;
 
-			if (is_present_gpte(gpte) || !clear_unsync)
+			if (!gpte_invalid || !clear_unsync)
 				nonpresent = shadow_trap_nonpresent_pte;
 			else
 				nonpresent = shadow_notrap_nonpresent_pte;
-- 
1.7.0.4
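
(Not part of the patch — an illustrative, self-contained sketch of the
trap/notrap decision the patched sync_page() makes. The helpers and masks
below are simplified stand-ins for the real KVM definitions, and the choice
of bit 51 as a reserved bit is an assumption made only for the demo; the
point is just to show which gptes end up with a notrap spte so the fault can
be reflected straight to the guest.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the real KVM definitions. */
#define PT_PRESENT_MASK   (1ULL << 0)
#define PT_ACCESSED_MASK  (1ULL << 5)
#define RSVD_BITS_MASK    (1ULL << 51)	/* pretend bit 51 is reserved */

static bool is_present_gpte(uint64_t gpte)
{
	return gpte & PT_PRESENT_MASK;
}

static bool is_rsvd_bits_set(uint64_t gpte)
{
	return gpte & RSVD_BITS_MASK;
}

/*
 * Mirrors the inner decision in the patched sync_page(): a gpte that is
 * not present or has reserved bits set would fault in the guest anyway,
 * so (when clear_unsync allows it) the dropped spte can be made "notrap"
 * and the page fault reflected to the guest without a VM exit.
 */
static const char *nonpresent_kind(uint64_t gpte, bool clear_unsync)
{
	bool gpte_invalid = !is_present_gpte(gpte) || is_rsvd_bits_set(gpte);

	if (!gpte_invalid || !clear_unsync)
		return "shadow_trap_nonpresent_pte";	/* KVM intercepts the fault */

	return "shadow_notrap_nonpresent_pte";	/* fault goes straight to the guest */
}

int main(void)
{
	/* not present: guest faults anyway -> notrap */
	printf("%s\n", nonpresent_kind(0, true));
	/* reserved bit set: guest faults anyway -> notrap (the new case) */
	printf("%s\n", nonpresent_kind(PT_PRESENT_MASK | RSVD_BITS_MASK, true));
	/* present and clean (e.g. only the gfn changed): KVM must trap */
	printf("%s\n", nonpresent_kind(PT_PRESENT_MASK | PT_ACCESSED_MASK, true));
	return 0;
}

Built with a plain cc, this prints notrap for the not-present and
reserved-bit cases and trap for a clean present gpte, matching the intent
described in the changelog.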