[PATCH 15/18] KVM: MMU: Propagate the right fault back to the guest after gva_to_gpa

When a gva_to_gpa translation fails, the right kind of fault has to
be propagated back to the guest: a page-fault/page-fault-vmexit when
the fault happened in the guest's own page tables, and a
nested-page-fault-vmexit when the translation through the nested page
tables failed. This patch implements the logic for this by tagging
the error code with PFERR_NESTED_MASK when translate_gpa fails and
deciding on that bit in the new kvm_propagate_fault() helper.
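
The resulting decision logic is, in sketch form (vcpu->arch.mmu.nested
and vcpu->arch.nested_mmu are set up by earlier patches in this
series):

	nested = error_code &  PFERR_NESTED_MASK;
	error  = error_code & ~PFERR_NESTED_MASK;

	if (vcpu->arch.mmu.nested && !nested)
		/* fault in the L2 page tables: ordinary #PF for the guest */
		vcpu->arch.nested_mmu.inject_page_fault(vcpu, addr, error);
	else
		/* fault in the nested translation: nested-page-fault vmexit */
		vcpu->arch.mmu.inject_page_fault(vcpu, addr, error);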

Signed-off-by: Joerg Roedel <joerg.roedel@xxxxxxx>
---
 arch/x86/kvm/mmu.h         |    1 +
 arch/x86/kvm/paging_tmpl.h |    2 ++
 arch/x86/kvm/x86.c         |   21 ++++++++++++++++++++-
 3 files changed, 23 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 64f619b..b42b27e 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -47,6 +47,7 @@
 #define PFERR_USER_MASK (1U << 2)
 #define PFERR_RSVD_MASK (1U << 3)
 #define PFERR_FETCH_MASK (1U << 4)
+#define PFERR_NESTED_MASK (1U << 31)
 
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index c0158d8..9fc5fb1 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -154,6 +154,7 @@ walk:
 
 		pte_gpa = mmu->translate_gpa(vcpu, pte_gpa, &error);
 		if (pte_gpa == UNMAPPED_GVA) {
+			error |= PFERR_NESTED_MASK;
 			walker->error_code = error;
 			return 0;
 		}
@@ -223,6 +224,7 @@ walk:
 			pte_gpa = gfn_to_gpa(walker->gfn);
 			pte_gpa = mmu->translate_gpa(vcpu, pte_gpa, &error);
 			if (pte_gpa == UNMAPPED_GVA) {
+				error |= PFERR_NESTED_MASK;
 				walker->error_code = error;
 				return 0;
 			}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2883ce8..9f8b02d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -314,6 +314,25 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
 	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
 }
 
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, unsigned long addr, u32 error_code)
+{
+	u32 nested, error;
+
+	nested = error_code &  PFERR_NESTED_MASK;
+	error  = error_code & ~PFERR_NESTED_MASK;
+
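+	/*
+	 * Faults tagged with PFERR_NESTED_MASK happened while walking the
+	 * nested page tables (the gpa-to-gpa translation) and have to be
+	 * reported to L1 as a nested page fault. Everything else is an
+	 * ordinary page fault for the L2 guest.
+	 */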
+	if (vcpu->arch.mmu.nested && !nested)
+		vcpu->arch.nested_mmu.inject_page_fault(vcpu, addr, error);
+	else
+		vcpu->arch.mmu.inject_page_fault(vcpu, addr, error);
+}
+
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.nmi_pending = 1;
@@ -3546,7 +3565,7 @@ static int pio_copy_data(struct kvm_vcpu *vcpu)
 		ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code);
 
 	if (ret == X86EMUL_PROPAGATE_FAULT)
-		kvm_inject_page_fault(vcpu, q, error_code);
+		kvm_propagate_fault(vcpu, q, error_code);
 
 	return ret;
 }
-- 
1.7.0

