According to section "Canonicalization and Consistency Checks" in APM vol. 2,
the following guest state is illegal:

	"Any MBZ bit of CR3 is set."
	"Any MBZ bit of CR4 is set."

Reject VMRUN of a nested guest whose VMCB carries such state.  Note that in
long mode CR3 bits 11:0 may hold a PCID (when CR4.PCIDE=1), so only bits
63:52 are MBZ there; the CR4 mask is 64 bits wide so the always-MBZ upper
half of CR4 is covered in legacy mode as well.

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@xxxxxxxxxx>
---
 arch/x86/kvm/svm/nested.c | 20 ++++++++++++++++++++
 arch/x86/kvm/svm/svm.h    | 15 ++++++++++++++-
 2 files changed, 34 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 90a1ca9..1804a97 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -207,6 +207,26 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
 	if ((vmcb->save.efer & EFER_SVME) == 0)
 		return false;
 
+	/*
+	 * APM vol. 2, "Canonicalization and Consistency Checks": VMRUN
+	 * fails if any MBZ bit of CR3 or CR4 is set.
+	 */
+	if (!(vmcb->save.efer & EFER_LMA)) {
+		if (vmcb->save.cr4 & X86_CR4_PAE) {
+			if (vmcb->save.cr3 & CR3_LEGACY_PAE_RESERVED_MASK)
+				return false;
+		} else {
+			if (vmcb->save.cr3 & CR3_LEGACY_RESERVED_MASK)
+				return false;
+		}
+	} else {
+		if ((vmcb->save.cr4 & X86_CR4_PAE) &&
+		    (vmcb->save.cr3 & CR3_LONG_MBZ_MASK))
+			return false;
+	}
+	if (vmcb->save.cr4 & CR4_RESERVED_MASK)
+		return false;
+
 	if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
 		return false;
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index df3474f..796c083 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -354,7 +354,20 @@ static inline bool gif_set(struct vcpu_svm *svm)
 }
 
 /* svm.c */
-#define MSR_INVALID 0xffffffffU
+/*
+ * Guest CR3/CR4 MBZ-bit masks (APM vol. 2, "Canonicalization and
+ * Consistency Checks").  These are control-register masks, not MSRs.
+ */
+#define CR3_LEGACY_RESERVED_MASK	0xfe7U
+#define CR3_LEGACY_PAE_RESERVED_MASK	0x7U
+/* In long mode CR3 bits 11:0 may hold a PCID, so only 63:52 are MBZ. */
+#define CR3_LONG_MBZ_MASK		0xfff0000000000000ULL
+/*
+ * TODO: CR4 reserved bits really depend on guest CPUID (LA57, PCIDE,
+ * PKE, ...); this fixed mask only covers bits MBZ on current parts.
+ */
+#define CR4_RESERVED_MASK		0xffffffffff88f000ULL
+#define MSR_INVALID			0xffffffffU
 
 u32 svm_msrpm_offset(u32 msr);
 void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
-- 
1.8.3.1