On 7/8/20 3:03 AM, Paolo Bonzini wrote:
> On 08/07/20 02:39, Krish Sadhukhan wrote:
>> +extern int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
>> +
> This should be added in x86.h, not here.
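(Presumably just the bare prototype alongside the other declarations in
arch/x86/kvm/x86.h then, something like:

    int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);

with kvm_valid_cr4 made non-static in x86.c, instead of re-declaring it
extern at the call site.)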
>> +static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
>>  {
>>          if ((vmcb->save.efer & EFER_SVME) == 0)
>>                  return false;
>> @@ -231,6 +233,22 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
>>              (vmcb->save.cr0 & X86_CR0_NW))
>>                  return false;
>> +
>> +        if (!is_long_mode(&(svm->vcpu))) {
>> +                if (vmcb->save.cr4 & X86_CR4_PAE) {
>> +                        if (vmcb->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
>> +                                return false;
>> +                } else {
>> +                        if (vmcb->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
>> +                                return false;
>> +                }
>> +        } else {
>> +                if ((vmcb->save.cr4 & X86_CR4_PAE) &&
>> +                    (vmcb->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
>> +                        return false;
>> +        }
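
(For reference, the MSR_CR3_*_RESERVED_MASK macros are defined elsewhere
in this patch; the intent is to encode the CR3 MBZ bits from the APM,
roughly along these lines:

    #define MSR_CR3_LEGACY_RESERVED_MASK      0xfe7U
    #define MSR_CR3_LEGACY_PAE_RESERVED_MASK  0x7U
    #define MSR_CR3_LONG_RESERVED_MASK        0xfff0000000000fe7U

i.e. bits 2:0 and 11:5 in legacy non-PAE mode, bits 2:0 with PAE, and
additionally bits 63:52 in long mode.)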
> is_long_mode here is wrong, as it refers to the host.
> You need to do something like this:
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index 385461496cf5..cbbab83f19cc 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -222,8 +222,9 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
>          return true;
>  }
>
> -static bool nested_vmcb_checks(struct vmcb *vmcb)
> +static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
>  {
> +        bool nested_vmcb_lma;
>          if ((vmcb->save.efer & EFER_SVME) == 0)
>                  return false;
> @@ -234,6 +237,27 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
>          if (!kvm_dr6_valid(vmcb->save.dr6) || !kvm_dr7_valid(vmcb->save.dr7))
>                  return false;
>
> +        nested_vmcb_lma =
> +                (vmcb->save.efer & EFER_LME) &&
Just curious about using LME instead of LMA. According to the APM:

  "The processor behaves as a 32-bit x86 processor in all respects
   until long mode is activated, even if long mode is enabled. None of
   the new 64-bit data sizes, addressing, or system aspects available
   in long mode can be used until EFER.LMA=1."

Is it possible that L1 sets LME, but not LMA, in L2's VMCB, and that
this code will execute even though the processor is not in long mode?
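
(To make the question concrete: if I read the hunk right, the two
conditions together derive LMA rather than read it, since the CPU sets
EFER.LMA only when EFER.LME=1 and CR0.PG=1. A purely illustrative
helper, the name is hypothetical:

    /*
     * Illustrative sketch only: EFER.LMA is a derived bit; the CPU
     * activates long mode (LMA=1) iff long mode is enabled (LME=1)
     * and paging is on (PG=1).
     */
    static bool nested_guest_in_long_mode(struct vmcb *vmcb)
    {
            return (vmcb->save.efer & EFER_LME) &&
                   (vmcb->save.cr0 & X86_CR0_PG);
    }

So I'd like to confirm that deriving LMA from LME and PG this way is
always valid here, and that the LME=1, PG=0 case is meant to fall into
the legacy branch below.)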
> +                (vmcb->save.cr0 & X86_CR0_PG);
> +
> +        if (!nested_vmcb_lma) {
> +                if (vmcb->save.cr4 & X86_CR4_PAE) {
> +                        if (vmcb->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
> +                                return false;
> +                } else {
> +                        if (vmcb->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
> +                                return false;
> +                }
> +        } else {
> +                if (!(vmcb->save.cr4 & X86_CR4_PAE) ||
> +                    !(vmcb->save.cr0 & X86_CR0_PE) ||
> +                    (vmcb->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
> +                        return false;
> +        }
> +
> +        if (kvm_valid_cr4(&(svm->vcpu), vmcb->save.cr4))
> +                return false;
> +
>          return nested_vmcb_check_controls(&vmcb->control);
>  }
>
> which also takes care of other CR0/CR4 checks in the APM.
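
(For context, kvm_valid_cr4() lives in arch/x86/kvm/x86.c; IIRC its
shape is roughly the following, an abbreviated sketch rather than the
literal code:

    int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
    {
            /* Bits reserved on the host can never be valid. */
            if (cr4 & cr4_reserved_bits)
                    return -EINVAL;

            /* Features the guest's CPUID doesn't expose must stay clear. */
            if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
                (cr4 & X86_CR4_OSXSAVE))
                    return -EINVAL;

            /* ... similar CPUID-gated checks for SMEP, SMAP, PKU,
               LA57, UMIP, etc. ... */

            return 0;
    }

so its nonzero return on failure maps to the "return false" above.)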
> I'll test this a bit more and queue it. Are you also going to add
> more checks in svm_set_nested_state?
>
> Paolo