The path for SVM_SET_NESTED_STATE needs to have the same checks for the
CPU registers as we have in the VMRUN path for a nested guest. This patch
adds those missing checks to svm_set_nested_state().

Suggested-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@xxxxxxxxxx>
---
 arch/x86/kvm/svm/nested.c | 49 +++++++++++++++++++++------------------
 1 file changed, 27 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index e90bc436f584..28a931fa599e 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -215,9 +215,29 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
 	return true;
 }
 
+static bool nested_vmcb_check_cr3_cr4(struct vcpu_svm *svm,
+				      struct vmcb_save_area *save)
+{
+	if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
+		if (!(save->cr4 & X86_CR4_PAE) || !(save->cr0 & X86_CR0_PE) ||
+		    (save->cr3 & MSR_CR3_LONG_MBZ_MASK))
+			return false;
+	} else if (save->cr4 & X86_CR4_PAE) {
+		if (save->cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
+			return false;
+	} else {
+		if (save->cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
+			return false;
+	}
+
+	if (kvm_valid_cr4(&svm->vcpu, save->cr4))
+		return false;
+
+	return true;
+}
+
 static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
 {
-	bool nested_vmcb_lma;
 	if ((vmcb->save.efer & EFER_SVME) == 0)
 		return false;
 
@@ -228,25 +248,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
 	if (!kvm_dr6_valid(vmcb->save.dr6) || !kvm_dr7_valid(vmcb->save.dr7))
 		return false;
 
-	nested_vmcb_lma =
-		(vmcb->save.efer & EFER_LME) &&
-		(vmcb->save.cr0 & X86_CR0_PG);
-
-	if (!nested_vmcb_lma) {
-		if (vmcb->save.cr4 & X86_CR4_PAE) {
-			if (vmcb->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
-				return false;
-		} else {
-			if (vmcb->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
-				return false;
-		}
-	} else {
-		if (!(vmcb->save.cr4 & X86_CR4_PAE) ||
-		    !(vmcb->save.cr0 & X86_CR0_PE) ||
-		    (vmcb->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
-			return false;
-	}
-	if (kvm_valid_cr4(&svm->vcpu, vmcb->save.cr4))
+	if (!nested_vmcb_check_cr3_cr4(svm, &(vmcb->save)))
 		return false;
 
 	return nested_vmcb_check_controls(&vmcb->control);
@@ -1116,9 +1118,12 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 	/*
 	 * Validate host state saved from before VMRUN (see
 	 * nested_svm_check_permissions).
-	 * TODO: validate reserved bits for all saved state.
 	 */
-	if (!(save.cr0 & X86_CR0_PG))
+	if (!(save.cr0 & X86_CR0_PG) ||
+	    !nested_vmcb_check_cr3_cr4(svm, &save) ||
+	    !kvm_dr6_valid(save.dr6) ||
+	    !kvm_dr7_valid(save.dr7) ||
+	    !kvm_valid_efer(vcpu, save.efer))
 		return -EINVAL;
 
 	/*
-- 
2.18.4
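
For context, nested_vmcb_check_cr3_cr4() above selects a different CR3
mask depending on the effective paging mode described by the saved state
(long mode, legacy PAE, or legacy 32-bit), and additionally runs CR4
through KVM's common kvm_valid_cr4() helper. The stand-alone user-space
program below is only a sketch of that mode-selection logic for
illustration; the mask values it uses are assumptions made for this
example, not the definitions from the kernel's SVM headers.

/*
 * Sketch of the per-paging-mode CR3 reserved-bit check performed by
 * nested_vmcb_check_cr3_cr4().  The CR3 mask values are assumed for
 * illustration only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFER_LME	(1ULL << 8)	/* long mode enable */
#define CR0_PE		(1ULL << 0)	/* protected mode */
#define CR0_PG		(1ULL << 31)	/* paging */
#define CR4_PAE		(1ULL << 5)	/* physical address extension */

/* Illustrative masks only (assumed for this sketch). */
#define CR3_LONG_MBZ_MASK		0xfff0000000000000ULL
#define CR3_LEGACY_PAE_RESERVED_MASK	0x0000000000000007ULL
#define CR3_LEGACY_RESERVED_MASK	0x0000000000000fe7ULL

static bool check_cr3(uint64_t efer, uint64_t cr0, uint64_t cr4, uint64_t cr3)
{
	if ((efer & EFER_LME) && (cr0 & CR0_PG)) {
		/*
		 * Long mode: PAE and protected mode must be on, and the
		 * high CR3 bits must be zero.
		 */
		if (!(cr4 & CR4_PAE) || !(cr0 & CR0_PE) ||
		    (cr3 & CR3_LONG_MBZ_MASK))
			return false;
	} else if (cr4 & CR4_PAE) {
		/* Legacy PAE paging: low control bits are reserved. */
		if (cr3 & CR3_LEGACY_PAE_RESERVED_MASK)
			return false;
	} else {
		/* Legacy 32-bit paging. */
		if (cr3 & CR3_LEGACY_RESERVED_MASK)
			return false;
	}
	return true;
}

int main(void)
{
	/* A valid long-mode CR3 passes ... */
	printf("%d\n", check_cr3(EFER_LME, CR0_PG | CR0_PE, CR4_PAE, 0x1000));
	/* ... while one with a high MBZ bit set is rejected. */
	printf("%d\n", check_cr3(EFER_LME, CR0_PG | CR0_PE, CR4_PAE,
				 1ULL << 62));
	return 0;
}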