Move the checks done by nested_vmcb_valid_sregs and
nested_vmcb_check_controls directly into enter_svm_guest_mode, and use
the svm->nested.save cached fields (EFER, CR0, CR4) instead of
vmcb12's.  This prevents TOC/TOU races.  It also avoids the need to
force-set EFER_SVME in nested_vmcb02_prepare_save.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@xxxxxxxxxx>
---
 arch/x86/kvm/svm/nested.c | 23 ++++++-----------------
 1 file changed, 6 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 2491c77203c7..487810cfefde 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -280,13 +280,6 @@ static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
 static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu,
 				    struct vmcb_save_area *save)
 {
-	/*
-	 * FIXME: these should be done after copying the fields,
-	 * to avoid TOC/TOU races.  For these save area checks
-	 * the possible damage is limited since kvm_set_cr0 and
-	 * kvm_set_cr4 handle failure; EFER_SVME is an exception
-	 * so it is force-set later in nested_prepare_vmcb_save.
-	 */
 	if (CC(!(save->efer & EFER_SVME)))
 		return false;
 
@@ -459,7 +452,8 @@ void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
 	svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
 }
 
-static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
+static void nested_vmcb02_prepare_save(struct vcpu_svm *svm,
+				       struct vmcb *vmcb12)
 {
 	bool new_vmcb12 = false;
 
@@ -488,15 +482,10 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
 
 	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
 
-	/*
-	 * Force-set EFER_SVME even though it is checked earlier on the
-	 * VMCB12, because the guest can flip the bit between the check
-	 * and now.  Clearing EFER_SVME would call svm_free_nested.
-	 */
-	svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
+	svm_set_efer(&svm->vcpu, svm->nested.save.efer);
 
-	svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
-	svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
+	svm_set_cr0(&svm->vcpu, svm->nested.save.cr0);
+	svm_set_cr4(&svm->vcpu, svm->nested.save.cr4);
 
 	svm->vcpu.arch.cr2 = vmcb12->save.cr2;
 
@@ -671,7 +660,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
 	nested_load_control_from_vmcb12(svm, &vmcb12->control);
 	nested_load_save_from_vmcb12(svm, &vmcb12->save);
 
-	if (!nested_vmcb_valid_sregs(vcpu, &vmcb12->save) ||
+	if (!nested_vmcb_valid_sregs(vcpu, &svm->nested.save) ||
 	    !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) {
 		vmcb12->control.exit_code = SVM_EXIT_ERR;
 		vmcb12->control.exit_code_hi = 0;
-- 
2.27.0
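
For context, the TOC/TOU pattern being closed here boils down to validating a
snapshot the host owns instead of guest-writable memory.  The standalone
sketch below illustrates that copy-then-check pattern with made-up names
(fake_save_area, FAKE_EFER_SVME, snapshot_and_check); it is not KVM code and
makes no claim about the real structures or call sites:

	/*
	 * Sketch (not KVM code) of copy-then-check: snapshot guest-writable
	 * state into host-owned memory, validate the snapshot, and only ever
	 * consume the snapshot afterwards.  All names here are illustrative.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define FAKE_EFER_SVME	(1u << 12)

	struct fake_save_area {
		uint64_t efer;
		uint64_t cr0;
		uint64_t cr4;
	};

	/*
	 * Copy first, then check: the guest may keep writing to 'guest', but
	 * the decision and all later uses are based on 'cache' only, so there
	 * is no window between time-of-check and time-of-use.
	 */
	static bool snapshot_and_check(const volatile struct fake_save_area *guest,
				       struct fake_save_area *cache)
	{
		cache->efer = guest->efer;
		cache->cr0  = guest->cr0;
		cache->cr4  = guest->cr4;

		return (cache->efer & FAKE_EFER_SVME) != 0;
	}

	int main(void)
	{
		struct fake_save_area guest = { .efer = FAKE_EFER_SVME };
		struct fake_save_area cache;

		if (snapshot_and_check(&guest, &cache)) {
			/* Simulate the guest flipping the bit after the check. */
			guest.efer = 0;

			/* The host still consumes the validated snapshot. */
			printf("cached efer=%#llx, guest efer is now %#llx\n",
			       (unsigned long long)cache.efer,
			       (unsigned long long)guest.efer);
		}
		return 0;
	}

The patch applies the same reasoning: once the checks and the svm_set_*()
calls both operate on svm->nested.save, the guest can no longer change the
relevant fields between validation and use.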