An undesired triple fault gets injected to the L1 guest on SVM when L2 is
launched with certain CR3 values. The #TF is raised by the mmu_check_root()
check in fast_pgd_switch(), and the root cause is that when kvm_set_cr3() is
called from nested_prepare_vmcb_save() with NPT enabled, CR3 points to an
nGPA, so we can't check it with kvm_is_visible_gfn().

Calling kvm_mmu_new_pgd() with L2's CR3 when NPT is in use seems to be wrong;
an acceptable place for it seems to be kvm_init_shadow_npt_mmu(). This also
matches the nVMX code.

Fixes: 7c390d350f8b ("kvm: x86: Add fast CR3 switch code path")
Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h | 7 ++++++-
 arch/x86/kvm/mmu/mmu.c          | 2 ++
 arch/x86/kvm/svm/nested.c       | 2 +-
 arch/x86/kvm/x86.c              | 8 +++++---
 4 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index be5363b21540..49b62f024f51 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1459,7 +1459,12 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
 		    int reason, bool has_error_code, u32 error_code);
 
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+int __kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool cr3_is_nested);
+static inline int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+{
+	return __kvm_set_cr3(vcpu, cr3, false);
+}
+
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 167d12ab957a..ebf0cb3f1ce0 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4987,6 +4987,8 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
 	union kvm_mmu_role new_role =
 		kvm_calc_shadow_mmu_root_page_role(vcpu, false);
 
+	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, true, true);
+
 	if (new_role.as_u64 != context->mmu_role.as_u64)
 		shadow_mmu_init_context(vcpu, cr0, cr4, efer, new_role);
 }
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index e424bce13e6c..b467917a9784 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -324,7 +324,7 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_v
 	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
 	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
 	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
-	(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
+	(void)__kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3, npt_enabled);
 
 	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
 	kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3b92db412335..3761135eb052 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1004,7 +1004,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr4);
 
-int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+int __kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool cr3_is_nested)
 {
 	bool skip_tlb_flush = false;
 #ifdef CONFIG_X86_64
@@ -1031,13 +1031,15 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
 		return 1;
 
-	kvm_mmu_new_pgd(vcpu, cr3, skip_tlb_flush, skip_tlb_flush);
+	if (!cr3_is_nested)
+		kvm_mmu_new_pgd(vcpu, cr3, skip_tlb_flush, skip_tlb_flush);
+
 	vcpu->arch.cr3 = cr3;
 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_set_cr3);
+EXPORT_SYMBOL_GPL(__kvm_set_cr3);
 
 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
-- 
2.25.4
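
P.S. For readers unfamiliar with the failing path: mmu_check_root() in
arch/x86/kvm/mmu/mmu.c rejects any root gfn that has no L1 memslot backing
it and queues a triple fault for the vCPU. A rough sketch of its logic at
the time of 7c390d350f8b, paraphrased for illustration only (not part of
this patch, details may differ from the exact tree):

	static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
	{
		int ret = 0;

		/*
		 * kvm_is_visible_gfn() consults L1's memslots. An nGPA
		 * root loaded by nested_prepare_vmcb_save() with NPT
		 * enabled need not be a valid L1 gfn, so the check
		 * misfires and a triple fault gets injected into L1.
		 */
		if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
			ret = 1;
		}

		return ret;
	}

This is why the nGPA case has to bypass kvm_mmu_new_pgd() in
__kvm_set_cr3() and do the PGD switch from kvm_init_shadow_npt_mmu()
instead, where the nested_cr3 is known to be a nGPA by construction.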