This generalizes the lockless CR3 switch path to be able to work across different MMU modes (e.g. nested vs non-nested) by checking that the expected page role of the new root page matches the page role of the previously stored root page in addition to checking that the new CR3 matches the previous CR3. Furthermore, instead of loading the hardware CR3 in fast_cr3_switch(), it is now done in vcpu_enter_guest(), as by that time the MMU context would be up-to-date with the VCPU mode. Signed-off-by: Junaid Shahid <junaids@xxxxxxxxxx> --- arch/x86/include/asm/kvm_host.h | 3 ++- arch/x86/kvm/mmu.c | 19 ++++++++++--------- arch/x86/kvm/x86.c | 2 +- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 42a6b5616ce2..c3cae44e7c34 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1308,7 +1308,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code, void *insn, int insn_len); void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva); -void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3); +void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, + union kvm_mmu_page_role new_role); void kvm_enable_tdp(void); void kvm_disable_tdp(void); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 8fddcb456a44..4c11221e83ac 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4027,7 +4027,8 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu, context->nx = false; } -static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3) +static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3, + union kvm_mmu_page_role new_role) { struct kvm_mmu *mmu = &vcpu->arch.mmu; @@ -4046,7 +4047,10 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3) swap(mmu->root_hpa, mmu->prev_root.hpa); mmu->prev_root.cr3 = kvm_read_cr3(vcpu); - if (new_cr3 == prev_cr3 && 
- VALID_PAGE(mmu->root_hpa)) { + if (new_cr3 == prev_cr3 && + VALID_PAGE(mmu->root_hpa) && + page_header(mmu->root_hpa) != NULL && + new_role.word == page_header(mmu->root_hpa)->role.word) { /* * It is possible that the cached previous root page is * obsolete because of a change in the MMU @@ -4055,15 +4059,11 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3) * have set here and allocate a new one. */ + kvm_make_request(KVM_REQ_LOAD_CR3, vcpu); kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); __clear_sp_write_flooding_count( page_header(mmu->root_hpa)); - vcpu->arch.cr3 = new_cr3; - __set_bit(VCPU_EXREG_CR3, - (ulong *)&vcpu->arch.regs_avail); - mmu->set_cr3(vcpu, mmu->root_hpa); - return true; } } @@ -4071,9 +4071,10 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3) return false; } -void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3) +void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, + union kvm_mmu_page_role new_role) { - if (!fast_cr3_switch(vcpu, new_cr3)) + if (!fast_cr3_switch(vcpu, new_cr3, new_role)) kvm_mmu_free_roots(vcpu, false); } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 239c814c07c3..f1b352a2604b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -867,7 +867,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) return 1; - kvm_mmu_new_cr3(vcpu, cr3); + kvm_mmu_new_cr3(vcpu, cr3, kvm_mmu_calc_root_page_role(vcpu)); vcpu->arch.cr3 = cr3; __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); -- 2.18.0.rc1.242.g61856ae69a-goog