MMU re-initialization is expensive; in particular, update_permission_bitmask()
and update_pkru_bitmask() are.

Cache the data used to set up the shadow EPT MMU and avoid a full re-init
when it is unchanged.

Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h | 14 +++++++++++
 arch/x86/kvm/mmu.c              | 51 ++++++++++++++++++++++++++++++-----------
 2 files changed, 52 insertions(+), 13 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6ca7d28d57e9..ab46e9493bd4 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -273,8 +273,22 @@ union kvm_mmu_page_role {
 	};
 };
 
+/*
+ * This structure complements kvm_mmu_page_role caching everything needed for
+ * MMU configuration. If nothing in both these structures changed, MMU
+ * re-configuration can be skipped. @valid bit is set on first usage so we don't
+ * treat all-zero structure as valid data.
+ */
 union kvm_mmu_scache {
 	unsigned int word;
+	struct {
+		unsigned int valid:1;
+		unsigned int execonly:1;
+		unsigned int cr4_pse:1;
+		unsigned int cr4_pke:1;
+		unsigned int cr4_smap:1;
+		unsigned int cr4_smep:1;
+	};
 };
 
 union kvm_mmu_role {
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8d8e6fa75fa3..9e9e12d7cb28 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4708,6 +4708,24 @@ static void paging32E_init_context(struct kvm_vcpu *vcpu,
 	paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
 }
 
+static union kvm_mmu_role
+kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu)
+{
+	union kvm_mmu_role role = {0};
+
+	role.base_role.access = ACC_ALL;
+	role.base_role.cr0_wp = is_write_protection(vcpu);
+
+	role.scache.cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0;
+	role.scache.cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0;
+	role.scache.cr4_pse = !!is_pse(vcpu);
+	role.scache.cr4_pke = kvm_read_cr4_bits(vcpu, X86_CR4_PKE) != 0;
+
+	role.scache.valid = 1;
+
+	return role;
+}
+
 static union kvm_mmu_page_role
 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu)
 {
@@ -4814,16 +4832,18 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 
-static union kvm_mmu_page_role
-kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty)
+static union kvm_mmu_role
+kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
+				   bool execonly)
 {
-	union kvm_mmu_page_role role = vcpu->arch.mmu->mmu_role.base_role;
+	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu);
 
-	role.level = PT64_ROOT_4LEVEL;
-	role.direct = false;
-	role.ad_disabled = !accessed_dirty;
-	role.guest_mode = true;
-	role.access = ACC_ALL;
+	role.base_role.level = PT64_ROOT_4LEVEL;
+	role.base_role.direct = false;
+	role.base_role.ad_disabled = !accessed_dirty;
+	role.base_role.guest_mode = true;
+
+	role.scache.execonly = execonly;
 
 	return role;
 }
@@ -4832,10 +4852,16 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 			     bool accessed_dirty, gpa_t new_eptp)
 {
 	struct kvm_mmu *context = vcpu->arch.mmu;
-	union kvm_mmu_page_role root_page_role =
-		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty);
+	union kvm_mmu_role new_role =
+		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
+						   execonly);
+
+	__kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base_role, false);
+
+	new_role.base_role.word &= mmu_base_role_mask.word;
+	if (new_role.as_u64 == context->mmu_role.as_u64)
+		return;
 
-	__kvm_mmu_new_cr3(vcpu, new_eptp, root_page_role, false);
 	context->shadow_root_level = PT64_ROOT_4LEVEL;
 
 	context->nx = true;
@@ -4847,8 +4873,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 	context->update_pte = ept_update_pte;
 	context->root_level = PT64_ROOT_4LEVEL;
 	context->direct_map = false;
-	context->mmu_role.base_role.word =
-		root_page_role.word & mmu_base_role_mask.word;
+	context->mmu_role.as_u64 = new_role.as_u64;
 	context->get_pdptr = kvm_pdptr_read;
 
 	update_permission_bitmask(vcpu, context, true);
-- 
2.14.4
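
The idea the patch relies on can be illustrated outside of KVM: pack every
input that influences the expensive configuration step into a union of
bit-fields, keep a valid bit so an all-zero cache is never mistaken for real
data, and compare the packed word before redoing the work. The sketch below is
a minimal, self-contained illustration of that pattern, not KVM code; all
names (demo_role, demo_ctx, demo_reconfigure) are hypothetical, and it
deliberately ignores the base-role masking the patch also performs.

/*
 * Minimal sketch of the caching pattern (not KVM code; all names here are
 * hypothetical).  Configuration inputs are packed into a union of
 * bit-fields; if the packed word is unchanged, the expensive
 * reconfiguration is skipped.
 */
#include <stdbool.h>
#include <stdio.h>

union demo_role {
	unsigned int word;
	struct {
		/* set on first use so an all-zero cache is never valid */
		unsigned int valid:1;
		unsigned int execonly:1;
		unsigned int cr4_smep:1;
		unsigned int cr4_smap:1;
	};
};

struct demo_ctx {
	union demo_role cached_role;
	unsigned long reconfig_count;	/* stand-in for the expensive work */
};

static union demo_role demo_calc_role(bool execonly, bool smep, bool smap)
{
	union demo_role role = { .word = 0 };

	role.valid = 1;
	role.execonly = execonly;
	role.cr4_smep = smep;
	role.cr4_smap = smap;

	return role;
}

static void demo_reconfigure(struct demo_ctx *ctx, bool execonly,
			     bool smep, bool smap)
{
	union demo_role new_role = demo_calc_role(execonly, smep, smap);

	/* Nothing relevant changed: skip the expensive re-init. */
	if (new_role.word == ctx->cached_role.word)
		return;

	ctx->cached_role = new_role;
	ctx->reconfig_count++;	/* the real code would rebuild bitmasks here */
}

int main(void)
{
	struct demo_ctx ctx = { 0 };

	demo_reconfigure(&ctx, true, true, false);	/* first use: re-init */
	demo_reconfigure(&ctx, true, true, false);	/* unchanged: skipped */
	demo_reconfigure(&ctx, true, false, false);	/* SMEP toggled: re-init */

	printf("reconfigurations: %lu\n", ctx.reconfig_count);	/* prints 2 */
	return 0;
}

Comparing a single packed word is what keeps the fast path cheap: a changed
CR4 bit or execonly flag flips the corresponding bit-field, the words differ,
and the full reconfiguration runs; otherwise kvm_init_shadow_ept_mmu() can
return right after __kvm_mmu_new_cr3(), which is what the early return added
above does.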