Pass the already-computed CPU role, instead of redoing it.

Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 6c0287d60781..92dade92462c 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4803,15 +4803,14 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 
 static union kvm_mmu_role
 kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
-				   const struct kvm_mmu_role_regs *regs)
+				   union kvm_mmu_role cpu_role)
 {
-	union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
 	union kvm_mmu_role role;
 
 	role = cpu_role;
-	if (!____is_efer_lma(regs))
+	if (!cpu_role.ext.efer_lma)
 		role.base.level = PT32E_ROOT_LEVEL;
-	else if (____is_cr4_la57(regs))
+	else if (cpu_role.ext.cr4_la57)
 		role.base.level = PT64_ROOT_5LEVEL;
 	else
 		role.base.level = PT64_ROOT_4LEVEL;
@@ -4850,16 +4849,15 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
 	union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
 	union kvm_mmu_role mmu_role =
-		kvm_calc_shadow_mmu_root_page_role(vcpu, regs);
+		kvm_calc_shadow_mmu_root_page_role(vcpu, cpu_role);
 
 	shadow_mmu_init_context(vcpu, context, cpu_role, mmu_role);
 }
 
 static union kvm_mmu_role
 kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
-				   const struct kvm_mmu_role_regs *regs)
+				   union kvm_mmu_role cpu_role)
 {
-	union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
 	union kvm_mmu_role role;
 
 	WARN_ON_ONCE(cpu_role.base.direct);
@@ -4879,7 +4877,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
 		.efer = efer,
 	};
 	union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
-	union kvm_mmu_role mmu_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);;
+	union kvm_mmu_role mmu_role = kvm_calc_shadow_npt_root_page_role(vcpu, cpu_role);
 
 	shadow_mmu_init_context(vcpu, context, cpu_role, mmu_role);
 	kvm_mmu_new_pgd(vcpu, nested_cr3);
-- 
2.31.1