The init_kvm_*mmu functions, with the exception of shadow NPT, do not
need to know the full values of CR0/CR4/EFER; they only need to know
the bits that make up the "role".  This cleanup, however, will take
quite a few incremental steps.  As a start, pull the common computation
of the struct kvm_mmu_role_regs into their caller: all of them extract
the struct from the vcpu as the very first step.

Reviewed-by: David Matlack <dmatlack@xxxxxxxxxx>
Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c | 28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 07b8550e68e9..d56875938c29 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4778,12 +4778,12 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
 	return role;
 }
 
-static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
+			     const struct kvm_mmu_role_regs *regs)
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
-	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
 	union kvm_mmu_role new_role =
-		kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
+		kvm_calc_tdp_mmu_root_page_role(vcpu, regs, false);
 
 	if (new_role.as_u64 == context->mmu_role.as_u64)
 		return;
@@ -4797,7 +4797,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
-	context->root_level = role_regs_to_root_level(&regs);
+	context->root_level = role_regs_to_root_level(regs);
 
 	if (!is_cr0_pg(context))
 		context->gva_to_gpa = nonpaging_gva_to_gpa;
@@ -4966,12 +4966,12 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 
-static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
+static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
+			     const struct kvm_mmu_role_regs *regs)
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
-	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
 
-	kvm_init_shadow_mmu(vcpu, &regs);
+	kvm_init_shadow_mmu(vcpu, regs);
 
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
@@ -4995,10 +4995,10 @@ kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *
 	return role;
 }
 
-static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
+				const struct kvm_mmu_role_regs *regs)
 {
-	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
-	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
+	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, regs);
 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
 
 	if (new_role.as_u64 == g_context->mmu_role.as_u64)
@@ -5038,12 +5038,14 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 
 void kvm_init_mmu(struct kvm_vcpu *vcpu)
 {
+	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
+
 	if (mmu_is_nested(vcpu))
-		init_kvm_nested_mmu(vcpu);
+		init_kvm_nested_mmu(vcpu, &regs);
 	else if (tdp_enabled)
-		init_kvm_tdp_mmu(vcpu);
+		init_kvm_tdp_mmu(vcpu, &regs);
 	else
-		init_kvm_softmmu(vcpu);
+		init_kvm_softmmu(vcpu, &regs);
 }
 EXPORT_SYMBOL_GPL(kvm_init_mmu);
 
-- 
2.31.1
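
[Note for readers following along without the tree at hand: the helper
being hoisted, vcpu_to_role_regs(), does not appear in the diff itself.
A minimal sketch of its shape, going by the mmu.c of this era (the exact
accessor names here are my recollection, not part of this patch):

static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
{
	/* Snapshot only the registers that feed the MMU role bits. */
	struct kvm_mmu_role_regs regs = {
		.cr0 = kvm_read_cr0(vcpu),	/* e.g. CR0.PG, CR0.WP */
		.cr4 = kvm_read_cr4(vcpu),	/* e.g. CR4.PAE, SMEP, LA57 */
		.efer = vcpu->arch.efer,	/* e.g. EFER.LME, EFER.NX */
	};

	return regs;
}

Since all three init_kvm_*_mmu() paths started with exactly this call,
kvm_init_mmu() can take the snapshot once and thread it down by const
pointer, which is what this patch does.]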