On Fri, Feb 04, 2022 at 06:57:15AM -0500, Paolo Bonzini wrote: > Do not lead init_kvm_*mmu into the temptation of poking > into struct kvm_mmu_role_regs, by passing to it directly > the CPU role. > > Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx> > --- > arch/x86/kvm/mmu/mmu.c | 21 +++++++++------------ > 1 file changed, 9 insertions(+), 12 deletions(-) > > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c > index 01027da82e23..6f9d876ce429 100644 > --- a/arch/x86/kvm/mmu/mmu.c > +++ b/arch/x86/kvm/mmu/mmu.c > @@ -4721,11 +4721,9 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, > return role; > } > > -static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, > - const struct kvm_mmu_role_regs *regs) > +static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, union kvm_mmu_role cpu_role) > { > struct kvm_mmu *context = &vcpu->arch.root_mmu; > - union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs); > union kvm_mmu_page_role mmu_role = kvm_calc_tdp_mmu_root_page_role(vcpu, cpu_role); > > if (cpu_role.as_u64 == context->cpu_role.as_u64 && > @@ -4779,10 +4777,9 @@ static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *conte > } > > static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, > - const struct kvm_mmu_role_regs *regs) > + union kvm_mmu_role cpu_role) > { > struct kvm_mmu *context = &vcpu->arch.root_mmu; > - union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs); > union kvm_mmu_page_role mmu_role; > > mmu_role = cpu_role.base; > @@ -4874,20 +4871,19 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, > EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); > > static void init_kvm_softmmu(struct kvm_vcpu *vcpu, > - const struct kvm_mmu_role_regs *regs) > + union kvm_mmu_role cpu_role) > { > struct kvm_mmu *context = &vcpu->arch.root_mmu; > > - kvm_init_shadow_mmu(vcpu, regs); > + kvm_init_shadow_mmu(vcpu, cpu_role); > > context->get_guest_pgd = get_cr3; > context->get_pdptr = kvm_pdptr_read; > 
context->inject_page_fault = kvm_inject_page_fault; > } > > -static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs) > +static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, union kvm_mmu_role new_role) > { > - union kvm_mmu_role new_role = kvm_calc_cpu_role(vcpu, regs); > struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; > > if (new_role.as_u64 == g_context->cpu_role.as_u64) > @@ -4928,13 +4924,14 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, const struct kvm_mmu_role > void kvm_init_mmu(struct kvm_vcpu *vcpu) > { > struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu); > + union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs); WDYT about also inlining vcpu_to_role_regs() in kvm_calc_cpu_role()? > > if (mmu_is_nested(vcpu)) > - init_kvm_nested_mmu(vcpu, &regs); > + init_kvm_nested_mmu(vcpu, cpu_role); > else if (tdp_enabled) > - init_kvm_tdp_mmu(vcpu, &regs); > + init_kvm_tdp_mmu(vcpu, cpu_role); > else > - init_kvm_softmmu(vcpu, &regs); > + init_kvm_softmmu(vcpu, cpu_role); > } > EXPORT_SYMBOL_GPL(kvm_init_mmu); > > -- > 2.31.1 > >