kvm_calc_shadow_root_page_role_common is the same as kvm_calc_cpu_role
except for the level, which is overwritten afterwards in
kvm_calc_shadow_mmu_root_page_role and kvm_calc_shadow_npt_root_page_role.

role.base.direct is already set correctly for the CPU role, and CR0.PG=1
is required for VMRUN so it will also be correct for nested NPT.

Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c | 21 ++-------------------
 1 file changed, 2 insertions(+), 19 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d6b5d8c1c0dc..19abf1e4cee9 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4772,27 +4772,11 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 	reset_tdp_shadow_zero_bits_mask(context);
 }
 
-static union kvm_mmu_role
-kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
-				      const struct kvm_mmu_role_regs *regs)
-{
-	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs);
-
-	role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
-	role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
-	role.base.has_4_byte_gpte = ____is_cr0_pg(regs) && !____is_cr4_pae(regs);
-
-	return role;
-}
-
 static union kvm_mmu_role
 kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
 				   const struct kvm_mmu_role_regs *regs)
 {
-	union kvm_mmu_role role =
-		kvm_calc_shadow_root_page_role_common(vcpu, regs);
-
-	role.base.direct = !____is_cr0_pg(regs);
+	union kvm_mmu_role role = kvm_calc_cpu_role(vcpu, regs);
 
 	if (!____is_efer_lma(regs))
 		role.base.level = PT32E_ROOT_LEVEL;
@@ -4853,9 +4837,8 @@ kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
 				   const struct kvm_mmu_role_regs *regs)
 {
 	union kvm_mmu_role role =
-		kvm_calc_shadow_root_page_role_common(vcpu, regs);
+		kvm_calc_cpu_role(vcpu, regs);
 
-	role.base.direct = false;
 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
 
 	return role;
-- 
2.31.1
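
The redundancy argument above can be illustrated with a small standalone
check. The sketch below is not kernel code: struct regs, struct role,
removed_helper() and cpu_role() are simplified stand-ins written from the
reasoning in the commit message (the ext.cr4_smep/cr4_smap inputs of the
removed helper are only populated when CR0.PG=1, and the CPU role takes
the direct path when paging is off), not copies of what is in mmu.c. It
exhaustively compares the bits the removed helper computed against the
same bits as the CPU-role path computes them.

/*
 * Standalone sketch, not kernel code: the types and the shape of
 * cpu_role() below are simplified stand-ins for union kvm_mmu_role
 * and kvm_calc_cpu_role(), written from the reasoning in the commit
 * message rather than copied from mmu.c.
 */
#include <stdbool.h>
#include <stdio.h>

struct regs { bool cr0_pg, cr0_wp, cr4_pae, cr4_smep, cr4_smap; };

struct role { bool direct, smep_andnot_wp, smap_andnot_wp, has_4_byte_gpte; };

/*
 * What the removed kvm_calc_shadow_root_page_role_common() produced,
 * including role.base.direct = !CR0.PG as set by its shadow-MMU caller.
 * The ext.cr4_smep/cr4_smap inputs it read are zero when CR0.PG=0.
 */
static struct role removed_helper(const struct regs *r)
{
	struct role role = { .direct = !r->cr0_pg };

	role.smep_andnot_wp = (r->cr0_pg && r->cr4_smep) && !r->cr0_wp;
	role.smap_andnot_wp = (r->cr0_pg && r->cr4_smap) && !r->cr0_wp;
	role.has_4_byte_gpte = r->cr0_pg && !r->cr4_pae;
	return role;
}

/* The same bits as the CPU-role path computes them (simplified). */
static struct role cpu_role(const struct regs *r)
{
	struct role role = { 0 };

	if (!r->cr0_pg) {
		role.direct = true;	/* no paging: direct map, rest stays 0 */
		return role;
	}
	role.smep_andnot_wp = r->cr4_smep && !r->cr0_wp;
	role.smap_andnot_wp = r->cr4_smap && !r->cr0_wp;
	role.has_4_byte_gpte = !r->cr4_pae;
	return role;
}

int main(void)
{
	/* Compare both computations over every combination of the five bits. */
	for (int i = 0; i < 32; i++) {
		struct regs r = {
			.cr0_pg   = i & 1, .cr0_wp   = i & 2, .cr4_pae = i & 4,
			.cr4_smep = i & 8, .cr4_smap = i & 16,
		};
		struct role a = removed_helper(&r), b = cpu_role(&r);

		if (a.direct != b.direct ||
		    a.smep_andnot_wp != b.smep_andnot_wp ||
		    a.smap_andnot_wp != b.smap_andnot_wp ||
		    a.has_4_byte_gpte != b.has_4_byte_gpte)
			printf("mismatch for CR bits %#x\n", i);
	}
	printf("done\n");
	return 0;
}

Built with any C99 compiler this prints only "done": no combination of the
five control bits produces a difference, which is the sense in which the
removed helper computed nothing the CPU role does not already provide.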