The TDP MMU page fault handler will need to be able to create non-leaf
SPTEs to build up the paging structures. Rather than re-implementing
that logic, factor the SPTE creation out of link_shadow_page into a new
helper, make_nonleaf_spte.

Tested by running kvm-unit-tests and KVM selftests on an Intel Haswell
machine. This series introduced no new failures.

This series can be viewed in Gerrit at:
	https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2538

Signed-off-by: Ben Gardon <bgardon@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 7a17cca19b0c1..6344e7863a0f5 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2555,21 +2555,30 @@ static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
 	__shadow_walk_next(iterator, *iterator->sptep);
 }
 
-static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
-			     struct kvm_mmu_page *sp)
+static u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
 {
 	u64 spte;
 
-	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
-
-	spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK |
+	spte = __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
 	       shadow_user_mask | shadow_x_mask | shadow_me_mask;
 
-	if (sp_ad_disabled(sp))
+	if (ad_disabled)
 		spte |= SPTE_AD_DISABLED_MASK;
 	else
 		spte |= shadow_accessed_mask;
 
+	return spte;
+}
+
+static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
+			     struct kvm_mmu_page *sp)
+{
+	u64 spte;
+
+	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
+
+	spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
+
 	mmu_spte_set(sptep, spte);
 
 	mmu_page_add_parent_pte(vcpu, sp, sptep);
-- 
2.28.0.709.gb0816b6eb0-goog
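As a usage note, not part of the patch itself: a minimal sketch of how a TDP
MMU fault handler might consume the new helper when it encounters a
non-present intermediate entry. tdp_mmu_alloc_child_pt() and
tdp_mmu_set_spte() are hypothetical stand-ins for plumbing introduced later
in the series, named here purely for illustration:

/*
 * Hypothetical caller sketch, not part of this patch: a TDP MMU page
 * fault path could use make_nonleaf_spte() to fill in a missing
 * intermediate paging-structure entry. tdp_mmu_alloc_child_pt() and
 * tdp_mmu_set_spte() are invented names used only for illustration.
 */
static void tdp_mmu_link_child_pt(struct kvm_vcpu *vcpu, u64 *sptep,
				  bool ad_disabled)
{
	u64 *child_pt;
	u64 spte;

	/* Allocate the child page table the new non-leaf SPTE will point at. */
	child_pt = tdp_mmu_alloc_child_pt(vcpu);

	/* Build a present, writable non-leaf SPTE referencing the child table. */
	spte = make_nonleaf_spte(child_pt, ad_disabled);

	/* Install the SPTE so the fault handler can descend one level. */
	tdp_mmu_set_spte(vcpu->kvm, sptep, spte);
}

Because make_nonleaf_spte() takes a bare child page table pointer and an
ad_disabled flag rather than a struct kvm_mmu_page, the TDP MMU can reuse it
without depending on the shadow-paging bookkeeping that link_shadow_page
performs.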