Factor out the logic to atomically replace an SPTE with an SPTE that
points to a new page table. This will be used in a follow-up commit to
split a large page SPTE into one level lower.

Signed-off-by: David Matlack <dmatlack@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/tdp_mmu.c | 53 ++++++++++++++++++++++++++------------
 1 file changed, 37 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index cc9fe33c9b36..9ee3f4f7fdf5 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -945,6 +945,39 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
 	return ret;
 }
 
+/*
+ * tdp_mmu_install_sp_atomic - Atomically replace the given spte with an
+ * spte pointing to the provided page table.
+ *
+ * @kvm: kvm instance
+ * @iter: a tdp_iter instance currently on the SPTE that should be set
+ * @sp: The new TDP page table to install.
+ * @account_nx: True if this page table is being installed to split a
+ *              non-executable huge page.
+ *
+ * Returns: True if the new page table was installed. False if spte being
+ *          replaced changed, causing the atomic compare-exchange to fail.
+ *          If this function returns false the sp will be freed before
+ *          returning.
+ */
+static bool tdp_mmu_install_sp_atomic(struct kvm *kvm,
+				      struct tdp_iter *iter,
+				      struct kvm_mmu_page *sp,
+				      bool account_nx)
+{
+	u64 spte;
+
+	spte = make_nonleaf_spte(sp->spt, !shadow_accessed_mask);
+
+	if (tdp_mmu_set_spte_atomic(kvm, iter, spte)) {
+		tdp_mmu_link_page(kvm, sp, account_nx);
+		return true;
+	} else {
+		tdp_mmu_free_sp(sp);
+		return false;
+	}
+}
+
 /*
  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
  * page tables and SPTEs to translate the faulting guest physical address.
@@ -954,8 +987,6 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
 	struct tdp_iter iter;
 	struct kvm_mmu_page *sp;
-	u64 *child_pt;
-	u64 new_spte;
 	int ret;
 
 	kvm_mmu_hugepage_adjust(vcpu, fault);
@@ -983,6 +1014,9 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 		}
 
 		if (!is_shadow_present_pte(iter.old_spte)) {
+			bool account_nx = fault->huge_page_disallowed &&
+					  fault->req_level >= iter.level;
+
 			/*
 			 * If SPTE has been frozen by another thread, just
 			 * give up and retry, avoiding unnecessary page table
@@ -992,21 +1026,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 				break;
 
 			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level - 1);
-			child_pt = sp->spt;
-
-			new_spte = make_nonleaf_spte(child_pt,
-						     !shadow_accessed_mask);
-
-			if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter, new_spte)) {
-				tdp_mmu_link_page(vcpu->kvm, sp,
-						  fault->huge_page_disallowed &&
-						  fault->req_level >= iter.level);
-
-				trace_kvm_mmu_get_page(sp, true);
-			} else {
-				tdp_mmu_free_sp(sp);
+			if (!tdp_mmu_install_sp_atomic(vcpu->kvm, &iter, sp, account_nx))
 				break;
-			}
 		}
 	}
 
-- 
2.34.0.rc2.393.gf8c9666880-goog
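
For illustration only, and not part of this patch: a rough sketch of how the
follow-up huge-page-split path could reuse the new helper. The function name
tdp_mmu_split_huge_page_atomic() and the child-SPTE helper
make_huge_page_split_spte() are assumptions made for the sketch, not code
from this series.

/*
 * Sketch: allocate a child page table one level below the huge SPTE,
 * populate it with small SPTEs derived from the huge SPTE (via the assumed
 * make_huge_page_split_spte() helper), then try to install it atomically.
 * On cmpxchg failure tdp_mmu_install_sp_atomic() frees the sp itself, so
 * the caller can simply retry.
 */
static bool tdp_mmu_split_huge_page_atomic(struct kvm_vcpu *vcpu,
					   struct tdp_iter *iter)
{
	struct kvm_mmu_page *sp;
	int i;

	sp = alloc_tdp_mmu_page(vcpu, iter->gfn, iter->level - 1);

	/* 512 8-byte SPTEs per 4KiB page table page. */
	for (i = 0; i < 512; i++)
		sp->spt[i] = make_huge_page_split_spte(iter->old_spte, sp, i);

	/*
	 * Whether the split must be accounted as an NX huge page split is up
	 * to the caller; false is only a placeholder here.
	 */
	return tdp_mmu_install_sp_atomic(vcpu->kvm, iter, sp, false);
}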