Hi Will,

On 9/7/20 4:23 PM, Will Deacon wrote:
> Convert the page-aging functions and access fault handler to use the
> generic page-table code instead of walking the page-table directly.
>
> Cc: Marc Zyngier <maz@xxxxxxxxxx>
> Cc: Quentin Perret <qperret@xxxxxxxxxx>
> Reviewed-by: Gavin Shan <gshan@xxxxxxxxxx>
> Signed-off-by: Will Deacon <will@xxxxxxxxxx>
> ---
>  arch/arm64/kvm/mmu.c | 74 ++++++++++----------------------------------
>  1 file changed, 16 insertions(+), 58 deletions(-)
>
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index a7021509231c..a036aac3ed24 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -1703,46 +1703,23 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>  	return ret;
>  }
>  
> -/*
> - * Resolve the access fault by making the page young again.
> - * Note that because the faulting entry is guaranteed not to be
> - * cached in the TLB, we don't need to invalidate anything.
> - * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
> - * so there is no need for atomic (pte|pmd)_mkyoung operations.
> - */
> +/* Resolve the access fault by making the page young again. */
>  static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
>  {
> -	pud_t *pud;
> -	pmd_t *pmd;
> -	pte_t *pte;
> -	kvm_pfn_t pfn;
> -	bool pfn_valid = false;
> +	pte_t pte;
> +	kvm_pte_t kpte;
> +	struct kvm_s2_mmu *mmu;
>  
>  	trace_kvm_access_fault(fault_ipa);
>  
>  	spin_lock(&vcpu->kvm->mmu_lock);
> -
> -	if (!stage2_get_leaf_entry(vcpu->arch.hw_mmu, fault_ipa, &pud, &pmd, &pte))
> -		goto out;
> -
> -	if (pud) {		/* HugeTLB */
> -		*pud = kvm_s2pud_mkyoung(*pud);
> -		pfn = kvm_pud_pfn(*pud);
> -		pfn_valid = true;
> -	} else if (pmd) {	/* THP, HugeTLB */
> -		*pmd = pmd_mkyoung(*pmd);
> -		pfn = pmd_pfn(*pmd);
> -		pfn_valid = true;
> -	} else {
> -		*pte = pte_mkyoung(*pte);	/* Just a page... */
> -		pfn = pte_pfn(*pte);
> -		pfn_valid = true;
> -	}
> -
> -out:
> +	mmu = vcpu->arch.hw_mmu;
> +	kpte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
>  	spin_unlock(&vcpu->kvm->mmu_lock);
> -	if (pfn_valid)
> -		kvm_set_pfn_accessed(pfn);
> +
> +	pte = __pte(kpte);
> +	if (pte_valid(pte))
> +		kvm_set_pfn_accessed(pte_pfn(pte));
>  }
>  
>  /**
> @@ -1959,38 +1936,19 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
>  
>  static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
>  {
> -	pud_t *pud;
> -	pmd_t *pmd;
> -	pte_t *pte;
> +	pte_t pte;
> +	kvm_pte_t kpte;
>  
>  	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
> -	if (!stage2_get_leaf_entry(&kvm->arch.mmu, gpa, &pud, &pmd, &pte))
> -		return 0;
> -
> -	if (pud)
> -		return stage2_pudp_test_and_clear_young(pud);
> -	else if (pmd)
> -		return stage2_pmdp_test_and_clear_young(pmd);
> -	else
> -		return stage2_ptep_test_and_clear_young(pte);
> +	kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt, gpa);
> +	pte = __pte(pte);
> +	pte = __pte(kpte);
> +	return pte_valid(pte) && pte_young(pte);

I checked that the semantics of the return value are preserved. The old
version of the function returned 1 if stage2_get_leaf_entry() returned true
(entry was found and it was valid) and the entry was young, which is
equivalent to pte_valid(pte) && pte_young(pte).
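For my own sanity I also wrote a quick userspace model of the two versions
(everything here is mine, not kernel code: the toy_pte type and the two
handler functions are hypothetical stand-ins that reduce the leaf entry to
the two bits the handler cares about) and checked that they agree for every
valid/young combination:

#include <assert.h>
#include <stdbool.h>

/* Toy model of a stage 2 leaf entry: just the two relevant bits. */
struct toy_pte {
	bool valid;
	bool young;
};

/*
 * Old logic: stage2_get_leaf_entry() fails for a missing/invalid entry,
 * and the per-level test_and_clear_young helper then reports the AF bit.
 */
static int old_age_handler(struct toy_pte pte)
{
	if (!pte.valid)
		return 0;
	return pte.young;
}

/*
 * New logic: kvm_pgtable_stage2_mkold() hands back the old entry and the
 * caller checks validity and the AF bit itself.
 */
static int new_age_handler(struct toy_pte pte)
{
	return pte.valid && pte.young;
}

int main(void)
{
	for (int v = 0; v < 2; v++) {
		for (int y = 0; y < 2; y++) {
			struct toy_pte pte = { .valid = v, .young = y };
			assert(old_age_handler(pte) == new_age_handler(pte));
		}
	}
	return 0;
}

Trivial, I know, but it makes it easy to see that collapsing the three-way
pud/pmd/pte branch into a single expression doesn't change the result.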
> }
>  
>  static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
>  {
> -	pud_t *pud;
> -	pmd_t *pmd;
> -	pte_t *pte;
> -
>  	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
> -	if (!stage2_get_leaf_entry(&kvm->arch.mmu, gpa, &pud, &pmd, &pte))
> -		return 0;
> -
> -	if (pud)
> -		return kvm_s2pud_young(*pud);
> -	else if (pmd)
> -		return pmd_young(*pmd);
> -	else
> -		return pte_young(*pte);
> +	return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt, gpa);
>  }

Reviewed-by: Alexandru Elisei <alexandru.elisei@xxxxxxx>

Thanks,
Alex

>  
>  int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)

_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm