On Friday 31 Jul 2020 at 09:14:36 (+0100), Quentin Perret wrote:
> Hey Will,
>
> On Thursday 30 Jul 2020 at 16:33:48 (+0100), Will Deacon wrote:
> > +void *kvm_pgtable_hyp_alloc_cookie(u32 va_bits)
> > +{
> > +	struct kvm_pgtable *pgt = kzalloc(sizeof(*pgt), GFP_KERNEL);
> > +
> > +	if (!pgt)
> > +		return NULL;
> > +
> > +	pgt->ia_bits = va_bits;
> > +	pgt->start_level = kvm_start_level(va_bits);
> > +
> > +	pgt->pgd = (kvm_pte_t *)get_zeroed_page(GFP_KERNEL);
> > +	if (!pgt->pgd) {
> > +		kfree(pgt);
> > +		pgt = NULL;
> > +	}
> > +
> > +	return pgt;
> > +}
> > +
> > +static int hyp_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
> > +			   enum kvm_pgtable_walk_flags flag, void * const arg)
> > +{
> > +	free_page((unsigned long)kvm_pte_follow(*ptep));
> > +	return 0;
> > +}
> > +
> > +void kvm_pgtable_hyp_free_cookie(void *cookie)
> > +{
> > +	size_t pgd_sz;
> > +	struct kvm_pgtable *pgt = cookie;
> > +	struct kvm_pgtable_walker walker = {
> > +		.cb	= hyp_free_walker,
> > +		.flags	= KVM_PGTABLE_WALK_TABLE_POST,
> > +	};
> > +
> > +	if (kvm_pgtable_walk(cookie, 0, BIT(pgt->ia_bits), &walker))
> > +		kvm_err("Failed to free page-table pages\n");
> > +
> > +	pgd_sz = kvm_pgd_pages(pgt) * PAGE_SIZE;
> > +	free_pages_exact(pgt->pgd, pgd_sz);
>
> Given that the pgd is unconditionally a single page for the stage 1 case
> (as per kvm_pgtable_hyp_alloc_cookie() above), should we simplify this
> to a simple free_page()? Or did you want to factorize this with the
> stage 2 free path?

Hmm, or maybe it's the alloc() path that needs fixing actually ...
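
Something like the below, perhaps? Just a rough sketch, completely
untested, and it assumes kvm_pgd_pages() already returns the right
count (i.e. 1) for the hyp stage 1 table. The idea is to make the
allocation pair up with the free_pages_exact() call on the free path,
rather than touching the free path:

	/*
	 * Sketch only: allocate the pgd with alloc_pages_exact() so it
	 * matches the free_pages_exact() in kvm_pgtable_hyp_free_cookie().
	 * kvm_pgd_pages() needs ia_bits and start_level to be set first,
	 * which they already are at this point in the alloc path.
	 */
	pgt->pgd = alloc_pages_exact(kvm_pgd_pages(pgt) * PAGE_SIZE,
				     GFP_KERNEL | __GFP_ZERO);
	if (!pgt->pgd) {
		kfree(pgt);
		pgt = NULL;
	}

As a bonus that would drop the (kvm_pte_t *) cast, since
alloc_pages_exact() returns a void pointer rather than an unsigned long.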