On Fri, Apr 15, 2022 at 09:58:48PM +0000, Oliver Upton wrote:
> Use RCU to safely traverse the page tables in parallel; the tables
> themselves will only be freed from an RCU synchronized context. Don't
> even bother with adding support to hyp, and instead just assume
> exclusive access of the page tables.
> 
> Signed-off-by: Oliver Upton <oupton@xxxxxxxxxx>
> ---
>  arch/arm64/kvm/hyp/pgtable.c | 23 ++++++++++++++++++++++-
>  1 file changed, 22 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
> index 5b64fbca8a93..d4699f698d6e 100644
> --- a/arch/arm64/kvm/hyp/pgtable.c
> +++ b/arch/arm64/kvm/hyp/pgtable.c
> @@ -132,9 +132,28 @@ static kvm_pte_t kvm_phys_to_pte(u64 pa)
>  	return pte;
>  }
> 
> +
> +#if defined(__KVM_NVHE_HYPERVISOR__)
> +static inline void kvm_pgtable_walk_begin(void)
> +{}
> +
> +static inline void kvm_pgtable_walk_end(void)
> +{}
> +
> +#define kvm_dereference_ptep rcu_dereference_raw
> +#else
> +#define kvm_pgtable_walk_begin rcu_read_lock
> +
> +#define kvm_pgtable_walk_end rcu_read_unlock
> +
> +#define kvm_dereference_ptep rcu_dereference
> +#endif
> +
>  static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
>  {
> -	return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
> +	kvm_pte_t __rcu *ptep = mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
> +
> +	return kvm_dereference_ptep(ptep);
>  }
> 
>  static void kvm_clear_pte(kvm_pte_t *ptep)
> @@ -288,7 +307,9 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
>  		.walker	= walker,
>  	};
> 
> +	kvm_pgtable_walk_begin();
>  	return _kvm_pgtable_walk(&walk_data);
> +	kvm_pgtable_walk_end();

This might be fixed later in the series, but at this point
kvm_pgtable_walk_end() sits after the return statement, so it is
unreachable and rcu_read_unlock() is never called.
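
Untested, but capturing the walk's return value in a new local (call
it 'r', invented here for illustration) before dropping the read lock
would keep begin/end balanced:

	int r;

	kvm_pgtable_walk_begin();
	r = _kvm_pgtable_walk(&walk_data);
	/* unlock only once the walk has finished, then return its result */
	kvm_pgtable_walk_end();

	return r;

Since the nVHE variants of kvm_pgtable_walk_begin()/end() are empty
inlines, hyp keeps its exclusive-access assumption at no extra cost.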