On Tue, Aug 30, 2022 at 07:41:26PM +0000, Oliver Upton wrote: > The use of RCU is necessary to change the paging structures in parallel. > Acquire and release an RCU read lock when traversing the page tables. > > Signed-off-by: Oliver Upton <oliver.upton@xxxxxxxxx> > --- > arch/arm64/include/asm/kvm_pgtable.h | 19 ++++++++++++++++++- > arch/arm64/kvm/hyp/pgtable.c | 7 ++++++- > 2 files changed, 24 insertions(+), 2 deletions(-) > > diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h > index 78fbb7be1af6..7d2de0a98ccb 100644 > --- a/arch/arm64/include/asm/kvm_pgtable.h > +++ b/arch/arm64/include/asm/kvm_pgtable.h > @@ -578,9 +578,26 @@ enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte); > */ > enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte); > > +#if defined(__KVM_NVHE_HYPERVISOR___) > + Future readers will wonder why NVHE stubs out RCU support and how that is even correct. Some comments here would be useful to explain it. > +static inline void kvm_pgtable_walk_begin(void) {} > +static inline void kvm_pgtable_walk_end(void) {} > + > +#define kvm_dereference_ptep rcu_dereference_raw How does NVHE have access to rcu_dereference_raw()? > + > +#else /* !defined(__KVM_NVHE_HYPERVISOR__) */ > + > +#define kvm_pgtable_walk_begin rcu_read_lock > +#define kvm_pgtable_walk_end rcu_read_unlock > +#define kvm_dereference_ptep rcu_dereference > + > +#endif /* defined(__KVM_NVHE_HYPERVISOR__) */ > + > static inline kvm_pte_t kvm_pte_read(kvm_pte_t *ptep) > { > - return READ_ONCE(*ptep); > + kvm_pte_t __rcu *p = (kvm_pte_t __rcu *)ptep; > + > + return READ_ONCE(*kvm_dereference_ptep(p)); What about all the other places where page table memory is accessed? If RCU is going to be used to protect page table memory, then all accesses have to go under an RCU critical section. This means that page table memory should only be accessed through __rcu annotated pointers and dereferenced with rcu_dereference(). 
> } > > #endif /* __ARM64_KVM_PGTABLE_H__ */ > diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c > index f911509e6512..215a14c434ed 100644 > --- a/arch/arm64/kvm/hyp/pgtable.c > +++ b/arch/arm64/kvm/hyp/pgtable.c > @@ -284,8 +284,13 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size, > .end = PAGE_ALIGN(walk_data.addr + size), > .walker = walker, > }; > + int r; > > - return _kvm_pgtable_walk(&walk_data); > + kvm_pgtable_walk_begin(); > + r = _kvm_pgtable_walk(&walk_data); > + kvm_pgtable_walk_end(); > + > + return r; > } > > struct leaf_walk_data { > -- > 2.37.2.672.g94769d06f0-goog > _______________________________________________ kvmarm mailing list kvmarm@xxxxxxxxxxxxxxxxxxxxx https://lists.cs.columbia.edu/mailman/listinfo/kvmarm