On Tue, Dec 10, 2024 at 4:06 AM Vlastimil Babka <vbabka@xxxxxxx> wrote:
>
> On 12/6/24 23:52, Suren Baghdasaryan wrote:
> > To enable SLAB_TYPESAFE_BY_RCU for the vma cache we need to ensure
> > that object reuse before the RCU grace period is over will be
> > detected inside lock_vma_under_rcu().
> > lock_vma_under_rcu() enters an RCU read section, finds the vma at
> > the given address, locks the vma and checks if it got detached or
> > remapped to cover a different address range. These last checks are
> > there to ensure that the vma was not modified after we found it but
> > before locking it.
> > vma reuse introduces several new possibilities:
> > 1. vma can be reused after it was found but before it is locked;
> > 2. vma can be reused and reinitialized (including changing its
> > vm_mm) while being locked in vma_start_read();
> > 3. vma can be reused and reinitialized after it was found but
> > before it is locked, then attached at a new address or to a new mm
> > while read-locked.
> > For case #1 the current checks will help detect cases when:
> > - vma was reused but not yet added into the tree (detached check)
> > - vma was reused at a different address range (address check)
> > We are missing the check for vm_mm to ensure the reused vma was not
> > attached to a different mm. This patch adds the missing check.
> > For case #2, we pass mm to vma_start_read() to prevent access to
> > the unstable vma->vm_mm. This might lead to vma_start_read()
> > returning a false locked result, but that's not critical if it's
> > rare because it will only lead to a retry under mmap_lock.
> > For case #3, we ensure the order in which the vma->detached flag
> > and vm_start/vm_end/vm_mm are set and checked. A vma gets attached
> > after vm_start/vm_end/vm_mm were set, and lock_vma_under_rcu()
> > should check vma->detached before checking vm_start/vm_end/vm_mm.
> > This is required because attaching a vma happens without the vma
> > write-lock, as opposed to vma detaching, which requires the vma
> > write-lock. This patch adds the memory barriers inside
> > is_vma_detached() and vma_mark_attached() needed to order reads
> > and writes to vma->detached vs vm_start/vm_end/vm_mm.
> > After these provisions, SLAB_TYPESAFE_BY_RCU is added to
> > vm_area_cachep. This will facilitate vm_area_struct reuse and will
> > minimize the number of call_rcu() calls.
> >
> > Signed-off-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
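
A note to make the case #3 ordering easier to review: the barriers
described above amount to the usual store-release/load-acquire pairing.
A simplified sketch of the idea (not the exact patch code):

static inline void vma_mark_attached(struct vm_area_struct *vma)
{
        /*
         * RELEASE: the preceding writes to vm_start/vm_end/vm_mm are
         * made visible before the detached = false store can be
         * observed.
         */
        smp_store_release(&vma->detached, false);
}

static inline bool is_vma_detached(struct vm_area_struct *vma)
{
        /*
         * ACQUIRE: pairs with the release above, so a reader that
         * observes detached == false also observes the
         * vm_start/vm_end/vm_mm values published by the attaching
         * thread.
         */
        return smp_load_acquire(&vma->detached);
}
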
> > ---
> >  include/linux/mm.h               |  36 +++++--
> >  include/linux/mm_types.h         |  10 +-
> >  include/linux/slab.h             |   6 --
> >  kernel/fork.c                    | 157 +++++++++++++++++++++++++------
> >  mm/memory.c                      |  15 ++-
> >  mm/vma.c                         |   2 +-
> >  tools/testing/vma/vma_internal.h |   7 +-
> >  7 files changed, 179 insertions(+), 54 deletions(-)
> >
> > diff --git a/include/linux/mm.h b/include/linux/mm.h
> > index 2bf38c1e9cca..3568bcbc7c81 100644
> > --- a/include/linux/mm.h
> > +++ b/include/linux/mm.h
> > @@ -257,7 +257,7 @@ struct vm_area_struct *vm_area_alloc(struct mm_struct *);
> >  struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
> >  void vm_area_free(struct vm_area_struct *);
> >  /* Use only if VMA has no other users */
> > -void __vm_area_free(struct vm_area_struct *vma);
> > +void vm_area_free_unreachable(struct vm_area_struct *vma);
> >
> >  #ifndef CONFIG_MMU
> >  extern struct rb_root nommu_region_tree;
> > @@ -706,8 +706,10 @@ static inline void vma_lock_init(struct vm_area_struct *vma)
> >   * Try to read-lock a vma. The function is allowed to occasionally yield false
> >   * locked result to avoid performance overhead, in which case we fall back to
> >   * using mmap_lock. The function should never yield false unlocked result.
> > + * False locked result is possible if mm_lock_seq overflows or if vma gets
> > + * reused and attached to a different mm before we lock it.
> >   */
> > -static inline bool vma_start_read(struct vm_area_struct *vma)
> > +static inline bool vma_start_read(struct mm_struct *mm, struct vm_area_struct *vma)
> >  {
> >          /*
> >           * Check before locking. A race might cause false locked result.
> > @@ -716,7 +718,7 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
> >           * we don't rely on for anything - the mm_lock_seq read against which we
> >           * need ordering is below.
> >           */
> > -        if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq.sequence))
> > +        if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(mm->mm_lock_seq.sequence))
> >                  return false;
> >
> >          if (unlikely(down_read_trylock(&vma->vm_lock.lock) == 0))
> > @@ -733,7 +735,7 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
> >           * after it has been unlocked.
> >           * This pairs with RELEASE semantics in vma_end_write_all().
> >           */
> > -        if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&vma->vm_mm->mm_lock_seq))) {
> > +        if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&mm->mm_lock_seq))) {
> >                  up_read(&vma->vm_lock.lock);
> >                  return false;
> >          }
>
> Perhaps this could have been another preparatory patch, to make this
> one smaller?
> >
> > +static void vm_area_ctor(void *data)
> > +{
> > +        struct vm_area_struct *vma = (struct vm_area_struct *)data;
> > +
> > +#ifdef CONFIG_PER_VMA_LOCK
> > +        /* vma is not locked, can't use vma_mark_detached() */
> > +        vma->detached = true;
> > +#endif
> > +        INIT_LIST_HEAD(&vma->anon_vma_chain);
> > +        vma_lock_init(vma);
> > +}
> > +
> > +#ifdef CONFIG_PER_VMA_LOCK
> > +
> > +static void vma_clear(struct vm_area_struct *vma, struct mm_struct *mm)
> > +{
> > +        vma->vm_mm = mm;
> > +        vma->vm_ops = &vma_dummy_vm_ops;
> > +        vma->vm_start = 0;
> > +        vma->vm_end = 0;
> > +        vma->anon_vma = NULL;
> > +        vma->vm_pgoff = 0;
> > +        vma->vm_file = NULL;
> > +        vma->vm_private_data = NULL;
> > +        vm_flags_init(vma, 0);
> > +        memset(&vma->vm_page_prot, 0, sizeof(vma->vm_page_prot));
> > +        memset(&vma->shared, 0, sizeof(vma->shared));
> > +        memset(&vma->vm_userfaultfd_ctx, 0, sizeof(vma->vm_userfaultfd_ctx));
> > +        vma_numab_state_init(vma);
> > +#ifdef CONFIG_ANON_VMA_NAME
> > +        vma->anon_name = NULL;
> > +#endif
> > +#ifdef CONFIG_SWAP
> > +        memset(&vma->swap_readahead_info, 0, sizeof(vma->swap_readahead_info));
> > +#endif
> > +#ifndef CONFIG_MMU
> > +        vma->vm_region = NULL;
> > +#endif
> > +#ifdef CONFIG_NUMA
> > +        vma->vm_policy = NULL;
> > +#endif
>
> This isn't the ideal pattern I think, now that we have a ctor. Ideally
> the ctor would do all this (except setting the vm_mm), and then we need
> to make sure it's also done when freeing the vma, so that the freed
> object is in the same state as a new object after the constructor.
>
> On freeing, things like numab_state and anon_name could be NULL'd (by
> the respective destructors) only when they are non-NULL, thus freeing
> the objects pointed to. vm_policy and vm_file could perhaps be handled
> the same way after some refactoring (see remove_vma()), vma_dummy_vm_ops
> is possibly already reset by vma_close(), etc.

Ok, let me look some more into it and see if I can improve and simplify
the initialization/freeing logic.
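
Roughly what I have in mind is to let the ctor establish the complete
initial state once, and have the freeing path return the object to that
state so reuse needs no vma_clear(). An untested sketch of the direction
(CONFIG_PER_VMA_LOCK assumed, vm_area_free_cleanup() is a made-up name,
and the exact set of fields still needs auditing):

static void vm_area_ctor(void *data)
{
        struct vm_area_struct *vma = data;

        /* One-time init; freed objects must be returned to this state. */
        vma->detached = true;
        INIT_LIST_HEAD(&vma->anon_vma_chain);
        vma_lock_init(vma);
        /* plus everything vma_clear() resets today, except vm_mm */
}

/* Hypothetical cleanup run right before kmem_cache_free(). */
static void vm_area_free_cleanup(struct vm_area_struct *vma)
{
#ifdef CONFIG_NUMA
        mpol_put(vma_policy(vma));
        vma->vm_policy = NULL;          /* back to ctor state */
#endif
#ifdef CONFIG_ANON_VMA_NAME
        anon_vma_name_put(anon_vma_name(vma));
        vma->anon_name = NULL;          /* back to ctor state */
#endif
        /* vm_file, numab_state, etc. would be handled the same way */
}
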
Thanks!

> > +}
> > +
> > +static void vma_copy(const struct vm_area_struct *src, struct vm_area_struct *dest)
> > +{
> > +        dest->vm_mm = src->vm_mm;
> > +        dest->vm_ops = src->vm_ops;
> > +        dest->vm_start = src->vm_start;
> > +        dest->vm_end = src->vm_end;
> > +        dest->anon_vma = src->anon_vma;
> > +        dest->vm_pgoff = src->vm_pgoff;
> > +        dest->vm_file = src->vm_file;
> > +        dest->vm_private_data = src->vm_private_data;
> > +        vm_flags_init(dest, src->vm_flags);
> > +        memcpy(&dest->vm_page_prot, &src->vm_page_prot,
> > +               sizeof(dest->vm_page_prot));
> > +        memcpy(&dest->shared, &src->shared, sizeof(dest->shared));
> > +        memcpy(&dest->vm_userfaultfd_ctx, &src->vm_userfaultfd_ctx,
> > +               sizeof(dest->vm_userfaultfd_ctx));
> > +#ifdef CONFIG_ANON_VMA_NAME
> > +        dest->anon_name = src->anon_name;
> > +#endif
> > +#ifdef CONFIG_SWAP
> > +        memcpy(&dest->swap_readahead_info, &src->swap_readahead_info,
> > +               sizeof(dest->swap_readahead_info));
> > +#endif
> > +#ifndef CONFIG_MMU
> > +        dest->vm_region = src->vm_region;
> > +#endif
> > +#ifdef CONFIG_NUMA
> > +        dest->vm_policy = src->vm_policy;
> > +#endif
> > +}
> > +
> > +#else /* CONFIG_PER_VMA_LOCK */
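
FWIW, with the reuse checks in place the reader side this patch cares
about condenses to roughly the following (an illustrative sketch; the
real lock_vma_under_rcu() in mm/memory.c handles more corner cases):

struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
                                          unsigned long address)
{
        MA_STATE(mas, &mm->mm_mt, address, address);
        struct vm_area_struct *vma;

        rcu_read_lock();
        vma = mas_walk(&mas);
        if (!vma || !vma_start_read(mm, vma))
                goto inval;

        /*
         * The object may have been freed and reused while we were
         * looking it up (SLAB_TYPESAFE_BY_RCU), so re-check its
         * identity after locking: still attached, same mm, still
         * covering this address. detached must be checked first.
         */
        if (is_vma_detached(vma))
                goto inval_end_read;
        if (vma->vm_mm != mm ||
            address < vma->vm_start || address >= vma->vm_end)
                goto inval_end_read;

        rcu_read_unlock();
        return vma;

inval_end_read:
        vma_end_read(vma);
inval:
        rcu_read_unlock();
        return NULL;
}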