* Suren Baghdasaryan <surenb@xxxxxxxxxx> [210528 20:42]:
> On Wed, Apr 28, 2021 at 8:36 AM Liam Howlett <liam.howlett@xxxxxxxxxx> wrote:
> >
> > Use the maple tree iterator to duplicate the mm_struct trees.
> >
> > Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
> > ---
> >  include/linux/mm.h       |  2 --
> >  include/linux/sched/mm.h |  3 +++
> >  kernel/fork.c            | 24 +++++++++++++++++++-----
> >  mm/mmap.c                |  4 ----
> >  4 files changed, 22 insertions(+), 11 deletions(-)
> >
> > diff --git a/include/linux/mm.h b/include/linux/mm.h
> > index e89bacfa9145..7f7dff6ad884 100644
> > --- a/include/linux/mm.h
> > +++ b/include/linux/mm.h
> > @@ -2498,8 +2498,6 @@ extern bool arch_has_descending_max_zone_pfns(void);
> >  /* nommu.c */
> >  extern atomic_long_t mmap_pages_allocated;
> >  extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
> > -/* maple_tree */
> > -void vma_store(struct mm_struct *mm, struct vm_area_struct *vma);
> >
> >  /* interval_tree.c */
> >  void vma_interval_tree_insert(struct vm_area_struct *node,
> > diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
> > index e24b1fe348e3..76cab3aea6ab 100644
> > --- a/include/linux/sched/mm.h
> > +++ b/include/linux/sched/mm.h
> > @@ -8,6 +8,7 @@
> >  #include <linux/mm_types.h>
> >  #include <linux/gfp.h>
> >  #include <linux/sync_core.h>
> > +#include <linux/maple_tree.h>
> >
> >  /*
> >   * Routines for handling mm_structs
> > @@ -67,11 +68,13 @@ static inline void mmdrop(struct mm_struct *mm)
> >   */
> >  static inline void mmget(struct mm_struct *mm)
> >  {
> > +        mt_set_in_rcu(&mm->mm_mt);
> >          atomic_inc(&mm->mm_users);
> >  }
> >
> >  static inline bool mmget_not_zero(struct mm_struct *mm)
> >  {
> > +        mt_set_in_rcu(&mm->mm_mt);
>
> Should you be calling mt_set_in_rcu() if atomic_inc_not_zero() failed?
> I don't think mmput() is called after mmget_not_zero() fails and
> mt_clear_in_rcu() will not be called.

Good catch, but leaving it as is will be faster; the worst case is
re-entering RCU mode if there is a race during teardown.  Entering RCU
mode during teardown means that the nodes that are not already freed
would remain for one RCU cycle before being freed.  I don't think it is
worth adding a check on every call for such a low payoff.  I should
probably add a comment about this, though.
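For reference, a sketch of what that comment might look like (the
wording is mine, not taken from the series):

static inline bool mmget_not_zero(struct mm_struct *mm)
{
        /*
         * Enter RCU mode unconditionally.  If the increment below
         * loses a race with teardown, the only cost is that maple
         * nodes not yet freed linger for one extra RCU grace period;
         * branching on the result here would slow every caller for a
         * very low payoff.
         */
        mt_set_in_rcu(&mm->mm_mt);
        return atomic_inc_not_zero(&mm->mm_users);
}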
> >          return atomic_inc_not_zero(&mm->mm_users);
> >  }
> >
> > diff --git a/kernel/fork.c b/kernel/fork.c
> > index c37abaf28eb9..832416ff613e 100644
> > --- a/kernel/fork.c
> > +++ b/kernel/fork.c
> > @@ -477,7 +477,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
> >          struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
> >          struct rb_node **rb_link, *rb_parent;
> >          int retval;
> > -        unsigned long charge;
> > +        unsigned long charge = 0;
> > +        MA_STATE(old_mas, &oldmm->mm_mt, 0, 0);
> > +        MA_STATE(mas, &mm->mm_mt, 0, 0);
> >          LIST_HEAD(uf);
> >
> >          uprobe_start_dup_mmap();
> > @@ -511,7 +513,13 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
> >                  goto out;
> >
> >          prev = NULL;
> > -        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
> > +
> > +        retval = mas_entry_count(&mas, oldmm->map_count);
> > +        if (retval)
> > +                goto fail_nomem;
> > +
> > +        rcu_read_lock();
> > +        mas_for_each(&old_mas, mpnt, ULONG_MAX) {
> >                  struct file *file;
> >
> >                  if (mpnt->vm_flags & VM_DONTCOPY) {
> > @@ -525,7 +533,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
> >                   */
> >                  if (fatal_signal_pending(current)) {
> >                          retval = -EINTR;
> > -                        goto out;
> > +                        goto loop_out;
> >                  }
> >                  if (mpnt->vm_flags & VM_ACCOUNT) {
> >                          unsigned long len = vma_pages(mpnt);
> > @@ -594,7 +602,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
> >                  rb_parent = &tmp->vm_rb;
> >
> >                  /* Link the vma into the MT */
> > -                vma_store(mm, tmp);
> > +                mas.index = tmp->vm_start;
> > +                mas.last = tmp->vm_end - 1;
> > +                mas_store(&mas, tmp);
> >
> >                  mm->map_count++;
> >                  if (!(tmp->vm_flags & VM_WIPEONFORK))
> > @@ -604,14 +614,17 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
> >                          tmp->vm_ops->open(tmp);
> >
> >                  if (retval)
> > -                        goto out;
> > +                        goto loop_out;
> >          }
> >          /* a new mm has just been created */
> >          retval = arch_dup_mmap(oldmm, mm);
> > +loop_out:
> >  out:
> > +        rcu_read_unlock();
> >          mmap_write_unlock(mm);
> >          flush_tlb_mm(oldmm);
> >          mmap_write_unlock(oldmm);
> > +        mas_destroy(&mas);
> >          dup_userfaultfd_complete(&uf);
> >  fail_uprobe_end:
> >          uprobe_end_dup_mmap();
> > @@ -1092,6 +1105,7 @@ static inline void __mmput(struct mm_struct *mm)
> >  {
> >          VM_BUG_ON(atomic_read(&mm->mm_users));
> >
> > +        mt_clear_in_rcu(&mm->mm_mt);
> >          uprobe_clear_state(mm);
> >          exit_aio(mm);
> >          ksm_exit(mm);
> > diff --git a/mm/mmap.c b/mm/mmap.c
> > index 929c2f9eb3f5..1bd43f4db28e 100644
> > --- a/mm/mmap.c
> > +++ b/mm/mmap.c
> > @@ -780,10 +780,6 @@ static inline void vma_mt_store(struct mm_struct *mm, struct vm_area_struct *vma
> >                                                  GFP_KERNEL);
> >  }
> >
> > -void vma_store(struct mm_struct *mm, struct vm_area_struct *vma) {
> > -        vma_mt_store(mm, vma);
> > -}
> > -
> >  static void
> >  __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
> >             struct vm_area_struct *prev, struct rb_node **rb_link,
> > --
> > 2.30.2
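For readers who have not seen the maple tree API before, here is a
minimal standalone sketch of the preallocate/iterate/store pattern the
patch uses in dup_mmap().  copy_tree_entries() is a hypothetical helper
for illustration only; error paths and locking are simplified:

#include <linux/maple_tree.h>

/* Hypothetical example: duplicate the entries of @old into @new. */
static int copy_tree_entries(struct maple_tree *new,
                             struct maple_tree *old,
                             unsigned long nr_entries)
{
        MA_STATE(old_mas, old, 0, 0);   /* reader, walks [0, ULONG_MAX] */
        MA_STATE(new_mas, new, 0, 0);   /* writer, range set per entry */
        void *entry;
        int ret;

        /* Preallocate nodes up front so stores cannot fail mid-loop. */
        ret = mas_entry_count(&new_mas, nr_entries);
        if (ret)
                return ret;

        rcu_read_lock();
        mas_for_each(&old_mas, entry, ULONG_MAX) {
                /* Store over the same range the entry occupied in @old. */
                new_mas.index = old_mas.index;
                new_mas.last = old_mas.last;
                mas_store(&new_mas, entry);
        }
        rcu_read_unlock();

        mas_destroy(&new_mas);          /* free unused preallocations */
        return 0;
}

This mirrors the patch above: mas_entry_count() reserves nodes based on
oldmm->map_count, mas_for_each() walks the old tree under RCU, each VMA
is stored at [vm_start, vm_end - 1], and mas_destroy() releases
whatever preallocation was left over.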