On 21.06.22 22:46, Liam Howlett wrote:
> From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>
> 
> This thin layer of abstraction over the maple tree state is for iterating
> over VMAs.  You can go forwards, go backwards or ask where the iterator
> is.  Rename the existing vma_next() to __vma_next() -- it will be removed
> by the end of this series.
> 
> Link: https://lkml.kernel.org/r/20220504010716.661115-11-Liam.Howlett@xxxxxxxxxx
> Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
> Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
> Acked-by: Vlastimil Babka <vbabka@xxxxxxx>
> Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
> Cc: David Howells <dhowells@xxxxxxxxxx>
> Cc: SeongJae Park <sj@xxxxxxxxxx>
> Cc: Will Deacon <will@xxxxxxxxxx>
> Cc: Davidlohr Bueso <dave@xxxxxxxxxxxx>
> Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
> ---
>  include/linux/mm.h       | 31 +++++++++++++++++++++++++++++++
>  include/linux/mm_types.h | 21 +++++++++++++++++++++
>  mm/mmap.c                | 10 +++++-----
>  3 files changed, 57 insertions(+), 5 deletions(-)
> 
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 810b3dd929e4..f22c6f71a18c 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -658,6 +658,37 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
>  	return vma->vm_flags & VM_ACCESS_FLAGS;
>  }
>  
> +static inline
> +struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
> +{
> +	return mas_find(&vmi->mas, max);
> +}
> +
> +static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
> +{
> +	/*
> +	 * Uses vma_find() to get the first VMA when the iterator starts.
> +	 * Calling mas_next() could skip the first entry.
> +	 */
> +	return vma_find(vmi, ULONG_MAX);
> +}
> +
> +static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
> +{
> +	return mas_prev(&vmi->mas, 0);
> +}
> +
> +static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
> +{
> +	return vmi->mas.index;
> +}
> +
> +#define for_each_vma(vmi, vma)	while ((vma = vma_next(&(vmi))) != NULL)
> +
> +/* The MM code likes to work with exclusive end addresses */
> +#define for_each_vma_range(vmi, vma, end)				\
> +	while ((vma = vma_find(&(vmi), (end) - 1)) != NULL)
> +
>  #ifdef CONFIG_SHMEM
>  /*
>   * The vma_is_shmem is not inline because it is used only by slow
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 254c30def2b2..1485a24796be 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -696,6 +696,27 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
>  	return (struct cpumask *)&mm->cpu_bitmap;
>  }
>  
> +struct vma_iterator {
> +	struct ma_state mas;
> +};
> +
> +#define VMA_ITERATOR(name, mm, addr)					\
> +	struct vma_iterator name = {					\
> +		.mas = {						\
> +			.tree = &mm->mm_mt,				\
> +			.index = addr,					\
> +			.node = MAS_START,				\
> +		},							\
> +	}
> +

No __* and () macro magic? I'd have expected at least

	tree = &(__mm)->mm_mt,
	.index = (__addr),

;)

Reviewed-by: David Hildenbrand <david@xxxxxxxxxx>

-- 
Thanks,

David / dhildenb
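
For illustration, a rough sketch of the argument-parenthesized macro variant hinted at above, together with a minimal caller of the new iterator API; the VMA_ITERATOR_SKETCH name and the walk_all_vmas() helper are made up for the example and are not part of the patch:

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/printk.h>

/*
 * Sketch only: the VMA_ITERATOR from the patch, but with the usual
 * macro hygiene (parenthesized arguments), under a different name to
 * avoid clashing with the real definition.
 */
#define VMA_ITERATOR_SKETCH(name, __mm, __addr)				\
	struct vma_iterator name = {					\
		.mas = {						\
			.tree = &(__mm)->mm_mt,				\
			.index = (__addr),				\
			.node = MAS_START,				\
		},							\
	}

/* Illustrative caller of the new API; not part of the patch. */
static void walk_all_vmas(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	/* Walk every VMA in mm from address 0 upwards. */
	for_each_vma(vmi, vma)
		pr_info("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
}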