The patch titled
     Subject: mm: modify vma_iter_store{_gfp} to indicate if it's storing a new vma
has been added to the -mm mm-unstable branch.  Its filename is
     mm-modify-vma_iter_store_gfp-to-indicate-if-its-storing-a-new-vma.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-modify-vma_iter_store_gfp-to-indicate-if-its-storing-a-new-vma.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Subject: mm: modify vma_iter_store{_gfp} to indicate if it's storing a new vma
Date: Thu, 26 Dec 2024 09:06:56 -0800

The vma_iter_store() functions can be used both when adding a new vma and
when updating an existing one.  However, for existing vmas we do not need
to mark them attached, as they are already marked that way.  Add a
parameter to distinguish the two usages and skip vma_mark_attached() when
it is not needed.
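
As an illustration only (not part of the patch): a minimal sketch of the
two call patterns after this change.  The wrapper names below are
hypothetical and assume the mm/vma.h definitions from the diff further
down.

	/* Hypothetical helper: insert a VMA that is not yet in the maple tree. */
	static void store_new_vma(struct vma_iterator *vmi, struct vm_area_struct *vma)
	{
		/* new_vma == true: vma_iter_store() also calls vma_mark_attached(). */
		vma_iter_store(vmi, vma, true);
	}

	/* Hypothetical helper: update the range of a VMA already in the tree. */
	static void store_existing_vma(struct vma_iterator *vmi, struct vm_area_struct *vma)
	{
		/*
		 * new_vma == false: the VMA is already attached, so
		 * vma_mark_attached() is skipped and vma_iter_store()
		 * only asserts that the VMA is attached.
		 */
		vma_iter_store(vmi, vma, false);
	}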
Link: https://lkml.kernel.org/r/20241226170710.1159679-5-surenb@xxxxxxxxxx
Signed-off-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Cc: Christian Brauner <brauner@xxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: David Howells <dhowells@xxxxxxxxxx>
Cc: Davidlohr Bueso <dave@xxxxxxxxxxxx>
Cc: Hillf Danton <hdanton@xxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Jann Horn <jannh@xxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Jonathan Corbet <corbet@xxxxxxx>
Cc: kernel test robot <oliver.sang@xxxxxxxxx>
Cc: Klara Modin <klarasmodin@xxxxxxxxx>
Cc: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
Cc: Lokesh Gidra <lokeshgidra@xxxxxxxxxx>
Cc: Lorenzo Stoakes <lorenzo.stoakes@xxxxxxxxxx>
Cc: Mateusz Guzik <mjguzik@xxxxxxxxx>
Cc: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: Oleg Nesterov <oleg@xxxxxxxxxx>
Cc: Pasha Tatashin <pasha.tatashin@xxxxxxxxxx>
Cc: Paul E. McKenney <paulmck@xxxxxxxxxx>
Cc: Peter Xu <peterx@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Shakeel Butt <shakeel.butt@xxxxxxxxx>
Cc: Sourav Panda <souravpanda@xxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/mm.h |   12 ++++++++++++
 mm/nommu.c         |    4 ++--
 mm/vma.c           |   16 ++++++++--------
 mm/vma.h           |   13 +++++++++----
 4 files changed, 31 insertions(+), 14 deletions(-)

--- a/include/linux/mm.h~mm-modify-vma_iter_store_gfp-to-indicate-if-its-storing-a-new-vma
+++ a/include/linux/mm.h
@@ -821,6 +821,16 @@ static inline void vma_assert_locked(str
 	vma_assert_write_locked(vma);
 }
 
+static inline void vma_assert_attached(struct vm_area_struct *vma)
+{
+	VM_BUG_ON_VMA(vma->detached, vma);
+}
+
+static inline void vma_assert_detached(struct vm_area_struct *vma)
+{
+	VM_BUG_ON_VMA(!vma->detached, vma);
+}
+
 static inline void vma_mark_attached(struct vm_area_struct *vma)
 {
 	vma->detached = false;
@@ -866,6 +876,8 @@ static inline void vma_end_read(struct v
 static inline void vma_start_write(struct vm_area_struct *vma) {}
 static inline void vma_assert_write_locked(struct vm_area_struct *vma)
 		{ mmap_assert_write_locked(vma->vm_mm); }
+static inline void vma_assert_attached(struct vm_area_struct *vma) {}
+static inline void vma_assert_detached(struct vm_area_struct *vma) {}
 static inline void vma_mark_attached(struct vm_area_struct *vma) {}
 static inline void vma_mark_detached(struct vm_area_struct *vma) {}
 
--- a/mm/nommu.c~mm-modify-vma_iter_store_gfp-to-indicate-if-its-storing-a-new-vma
+++ a/mm/nommu.c
@@ -1191,7 +1191,7 @@ share:
 	setup_vma_to_mm(vma, current->mm);
 	current->mm->map_count++;
 	/* add the VMA to the tree */
-	vma_iter_store(&vmi, vma);
+	vma_iter_store(&vmi, vma, true);
 
 	/* we flush the region from the icache only when the first executable
 	 * mapping of it is made */
@@ -1356,7 +1356,7 @@ static int split_vma(struct vma_iterator
 	setup_vma_to_mm(vma, mm);
 	setup_vma_to_mm(new, mm);
-	vma_iter_store(vmi, new);
+	vma_iter_store(vmi, new, true);
 	mm->map_count++;
 
 	return 0;
 
--- a/mm/vma.c~mm-modify-vma_iter_store_gfp-to-indicate-if-its-storing-a-new-vma
+++ a/mm/vma.c
@@ -306,7 +306,7 @@ static void vma_complete(struct vma_prep
 		 * us to insert it before dropping the locks
 		 * (it may either follow vma or precede it).
 		 */
-		vma_iter_store(vmi, vp->insert);
+		vma_iter_store(vmi, vp->insert, true);
 		mm->map_count++;
 	}
 
@@ -660,14 +660,14 @@ static int commit_merge(struct vma_merge
 	vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);
 
 	if (expanded)
-		vma_iter_store(vmg->vmi, vmg->vma);
+		vma_iter_store(vmg->vmi, vmg->vma, false);
 
 	if (adj_start) {
 		adjust->vm_start += adj_start;
 		adjust->vm_pgoff += PHYS_PFN(adj_start);
 		if (adj_start < 0) {
 			WARN_ON(expanded);
-			vma_iter_store(vmg->vmi, adjust);
+			vma_iter_store(vmg->vmi, adjust, false);
 		}
 	}
 
@@ -1689,7 +1689,7 @@ int vma_link(struct mm_struct *mm, struc
 		return -ENOMEM;
 
 	vma_start_write(vma);
-	vma_iter_store(&vmi, vma);
+	vma_iter_store(&vmi, vma, true);
 	vma_link_file(vma);
 	mm->map_count++;
 	validate_mm(mm);
@@ -2368,7 +2368,7 @@ static int __mmap_new_vma(struct mmap_st
 
 	/* Lock the VMA since it is modified after insertion into VMA tree */
 	vma_start_write(vma);
-	vma_iter_store(vmi, vma);
+	vma_iter_store(vmi, vma, true);
 	map->mm->map_count++;
 	vma_link_file(vma);
 
@@ -2542,7 +2542,7 @@ int do_brk_flags(struct vma_iterator *vm
 	vm_flags_init(vma, flags);
 	vma->vm_page_prot = vm_get_page_prot(flags);
 	vma_start_write(vma);
-	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
+	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL, true))
 		goto mas_store_fail;
 
 	mm->map_count++;
@@ -2785,7 +2785,7 @@ int expand_upwards(struct vm_area_struct
 				anon_vma_interval_tree_pre_update_vma(vma);
 				vma->vm_end = address;
 				/* Overwrite old entry in mtree. */
-				vma_iter_store(&vmi, vma);
+				vma_iter_store(&vmi, vma, false);
 				anon_vma_interval_tree_post_update_vma(vma);
 
 				perf_event_mmap(vma);
@@ -2865,7 +2865,7 @@ int expand_downwards(struct vm_area_stru
 			vma->vm_start = address;
 			vma->vm_pgoff -= grow;
 			/* Overwrite old entry in mtree. */
-			vma_iter_store(&vmi, vma);
+			vma_iter_store(&vmi, vma, false);
 			anon_vma_interval_tree_post_update_vma(vma);
 
 			perf_event_mmap(vma);
--- a/mm/vma.h~mm-modify-vma_iter_store_gfp-to-indicate-if-its-storing-a-new-vma
+++ a/mm/vma.h
@@ -145,7 +145,7 @@ __must_check int vma_shrink(struct vma_i
 		unsigned long start, unsigned long end, pgoff_t pgoff);
 
 static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
-			struct vm_area_struct *vma, gfp_t gfp)
+			struct vm_area_struct *vma, gfp_t gfp, bool new_vma)
 
 {
 	if (vmi->mas.status != ma_start &&
@@ -157,7 +157,10 @@ static inline int vma_iter_store_gfp(str
 	if (unlikely(mas_is_err(&vmi->mas)))
 		return -ENOMEM;
 
-	vma_mark_attached(vma);
+	if (new_vma)
+		vma_mark_attached(vma);
+	vma_assert_attached(vma);
+
 	return 0;
 }
 
@@ -366,7 +369,7 @@ static inline struct vm_area_struct *vma
 
 /* Store a VMA with preallocated memory */
 static inline void vma_iter_store(struct vma_iterator *vmi,
-				  struct vm_area_struct *vma)
+				  struct vm_area_struct *vma, bool new_vma)
 {
 
 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
@@ -390,7 +393,9 @@ static inline void vma_iter_store(struct
 
 	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
 	mas_store_prealloc(&vmi->mas, vma);
-	vma_mark_attached(vma);
+	if (new_vma)
+		vma_mark_attached(vma);
+	vma_assert_attached(vma);
 }
 
 static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
_

Patches currently in -mm which might be from surenb@xxxxxxxxxx are

seqlock-add-raw_seqcount_try_begin.patch
mm-convert-mm_lock_seq-to-a-proper-seqcount.patch
mm-introduce-mmap_lock_speculate_try_beginretry.patch
mm-introduce-vma_start_read_locked_nested-helpers.patch
mm-move-per-vma-lock-into-vm_area_struct.patch
mm-mark-vma-as-detached-until-its-added-into-vma-tree.patch
mm-modify-vma_iter_store_gfp-to-indicate-if-its-storing-a-new-vma.patch
mm-mark-vmas-detached-upon-exit.patch
mm-nommu-fix-the-last-places-where-vma-is-not-locked-before-being-attached.patch
types-move-struct-rcuwait-into-typesh.patch
mm-allow-vma_start_read_locked-vma_start_read_locked_nested-to-fail.patch
mm-move-mmap_init_lock-out-of-the-header-file.patch
mm-uninline-the-main-body-of-vma_start_write.patch
refcount-introduce-__refcount_addinc_not_zero_limited.patch
mm-replace-vm_lock-and-detached-flag-with-a-reference-count.patch
mm-debug-print-vm_refcnt-state-when-dumping-the-vma.patch
mm-debug-print-vm_refcnt-state-when-dumping-the-vma-fix.patch
mm-remove-extra-vma_numab_state_init-call.patch
mm-prepare-lock_vma_under_rcu-for-vma-reuse-possibility.patch
mm-make-vma-cache-slab_typesafe_by_rcu.patch
docs-mm-document-latest-changes-to-vm_lock.patch