On Tue, Jun 11, 2024 at 12:02 AM Lance Yang <ioworker0@xxxxxxxxx> wrote:
>
> Introduce the labels walk_done and walk_done_err as exit points to
> eliminate duplicated exit code in the pagewalk loop.
>
> Reviewed-by: Zi Yan <ziy@xxxxxxxxxx>
> Reviewed-by: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
> Reviewed-by: David Hildenbrand <david@xxxxxxxxxx>
> Signed-off-by: Lance Yang <ioworker0@xxxxxxxxx>

I don't think "return false" necessarily indicates an error, so
"walk_done_err" doesn't seem like an appropriate name. However, this is
a minor issue.

Reviewed-by: Barry Song <baohua@xxxxxxxxxx>

> ---
>  mm/rmap.c | 40 +++++++++++++++-------------------------
>  1 file changed, 15 insertions(+), 25 deletions(-)
>
> diff --git a/mm/rmap.c b/mm/rmap.c
> index e8fc5ecb59b2..ddffa30c79fb 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1679,9 +1679,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>  			/* Restore the mlock which got missed */
>  			if (!folio_test_large(folio))
>  				mlock_vma_folio(folio, vma);
> -			page_vma_mapped_walk_done(&pvmw);
> -			ret = false;
> -			break;
> +			goto walk_done_err;
>  		}
>
>  		pfn = pte_pfn(ptep_get(pvmw.pte));
> @@ -1719,11 +1717,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>  			 */
>  			if (!anon) {
>  				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
> -				if (!hugetlb_vma_trylock_write(vma)) {
> -					page_vma_mapped_walk_done(&pvmw);
> -					ret = false;
> -					break;
> -				}
> +				if (!hugetlb_vma_trylock_write(vma))
> +					goto walk_done_err;
>  				if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
>  					hugetlb_vma_unlock_write(vma);
>  					flush_tlb_range(vma,
> @@ -1738,8 +1733,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>  					 * actual page and drop map count
>  					 * to zero.
>  					 */
> -					page_vma_mapped_walk_done(&pvmw);
> -					break;
> +					goto walk_done;
>  				}
>  				hugetlb_vma_unlock_write(vma);
>  			}
> @@ -1811,9 +1805,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>  			if (unlikely(folio_test_swapbacked(folio) !=
>  					folio_test_swapcache(folio))) {
>  				WARN_ON_ONCE(1);
> -				ret = false;
> -				page_vma_mapped_walk_done(&pvmw);
> -				break;
> +				goto walk_done_err;
>  			}
>
>  			/* MADV_FREE page check */
> @@ -1852,23 +1844,17 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>  				 */
>  				set_pte_at(mm, address, pvmw.pte, pteval);
>  				folio_set_swapbacked(folio);
> -				ret = false;
> -				page_vma_mapped_walk_done(&pvmw);
> -				break;
> +				goto walk_done_err;
>  			}
>
>  			if (swap_duplicate(entry) < 0) {
>  				set_pte_at(mm, address, pvmw.pte, pteval);
> -				ret = false;
> -				page_vma_mapped_walk_done(&pvmw);
> -				break;
> +				goto walk_done_err;
>  			}
>  			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
>  				swap_free(entry);
>  				set_pte_at(mm, address, pvmw.pte, pteval);
> -				ret = false;
> -				page_vma_mapped_walk_done(&pvmw);
> -				break;
> +				goto walk_done_err;
>  			}
>
>  			/* See folio_try_share_anon_rmap(): clear PTE first. */
> @@ -1876,9 +1862,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>  			    folio_try_share_anon_rmap_pte(folio, subpage)) {
>  				swap_free(entry);
>  				set_pte_at(mm, address, pvmw.pte, pteval);
> -				ret = false;
> -				page_vma_mapped_walk_done(&pvmw);
> -				break;
> +				goto walk_done_err;
>  			}
>  			if (list_empty(&mm->mmlist)) {
>  				spin_lock(&mmlist_lock);
> @@ -1918,6 +1902,12 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>  		if (vma->vm_flags & VM_LOCKED)
>  			mlock_drain_local();
>  		folio_put(folio);
> +		continue;
> +walk_done_err:
> +		ret = false;
> +walk_done:
> +		page_vma_mapped_walk_done(&pvmw);
> +		break;
>  	}
>
>  	mmu_notifier_invalidate_range_end(&range);
> --
> 2.33.1
>
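
For context, the exit-label pattern under discussion looks like this in
miniature. The sketch below is illustrative only, not kernel code:
struct walk_state and the helpers walk_next(), should_abort(),
done_early(), and walk_cleanup() are hypothetical stand-ins, and
walk_abort is just one possible neutral replacement for the
walk_done_err name suggested by the comment above:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the pagewalk state and helpers. */
struct walk_state { int pos, end; };

static bool walk_next(struct walk_state *w)    { return w->pos++ < w->end; }
static bool should_abort(struct walk_state *w) { return w->pos == 3; }
static bool done_early(struct walk_state *w)   { return false; }
static void walk_cleanup(struct walk_state *w) { (void)w; }

static bool walk_example(struct walk_state *w)
{
	bool ret = true;

	while (walk_next(w)) {
		if (should_abort(w))
			goto walk_abort;	/* early exit reporting false */
		if (done_early(w))
			goto walk_done;		/* early exit still reporting true */
		/* ... per-entry work ... */
		continue;
walk_abort:
		ret = false;
walk_done:
		walk_cleanup(w);	/* shared cleanup for both exit paths */
		break;
	}
	return ret;
}

int main(void)
{
	struct walk_state w = { .pos = 0, .end = 10 };

	/* The walk aborts at the third entry, so this prints "false". */
	printf("%s\n", walk_example(&w) ? "true" : "false");
	return 0;
}

The continue statement before the labels keeps the common path from
falling through into the shared exit code; only the explicit gotos
reach it, which is what lets the duplicated cleanup collapse into one
place.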