The quilt patch titled
     Subject: mm/memory: add any_dirty optional pointer to folio_pte_batch()
has been removed from the -mm tree.  Its filename was
     mm-memory-add-any_dirty-optional-pointer-to-folio_pte_batch.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Lance Yang <ioworker0@xxxxxxxxx>
Subject: mm/memory: add any_dirty optional pointer to folio_pte_batch()
Date: Thu, 18 Apr 2024 21:44:34 +0800

This commit adds the any_dirty pointer as an optional parameter to the
folio_pte_batch() function.  By using both the any_young and any_dirty
pointers, madvise_free can make smarter decisions about whether to clear
the PTEs when marking large folios as lazyfree.

Link: https://lkml.kernel.org/r/20240418134435.6092-4-ioworker0@xxxxxxxxx
Signed-off-by: Lance Yang <ioworker0@xxxxxxxxx>
Suggested-by: David Hildenbrand <david@xxxxxxxxxx>
Acked-by: David Hildenbrand <david@xxxxxxxxxx>
Cc: Barry Song <21cnbao@xxxxxxxxx>
Cc: Jeff Xie <xiehuan09@xxxxxxxxx>
Cc: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: Muchun Song <songmuchun@xxxxxxxxxxxxx>
Cc: Peter Xu <peterx@xxxxxxxxxx>
Cc: Ryan Roberts <ryan.roberts@xxxxxxx>
Cc: Yang Shi <shy828301@xxxxxxxxx>
Cc: Yin Fengwei <fengwei.yin@xxxxxxxxx>
Cc: Zach O'Keefe <zokeefe@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/internal.h |   12 ++++++++++--
 mm/madvise.c  |   19 ++++++++++++++-----
 mm/memory.c   |    4 ++--
 3 files changed, 26 insertions(+), 9 deletions(-)

--- a/mm/internal.h~mm-memory-add-any_dirty-optional-pointer-to-folio_pte_batch
+++ a/mm/internal.h
@@ -134,6 +134,8 @@ static inline pte_t __pte_batch_clear_ig
  *		  first one is writable.
  * @any_young: Optional pointer to indicate whether any entry except the
  *		  first one is young.
+ * @any_dirty: Optional pointer to indicate whether any entry except the
+ *		  first one is dirty.
  *
  * Detect a PTE batch: consecutive (present) PTEs that map consecutive
  * pages of the same large folio.
@@ -149,18 +151,20 @@ static inline pte_t __pte_batch_clear_ig
  */
 static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
 		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
-		bool *any_writable, bool *any_young)
+		bool *any_writable, bool *any_young, bool *any_dirty)
 {
 	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
 	const pte_t *end_ptep = start_ptep + max_nr;
 	pte_t expected_pte, *ptep;
-	bool writable, young;
+	bool writable, young, dirty;
 	int nr;
 
 	if (any_writable)
 		*any_writable = false;
 	if (any_young)
 		*any_young = false;
+	if (any_dirty)
+		*any_dirty = false;
 
 	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
 	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
@@ -176,6 +180,8 @@ static inline int folio_pte_batch(struct
 			writable = !!pte_write(pte);
 		if (any_young)
 			young = !!pte_young(pte);
+		if (any_dirty)
+			dirty = !!pte_dirty(pte);
 		pte = __pte_batch_clear_ignored(pte, flags);
 
 		if (!pte_same(pte, expected_pte))
@@ -193,6 +199,8 @@ static inline int folio_pte_batch(struct
 			*any_writable |= writable;
 		if (any_young)
 			*any_young |= young;
+		if (any_dirty)
+			*any_dirty |= dirty;
 
 		nr = pte_batch_hint(ptep, pte);
 		expected_pte = pte_advance_pfn(expected_pte, nr);
--- a/mm/madvise.c~mm-memory-add-any_dirty-optional-pointer-to-folio_pte_batch
+++ a/mm/madvise.c
@@ -321,6 +321,18 @@ static inline bool can_do_file_pageout(s
 	       file_permission(vma->vm_file, MAY_WRITE) == 0;
 }
 
+static inline int madvise_folio_pte_batch(unsigned long addr, unsigned long end,
+					  struct folio *folio, pte_t *ptep,
+					  pte_t pte, bool *any_young,
+					  bool *any_dirty)
+{
+	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
+	int max_nr = (end - addr) / PAGE_SIZE;
+
+	return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL,
+			       any_young, any_dirty);
+}
+
 static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 				unsigned long addr, unsigned long end,
 				struct mm_walk *walk)
@@ -456,13 +468,10 @@ restart:
 		 * next pte in the range.
 		 */
 		if (folio_test_large(folio)) {
-			const fpb_t fpb_flags = FPB_IGNORE_DIRTY |
-						FPB_IGNORE_SOFT_DIRTY;
-			int max_nr = (end - addr) / PAGE_SIZE;
 			bool any_young;
 
-			nr = folio_pte_batch(folio, addr, pte, ptent, max_nr,
-					     fpb_flags, NULL, &any_young);
+			nr = madvise_folio_pte_batch(addr, end, folio, pte,
+						     ptent, &any_young, NULL);
 
 			if (any_young)
 				ptent = pte_mkyoung(ptent);
--- a/mm/memory.c~mm-memory-add-any_dirty-optional-pointer-to-folio_pte_batch
+++ a/mm/memory.c
@@ -989,7 +989,7 @@ copy_present_ptes(struct vm_area_struct
 		flags |= FPB_IGNORE_SOFT_DIRTY;
 
 	nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,
-			     &any_writable, NULL);
+			     &any_writable, NULL, NULL);
 	folio_ref_add(folio, nr);
 	if (folio_test_anon(folio)) {
 		if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
@@ -1558,7 +1558,7 @@ static inline int zap_present_ptes(struc
 	 */
 	if (unlikely(folio_test_large(folio) && max_nr != 1)) {
 		nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags,
-				     NULL, NULL);
+				     NULL, NULL, NULL);
 		zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
 				       addr, details, rss, force_flush,
_

Patches currently in -mm which might be from ioworker0@xxxxxxxxx are
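
As a usage illustration (not part of the patch itself): below is a minimal
sketch of a caller that consumes both output pointers, modelled on the
madvise_cold_or_pageout_pte_range() hunk above.  The function name
example_batch_large_folio() is hypothetical; madvise_folio_pte_batch() is
the helper this patch adds to mm/madvise.c, and pte_mkyoung()/pte_mkdirty()
are existing kernel PTE helpers.

/*
 * Hypothetical caller sketch (not from this patch): batch the PTEs that
 * map one large folio, then fold the accumulated young/dirty state of
 * the whole batch back into the leading PTE value -- the pattern that
 * any_dirty enables for the madvise_free path.
 */
static int example_batch_large_folio(unsigned long addr, unsigned long end,
				     struct folio *folio, pte_t *ptep,
				     pte_t *ptentp)
{
	bool any_young, any_dirty;
	int nr;

	/* madvise_folio_pte_batch() is the helper added in mm/madvise.c. */
	nr = madvise_folio_pte_batch(addr, end, folio, ptep, *ptentp,
				     &any_young, &any_dirty);

	/*
	 * If any PTE in the batch was accessed or dirtied, mark the
	 * representative PTE young/dirty so later decisions (e.g. whether
	 * lazyfreeing may clear these PTEs) see the folio's real state.
	 */
	if (any_young)
		*ptentp = pte_mkyoung(*ptentp);
	if (any_dirty)
		*ptentp = pte_mkdirty(*ptentp);

	return nr;
}

Because both pointers are optional, callers that do not need the
information simply pass NULL, which is why the copy_present_ptes() and
zap_present_ptes() call sites in mm/memory.c change only by gaining a
trailing NULL argument.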