Replaces 11 calls to compound_head() with 1, and removes 1375 bytes of
kernel text.

Previously, to determine if any pte was shared, the page mapcount
corresponding exactly to the pte was checked. This gave us a precise
number of shared ptes. Using folio_estimated_sharers() instead uses the
mapcount of the head page, giving us an estimate for tail page ptes.
This means that if a tail page's mapcount is greater than its head
page's mapcount, folio_estimated_sharers() would underestimate the
number of shared ptes, and vice versa.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@xxxxxxxxx>
Reviewed-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 mm/khugepaged.c | 51 +++++++++++++++++++++++--------------------------
 1 file changed, 25 insertions(+), 26 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 88433cc25d8a..7a552fe16c92 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -541,7 +541,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 					struct collapse_control *cc,
 					struct list_head *compound_pagelist)
 {
-	struct page *page = NULL;
+	struct folio *folio = NULL;
 	pte_t *_pte;
 	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
 	bool writable = false;
@@ -570,15 +570,15 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 			result = SCAN_PTE_UFFD_WP;
 			goto out;
 		}
-		page = vm_normal_page(vma, address, pteval);
-		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
+		folio = vm_normal_folio(vma, address, pteval);
+		if (unlikely(!folio) || unlikely(folio_is_zone_device(folio))) {
 			result = SCAN_PAGE_NULL;
 			goto out;
 		}

-		VM_BUG_ON_PAGE(!PageAnon(page), page);
+		VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);

-		if (page_mapcount(page) > 1) {
+		if (folio_estimated_sharers(folio) > 1) {
 			++shared;
 			if (cc->is_khugepaged &&
 			    shared > khugepaged_max_ptes_shared) {
@@ -588,16 +588,15 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 			}
 		}

-		if (PageCompound(page)) {
-			struct page *p;
-			page = compound_head(page);
+		if (folio_test_large(folio)) {
+			struct folio *f;

 			/*
 			 * Check if we have dealt with the compound page
 			 * already
 			 */
-			list_for_each_entry(p, compound_pagelist, lru) {
-				if (page == p)
+			list_for_each_entry(f, compound_pagelist, lru) {
+				if (folio == f)
 					goto next;
 			}
 		}
@@ -608,7 +607,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		 * is needed to serialize against split_huge_page
 		 * when invoked from the VM.
 		 */
-		if (!trylock_page(page)) {
+		if (!folio_trylock(folio)) {
 			result = SCAN_PAGE_LOCK;
 			goto out;
 		}
@@ -624,8 +623,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		 * but not from this process. The other process cannot write to
 		 * the page, only trigger CoW.
 		 */
-		if (!is_refcount_suitable(page)) {
-			unlock_page(page);
+		if (!is_refcount_suitable(&folio->page)) {
+			folio_unlock(folio);
 			result = SCAN_PAGE_COUNT;
 			goto out;
 		}
@@ -634,27 +633,27 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		 * Isolate the page to avoid collapsing an hugepage
 		 * currently in use by the VM.
 		 */
-		if (!isolate_lru_page(page)) {
-			unlock_page(page);
+		if (!folio_isolate_lru(folio)) {
+			folio_unlock(folio);
 			result = SCAN_DEL_PAGE_LRU;
 			goto out;
 		}
-		mod_node_page_state(page_pgdat(page),
-				NR_ISOLATED_ANON + page_is_file_lru(page),
-				compound_nr(page));
-		VM_BUG_ON_PAGE(!PageLocked(page), page);
-		VM_BUG_ON_PAGE(PageLRU(page), page);
+		node_stat_mod_folio(folio,
+				NR_ISOLATED_ANON + folio_is_file_lru(folio),
+				folio_nr_pages(folio));
+		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+		VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

-		if (PageCompound(page))
-			list_add_tail(&page->lru, compound_pagelist);
+		if (folio_test_large(folio))
+			list_add_tail(&folio->lru, compound_pagelist);
next:
 		/*
 		 * If collapse was initiated by khugepaged, check that there is
 		 * enough young pte to justify collapsing the page
 		 */
 		if (cc->is_khugepaged &&
-		    (pte_young(pteval) || page_is_young(page) ||
-		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
+		    (pte_young(pteval) || folio_test_young(folio) ||
+		     folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
 								     address)))
 			referenced++;

@@ -668,13 +667,13 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		result = SCAN_LACK_REFERENCED_PAGE;
 	} else {
 		result = SCAN_SUCCEED;
-		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
+		trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
 						    referenced, writable, result);
 		return result;
 	}
out:
 	release_pte_pages(pte, _pte, compound_pagelist);
-	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
+	trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
 					    referenced, writable, result);
 	return result;
 }
-- 
2.40.1
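
For readers unfamiliar with the helper: folio_estimated_sharers() lives
in mm/internal.h, and at the time of this series it reads roughly as
below (paraphrased; check your tree for the exact definition). It
returns the precise mapcount of the folio's first (head) page and
assumes the tail pages look the same, which is exactly where the
over-/underestimate described in the commit message comes from:

static inline int folio_estimated_sharers(struct folio *folio)
{
	/* Precise mapcount of the head page, assumed for every pte. */
	return page_mapcount(folio_page(folio, 0));
}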