From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx> Remove direct uses of HPAGE_PMD_NR in paths that aren't necessarily PMD sized. Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx> --- mm/huge_memory.c | 15 ++++++++------- mm/rmap.c | 10 +++++----- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 15a86b06befc..4c4f92349829 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2518,7 +2518,7 @@ static void remap_page(struct page *page) if (PageTransHuge(page)) { remove_migration_ptes(page, page, true); } else { - for (i = 0; i < HPAGE_PMD_NR; i++) + for (i = 0; i < hpage_nr_pages(page); i++) remove_migration_ptes(page + i, page + i, true); } } @@ -2593,6 +2593,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, struct lruvec *lruvec; struct address_space *swap_cache = NULL; unsigned long offset = 0; + unsigned int nr = hpage_nr_pages(head); int i; lruvec = mem_cgroup_page_lruvec(head, pgdat); @@ -2608,7 +2609,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, xa_lock(&swap_cache->i_pages); } - for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { + for (i = nr - 1; i >= 1; i--) { __split_huge_page_tail(head, i, lruvec, list); /* Some pages can be beyond i_size: drop them from page cache */ if (head[i].index >= end) { @@ -2649,7 +2650,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, remap_page(head); - for (i = 0; i < HPAGE_PMD_NR; i++) { + for (i = 0; i < nr; i++) { struct page *subpage = head + i; if (subpage == page) continue; @@ -2731,14 +2732,14 @@ int page_trans_huge_mapcount(struct page *page, int *total_mapcount) page = compound_head(page); _total_mapcount = ret = 0; - for (i = 0; i < HPAGE_PMD_NR; i++) { + for (i = 0; i < hpage_nr_pages(page); i++) { mapcount = atomic_read(&page[i]._mapcount) + 1; ret = max(ret, mapcount); _total_mapcount += mapcount; } if (PageDoubleMap(page)) { ret -= 1; - _total_mapcount -= HPAGE_PMD_NR; + _total_mapcount -= hpage_nr_pages(page); } mapcount = compound_mapcount(page); ret += mapcount; @@ -2755,9 +2756,9 @@ bool can_split_huge_page(struct page *page, int *pextra_pins) /* Additional pins from page cache */ if (PageAnon(page)) - extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0; + extra_pins = PageSwapCache(page) ? hpage_nr_pages(page) : 0; else - extra_pins = HPAGE_PMD_NR; + extra_pins = hpage_nr_pages(page); if (pextra_pins) *pextra_pins = extra_pins; return total_mapcount(page) == page_count(page) - extra_pins - 1; diff --git a/mm/rmap.c b/mm/rmap.c index f79a206b271a..9da4b5121baa 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1199,7 +1199,7 @@ void page_add_file_rmap(struct page *page, bool compound) VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); lock_page_memcg(page); if (compound && PageTransHuge(page)) { - for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) { + for (i = 0, nr = 0; i < hpage_nr_pages(page); i++) { if (atomic_inc_and_test(&page[i]._mapcount)) nr++; } @@ -1241,7 +1241,7 @@ static void page_remove_file_rmap(struct page *page, bool compound) /* page still mapped by someone else? */ if (compound && PageTransHuge(page)) { - for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) { + for (i = 0, nr = 0; i < hpage_nr_pages(page); i++) { if (atomic_add_negative(-1, &page[i]._mapcount)) nr++; } @@ -1290,7 +1290,7 @@ static void page_remove_anon_compound_rmap(struct page *page) * Subpages can be mapped with PTEs too. Check how many of * them are still mapped. 
*/ - for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) { + for (i = 0, nr = 0; i < hpage_nr_pages(page); i++) { if (atomic_add_negative(-1, &page[i]._mapcount)) nr++; } @@ -1300,10 +1300,10 @@ static void page_remove_anon_compound_rmap(struct page *page) * page of the compound page is unmapped, but at least one * small page is still mapped. */ - if (nr && nr < HPAGE_PMD_NR) + if (nr && nr < hpage_nr_pages(page)) deferred_split_huge_page(page); } else { - nr = HPAGE_PMD_NR; + nr = hpage_nr_pages(page); } if (unlikely(PageMlocked(page))) -- 2.26.2
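P.S. For readers outside mm/: the conversion only matters if hpage_nr_pages()
derives the subpage count from the compound page itself rather than from the
PMD geometry. The standalone userspace model below illustrates that
distinction; the toy struct page, its compound_order field, and the orders
chosen are assumptions for illustration, not the kernel's actual definitions
(though HPAGE_PMD_NR really is 512 on x86-64 with 4KiB pages and 2MiB PMDs).

	#include <stdio.h>

	/*
	 * Toy model of a compound page, for illustration only; the
	 * real struct page does not look like this.
	 */
	struct page {
		unsigned int compound_order;	/* log2 of subpage count */
	};

	/* PMD geometry on x86-64: 2MiB PMD / 4KiB pages = 512 subpages. */
	#define HPAGE_PMD_NR 512

	/*
	 * Model of hpage_nr_pages(): the count comes from the page's
	 * own order, so it is right for a compound page of any size,
	 * not just a PMD-sized one.
	 */
	static unsigned long hpage_nr_pages(const struct page *page)
	{
		return 1UL << page->compound_order;
	}

	int main(void)
	{
		struct page pmd_thp = { .compound_order = 9 };	/* 512 subpages */
		struct page small_thp = { .compound_order = 4 };	/* 16 subpages */

		/* For a PMD-sized THP the two values agree... */
		printf("order-9: HPAGE_PMD_NR=%d hpage_nr_pages=%lu\n",
		       HPAGE_PMD_NR, hpage_nr_pages(&pmd_thp));

		/*
		 * ...but for a smaller compound page a loop bounded by
		 * HPAGE_PMD_NR would walk past the last subpage, which
		 * is why the loops in the patch switch to the helper.
		 */
		printf("order-4: HPAGE_PMD_NR=%d hpage_nr_pages=%lu\n",
		       HPAGE_PMD_NR, hpage_nr_pages(&small_thp));

		return 0;
	}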