Remove a few hidden calls to compound_head, saving 387 bytes of text
on my test configuration.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 mm/vmscan.c | 54 ++++++++++++++++++++++++++---------------------------
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 81d61139bbb1..f8ec446041c3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2254,8 +2254,8 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
 }
 
 /*
- * move_pages_to_lru() moves pages from private @list to appropriate LRU list.
- * On return, @list is reused as a list of pages to be freed by the caller.
+ * move_pages_to_lru() moves folios from private @list to appropriate LRU list.
+ * On return, @list is reused as a list of folios to be freed by the caller.
  *
  * Returns the number of pages moved to the given lruvec.
  */
@@ -2263,42 +2263,42 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
 				      struct list_head *list)
 {
 	int nr_pages, nr_moved = 0;
-	LIST_HEAD(pages_to_free);
-	struct page *page;
+	LIST_HEAD(folios_to_free);
 
 	while (!list_empty(list)) {
-		page = lru_to_page(list);
-		VM_BUG_ON_PAGE(PageLRU(page), page);
-		list_del(&page->lru);
-		if (unlikely(!page_evictable(page))) {
+		struct folio *folio = lru_to_folio(list);
+
+		VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
+		list_del(&folio->lru);
+		if (unlikely(!folio_evictable(folio))) {
 			spin_unlock_irq(&lruvec->lru_lock);
-			putback_lru_page(page);
+			folio_putback_lru(folio);
 			spin_lock_irq(&lruvec->lru_lock);
 			continue;
 		}
 
 		/*
-		 * The SetPageLRU needs to be kept here for list integrity.
+		 * The folio_set_lru needs to be kept here for list integrity.
 		 * Otherwise:
 		 *   #0 move_pages_to_lru             #1 release_pages
-		 *   if !put_page_testzero
-		 *				      if (put_page_testzero())
-		 *				        !PageLRU //skip lru_lock
-		 *     SetPageLRU()
-		 *     list_add(&page->lru,)
-		 *                                        list_add(&page->lru,)
+		 *   if (!folio_put_testzero())
+		 *				      if (folio_put_testzero())
+		 *				        !lru //skip lru_lock
+		 *     folio_set_lru()
+		 *     list_add(&folio->lru,)
+		 *                                        list_add(&folio->lru,)
 		 */
-		SetPageLRU(page);
+		folio_set_lru(folio);
 
-		if (unlikely(put_page_testzero(page))) {
-			__clear_page_lru_flags(page);
+		if (unlikely(folio_put_testzero(folio))) {
+			__folio_clear_lru_flags(folio);
 
-			if (unlikely(PageCompound(page))) {
+			if (unlikely(folio_test_large(folio))) {
 				spin_unlock_irq(&lruvec->lru_lock);
-				destroy_compound_page(page);
+				destroy_compound_page(&folio->page);
 				spin_lock_irq(&lruvec->lru_lock);
 			} else
-				list_add(&page->lru, &pages_to_free);
+				list_add(&folio->lru, &folios_to_free);
 
 			continue;
 		}
@@ -2307,18 +2307,18 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
 		 * All pages were isolated from the same lruvec (and isolation
 		 * inhibits memcg migration).
 		 */
-		VM_BUG_ON_PAGE(!folio_matches_lruvec(page_folio(page), lruvec), page);
-		add_page_to_lru_list(page, lruvec);
-		nr_pages = thp_nr_pages(page);
+		VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
+		lruvec_add_folio(lruvec, folio);
+		nr_pages = folio_nr_pages(folio);
 		nr_moved += nr_pages;
-		if (PageActive(page))
+		if (folio_test_active(folio))
 			workingset_age_nonresident(lruvec, nr_pages);
 	}
 
 	/*
	 * To save our caller's stack, now use input list for pages to free.
	 */
-	list_splice(&pages_to_free, list);
+	list_splice(&folios_to_free, list);
 
 	return nr_moved;
 }
-- 
2.35.1