This ends up being inlined into mark_page_accessed(), saving 70 bytes
of text.  It eliminates a lot of calls to compound_head(), but we still
have to pass a page to __activate_page() because pagevec_lru_move_fn()
takes a struct page.  That should be cleaned up eventually.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 mm/swap.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

diff --git a/mm/swap.c b/mm/swap.c
index 828889349b0b..07244999bcc6 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -347,16 +347,16 @@ static bool need_activate_page_drain(int cpu)
 	return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
 }
 
-static void activate_page(struct page *page)
+static void folio_activate(struct folio *folio)
 {
-	page = compound_head(page);
-	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+	if (folio_lru(folio) && !folio_active(folio) &&
+	    !folio_unevictable(folio)) {
 		struct pagevec *pvec;
 
 		local_lock(&lru_pvecs.lock);
 		pvec = this_cpu_ptr(&lru_pvecs.activate_page);
-		get_page(page);
-		if (pagevec_add_and_need_flush(pvec, page))
+		folio_get(folio);
+		if (pagevec_add_and_need_flush(pvec, &folio->page))
 			pagevec_lru_move_fn(pvec, __activate_page);
 		local_unlock(&lru_pvecs.lock);
 	}
@@ -367,16 +367,15 @@ static inline void activate_page_drain(int cpu)
 {
 }
 
-static void activate_page(struct page *page)
+static void folio_activate(struct folio *folio)
 {
 	struct lruvec *lruvec;
 
-	page = compound_head(page);
-	if (TestClearPageLRU(page)) {
-		lruvec = lock_page_lruvec_irq(page);
-		__activate_page(page, lruvec);
+	if (folio_test_clear_lru_flag(folio)) {
+		lruvec = folio_lock_lruvec_irq(folio);
+		__activate_page(&folio->page, lruvec);
 		unlock_page_lruvec_irq(lruvec);
-		SetPageLRU(page);
+		folio_set_lru_flag(folio);
 	}
 }
 #endif
@@ -441,7 +440,7 @@ void mark_page_accessed(struct page *page)
 	 * LRU on the next drain.
 	 */
 	if (PageLRU(page))
-		activate_page(page);
+		folio_activate(page_folio(page));
 	else
 		__lru_cache_activate_page(page);
 	ClearPageReferenced(page);
-- 
2.30.2
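
[A note on the compound_head() savings the commit message mentions: the
win comes from the folio type invariant.  A struct page handed to a
page-flag helper may be a tail page of a compound page, so every helper
must resolve the head page before touching flags; a struct folio is
guaranteed to refer to a head page, so the lookup happens once at the
page_folio() boundary and never again.  The following user-space sketch
illustrates that pattern only; the struct layouts and the underscored
helper names are illustrative stand-ins, not the kernel's actual
definitions.

#include <stdbool.h>
#include <stddef.h>

struct page {
	struct page *head;	/* NULL for a head page, else the head */
	bool active;
};

struct folio {
	struct page page;	/* a folio always wraps a head page */
};

static struct page *compound_head_(struct page *page)
{
	return page->head ? page->head : page;
}

/* Page API: every flag test must repeat the head lookup. */
static bool page_active_(struct page *page)
{
	return compound_head_(page)->active;
}

/* Folio API: resolve the head once, at the boundary... */
static struct folio *page_folio_(struct page *page)
{
	/* Valid C: struct folio's first member is a struct page. */
	return (struct folio *)compound_head_(page);
}

/* ...after which every test is a plain field access. */
static bool folio_active_(struct folio *folio)
{
	return folio->page.active;
}

In folio_activate() above, the three flag tests and folio_get() all skip
the head lookup for exactly this reason; only the unconverted
pagevec_lru_move_fn() path still needs &folio->page.]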