mark_page_accessed() already operated on the entire compound page, but now
we can avoid calling compound_head() quite so many times.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 include/linux/swap.h |  8 ++++++--
 mm/swap.c            | 28 +++++++++++++---------------
 2 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 5bba15ac5a2e..c097bc9cedd9 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -338,7 +338,7 @@ extern void lru_note_cost(struct lruvec *lruvec, bool file,
 			unsigned int nr_pages);
 extern void lru_note_cost_page(struct page *);
 extern void lru_cache_add(struct page *);
-extern void mark_page_accessed(struct page *);
+void mark_folio_accessed(struct folio *);
 extern void lru_add_drain(void);
 extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_cpu_zone(struct zone *zone);
@@ -348,10 +348,14 @@ extern void deactivate_file_page(struct page *page);
 extern void deactivate_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
 extern void swap_setup(void);
-
 extern void lru_cache_add_inactive_or_unevictable(struct page *page,
 						struct vm_area_struct *vma);
 
+static inline void mark_page_accessed(struct page *page)
+{
+	mark_folio_accessed(page_folio(page));
+}
+
 /* linux/mm/vmscan.c */
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
diff --git a/mm/swap.c b/mm/swap.c
index 490553f3f9ef..c3638a13987f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -411,36 +411,34 @@ static void __lru_cache_activate_page(struct page *page)
  * When a newly allocated page is not yet visible, so safe for non-atomic ops,
  * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
  */
-void mark_page_accessed(struct page *page)
+void mark_folio_accessed(struct folio *folio)
 {
-	page = compound_head(page);
-
-	if (!PageReferenced(page)) {
-		SetPageReferenced(page);
-	} else if (PageUnevictable(page)) {
+	if (!FolioReferenced(folio)) {
+		SetFolioReferenced(folio);
+	} else if (FolioUnevictable(folio)) {
 		/*
 		 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
 		 * this list is never rotated or maintained, so marking an
 		 * unevictable page accessed has no effect.
 		 */
-	} else if (!PageActive(page)) {
+	} else if (!FolioActive(folio)) {
 		/*
 		 * If the page is on the LRU, queue it for activation via
 		 * lru_pvecs.activate_page. Otherwise, assume the page is on a
 		 * pagevec, mark it active and it'll be moved to the active
 		 * LRU on the next drain.
 		 */
-		if (PageLRU(page))
-			activate_page(page);
+		if (FolioLRU(folio))
+			activate_page(&folio->page);
 		else
-			__lru_cache_activate_page(page);
-		ClearPageReferenced(page);
-		workingset_activation(page);
+			__lru_cache_activate_page(&folio->page);
+		ClearFolioReferenced(folio);
+		workingset_activation(&folio->page);
 	}
-	if (page_is_idle(page))
-		clear_page_idle(page);
+	if (page_is_idle(&folio->page))
+		clear_page_idle(&folio->page);
 }
-EXPORT_SYMBOL(mark_page_accessed);
+EXPORT_SYMBOL(mark_folio_accessed);
 
 /**
  * lru_cache_add - add a page to a page list
-- 
2.29.2
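
For illustration only, not part of the patch: a minimal sketch of how the two
entry points might be used after this change. The caller functions below and
the way they obtain their folio/page are hypothetical; only mark_folio_accessed(),
mark_page_accessed() and page_folio() come from the patch and the folio series.

	/* Hypothetical caller that already holds a folio: it calls
	 * mark_folio_accessed() directly and never needs compound_head(). */
	static void touch_cached_folio(struct folio *folio)
	{
		mark_folio_accessed(folio);
	}

	/* Hypothetical legacy caller: it keeps compiling unchanged because the
	 * static inline mark_page_accessed() wrapper does the page-to-folio
	 * conversion (page_folio()) exactly once on its behalf. */
	static void touch_cached_page(struct page *page)
	{
		mark_page_accessed(page);
	}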