Mostly this just eliminates calls to compound_head(), but
NR_VMSCAN_IMMEDIATE was being incremented by 1 instead of by nr_pages.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 mm/vmscan.c | 48 ++++++++++++++++++++++++++----------------------
 1 file changed, 26 insertions(+), 22 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8f7c32b3d65e..950eeb2f759b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1768,28 +1768,31 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 			}
 		}
 
-		if (PageDirty(page)) {
+		if (folio_test_dirty(folio)) {
 			/*
-			 * Only kswapd can writeback filesystem pages
+			 * Only kswapd can writeback filesystem folios
 			 * to avoid risk of stack overflow. But avoid
-			 * injecting inefficient single-page IO into
+			 * injecting inefficient single-folio I/O into
 			 * flusher writeback as much as possible: only
-			 * write pages when we've encountered many
-			 * dirty pages, and when we've already scanned
-			 * the rest of the LRU for clean pages and see
-			 * the same dirty pages again (PageReclaim).
+			 * write folios when we've encountered many
+			 * dirty folios, and when we've already scanned
+			 * the rest of the LRU for clean folios and see
+			 * the same dirty folios again (with the reclaim
+			 * flag set).
 			 */
-			if (page_is_file_lru(page) &&
-			    (!current_is_kswapd() || !PageReclaim(page) ||
+			if (folio_is_file_lru(folio) &&
+			    (!current_is_kswapd() ||
+			     !folio_test_reclaim(folio) ||
 			     !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
 				/*
 				 * Immediately reclaim when written back.
-				 * Similar in principal to deactivate_page()
-				 * except we already have the page isolated
+				 * Similar in principle to deactivate_page()
+				 * except we already have the folio isolated
 				 * and know it's dirty
 				 */
-				inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
-				SetPageReclaim(page);
+				node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE,
+						nr_pages);
+				folio_set_reclaim(folio);
 
 				goto activate_locked;
 			}
@@ -1802,8 +1805,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 				goto keep_locked;
 
 			/*
-			 * Page is dirty. Flush the TLB if a writable entry
-			 * potentially exists to avoid CPU writes after IO
+			 * Folio is dirty. Flush the TLB if a writable entry
+			 * potentially exists to avoid CPU writes after I/O
 			 * starts and then write it out here.
 			 */
 			try_to_unmap_flush_dirty();
@@ -1815,23 +1818,24 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 			case PAGE_SUCCESS:
 				stat->nr_pageout += nr_pages;
 
-				if (PageWriteback(page))
+				if (folio_test_writeback(folio))
 					goto keep;
-				if (PageDirty(page))
+				if (folio_test_dirty(folio))
 					goto keep;
 
 				/*
 				 * A synchronous write - probably a ramdisk.  Go
-				 * ahead and try to reclaim the page.
+				 * ahead and try to reclaim the folio.
 				 */
-				if (!trylock_page(page))
+				if (!folio_trylock(folio))
 					goto keep;
-				if (PageDirty(page) || PageWriteback(page))
+				if (folio_test_dirty(folio) ||
+				    folio_test_writeback(folio))
 					goto keep_locked;
-				mapping = page_mapping(page);
+				mapping = folio_mapping(folio);
 				fallthrough;
 			case PAGE_CLEAN:
-				; /* try to free the page below */
+				; /* try to free the folio below */
 			}
 		}
 
-- 
2.34.1
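
As an aside on the NR_VMSCAN_IMMEDIATE change: it is an accounting fix,
not just an API swap. When a large folio backed by nr_pages base pages
is marked for immediate reclaim, bumping the counter by 1 undercounts
by nr_pages - 1. A minimal userspace sketch of the difference (purely
illustrative; the counter and the *_old/*_new helpers below are
stand-ins, not the kernel's vmstat implementation):

#include <stdio.h>

/* Illustrative stand-in for the per-node NR_VMSCAN_IMMEDIATE counter. */
static long nr_vmscan_immediate;

/* Old behaviour: one event per folio, regardless of its size. */
static void inc_node_page_state_old(void)
{
	nr_vmscan_immediate += 1;
}

/* New behaviour: account every base page backing the folio. */
static void node_stat_mod_folio_new(long nr_pages)
{
	nr_vmscan_immediate += nr_pages;
}

int main(void)
{
	long nr_pages = 512;	/* e.g. a 2MB THP of 4kB base pages */

	nr_vmscan_immediate = 0;
	inc_node_page_state_old();
	printf("old accounting: %ld\n", nr_vmscan_immediate);	/* 1 */

	nr_vmscan_immediate = 0;
	node_stat_mod_folio_new(nr_pages);
	printf("new accounting: %ld\n", nr_vmscan_immediate);	/* 512 */

	return 0;
}

For order-0 folios nr_pages is 1 and the two behave identically, so the
visible change is confined to large folios.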