Only convert a few easy parts of this function to use the folio passed in;
convert back to struct page for the majority of it.  Removes three hidden
calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 include/linux/zswap.h |  4 ++--
 mm/page_io.c          |  2 +-
 mm/zswap.c            | 11 ++++++-----
 3 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/include/linux/zswap.h b/include/linux/zswap.h
index 9f318c8bc367..2a60ce39cfde 100644
--- a/include/linux/zswap.h
+++ b/include/linux/zswap.h
@@ -11,7 +11,7 @@ extern atomic_t zswap_stored_pages;
 #ifdef CONFIG_ZSWAP
 
 bool zswap_store(struct folio *folio);
-bool zswap_load(struct page *page);
+bool zswap_load(struct folio *folio);
 void zswap_invalidate(int type, pgoff_t offset);
 void zswap_swapon(int type);
 void zswap_swapoff(int type);
@@ -23,7 +23,7 @@ static inline bool zswap_store(struct folio *folio)
 	return false;
 }
 
-static inline bool zswap_load(struct page *page)
+static inline bool zswap_load(struct folio *folio)
 {
 	return false;
 }
diff --git a/mm/page_io.c b/mm/page_io.c
index e5b6f1402506..8741d3a0d48a 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -516,7 +516,7 @@ void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
 	}
 	delayacct_swapin_start();
 
-	if (zswap_load(page)) {
+	if (zswap_load(folio)) {
 		folio_mark_uptodate(folio);
 		folio_unlock(folio);
 	} else if (data_race(sis->flags & SWP_FS_OPS)) {
diff --git a/mm/zswap.c b/mm/zswap.c
index 69d097d14255..4287f8551841 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1381,11 +1381,12 @@ bool zswap_store(struct folio *folio)
 	goto reject;
 }
 
-bool zswap_load(struct page *page)
+bool zswap_load(struct folio *folio)
 {
-	swp_entry_t swp = { .val = page_private(page), };
+	swp_entry_t swp = folio_swap_entry(folio);
 	int type = swp_type(swp);
 	pgoff_t offset = swp_offset(swp);
+	struct page *page = &folio->page;
 	struct zswap_tree *tree = zswap_trees[type];
 	struct zswap_entry *entry;
 	struct scatterlist input, output;
@@ -1394,8 +1395,8 @@ bool zswap_load(struct page *page)
 	unsigned int dlen;
 	bool ret;
 
-	VM_WARN_ON_ONCE(!PageLocked(page));
-	VM_WARN_ON_ONCE(!PageSwapCache(page));
+	VM_WARN_ON_ONCE(!folio_test_locked(folio));
+	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
 
 	/* find */
 	spin_lock(&tree->lock);
@@ -1457,7 +1458,7 @@ bool zswap_load(struct page *page)
 	spin_lock(&tree->lock);
 	if (ret && zswap_exclusive_loads_enabled) {
 		zswap_invalidate_entry(tree, entry);
-		SetPageDirty(page);
+		folio_mark_dirty(folio);
 	} else if (entry->length) {
 		spin_lock(&entry->pool->lru_lock);
 		list_move(&entry->lru, &entry->pool->lru);
-- 
2.39.2
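
For anyone wondering where the three hidden compound_head() calls come
from: PageLocked(), PageSwapCache() and SetPageDirty() all operate on the
head page, so each call first resolves a possible tail page to its head.
A minimal sketch of the difference (hypothetical helper names; the real
helpers are macro-generated in include/linux/page-flags.h):

	#include <linux/mm.h>
	#include <linux/page-flags.h>

	/* Page-based test: must find the head page before testing flags. */
	static inline bool example_page_locked(struct page *page)
	{
		/* compound_head() is the hidden cost on every call. */
		return test_bit(PG_locked, &compound_head(page)->flags);
	}

	/* Folio-based test: a folio is never a tail page, so the flags
	 * word can be tested directly, with no head lookup. */
	static inline bool example_folio_locked(struct folio *folio)
	{
		return test_bit(PG_locked, &folio->flags);
	}

Since swap_readpage() already has the folio in hand, zswap_load() can use
folio_test_locked(), folio_test_swapcache() and folio_mark_dirty() and
skip those lookups entirely.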