The quilt patch titled
     Subject: swap: add swap_cache_get_folio()
has been removed from the -mm tree.  Its filename was
     swap-add-swap_cache_get_folio.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>
Subject: swap: add swap_cache_get_folio()
Date: Fri, 2 Sep 2022 20:46:15 +0100

Convert lookup_swap_cache() into swap_cache_get_folio() and add a
lookup_swap_cache() wrapper around it.

[akpm@xxxxxxxxxxxxxxxxxxxx: add CONFIG_SWAP=n stub for swap_cache_get_folio()]
Link: https://lkml.kernel.org/r/20220902194653.1739778-20-willy@xxxxxxxxxxxxx
Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/swap.h       |    8 ++++++++
 mm/swap_state.c |   32 +++++++++++++++++++++-----------
 2 files changed, 29 insertions(+), 11 deletions(-)

--- a/mm/swap.h~swap-add-swap_cache_get_folio
+++ a/mm/swap.h
@@ -39,6 +39,8 @@ void __delete_from_swap_cache(struct fol
 void delete_from_swap_cache(struct folio *folio);
 void clear_shadow_from_swap_cache(int type, unsigned long begin,
 				  unsigned long end);
+struct folio *swap_cache_get_folio(swp_entry_t entry,
+		struct vm_area_struct *vma, unsigned long addr);
 struct page *lookup_swap_cache(swp_entry_t entry,
 			       struct vm_area_struct *vma,
 			       unsigned long addr);
@@ -99,6 +101,12 @@ static inline int swap_writepage(struct
 	return 0;
 }
 
+static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
+		struct vm_area_struct *vma, unsigned long addr)
+{
+	return NULL;
+}
+
 static inline struct page *lookup_swap_cache(swp_entry_t swp,
 					     struct vm_area_struct *vma,
 					     unsigned long addr)
--- a/mm/swap_state.c~swap-add-swap_cache_get_folio
+++ a/mm/swap_state.c
@@ -317,24 +317,24 @@ static inline bool swap_use_vma_readahea
 }
 
 /*
- * Lookup a swap entry in the swap cache. A found page will be returned
+ * Lookup a swap entry in the swap cache. A found folio will be returned
  * unlocked and with its refcount incremented - we rely on the kernel
- * lock getting page table operations atomic even if we drop the page
+ * lock getting page table operations atomic even if we drop the folio
  * lock before returning.
  */
-struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
-			       unsigned long addr)
+struct folio *swap_cache_get_folio(swp_entry_t entry,
+		struct vm_area_struct *vma, unsigned long addr)
 {
-	struct page *page;
+	struct folio *folio;
 	struct swap_info_struct *si;
 
 	si = get_swap_device(entry);
 	if (!si)
 		return NULL;
-	page = find_get_page(swap_address_space(entry), swp_offset(entry));
+	folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
 	put_swap_device(si);
 
-	if (page) {
+	if (folio) {
 		bool vma_ra = swap_use_vma_readahead();
 		bool readahead;
 
@@ -342,10 +342,10 @@ struct page *lookup_swap_cache(swp_entry
 		 * At the moment, we don't support PG_readahead for anon THP
 		 * so let's bail out rather than confusing the readahead stat.
 		 */
-		if (unlikely(PageTransCompound(page)))
-			return page;
+		if (unlikely(folio_test_large(folio)))
+			return folio;
 
-		readahead = TestClearPageReadahead(page);
+		readahead = folio_test_clear_readahead(folio);
 		if (vma && vma_ra) {
 			unsigned long ra_val;
 			int win, hits;
@@ -366,7 +366,17 @@ struct page *lookup_swap_cache(swp_entry
 		}
 	}
 
-	return page;
+	return folio;
+}
+
+struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
+			       unsigned long addr)
+{
+	struct folio *folio = swap_cache_get_folio(entry, vma, addr);
+
+	if (!folio)
+		return NULL;
+	return folio_file_page(folio, swp_offset(entry));
 }
 
 /**
_

Patches currently in -mm which might be from willy@xxxxxxxxxxxxx are
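
For illustration only -- the sketch below is not part of the patch.  An
mm-internal caller that previously used lookup_swap_cache() could take the
folio from swap_cache_get_folio() directly and convert back to a struct page
only where one is still needed.  The function name my_swap_lookup is
hypothetical; everything it calls is either added by the patch above or
existing kernel API.

#include <linux/swap.h>
#include <linux/swapops.h>	/* swp_offset() */
#include <linux/pagemap.h>	/* folio_file_page() */
#include "swap.h"		/* mm-internal: declares swap_cache_get_folio() */

static struct page *my_swap_lookup(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio;

	/* Returned unlocked, with a reference held (see the comment above). */
	folio = swap_cache_get_folio(entry, vma, addr);
	if (!folio)
		return NULL;	/* not in the swap cache */

	/*
	 * A folio-aware caller would stop here and work with the folio.
	 * Converting to the precise subpage is exactly what the
	 * lookup_swap_cache() wrapper in the patch does.
	 */
	return folio_file_page(folio, swp_offset(entry));
}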