The patch titled
     Subject: shmem: add shmem_read_folio() and shmem_read_folio_gfp()
has been added to the -mm mm-unstable branch.  Its filename is
     shmem-add-shmem_read_folio-and-shmem_read_folio_gfp.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/shmem-add-shmem_read_folio-and-shmem_read_folio_gfp.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>
Subject: shmem: add shmem_read_folio() and shmem_read_folio_gfp()
Date: Mon, 6 Feb 2023 16:25:20 +0000

These are the folio replacements for shmem_read_mapping_page() and
shmem_read_mapping_page_gfp().

Link: https://lkml.kernel.org/r/20230206162520.4029022-2-willy@xxxxxxxxxxxxx
Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Cc: Mark Hemment <markhemm@xxxxxxxxxxxxxx>
Cc: Charan Teja Kalla <quic_charante@xxxxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Pavankumar Kondeti <quic_pkondeti@xxxxxxxxxxx>
Cc: Shakeel Butt <shakeelb@xxxxxxxxxx>
Cc: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

--- a/include/linux/shmem_fs.h~shmem-add-shmem_read_folio-and-shmem_read_folio_gfp
+++ a/include/linux/shmem_fs.h
@@ -109,6 +109,14 @@ enum sgp_type {
 
 int shmem_get_folio(struct inode *inode, pgoff_t index,
 		struct folio **foliop, enum sgp_type sgp);
+struct folio *shmem_read_folio_gfp(struct address_space *mapping,
+		pgoff_t index, gfp_t gfp);
+
+static inline struct folio *shmem_read_folio(struct address_space *mapping,
+		pgoff_t index)
+{
+	return shmem_read_folio_gfp(mapping, index, mapping_gfp_mask(mapping));
+}
 
 static inline struct page *shmem_read_mapping_page(
 				struct address_space *mapping, pgoff_t index)
--- a/mm/shmem.c~shmem-add-shmem_read_folio-and-shmem_read_folio_gfp
+++ a/mm/shmem.c
@@ -4311,9 +4311,9 @@ int shmem_zero_setup(struct vm_area_stru
 }
 
 /**
- * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
- * @mapping:	the page's address_space
- * @index:	the page index
+ * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
+ * @mapping:	the folio's address_space
+ * @index:	the folio index
  * @gfp:	the page allocator flags to use if allocating
  *
  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
@@ -4325,13 +4325,12 @@ int shmem_zero_setup(struct vm_area_stru
  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
  */
-struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
-					 pgoff_t index, gfp_t gfp)
+struct folio *shmem_read_folio_gfp(struct address_space *mapping,
+		pgoff_t index, gfp_t gfp)
 {
 #ifdef CONFIG_SHMEM
 	struct inode *inode = mapping->host;
 	struct folio *folio;
-	struct page *page;
 	int error;
 
 	BUG_ON(!shmem_mapping(mapping));
@@ -4341,18 +4340,27 @@ struct page *shmem_read_mapping_page_gfp
 		return ERR_PTR(error);
 
 	folio_unlock(folio);
-	page = folio_file_page(folio, index);
+	return folio;
+#else
+	/*
+	 * The tiny !SHMEM case uses ramfs without swap
+	 */
+	return mapping_read_folio_gfp(mapping, index, gfp);
+#endif
+}
+EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);
+
+struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+					 pgoff_t index, gfp_t gfp)
+{
+	struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
+	struct page *page = folio_file_page(folio, index);
+
 	if (PageHWPoison(page)) {
 		folio_put(folio);
 		return ERR_PTR(-EIO);
 	}
 
 	return page;
-#else
-	/*
-	 * The tiny !SHMEM case uses ramfs without swap
-	 */
-	return read_cache_page_gfp(mapping, index, gfp);
-#endif
 }
 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
_

Patches currently in -mm which might be from willy@xxxxxxxxxxxxx are

mm-add-memcpy_from_file_folio.patch
filemap-add-mapping_read_folio_gfp.patch
shmem-add-shmem_read_folio-and-shmem_read_folio_gfp.patch
shmem-fix-w=1-build-warnings-with-config_shmem=n.patch
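
For anyone wanting to try the new interface, here is a minimal caller
sketch (not part of the patch above; the function name and parameters
are made up for illustration).  It mixes __GFP_NORETRY | __GFP_NOWARN
into the mapping's gfp mask, mirroring the i915 pattern cited in the
kerneldoc:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

/*
 * Illustrative only: read one folio from a shmem mapping.  On success
 * the folio comes back unlocked and uptodate with a reference held, so
 * the caller must folio_put() it when finished.
 */
static int example_read_shmem(struct address_space *mapping, pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
	struct folio *folio;

	folio = shmem_read_folio_gfp(mapping, index, gfp);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* ... use the contents, e.g. via kmap_local_folio(folio, 0) ... */

	folio_put(folio);
	return 0;
}

Callers happy with the mapping's default gfp mask can use the shorter
shmem_read_folio(mapping, index) wrapper instead.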