These are the folio equivalent of page_mapping() and page_file_mapping().
Add an out-of-line page_mapping() wrapper around folio_mapping()
in order to prevent the page_folio() call from bloating every caller
of page_mapping().  Adjust page_file_mapping() and page_mapping_file()
to use folios internally.  This ends up saving 102 bytes of text
overall.  folio_mapping() is 45 bytes shorter than page_mapping()
was, but the compiler chooses to inline folio_mapping() into
page_mapping_file(), for a net increase of 69 bytes in the core.
This is made up for by a few bytes less in dozens of nfs functions
(which call page_file_mapping()).  The small amount of difference
appears to be a slight change in gcc's register allocation decisions,
which allow:

   48 8b 56 08		mov    0x8(%rsi),%rdx
   48 8d 42 ff		lea    -0x1(%rdx),%rax
   83 e2 01		and    $0x1,%edx
   48 0f 44 c6		cmove  %rsi,%rax

to become:

   48 8b 46 08		mov    0x8(%rsi),%rax
   48 8d 78 ff		lea    -0x1(%rax),%rdi
   a8 01		test   $0x1,%al
   48 0f 44 fe		cmove  %rsi,%rdi

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 include/linux/mm.h      | 14 --------------
 include/linux/pagemap.h | 17 +++++++++++++++++
 mm/Makefile             |  2 +-
 mm/folio-compat.c       | 13 +++++++++++++
 mm/swapfile.c           |  6 +++---
 mm/util.c               | 20 ++++++++++----------
 6 files changed, 44 insertions(+), 28 deletions(-)
 create mode 100644 mm/folio-compat.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4595955805f8..9199b59ee8da 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1603,19 +1603,6 @@ void page_address_init(void);
 
 extern void *page_rmapping(struct page *page);
 extern struct anon_vma *page_anon_vma(struct page *page);
-extern struct address_space *page_mapping(struct page *page);
-
-extern struct address_space *__page_file_mapping(struct page *);
-
-static inline
-struct address_space *page_file_mapping(struct page *page)
-{
-	if (unlikely(PageSwapCache(page)))
-		return __page_file_mapping(page);
-
-	return page->mapping;
-}
-
 extern pgoff_t __page_file_index(struct page *page);
 
 /*
@@ -1630,7 +1617,6 @@ static inline pgoff_t page_index(struct page *page)
 }
 
 bool page_mapped(struct page *page);
-struct address_space *page_mapping(struct page *page);
 struct address_space *page_mapping_file(struct page *page);
 
 /*
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 5094b50f7680..5a2c0764d7c0 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -157,6 +157,23 @@ static inline void filemap_nr_thps_dec(struct address_space *mapping)
 
 void release_pages(struct page **pages, int nr);
 
+struct address_space *page_mapping(struct page *);
+struct address_space *folio_mapping(struct folio *);
+struct address_space *__folio_file_mapping(struct folio *);
+
+static inline struct address_space *folio_file_mapping(struct folio *folio)
+{
+	if (unlikely(FolioSwapCache(folio)))
+		return __folio_file_mapping(folio);
+
+	return folio->page.mapping;
+}
+
+static inline struct address_space *page_file_mapping(struct page *page)
+{
+	return folio_file_mapping(page_folio(page));
+}
+
 /*
  * speculatively take a reference to a page.
  * If the page is free (_refcount == 0), then _refcount is untouched, and 0
diff --git a/mm/Makefile b/mm/Makefile
index 72227b24a616..ceb0089efd29 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -46,7 +46,7 @@ mmu-$(CONFIG_MMU)	+= process_vm_access.o
 endif
 
 obj-y			:= filemap.o mempool.o oom_kill.o fadvise.o \
-			   maccess.o page-writeback.o \
+			   maccess.o page-writeback.o folio-compat.o \
			   readahead.o swap.o truncate.o vmscan.o shmem.o \
			   util.o mmzone.o vmstat.o backing-dev.o \
			   mm_init.o percpu.o slab_common.o \
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
new file mode 100644
index 000000000000..5e107aa30a62
--- /dev/null
+++ b/mm/folio-compat.c
@@ -0,0 +1,13 @@
+/*
+ * Compatibility functions which bloat the callers too much to make inline.
+ * All of the callers of these functions should be converted to use folios
+ * eventually.
+ */
+
+#include <linux/pagemap.h>
+
+struct address_space *page_mapping(struct page *page)
+{
+	return folio_mapping(page_folio(page));
+}
+EXPORT_SYMBOL(page_mapping);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 084a5b9a18e5..b80af7537d9e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3535,11 +3535,11 @@ struct swap_info_struct *page_swap_info(struct page *page)
 /*
  * out-of-line __page_file_ methods to avoid include hell.
  */
-struct address_space *__page_file_mapping(struct page *page)
+struct address_space *__folio_file_mapping(struct folio *folio)
 {
-	return page_swap_info(page)->swap_file->f_mapping;
+	return page_swap_info(&folio->page)->swap_file->f_mapping;
 }
-EXPORT_SYMBOL_GPL(__page_file_mapping);
+EXPORT_SYMBOL_GPL(__folio_file_mapping);
 
 pgoff_t __page_file_index(struct page *page)
 {
diff --git a/mm/util.c b/mm/util.c
index 54870226cea6..9ab72cfa4aa1 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -686,39 +686,39 @@ struct anon_vma *page_anon_vma(struct page *page)
 	return __page_rmapping(page);
 }
 
-struct address_space *page_mapping(struct page *page)
+struct address_space *folio_mapping(struct folio *folio)
 {
 	struct address_space *mapping;
 
-	page = compound_head(page);
-
 	/* This happens if someone calls flush_dcache_page on slab page */
-	if (unlikely(PageSlab(page)))
+	if (unlikely(FolioSlab(folio)))
 		return NULL;
 
-	if (unlikely(PageSwapCache(page))) {
+	if (unlikely(FolioSwapCache(folio))) {
 		swp_entry_t entry;
 
-		entry.val = page_private(page);
+		entry.val = folio_private(folio);
 		return swap_address_space(entry);
 	}
 
-	mapping = page->mapping;
+	mapping = folio->page.mapping;
 	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
 		return NULL;
 
 	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
 }
-EXPORT_SYMBOL(page_mapping);
+EXPORT_SYMBOL(folio_mapping);
 
 /*
  * For file cache pages, return the address_space, otherwise return NULL
  */
 struct address_space *page_mapping_file(struct page *page)
 {
-	if (unlikely(PageSwapCache(page)))
+	struct folio *folio = page_folio(page);
+
+	if (unlikely(FolioSwapCache(folio)))
 		return NULL;
-	return page_mapping(page);
+	return folio_mapping(folio);
 }
 
 /* Slow path of page_mapcount() for compound pages */
-- 
2.30.0
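
As an aside, a minimal sketch of what a converted caller might look like
once it has a folio in hand, using folio_file_mapping() directly rather
than the page_file_mapping() compat wrapper.  The function name
example_flush_folio() and the filemap_flush() call are illustrative
assumptions, not part of this patch:

	#include <linux/pagemap.h>

	/* Hypothetical caller: write back the mapping behind a page
	 * cache or swap cache folio. */
	static int example_flush_folio(struct folio *folio)
	{
		/*
		 * For swap cache folios this resolves to the swap file's
		 * mapping via __folio_file_mapping(); otherwise it is the
		 * folio's own ->mapping.
		 */
		struct address_space *mapping = folio_file_mapping(folio);

		return filemap_flush(mapping);
	}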