Change the function signature to pass in the folio as all three callers
have it.  This removes a reference to page->index, which we're trying
to get rid of.  Also move page_pgoff() to mm/internal.h as code outside
mm has no business calling it.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 include/linux/pagemap.h | 18 ------------------
 mm/internal.h           |  6 ++++++
 mm/memory-failure.c     |  4 ++--
 mm/rmap.c               |  2 +-
 4 files changed, 9 insertions(+), 21 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 483a191bb4df..1f295ef7d10d 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -913,24 +913,6 @@ static inline struct folio *read_mapping_folio(struct address_space *mapping,
 	return read_cache_folio(mapping, index, NULL, file);
 }
 
-/*
- * Get the offset in PAGE_SIZE (even for hugetlb pages).
- */
-static inline pgoff_t page_to_pgoff(struct page *page)
-{
-	struct page *head;
-
-	if (likely(!PageTransTail(page)))
-		return page->index;
-
-	head = compound_head(page);
-	/*
-	 * We don't initialize ->index for tail pages: calculate based on
-	 * head page
-	 */
-	return head->index + page - head;
-}
-
 /*
  * Return byte-offset into filesystem object for page.
  */
diff --git a/mm/internal.h b/mm/internal.h
index e511708b2be0..8dfd9527ac1e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -919,6 +919,12 @@ void mlock_drain_remote(int cpu);
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
+static inline pgoff_t page_pgoff(const struct folio *folio,
+		const struct page *page)
+{
+	return folio->index + folio_page_idx(folio, page);
+}
+
 /**
  * vma_address - Find the virtual address a page range is mapped at
  * @vma: The vma which maps this object.
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 581d3e5c9117..572c742ecf48 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -617,7 +617,7 @@ static void collect_procs_anon(struct folio *folio, struct page *page,
 	if (av == NULL)	/* Not actually mapped anymore */
 		return;
 
-	pgoff = page_to_pgoff(page);
+	pgoff = page_pgoff(folio, page);
 	rcu_read_lock();
 	for_each_process(tsk) {
 		struct vm_area_struct *vma;
@@ -653,7 +653,7 @@ static void collect_procs_file(struct folio *folio, struct page *page,
 
 	i_mmap_lock_read(mapping);
 	rcu_read_lock();
-	pgoff = page_to_pgoff(page);
+	pgoff = page_pgoff(folio, page);
 	for_each_process(tsk) {
 		struct task_struct *t = task_early_kill(tsk, force_early);
 		unsigned long addr;
diff --git a/mm/rmap.c b/mm/rmap.c
index 886bf67ba382..ba1920291ac6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1266,7 +1266,7 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page,
 	 */
 	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
 			folio);
-	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
+	VM_BUG_ON_PAGE(page_pgoff(folio, page) != linear_page_index(vma, address),
 		       page);
 }
 
-- 
2.43.0
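
[Illustrative note, not part of the patch: below is a minimal userspace
sketch of the arithmetic the new page_pgoff() helper performs -- the file
offset of any page in a folio is the folio's index plus the page's position
within the folio.  The struct layouts and the folio_page_idx() stand-in are
simplified assumptions for illustration only, not the real kernel
definitions.]

#include <stdio.h>
#include <stddef.h>

typedef unsigned long pgoff_t;

struct page {
	/* the real struct page is much larger; only its identity matters here */
	int dummy;
};

struct folio {
	pgoff_t index;		/* file offset of the first page, in pages */
	struct page pages[8];	/* toy model of an order-3 (8-page) folio */
};

/* Simplified stand-in for the kernel's folio_page_idx() */
static inline size_t folio_page_idx(const struct folio *folio,
				    const struct page *page)
{
	return (size_t)(page - folio->pages);
}

/* Same shape as the helper this patch adds to mm/internal.h */
static inline pgoff_t page_pgoff(const struct folio *folio,
				 const struct page *page)
{
	return folio->index + folio_page_idx(folio, page);
}

int main(void)
{
	struct folio folio = { .index = 100 };

	/* The third page of a folio mapped at index 100 sits at index 102 */
	printf("%lu\n", page_pgoff(&folio, &folio.pages[2]));
	return 0;
}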