Convert the three remaining callers to call vma_pgoff_address() directly.
This removes an ambiguity where we'd check just one page if passed a tail
page and all N pages if passed a head page.

Also add better kernel-doc for vma_pgoff_address().

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 mm/internal.h | 23 ++++++++---------------
 mm/rmap.c     | 12 +++++++++---
 2 files changed, 17 insertions(+), 18 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 8e11f7b2da21..e312cb9f7368 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -803,9 +803,14 @@ void mlock_drain_remote(int cpu);
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
-/*
- * Return the start of user virtual address at the specific offset within
- * a vma.
+/**
+ * vma_pgoff_address - Find the virtual address a page range is mapped at
+ * @pgoff: The page offset within its object.
+ * @nr_pages: The number of pages to consider.
+ * @vma: The vma which maps this object.
+ *
+ * If any page in this range is mapped by this VMA, return the first address
+ * where any of these pages appear. Otherwise, return -EFAULT.
  */
 static inline unsigned long
 vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
@@ -828,18 +833,6 @@ vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
 	return address;
 }
 
-/*
- * Return the start of user virtual address of a page within a vma.
- * Returns -EFAULT if all of the page is outside the range of vma.
- * If page is a compound head, the entire compound page is considered.
- */
-static inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
-{
-	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
-	return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
-}
-
 /*
  * Then at what user virtual address will none of the range be found in vma?
  * Assumes that vma_address() already returned a good starting address.
diff --git a/mm/rmap.c b/mm/rmap.c
index 5ee9e338d09b..4b08b1a06688 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -775,6 +775,8 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
 	struct folio *folio = page_folio(page);
+	pgoff_t pgoff;
+
 	if (folio_test_anon(folio)) {
 		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
 		/*
@@ -790,7 +792,9 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 		return -EFAULT;
 	}
 
-	return vma_address(page, vma);
+	/* The !page__anon_vma above handles KSM folios */
+	pgoff = folio->index + folio_page_idx(folio, page);
+	return vma_pgoff_address(pgoff, 1, vma);
 }
 
 /*
@@ -2588,7 +2592,8 @@ static void rmap_walk_anon(struct folio *folio,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
 			pgoff_start, pgoff_end) {
 		struct vm_area_struct *vma = avc->vma;
-		unsigned long address = vma_address(&folio->page, vma);
+		unsigned long address = vma_pgoff_address(pgoff_start,
+				folio_nr_pages(folio), vma);
 
 		VM_BUG_ON_VMA(address == -EFAULT, vma);
 		cond_resched();
@@ -2649,7 +2654,8 @@ static void rmap_walk_file(struct folio *folio,
 lookup:
 	vma_interval_tree_foreach(vma, &mapping->i_mmap,
 			pgoff_start, pgoff_end) {
-		unsigned long address = vma_address(&folio->page, vma);
+		unsigned long address = vma_pgoff_address(pgoff_start,
+				folio_nr_pages(folio), vma);
 
 		VM_BUG_ON_VMA(address == -EFAULT, vma);
 		cond_resched();
-- 
2.43.0
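
A note on the ambiguity fixed above: vma_address() passed compound_nr(page)
to vma_pgoff_address(), and compound_nr() returns 1 for a tail page, so a
tail page asked about only itself while a head page asked about all N pages
of the compound page. The userspace sketch below illustrates why that
matters, following the shape of the vma_pgoff_address() body that is elided
between the two mm/internal.h hunks; the vma_stub type, the PAGE_SHIFT
value, and the main() harness are illustrative stand-ins, not kernel code.

	#include <errno.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12	/* illustrative; arch-dependent in the kernel */

	/* Stand-in for the three struct vm_area_struct fields this math uses. */
	struct vma_stub {
		unsigned long vm_start;	/* first user virtual address mapped */
		unsigned long vm_end;	/* one byte past the last mapped address */
		unsigned long vm_pgoff;	/* offset into the object, in pages */
	};

	/*
	 * Sketch of vma_pgoff_address(): if any page in the range
	 * [pgoff, pgoff + nr_pages) is mapped by @vma, return the first
	 * address at which one of those pages appears; else -EFAULT.
	 */
	static unsigned long vma_pgoff_address(unsigned long pgoff,
			unsigned long nr_pages, const struct vma_stub *vma)
	{
		unsigned long address;

		if (pgoff >= vma->vm_pgoff) {
			/* Range starts at or after the VMA's first page. */
			address = vma->vm_start +
				  ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
			if (address < vma->vm_start || address >= vma->vm_end)
				address = -EFAULT;
		} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
			/* Range starts before the VMA but overlaps its head. */
			address = vma->vm_start;
		} else {
			address = -EFAULT;
		}
		return address;
	}

	int main(void)
	{
		/* A VMA mapping pages 8..15 of its object at 0x100000. */
		struct vma_stub vma = { 0x100000, 0x108000, 8 };

		/*
		 * A 4-page folio at pgoff 6: pages 6 and 7 miss the VMA,
		 * but pages 8 and 9 are mapped, so the walk starts at
		 * vm_start. This is what passing the head page asked.
		 */
		printf("head: %lx\n", vma_pgoff_address(6, 4, &vma));

		/*
		 * Asking about a single page at pgoff 6 (what passing a
		 * tail page asked) misses the VMA entirely: -EFAULT.
		 */
		printf("tail: %lx\n", vma_pgoff_address(6, 1, &vma));
		return 0;
	}

With the conversion, all three callers now state the range explicitly
(the whole folio for the rmap walks, exactly one page for
page_address_in_vma()), so the answer no longer depends on whether a head
or tail page happened to be passed in.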