All users of page_add_anon_rmap() are gone; remove it and all traces.

Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
---
 include/linux/rmap.h |  2 --
 mm/rmap.c            | 31 ++++---------------------------
 2 files changed, 4 insertions(+), 29 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 9e1c197f50199..865d83148852d 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -235,8 +235,6 @@ void folio_add_anon_rmap_ptes(struct folio *, struct page *, unsigned int nr,
 	folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)
 void folio_add_anon_rmap_pmd(struct folio *, struct page *,
 		struct vm_area_struct *, unsigned long address, rmap_t flags);
-void page_add_anon_rmap(struct page *, struct vm_area_struct *,
-		unsigned long address, rmap_t flags);
 void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long address);
 void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
diff --git a/mm/rmap.c b/mm/rmap.c
index 85bea11e9266b..4cb9d8b7d1d65 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1238,7 +1238,7 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page,
 	 * The page's anon-rmap details (mapping and index) are guaranteed to
 	 * be set up correctly at this point.
 	 *
-	 * We have exclusion against page_add_anon_rmap because the caller
+	 * We have exclusion against folio_add_anon_rmap_*() because the caller
 	 * always holds the page locked.
 	 *
 	 * We have exclusion against page_add_new_anon_rmap because those pages
@@ -1251,29 +1251,6 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page,
 		       page);
 }
 
-/**
- * page_add_anon_rmap - add pte mapping to an anonymous page
- * @page:	the page to add the mapping to
- * @vma:	the vm area in which the mapping is added
- * @address:	the user virtual address mapped
- * @flags:	the rmap flags
- *
- * The caller needs to hold the pte lock, and the page must be locked in
- * the anon_vma case: to serialize mapping,index checking after setting,
- * and to ensure that PageAnon is not being upgraded racily to PageKsm
- * (but PageKsm is never downgraded to PageAnon).
- */
-void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
-		unsigned long address, rmap_t flags)
-{
-	struct folio *folio = page_folio(page);
-
-	if (likely(!(flags & RMAP_COMPOUND)))
-		folio_add_anon_rmap_pte(folio, page, vma, address, flags);
-	else
-		folio_add_anon_rmap_pmd(folio, page, vma, address, flags);
-}
-
 static __always_inline void __folio_add_anon_rmap(struct folio *folio,
 		struct page *page, unsigned int nr_pages,
 		struct vm_area_struct *vma, unsigned long address, rmap_t flags,
@@ -1384,7 +1361,7 @@ void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page,
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
  *
- * Like page_add_anon_rmap() but must only be called on *new* folios.
+ * Like folio_add_anon_rmap_*() but must only be called on *new* folios.
  * This means the inc-and-test can be bypassed.
  * The folio does not have to be locked.
  *
@@ -1432,7 +1409,7 @@ static __always_inline void __folio_add_file_rmap(struct folio *folio,
 	if (nr)
 		__lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr);
 
-	/* See comments in page_add_anon_rmap() */
+	/* See comments in folio_add_anon_rmap_*() */
 	if (!folio_test_large(folio))
 		mlock_vma_folio(folio, vma);
 }
@@ -1546,7 +1523,7 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
 
 	/*
 	 * It would be tidy to reset folio_test_anon mapping when fully
-	 * unmapped, but that might overwrite a racing page_add_anon_rmap
+	 * unmapped, but that might overwrite a racing folio_add_anon_rmap_*()
 	 * which increments mapcount after us but sets mapping before us:
 	 * so leave the reset to free_pages_prepare, and remember that
 	 * it's only reliable while mapped.
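
For anyone carrying out-of-tree callers of the removed interface: a minimal
conversion sketch, mirroring the dispatch the removed wrapper performed (the
variable names are illustrative, not taken from any real call site):

	struct folio *folio = page_folio(page);

	/* Old interface (removed by this patch): */
	page_add_anon_rmap(page, vma, address, flags);

	/* Equivalent open-coded replacement on the folio interface: */
	if (likely(!(flags & RMAP_COMPOUND)))
		folio_add_anon_rmap_pte(folio, page, vma, address, flags);
	else
		folio_add_anon_rmap_pmd(folio, page, vma, address, flags);

Callers that statically know the mapping granularity can call
folio_add_anon_rmap_pte() or folio_add_anon_rmap_pmd() directly and drop the
RMAP_COMPOUND test entirely.

-- 
2.41.0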