Like folio_add_new_anon_rmap() but batch-rmaps all the pages belonging
to a folio, for efficiency savings.

Signed-off-by: Ryan Roberts <ryan.roberts@xxxxxxx>
---
 include/linux/rmap.h |  2 ++
 mm/rmap.c            | 42 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 44 insertions(+)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b87d01660412..d1d731650ce8 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -196,6 +196,8 @@ void page_add_new_anon_rmap(struct page *,
 		struct vm_area_struct *, unsigned long address);
 void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
 		unsigned long address);
+void folio_add_new_anon_rmap_range(struct folio *folio,
+		struct vm_area_struct *vma, unsigned long address);
 void page_add_file_rmap(struct page *, struct vm_area_struct *,
 		bool compound);
 void page_remove_rmap(struct page *, struct vm_area_struct *,
diff --git a/mm/rmap.c b/mm/rmap.c
index 8632e02661ac..05a0c0a700e7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1302,6 +1302,48 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
 }
 
+/**
+ * folio_add_new_anon_rmap_range - Add mapping to a new anonymous potentially
+ * large but definitely non-THP folio.
+ * @folio: The folio to add the mapping to.
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address of the first page in the folio
+ *
+ * Like folio_add_new_anon_rmap() but must only be called for new *non-THP*
+ * folios. Like folio_add_new_anon_rmap(), the inc-and-test is bypassed and the
+ * folio does not have to be locked. All pages in the folio are individually
+ * accounted.
+ *
+ * As the folio is new, it's assumed to be mapped exclusively by a single
+ * process.
+ */
+void folio_add_new_anon_rmap_range(struct folio *folio,
+		struct vm_area_struct *vma, unsigned long address)
+{
+	int i;
+	int nr = folio_nr_pages(folio);
+	struct page *page = &folio->page;
+
+	VM_BUG_ON_VMA(address < vma->vm_start ||
+		      address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
+	__folio_set_swapbacked(folio);
+
+	if (folio_test_large(folio)) {
+		/* increment count (starts at 0) */
+		atomic_set(&folio->_nr_pages_mapped, nr);
+	}
+
+	for (i = 0; i < nr; i++) {
+		/* increment count (starts at -1) */
+		atomic_set(&page->_mapcount, 0);
+		__page_set_anon_rmap(folio, page, vma, address, 1);
+		page++;
+		address += PAGE_SIZE;
+	}
+
+	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
+}
+
 /**
  * page_add_file_rmap - add pte mapping to a file page
  * @page: the page to add the mapping to
-- 
2.25.1