On 1/31/2023 3:24 PM, Huang, Ying wrote:
> Yin Fengwei <fengwei.yin@xxxxxxxxx> writes:
>
>> page_add_file_rmap_range() allows to add specific range of
>> large folio rmap.
>>
>> Signed-off-by: Yin Fengwei <fengwei.yin@xxxxxxxxx>
>> ---
>>  include/linux/rmap.h |  2 ++
>>  mm/rmap.c            | 70 ++++++++++++++++++++++++++++----------------
>>  2 files changed, 46 insertions(+), 26 deletions(-)
>>
>> diff --git a/include/linux/rmap.h b/include/linux/rmap.h
>> index a6bd1f0a183d..063e0addcbf8 100644
>> --- a/include/linux/rmap.h
>> +++ b/include/linux/rmap.h
>> @@ -198,6 +198,8 @@ void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
>>  		unsigned long address);
>>  void page_add_file_rmap(struct page *, struct vm_area_struct *,
>>  		bool compound);
>> +void page_add_file_rmap_range(struct folio *, struct page *, int len,
>> +		struct vm_area_struct *, bool compound);
>>  void page_remove_rmap(struct page *, struct vm_area_struct *,
>>  		bool compound);
>>
>> diff --git a/mm/rmap.c b/mm/rmap.c
>> index 948ca17a96ad..cc7fe3010330 100644
>> --- a/mm/rmap.c
>> +++ b/mm/rmap.c
>> @@ -1301,40 +1301,19 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
>>  	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
>>  }
>>
>> -/**
>> - * page_add_file_rmap - add pte mapping to a file page
>> - * @page:	the page to add the mapping to
>> - * @vma:	the vm area in which the mapping is added
>> - * @compound:	charge the page as compound or small page
>> - *
>> - * The caller needs to hold the pte lock.
>> - */
>> -void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
>> -		bool compound)
>> +void page_add_file_rmap_range(struct folio *folio, struct page *page, int len,
>> +		struct vm_area_struct *vma, bool compound)
>>  {
>> -	struct folio *folio = page_folio(page);
>>  	atomic_t *mapped = &folio->_nr_pages_mapped;
>> -	int nr = 0, nr_pmdmapped = 0;
>> +	int nr = 0, nr_pmdmapped = 0, nr_pages = folio_nr_pages(folio);
>>  	bool first;
>>
>> -	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
>> -
>> -	/* Is page being mapped by PTE? Is this its first map to be added? */
>> -	if (likely(!compound)) {
>> -		first = atomic_inc_and_test(&page->_mapcount);
>> -		nr = first;
>> -		if (first && folio_test_large(folio)) {
>> -			nr = atomic_inc_return_relaxed(mapped);
>> -			nr = (nr < COMPOUND_MAPPED);
>> -		}
>> -	} else if (folio_test_pmd_mappable(folio)) {
>> -		/* That test is redundant: it's for safety or to optimize out */
>> -
>> +	if (compound) {
>>  		first = atomic_inc_and_test(&folio->_entire_mapcount);
>>  		if (first) {
>>  			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
>>  			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
>> -				nr_pmdmapped = folio_nr_pages(folio);
>> +				nr_pmdmapped = nr_pages;
>>  				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
>>  				/* Raced ahead of a remove and another add? */
>>  				if (unlikely(nr < 0))
>> @@ -1344,6 +1323,20 @@ void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
>>  				nr = 0;
>>  			}
>>  		}
>> +	} else {
>> +		int i = 0, new_mapped = 0, count;
>> +
>> +		count = min_t(int, len, nr_pages - folio_page_idx(folio, page));
>
> It appears that count isn't used after assignment.

Oops. count should be used in the loop condition:

	} while (page++, ++i < count);

Or maybe this line can be removed entirely, if we are sure the range
never goes beyond the end of the folio?
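To be concrete, the PTE-mapped branch would then look like this
(untested sketch, only meant to show count replacing len as the loop
bound; otherwise identical to the patch above):

	} else {
		int i = 0, new_mapped = 0, count;

		/* Clamp the range so the walk never leaves the folio. */
		count = min_t(int, len, nr_pages - folio_page_idx(folio, page));
		do {
			first = atomic_inc_and_test(&page->_mapcount);
			new_mapped = first;
			if (first && folio_test_large(folio)) {
				new_mapped = atomic_inc_return_relaxed(mapped);
				new_mapped = (new_mapped < COMPOUND_MAPPED);
			}
			if (new_mapped)
				nr++;
		} while (page++, ++i < count);	/* count, not len */
	}

With the clamp actually applied, a caller passing a len that overshoots
the folio can no longer make the loop touch pages outside it.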
Thanks.

Regards
Yin, Fengwei

>
> Best Regards,
> Huang, Ying
>
>> +		do {
>> +			first = atomic_inc_and_test(&page->_mapcount);
>> +			new_mapped = first;
>> +			if (first && folio_test_large(folio)) {
>> +				new_mapped = atomic_inc_return_relaxed(mapped);
>> +				new_mapped = (new_mapped < COMPOUND_MAPPED);
>> +			}
>> +			if (new_mapped)
>> +				nr++;
>> +		} while (page++, ++i < len);
>>  	}
>>
>>  	if (nr_pmdmapped)
>> @@ -1355,6 +1348,31 @@ void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
>>  	mlock_vma_folio(folio, vma, compound);
>>  }
>>
>> +/**
>> + * page_add_file_rmap - add pte mapping to a file page
>> + * @page:	the page to add the mapping to
>> + * @vma:	the vm area in which the mapping is added
>> + * @compound:	charge the page as compound or small page
>> + *
>> + * The caller needs to hold the pte lock.
>> + */
>> +void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
>> +		bool compound)
>> +{
>> +	struct folio *folio = page_folio(page);
>> +	int nr_pages;
>> +
>> +	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
>> +
>> +	if (likely(!compound))
>> +		nr_pages = 1;
>> +	else
>> +		nr_pages = folio_nr_pages(folio);
>> +
>> +	page_add_file_rmap_range(folio, page, nr_pages, vma, compound);
>> +}
>> +
>>  /**
>>   * page_remove_rmap - take down pte mapping from a page
>>   * @page:	page to remove mapping from