> >> I would appreciate if we leave the rmap out here.
> >>
> >> Can't we handle that when actually freeing the folio? folio_test_anon()
> >> is sticky until freed.
> >
> > To be clearer: we increment the counter when we set a folio anon, which
> > should indeed only happen in folio_add_new_anon_rmap(). We'll have to
> > ignore hugetlb here where we do it in hugetlb_add_new_anon_rmap().
> >
> > Then, when we free an anon folio we decrement the counter. (hugetlb
> > should clear the anon flag when an anon folio gets freed back to its
> > allocator -- likely that is already done).
> >
>
> Sorry that I am talking to myself: I'm wondering if we also have to
> adjust the counter when splitting a large folio to multiple
> smaller-but-still-large folios.

Hi David,

The conceptual code is shown below. Does this make more sense to you?
For the split case we have the line

	mod_mthp_stat(new_order, MTHP_STAT_NR_ANON, 1 << (order - new_order));

which accounts for the 1 << (order - new_order) smaller folios produced by
the split, while the counter for the old order is decremented by one.

@@ -3270,8 +3272,9 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
 	/* reset xarray order to new order after split */
 	XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
-	struct anon_vma *anon_vma = NULL;
+	bool is_anon = folio_test_anon(folio);
 	struct address_space *mapping = NULL;
+	struct anon_vma *anon_vma = NULL;
 	int order = folio_order(folio);
 	int extra_pins, ret;
 	pgoff_t end;
@@ -3283,7 +3286,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 	if (new_order >= folio_order(folio))
 		return -EINVAL;
 
-	if (folio_test_anon(folio)) {
+	if (is_anon) {
 		/* order-1 is not supported for anonymous THP. */
 		if (new_order == 1) {
 			VM_WARN_ONCE(1, "Cannot split to order-1 folio");
@@ -3323,7 +3326,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 	if (folio_test_writeback(folio))
 		return -EBUSY;
 
-	if (folio_test_anon(folio)) {
+	if (is_anon) {
 		/*
 		 * The caller does not necessarily hold an mmap_lock that would
 		 * prevent the anon_vma disappearing so we first we take a
@@ -3437,6 +3440,10 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 			}
 		}
 
+		if (is_anon) {
+			mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
+			mod_mthp_stat(new_order, MTHP_STAT_NR_ANON, 1 << (order - new_order));
+		}
 		__split_huge_page(page, list, end, new_order);
 		ret = 0;
 	} else {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 408ef3d25cf5..c869d0601614 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1039,6 +1039,7 @@ __always_inline bool free_pages_prepare(struct page *page,
 	bool skip_kasan_poison = should_skip_kasan_poison(page);
 	bool init = want_init_on_free();
 	bool compound = PageCompound(page);
+	bool anon = PageAnon(page);
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 
@@ -1130,6 +1131,9 @@ __always_inline bool free_pages_prepare(struct page *page,
 
 	debug_pagealloc_unmap_pages(page, 1 << order);
 
+	if (anon && compound)
+		mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
+
 	return true;
 }
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 8d432051e970..982862cbf5ba 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1467,6 +1467,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 	}
 
 	__folio_mod_stat(folio, nr, nr_pmdmapped);
+	mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
 }
 
 static __always_inline void __folio_add_file_rmap(struct folio *folio,
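
Just to double-check the arithmetic in that split hunk, here is a throwaway
userspace sketch (not kernel code; nr_anon[], split_anon_folio() and
MAX_TEST_ORDER are made-up stand-ins for the real per-order
MTHP_STAT_NR_ANON counters): decrementing the old order by one and
incrementing the new order by 1 << (order - new_order) keeps the total
number of accounted base pages constant across a split.

#include <assert.h>
#include <stdio.h>

#define MAX_TEST_ORDER 10

/* stand-in for the per-order MTHP_STAT_NR_ANON counters */
static long nr_anon[MAX_TEST_ORDER + 1];

/* total base pages implied by the per-order folio counts */
static long total_anon_pages(void)
{
	long pages = 0;
	int order;

	for (order = 0; order <= MAX_TEST_ORDER; order++)
		pages += nr_anon[order] << order;
	return pages;
}

/* mirrors the accounting in the split_huge_page_to_list_to_order() hunk */
static void split_anon_folio(int order, int new_order)
{
	nr_anon[order] -= 1;
	nr_anon[new_order] += 1 << (order - new_order);
}

int main(void)
{
	long before;

	nr_anon[9] = 1;			/* one PMD-sized (order-9) anon folio */
	before = total_anon_pages();

	split_anon_folio(9, 4);		/* split into 32 order-4 folios */
	assert(total_anon_pages() == before);

	split_anon_folio(4, 0);		/* split one of those to base pages */
	assert(total_anon_pages() == before);

	printf("base pages accounted: %ld\n", total_anon_pages());
	return 0;
}

Both asserts pass and it prints 512 for the starting order-9 folio, so the
per-order bookkeeping stays consistent across splits.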