Hi Baolin,

Thanks for taking the time to review!

On Mon, Jul 1, 2024 at 10:23 AM Baolin Wang
<baolin.wang@xxxxxxxxxxxxxxxxx> wrote:
>
> On 2024/7/1 08:02, Barry Song wrote:
> > On Sat, Jun 29, 2024 at 1:09 AM Lance Yang <ioworker0@xxxxxxxxx> wrote:
> >>
> >> Currently, the split counters in THP statistics no longer include
> >> PTE-mapped mTHP. Therefore, we propose introducing per-order mTHP split
> >> counters to monitor the frequency of mTHP splits. This will help developers
> >> better analyze and optimize system performance.
> >>
> >> /sys/kernel/mm/transparent_hugepage/hugepages-<size>/stats
> >>         split
> >>         split_failed
> >>         split_deferred
> >>
> >> Signed-off-by: Mingzhe Yang <mingzhe.yang@xxxxxx>
> >> Signed-off-by: Lance Yang <ioworker0@xxxxxxxxx>
> >
> > Personally, I'm not convinced that using a temporary variable 'order'
> > makes the code more readable. Functions like folio_test_pmd_mappable()
> > seem more readable to me. In any case, it's a minor issue.
>
> Yes, I have the same opinion as Barry. With that:
> Reviewed-by: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>

Thanks again for your opinion!
Lance

>
> > Acked-by: Barry Song <baohua@xxxxxxxxxx>
> >
> >> ---
> >>  include/linux/huge_mm.h |  3 +++
> >>  mm/huge_memory.c        | 19 ++++++++++++++-----
> >>  2 files changed, 17 insertions(+), 5 deletions(-)
> >>
> >> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> >> index 212cca384d7e..cee3c5da8f0e 100644
> >> --- a/include/linux/huge_mm.h
> >> +++ b/include/linux/huge_mm.h
> >> @@ -284,6 +284,9 @@ enum mthp_stat_item {
> >>         MTHP_STAT_FILE_ALLOC,
> >>         MTHP_STAT_FILE_FALLBACK,
> >>         MTHP_STAT_FILE_FALLBACK_CHARGE,
> >> +       MTHP_STAT_SPLIT,
> >> +       MTHP_STAT_SPLIT_FAILED,
> >> +       MTHP_STAT_SPLIT_DEFERRED,
> >>         __MTHP_STAT_COUNT
> >>  };
> >>
> >> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> >> index c7ce28f6b7f3..a633206375af 100644
> >> --- a/mm/huge_memory.c
> >> +++ b/mm/huge_memory.c
> >> @@ -559,6 +559,9 @@ DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
> >>  DEFINE_MTHP_STAT_ATTR(file_alloc, MTHP_STAT_FILE_ALLOC);
> >>  DEFINE_MTHP_STAT_ATTR(file_fallback, MTHP_STAT_FILE_FALLBACK);
> >>  DEFINE_MTHP_STAT_ATTR(file_fallback_charge, MTHP_STAT_FILE_FALLBACK_CHARGE);
> >> +DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
> >> +DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
> >> +DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
> >>
> >>  static struct attribute *stats_attrs[] = {
> >>         &anon_fault_alloc_attr.attr,
> >> @@ -569,6 +572,9 @@ static struct attribute *stats_attrs[] = {
> >>         &file_alloc_attr.attr,
> >>         &file_fallback_attr.attr,
> >>         &file_fallback_charge_attr.attr,
> >> +       &split_attr.attr,
> >> +       &split_failed_attr.attr,
> >> +       &split_deferred_attr.attr,
> >>         NULL,
> >>  };
> >>
> >> @@ -3068,7 +3074,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
> >>         XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
> >>         struct anon_vma *anon_vma = NULL;
> >>         struct address_space *mapping = NULL;
> >> -       bool is_thp = folio_test_pmd_mappable(folio);
> >> +       int order = folio_order(folio);
> >>         int extra_pins, ret;
> >>         pgoff_t end;
> >>         bool is_hzp;
> >> @@ -3076,7 +3082,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
> >>         VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
> >>         VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
> >>
> >> -       if (new_order >= folio_order(folio))
> >> +       if (new_order >= order)
> >>                 return -EINVAL;
> >>
> >>         if (folio_test_anon(folio)) {
> >> @@ -3253,8 +3259,9 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
> >>                 i_mmap_unlock_read(mapping);
> >>  out:
> >>         xas_destroy(&xas);
> >> -       if (is_thp)
> >> +       if (order >= HPAGE_PMD_ORDER)
> >>                 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
> >> +       count_mthp_stat(order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
> >>         return ret;
> >>  }
> >>
> >> @@ -3278,13 +3285,14 @@ void deferred_split_folio(struct folio *folio)
> >>  #ifdef CONFIG_MEMCG
> >>         struct mem_cgroup *memcg = folio_memcg(folio);
> >>  #endif
> >> +       int order = folio_order(folio);
> >>         unsigned long flags;
> >>
> >>         /*
> >>          * Order 1 folios have no space for a deferred list, but we also
> >>          * won't waste much memory by not adding them to the deferred list.
> >>          */
> >> -       if (folio_order(folio) <= 1)
> >> +       if (order <= 1)
> >>                 return;
> >>
> >>         /*
> >> @@ -3305,8 +3313,9 @@ void deferred_split_folio(struct folio *folio)
> >>
> >>         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
> >>         if (list_empty(&folio->_deferred_list)) {
> >> -               if (folio_test_pmd_mappable(folio))
> >> +               if (order >= HPAGE_PMD_ORDER)
> >>                         count_vm_event(THP_DEFERRED_SPLIT_PAGE);
> >> +               count_mthp_stat(order, MTHP_STAT_SPLIT_DEFERRED);
> >>                 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
> >>                 ds_queue->split_queue_len++;
> >>  #ifdef CONFIG_MEMCG
> >> --
> >> 2.45.2
> >>
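
In case it helps anyone exercising the series: below is a quick userspace
sketch for dumping the three new counters across every enabled order. To be
clear, the program is purely illustrative and not part of the patch; the file
name mthp_split_stats.c and the read_counter() helper are made up for the
example, and the only thing taken from the patch is the sysfs layout quoted
in the commit message above.

/*
 * mthp_split_stats.c - print the per-order mTHP split counters
 *
 * A minimal sketch, not part of the patch. It only assumes the sysfs
 * layout from the commit message:
 *   /sys/kernel/mm/transparent_hugepage/hugepages-<size>/stats/split
 *   /sys/kernel/mm/transparent_hugepage/hugepages-<size>/stats/split_failed
 *   /sys/kernel/mm/transparent_hugepage/hugepages-<size>/stats/split_deferred
 */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

#define THP_SYSFS "/sys/kernel/mm/transparent_hugepage"

/* Read one decimal counter file; return -1 if it cannot be read. */
static long long read_counter(const char *dir, const char *name)
{
        char path[512];
        long long val = -1;
        FILE *f;

        snprintf(path, sizeof(path), "%s/%s/stats/%s", THP_SYSFS, dir, name);
        f = fopen(path, "r");
        if (!f)
                return -1;
        if (fscanf(f, "%lld", &val) != 1)
                val = -1;
        fclose(f);
        return val;
}

int main(void)
{
        struct dirent *de;
        DIR *d = opendir(THP_SYSFS);

        if (!d) {
                perror(THP_SYSFS);
                return 1;
        }

        printf("%-20s %12s %12s %15s\n", "size", "split", "split_failed",
               "split_deferred");

        while ((de = readdir(d)) != NULL) {
                /* Each enabled order shows up as a hugepages-<size>kB dir. */
                if (strncmp(de->d_name, "hugepages-", 10) != 0)
                        continue;

                printf("%-20s %12lld %12lld %15lld\n", de->d_name,
                       read_counter(de->d_name, "split"),
                       read_counter(de->d_name, "split_failed"),
                       read_counter(de->d_name, "split_deferred"));
        }

        closedir(d);
        return 0;
}

Build with "cc -o mthp_split_stats mthp_split_stats.c" and run it while a
workload is splitting folios. Per the diff above, split counts splits that
succeeded, split_failed counts attempts that returned an error, and
split_deferred counts folios queued on the deferred split list.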