At present, the split counters in THP statistics do not cover PTE-mapped
mTHP, so splits of smaller-than-PMD folios go uncounted. This commit
therefore introduces per-order mTHP split counters to monitor how
frequently mTHP folios are split, which will help developers analyze and
optimize system performance. The following counters are added under the
per-size sysfs stats directory:

/sys/kernel/mm/transparent_hugepage/hugepages-<size>/stats
	split_page
	split_page_failed
	deferred_split_page
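
As a rough illustration (not part of this patch), the new counters can be
read from userspace like any other sysfs stat file. The hugepages-64kB
directory below is only an assumed example; the available <size> entries
depend on the mTHP sizes supported by the running system:

	/* Hypothetical reader for one of the new per-order counters. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long nr_split = 0;
		FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/"
				"hugepages-64kB/stats/split_page", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%llu", &nr_split) == 1)
			printf("64kB mTHP splits: %llu\n", nr_split);
		fclose(f);
		return 0;
	}
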
Signed-off-by: Lance Yang <ioworker0@xxxxxxxxx>
---
 include/linux/huge_mm.h |  3 +++
 mm/huge_memory.c        | 14 ++++++++++++--
 2 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 56c7ea73090b..7b9c6590e1f7 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -272,6 +272,9 @@ enum mthp_stat_item {
 	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
 	MTHP_STAT_ANON_SWPOUT,
 	MTHP_STAT_ANON_SWPOUT_FALLBACK,
+	MTHP_STAT_SPLIT_PAGE,
+	MTHP_STAT_SPLIT_PAGE_FAILED,
+	MTHP_STAT_DEFERRED_SPLIT_PAGE,
 	__MTHP_STAT_COUNT
 };
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 055df5aac7c3..52db888e47a6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -557,6 +557,9 @@ DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
 DEFINE_MTHP_STAT_ATTR(anon_swpout, MTHP_STAT_ANON_SWPOUT);
 DEFINE_MTHP_STAT_ATTR(anon_swpout_fallback, MTHP_STAT_ANON_SWPOUT_FALLBACK);
+DEFINE_MTHP_STAT_ATTR(split_page, MTHP_STAT_SPLIT_PAGE);
+DEFINE_MTHP_STAT_ATTR(split_page_failed, MTHP_STAT_SPLIT_PAGE_FAILED);
+DEFINE_MTHP_STAT_ATTR(deferred_split_page, MTHP_STAT_DEFERRED_SPLIT_PAGE);
 
 static struct attribute *stats_attrs[] = {
 	&anon_fault_alloc_attr.attr,
@@ -564,6 +567,9 @@ static struct attribute *stats_attrs[] = {
 	&anon_fault_fallback_charge_attr.attr,
 	&anon_swpout_attr.attr,
 	&anon_swpout_fallback_attr.attr,
+	&split_page_attr.attr,
+	&split_page_failed_attr.attr,
+	&deferred_split_page_attr.attr,
 	NULL,
 };
 
@@ -3083,7 +3089,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 	XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
 	struct anon_vma *anon_vma = NULL;
 	struct address_space *mapping = NULL;
-	bool is_thp = folio_test_pmd_mappable(folio);
+	int order = folio_order(folio);
 	int extra_pins, ret;
 	pgoff_t end;
 	bool is_hzp;
@@ -3262,8 +3268,10 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 	if (mapping)
 		i_mmap_unlock_read(mapping);
 out:
 	xas_destroy(&xas);
-	if (is_thp)
+	if (order >= HPAGE_PMD_ORDER)
 		count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
+	count_mthp_stat(order, !ret ? MTHP_STAT_SPLIT_PAGE :
+			MTHP_STAT_SPLIT_PAGE_FAILED);
 	return ret;
 }
@@ -3327,6 +3335,8 @@ void deferred_split_folio(struct folio *folio)
 	if (list_empty(&folio->_deferred_list)) {
 		if (folio_test_pmd_mappable(folio))
 			count_vm_event(THP_DEFERRED_SPLIT_PAGE);
+		count_mthp_stat(folio_order(folio),
+				MTHP_STAT_DEFERRED_SPLIT_PAGE);
 		list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
 		ds_queue->split_queue_len++;
 #ifdef CONFIG_MEMCG