Add a NUMA_PAGE_PROMOTE vmstat counter (shown as "numa_pages_promoted") to account pages promoted by NUMA balancing, i.e. pages migrated from a node outside def_alloc_nodemask to a node inside it. Signed-off-by: Yang Shi <yang.shi@xxxxxxxxxxxxxxxxx> --- include/linux/vm_event_item.h | 1 + mm/huge_memory.c | 4 ++++ mm/memory.c | 4 ++++ mm/vmstat.c | 1 + 4 files changed, 10 insertions(+) diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 499a3aa..9f52a62 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -51,6 +51,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, NUMA_HINT_FAULTS, NUMA_HINT_FAULTS_LOCAL, NUMA_PAGE_MIGRATE, + NUMA_PAGE_PROMOTE, #endif #ifdef CONFIG_MIGRATION PGMIGRATE_SUCCESS, PGMIGRATE_FAIL, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 8268a3c..9d5f5ce 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1607,6 +1607,10 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma, vmf->pmd, pmd, vmf->address, page, target_nid); if (migrated) { + if (!node_isset(page_nid, def_alloc_nodemask) && + node_isset(target_nid, def_alloc_nodemask)) + count_vm_numa_events(NUMA_PAGE_PROMOTE, HPAGE_PMD_NR); + flags |= TNF_MIGRATED; page_nid = target_nid; } else diff --git a/mm/memory.c b/mm/memory.c index 2494c11..554191b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3691,6 +3691,10 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) /* Migrate to the requested node */ migrated = migrate_misplaced_page(page, vma, target_nid); if (migrated) { + if (!node_isset(page_nid, def_alloc_nodemask) && + node_isset(target_nid, def_alloc_nodemask)) + count_vm_numa_event(NUMA_PAGE_PROMOTE); + page_nid = target_nid; flags |= TNF_MIGRATED; } else diff --git a/mm/vmstat.c b/mm/vmstat.c index 0e863e7..4b44fc8 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1220,6 +1220,7 @@ int fragmentation_index(struct zone *zone, unsigned int order) "numa_hint_faults", "numa_hint_faults_local", "numa_pages_migrated", + "numa_pages_promoted", #endif #ifdef CONFIG_MIGRATION 
"pgmigrate_success", -- 1.8.3.1