The patch titled
     Subject: mm, workingset: make working set detection node-aware
has been added to the -mm tree.  Its filename is
     mm-workingset-make-working-set-detection-node-aware.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-workingset-make-working-set-detection-node-aware.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-workingset-make-working-set-detection-node-aware.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/SubmitChecklist when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Subject: mm, workingset: make working set detection node-aware

Working set and refault detection is still zone-based, fix it.

Link: http://lkml.kernel.org/r/1467970510-21195-16-git-send-email-mgorman@xxxxxxxxxxxxxxxxxxx
Signed-off-by: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Acked-by: Johannes Weiner <hannes@xxxxxxxxxxx>
Acked-by: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Hillf Danton <hillf.zj@xxxxxxxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: Rik van Riel <riel@xxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/mmzone.h |    6 ++---
 include/linux/vmstat.h |    1 
 mm/vmstat.c            |   20 ++----------------
 mm/workingset.c        |   43 ++++++++++++++++++---------------------
 4 files changed, 26 insertions(+), 44 deletions(-)

diff -puN include/linux/mmzone.h~mm-workingset-make-working-set-detection-node-aware include/linux/mmzone.h
--- a/include/linux/mmzone.h~mm-workingset-make-working-set-detection-node-aware
+++ a/include/linux/mmzone.h
@@ -145,9 +145,6 @@ enum zone_stat_item {
 	NUMA_LOCAL,		/* allocation from local node */
 	NUMA_OTHER,		/* allocation from other node */
 #endif
-	WORKINGSET_REFAULT,
-	WORKINGSET_ACTIVATE,
-	WORKINGSET_NODERECLAIM,
 	NR_ANON_THPS,
 	NR_SHMEM_THPS,
 	NR_SHMEM_PMDMAPPED,
@@ -164,6 +161,9 @@ enum node_stat_item {
 	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
 	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
+	WORKINGSET_REFAULT,
+	WORKINGSET_ACTIVATE,
+	WORKINGSET_NODERECLAIM,
 	NR_VM_NODE_STAT_ITEMS
 };
 
diff -puN include/linux/vmstat.h~mm-workingset-make-working-set-detection-node-aware include/linux/vmstat.h
--- a/include/linux/vmstat.h~mm-workingset-make-working-set-detection-node-aware
+++ a/include/linux/vmstat.h
@@ -227,7 +227,6 @@ void mod_node_page_state(struct pglist_d
 void inc_node_page_state(struct page *, enum node_stat_item);
 void dec_node_page_state(struct page *, enum node_stat_item);
 
-extern void inc_zone_state(struct zone *, enum zone_stat_item);
 extern void inc_node_state(struct pglist_data *, enum node_stat_item);
 extern void __inc_zone_state(struct zone *, enum zone_stat_item);
 extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
diff -puN mm/vmstat.c~mm-workingset-make-working-set-detection-node-aware mm/vmstat.c
--- a/mm/vmstat.c~mm-workingset-make-working-set-detection-node-aware
+++ a/mm/vmstat.c
@@ -446,11 +446,6 @@ void mod_zone_page_state(struct zone *zo
 }
 EXPORT_SYMBOL(mod_zone_page_state);
 
-void inc_zone_state(struct zone *zone, enum zone_stat_item item)
-{
-	mod_zone_state(zone, item, 1, 1);
-}
-
 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 {
 	mod_zone_state(page_zone(page), item, 1, 1);
@@ -539,15 +534,6 @@ void mod_zone_page_state(struct zone *zo
 }
 EXPORT_SYMBOL(mod_zone_page_state);
 
-void inc_zone_state(struct zone *zone, enum zone_stat_item item)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__inc_zone_state(zone, item);
-	local_irq_restore(flags);
-}
-
 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 {
 	unsigned long flags;
@@ -967,9 +953,6 @@ const char * const vmstat_text[] = {
 	"numa_local",
 	"numa_other",
 #endif
-	"workingset_refault",
-	"workingset_activate",
-	"workingset_nodereclaim",
 	"nr_anon_transparent_hugepages",
 	"nr_shmem_hugepages",
 	"nr_shmem_pmdmapped",
@@ -984,6 +967,9 @@ const char * const vmstat_text[] = {
 	"nr_isolated_anon",
 	"nr_isolated_file",
 	"nr_pages_scanned",
+	"workingset_refault",
+	"workingset_activate",
+	"workingset_nodereclaim",
 
 	/* enum writeback_stat_item counters */
 	"nr_dirty_threshold",
diff -puN mm/workingset.c~mm-workingset-make-working-set-detection-node-aware mm/workingset.c
--- a/mm/workingset.c~mm-workingset-make-working-set-detection-node-aware
+++ a/mm/workingset.c
@@ -16,7 +16,7 @@
 /*
  * Double CLOCK lists
  *
- * Per zone, two clock lists are maintained for file pages: the
+ * Per node, two clock lists are maintained for file pages: the
  * inactive and the active list.  Freshly faulted pages start out at
  * the head of the inactive list and page reclaim scans pages from the
  * tail.  Pages that are accessed multiple times on the inactive list
@@ -141,11 +141,11 @@
  *
  *		Implementation
  *
- * For each zone's file LRU lists, a counter for inactive evictions
- * and activations is maintained (zone->inactive_age).
+ * For each node's file LRU lists, a counter for inactive evictions
+ * and activations is maintained (node->inactive_age).
  *
  * On eviction, a snapshot of this counter (along with some bits to
- * identify the zone) is stored in the now empty page cache radix tree
+ * identify the node) is stored in the now empty page cache radix tree
  * slot of the evicted page.  This is called a shadow entry.
  *
  * On cache misses for which there are shadow entries, an eligible
@@ -153,7 +153,7 @@
  */
 
 #define EVICTION_SHIFT	(RADIX_TREE_EXCEPTIONAL_ENTRY +	\
-			 ZONES_SHIFT + NODES_SHIFT +	\
+			 NODES_SHIFT +	\
			 MEM_CGROUP_ID_SHIFT)
 #define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)
 
@@ -167,33 +167,30 @@
  */
 static unsigned int bucket_order __read_mostly;
 
-static void *pack_shadow(int memcgid, struct zone *zone, unsigned long eviction)
+static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction)
 {
 	eviction >>= bucket_order;
 	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
-	eviction = (eviction << NODES_SHIFT) | zone_to_nid(zone);
-	eviction = (eviction << ZONES_SHIFT) | zone_idx(zone);
+	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
 	eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);
 
 	return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
 }
 
-static void unpack_shadow(void *shadow, int *memcgidp, struct zone **zonep,
+static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp)
 {
 	unsigned long entry = (unsigned long)shadow;
-	int memcgid, nid, zid;
+	int memcgid, nid;
 
 	entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
-	zid = entry & ((1UL << ZONES_SHIFT) - 1);
-	entry >>= ZONES_SHIFT;
 	nid = entry & ((1UL << NODES_SHIFT) - 1);
 	entry >>= NODES_SHIFT;
 	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
 	entry >>= MEM_CGROUP_ID_SHIFT;
 
 	*memcgidp = memcgid;
-	*zonep = NODE_DATA(nid)->node_zones + zid;
+	*pgdat = NODE_DATA(nid);
 	*evictionp = entry << bucket_order;
 }
 
@@ -208,7 +205,7 @@ static void unpack_shadow(void *shadow,
 void *workingset_eviction(struct address_space *mapping, struct page *page)
 {
 	struct mem_cgroup *memcg = page_memcg(page);
-	struct zone *zone = page_zone(page);
+	struct pglist_data *pgdat = page_pgdat(page);
 	int memcgid = mem_cgroup_id(memcg);
 	unsigned long eviction;
 	struct lruvec *lruvec;
@@ -218,9 +215,9 @@ void *workingset_eviction(struct address
 	VM_BUG_ON_PAGE(page_count(page), page);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 
-	lruvec = mem_cgroup_lruvec(zone->zone_pgdat, memcg);
+	lruvec = mem_cgroup_lruvec(pgdat, memcg);
 	eviction = atomic_long_inc_return(&lruvec->inactive_age);
-	return pack_shadow(memcgid, zone, eviction);
+	return pack_shadow(memcgid, pgdat, eviction);
 }
 
 /**
@@ -228,7 +225,7 @@ void *workingset_eviction(struct address
  * @shadow: shadow entry of the evicted page
  *
  * Calculates and evaluates the refault distance of the previously
- * evicted page in the context of the zone it was allocated in.
+ * evicted page in the context of the node it was allocated in.
  *
  * Returns %true if the page should be activated, %false otherwise.
  */
@@ -240,10 +237,10 @@ bool workingset_refault(void *shadow)
 	unsigned long eviction;
 	struct lruvec *lruvec;
 	unsigned long refault;
-	struct zone *zone;
+	struct pglist_data *pgdat;
 	int memcgid;
 
-	unpack_shadow(shadow, &memcgid, &zone, &eviction);
+	unpack_shadow(shadow, &memcgid, &pgdat, &eviction);
 
 	rcu_read_lock();
 	/*
@@ -267,7 +264,7 @@ bool workingset_refault(void *shadow)
 		rcu_read_unlock();
 		return false;
 	}
-	lruvec = mem_cgroup_lruvec(zone->zone_pgdat, memcg);
+	lruvec = mem_cgroup_lruvec(pgdat, memcg);
 	refault = atomic_long_read(&lruvec->inactive_age);
 	active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE);
 	rcu_read_unlock();
@@ -290,10 +287,10 @@ bool workingset_refault(void *shadow)
 	 */
 	refault_distance = (refault - eviction) & EVICTION_MASK;
 
-	inc_zone_state(zone, WORKINGSET_REFAULT);
+	inc_node_state(pgdat, WORKINGSET_REFAULT);
 
 	if (refault_distance <= active_file) {
-		inc_zone_state(zone, WORKINGSET_ACTIVATE);
+		inc_node_state(pgdat, WORKINGSET_ACTIVATE);
 		return true;
 	}
 	return false;
@@ -436,7 +433,7 @@ static enum lru_status shadow_lru_isolat
 		}
 	}
 	BUG_ON(node->count);
-	inc_zone_state(page_zone(virt_to_page(node)), WORKINGSET_NODERECLAIM);
+	inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
 	if (!__radix_tree_delete_node(&mapping->page_tree, node))
 		BUG();
_
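For reference (this note and the sketch below are not part of the patch), the
shadow entry layout after this change can be illustrated with a small
standalone C program that mimics what pack_shadow()/unpack_shadow() do above:
with the zone bits dropped, a shadow entry is just the eviction counter
stacked on the memcg id and the node id, plus the radix tree exception bits.
The shift/flag values and the main() driver below are made-up placeholders
(the real constants are config-dependent), and the bucket_order scaling is
omitted.

/* Illustrative userspace sketch only -- not the kernel implementation. */
#include <assert.h>
#include <stdio.h>

#define RADIX_TREE_EXCEPTIONAL_ENTRY	1UL	/* assumed flag value */
#define RADIX_TREE_EXCEPTIONAL_SHIFT	2	/* assumed */
#define NODES_SHIFT			6	/* assumed: up to 64 nodes */
#define MEM_CGROUP_ID_SHIFT		16	/* assumed */

/* Pack eviction counter, memcg id and node id -- no zone bits anymore. */
static unsigned long pack_shadow(int memcgid, int nid, unsigned long eviction)
{
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | nid;
	eviction <<= RADIX_TREE_EXCEPTIONAL_SHIFT;

	return eviction | RADIX_TREE_EXCEPTIONAL_ENTRY;
}

/* Reverse the packing: strip the exception bits, then peel off each field. */
static void unpack_shadow(unsigned long entry, int *memcgid, int *nid,
			  unsigned long *eviction)
{
	entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
	*nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	*memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;
	*eviction = entry;
}

int main(void)
{
	/* Round-trip a sample entry; assumes 64-bit unsigned long. */
	unsigned long shadow = pack_shadow(42, 3, 123456);
	unsigned long eviction;
	int memcgid, nid;

	unpack_shadow(shadow, &memcgid, &nid, &eviction);
	printf("memcg=%d node=%d eviction=%lu\n", memcgid, nid, eviction);
	assert(memcgid == 42 && nid == 3 && eviction == 123456);
	return 0;
}

Because ZONES_SHIFT no longer participates in EVICTION_SHIFT, the eviction
counter keeps a few more distinguishing bits, and the refault, activation and
nodereclaim events are accounted per node via inc_node_state() through the
node_stat_item counters moved above.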
Patches currently in -mm which might be from mgorman@xxxxxxxxxxxxxxxxxxx are

mm-meminit-always-return-a-valid-node-from-early_pfn_to_nid.patch
mm-meminit-ensure-node-is-online-before-checking-whether-pages-are-uninitialised.patch
mm-meminit-remove-early_page_nid_uninitialised.patch
mm-vmstat-add-infrastructure-for-per-node-vmstats.patch
mm-vmscan-move-lru_lock-to-the-node.patch
mm-vmscan-move-lru-lists-to-node.patch
mm-mmzone-clarify-the-usage-of-zone-padding.patch
mm-vmscan-begin-reclaiming-pages-on-a-per-node-basis.patch
mm-vmscan-have-kswapd-only-scan-based-on-the-highest-requested-zone.patch
mm-vmscan-make-kswapd-reclaim-in-terms-of-nodes.patch
mm-vmscan-remove-balance-gap.patch
mm-vmscan-simplify-the-logic-deciding-whether-kswapd-sleeps.patch
mm-vmscan-by-default-have-direct-reclaim-only-shrink-once-per-node.patch
mm-vmscan-remove-duplicate-logic-clearing-node-congestion-and-dirty-state.patch
mm-vmscan-do-not-reclaim-from-kswapd-if-there-is-any-eligible-zone.patch
mm-vmscan-make-shrink_node-decisions-more-node-centric.patch
mm-memcg-move-memcg-limit-enforcement-from-zones-to-nodes.patch
mm-workingset-make-working-set-detection-node-aware.patch
mm-page_alloc-consider-dirtyable-memory-in-terms-of-nodes.patch
mm-move-page-mapped-accounting-to-the-node.patch
mm-rename-nr_anon_pages-to-nr_anon_mapped.patch
mm-move-most-file-based-accounting-to-the-node.patch
mm-move-vmscan-writes-and-file-write-accounting-to-the-node.patch
mm-vmscan-only-wakeup-kswapd-once-per-node-for-the-requested-classzone.patch
mm-page_alloc-wake-kswapd-based-on-the-highest-eligible-zone.patch
mm-convert-zone_reclaim-to-node_reclaim.patch
mm-vmscan-avoid-passing-in-classzone_idx-unnecessarily-to-shrink_node.patch
mm-vmscan-avoid-passing-in-classzone_idx-unnecessarily-to-compaction_ready.patch
mm-vmscan-avoid-passing-in-remaining-unnecessarily-to-prepare_kswapd_sleep.patch
mm-vmscan-have-kswapd-reclaim-from-all-zones-if-reclaiming-and-buffer_heads_over_limit.patch
mm-vmscan-add-classzone-information-to-tracepoints.patch
mm-page_alloc-remove-fair-zone-allocation-policy.patch
mm-page_alloc-cache-the-last-node-whose-dirty-limit-is-reached.patch
mm-vmstat-replace-__count_zone_vm_events-with-a-zone-id-equivalent.patch
mm-vmstat-account-per-zone-stalls-and-pages-skipped-during-reclaim.patch
mm-vmstat-print-node-based-stats-in-zoneinfo-file.patch
mm-vmstat-remove-zone-and-node-double-accounting-by-approximating-retries.patch

--
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html