The patch titled
     Subject: mm, page_alloc: inline zone_statistics
has been added to the -mm tree.  Its filename is
     mm-page_alloc-inline-zone_statistics.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-page_alloc-inline-zone_statistics.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-page_alloc-inline-zone_statistics.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/SubmitChecklist when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Subject: mm, page_alloc: inline zone_statistics

zone_statistics has one call-site but it's a public function.  Make it
static and inline.

The performance difference on a page allocator microbenchmark is:

                                          4.6.0-rc2                  4.6.0-rc2
                                   statbranch-v1r20           statinline-v1r20
Min      alloc-odr0-1              419.00 (  0.00%)           412.00 (  1.67%)
Min      alloc-odr0-2              305.00 (  0.00%)           301.00 (  1.31%)
Min      alloc-odr0-4              250.00 (  0.00%)           247.00 (  1.20%)
Min      alloc-odr0-8              219.00 (  0.00%)           215.00 (  1.83%)
Min      alloc-odr0-16             203.00 (  0.00%)           199.00 (  1.97%)
Min      alloc-odr0-32             195.00 (  0.00%)           191.00 (  2.05%)
Min      alloc-odr0-64             191.00 (  0.00%)           187.00 (  2.09%)
Min      alloc-odr0-128            189.00 (  0.00%)           185.00 (  2.12%)
Min      alloc-odr0-256            198.00 (  0.00%)           193.00 (  2.53%)
Min      alloc-odr0-512            210.00 (  0.00%)           207.00 (  1.43%)
Min      alloc-odr0-1024           216.00 (  0.00%)           213.00 (  1.39%)
Min      alloc-odr0-2048           221.00 (  0.00%)           220.00 (  0.45%)
Min      alloc-odr0-4096           227.00 (  0.00%)           226.00 (  0.44%)
Min      alloc-odr0-8192           232.00 (  0.00%)           229.00 (  1.29%)
Min      alloc-odr0-16384          232.00 (  0.00%)           229.00 (  1.29%)

Signed-off-by: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Jesper Dangaard Brouer <brouer@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/vmstat.h |    2 --
 mm/page_alloc.c        |   31 +++++++++++++++++++++++++++++++
 mm/vmstat.c            |   29 -----------------------------
 3 files changed, 31 insertions(+), 31 deletions(-)

diff -puN include/linux/vmstat.h~mm-page_alloc-inline-zone_statistics include/linux/vmstat.h
--- a/include/linux/vmstat.h~mm-page_alloc-inline-zone_statistics
+++ a/include/linux/vmstat.h
@@ -163,12 +163,10 @@ static inline unsigned long zone_page_st
 #ifdef CONFIG_NUMA
 
 extern unsigned long node_page_state(int node, enum zone_stat_item item);
-extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
 
 #else
 
 #define node_page_state(node, item) global_page_state(item)
-#define zone_statistics(_zl, _z, gfp) do { } while (0)
 
 #endif /* CONFIG_NUMA */
 
diff -puN mm/page_alloc.c~mm-page_alloc-inline-zone_statistics mm/page_alloc.c
--- a/mm/page_alloc.c~mm-page_alloc-inline-zone_statistics
+++ a/mm/page_alloc.c
@@ -2382,6 +2382,37 @@ alloc_pages_zone(struct zone *zone, unsi
 }
 
 /*
+ * Update NUMA hit/miss statistics
+ *
+ * Must be called with interrupts disabled.
+ *
+ * When __GFP_OTHER_NODE is set assume the node of the preferred
+ * zone is the local node.  This is useful for daemons who allocate
+ * memory on behalf of other processes.
+ */
+static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
+								gfp_t flags)
+{
+#ifdef CONFIG_NUMA
+	int local_nid = numa_node_id();
+	enum zone_stat_item local_stat = NUMA_LOCAL;
+
+	if (unlikely(flags & __GFP_OTHER_NODE)) {
+		local_stat = NUMA_OTHER;
+		local_nid = preferred_zone->node;
+	}
+
+	if (z->node == local_nid) {
+		__inc_zone_state(z, NUMA_HIT);
+		__inc_zone_state(z, local_stat);
+	} else {
+		__inc_zone_state(z, NUMA_MISS);
+		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
+	}
+#endif
+}
+
+/*
  * Allocate a page from the given zone. Use pcplists for order-0 allocations.
  */
 static inline
diff -puN mm/vmstat.c~mm-page_alloc-inline-zone_statistics mm/vmstat.c
--- a/mm/vmstat.c~mm-page_alloc-inline-zone_statistics
+++ a/mm/vmstat.c
@@ -570,35 +570,6 @@ void drain_zonestat(struct zone *zone, s
 
 #ifdef CONFIG_NUMA
 /*
- * zonelist = the list of zones passed to the allocator
- * z        = the zone from which the allocation occurred.
- *
- * Must be called with interrupts disabled.
- *
- * When __GFP_OTHER_NODE is set assume the node of the preferred
- * zone is the local node. This is useful for daemons who allocate
- * memory on behalf of other processes.
- */
-void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
-{
-	int local_nid = numa_node_id();
-	enum zone_stat_item local_stat = NUMA_LOCAL;
-
-	if (unlikely(flags & __GFP_OTHER_NODE)) {
-		local_stat = NUMA_OTHER;
-		local_nid = preferred_zone->node;
-	}
-
-	if (z->node == local_nid) {
-		__inc_zone_state(z, NUMA_HIT);
-		__inc_zone_state(z, local_stat);
-	} else {
-		__inc_zone_state(z, NUMA_MISS);
-		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
-	}
-}
-
-/*
  * Determine the per node value of a stat item.
  */
 unsigned long node_page_state(int node, enum zone_stat_item item)
_

Patches currently in -mm which might be from mgorman@xxxxxxxxxxxxxxxxxxx are

mm-page_alloc-only-check-pagecompound-for-high-order-pages.patch
mm-page_alloc-use-new-pageanonhead-helper-in-the-free-page-fast-path.patch
mm-page_alloc-reduce-branches-in-zone_statistics.patch
mm-page_alloc-inline-zone_statistics.patch
mm-page_alloc-inline-the-fast-path-of-the-zonelist-iterator.patch
mm-page_alloc-use-__dec_zone_state-for-order-0-page-allocation.patch
mm-page_alloc-avoid-unnecessary-zone-lookups-during-pageblock-operations.patch
mm-page_alloc-convert-alloc_flags-to-unsigned.patch
mm-page_alloc-convert-nr_fair_skipped-to-bool.patch
mm-page_alloc-remove-unnecessary-local-variable-in-get_page_from_freelist.patch
mm-page_alloc-remove-unnecessary-initialisation-in-get_page_from_freelist.patch
mm-page_alloc-remove-redundant-check-for-empty-zonelist.patch
mm-page_alloc-simplify-last-cpupid-reset.patch
mm-page_alloc-move-might_sleep_if-check-to-the-allocator-slowpath.patch
mm-page_alloc-move-__gfp_hardwall-modifications-out-of-the-fastpath.patch
mm-page_alloc-check-once-if-a-zone-has-isolated-pageblocks.patch
mm-page_alloc-shorten-the-page-allocator-fast-path.patch
mm-page_alloc-reduce-cost-of-fair-zone-allocation-policy-retry.patch
mm-page_alloc-shortcut-watermark-checks-for-order-0-pages.patch
mm-page_alloc-avoid-looking-up-the-first-zone-in-a-zonelist-twice.patch
mm-page_alloc-remove-field-from-alloc_context.patch
mm-page_alloc-check-multiple-page-fields-with-a-single-branch.patch
mm-page_alloc-remove-unnecessary-variable-from-free_pcppages_bulk.patch
mm-page_alloc-inline-pageblock-lookup-in-page-free-fast-paths.patch
mm-page_alloc-defer-debugging-checks-of-freed-pages-until-a-pcp-drain.patch
mm-page_alloc-defer-debugging-checks-of-pages-allocated-from-the-pcp.patch

--
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
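As background for the pattern the patch applies, below is a minimal,
self-contained sketch (hypothetical names only -- account_alloc, hits and
misses are illustrative stand-ins, not kernel code) of converting a function
with a single call site from an extern symbol into a static inline in the
translation unit that calls it.  The out-of-line definition and the header
declaration go away, and the compiler is free to fold the body into its
caller, which is where the small per-call saving in the benchmark comes from.

#include <stdio.h>

/* Hypothetical stand-in for per-node counters; not the kernel's vmstat. */
static unsigned long hits, misses;

/*
 * Before: "extern void account_alloc(int local);" in a header, with the
 * definition in another .c file.  After: with only one caller, the function
 * becomes static inline here and the exported symbol is dropped.
 */
static inline void account_alloc(int local)
{
	if (local)
		hits++;
	else
		misses++;
}

int main(void)
{
	account_alloc(1);	/* the single call site */
	printf("hits=%lu misses=%lu\n", hits, misses);
	return 0;
}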