This is partially a preparation patch for more vmstat work but it also
has the slight advantage that __count_zid_vm_events is cheaper to
calculate than __count_zone_vm_events().

Signed-off-by: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
---
 include/linux/vmstat.h | 5 ++---
 mm/page_alloc.c        | 2 +-
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index ea00884ac8a0..810914b63564 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -101,9 +101,8 @@ static inline void vm_events_fold_cpu(int cpu)
 #define count_vm_vmacache_event(x) do {} while (0)
 #endif
 
-#define __count_zone_vm_events(item, zone, delta) \
-		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
-		zone_idx(zone), delta)
+#define __count_zid_vm_events(item, zid, delta) \
+	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
 
 /*
  * Zone and node-based page accounting with per cpu differentials.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a6e6184d3e38..ef04dc74e7e9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2399,7 +2399,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 			get_pcppage_migratetype(page));
 	}
 
-	__count_zone_vm_events(PGALLOC, zone, 1 << order);
+	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 	zone_statistics(preferred_zone, zone, gfp_flags);
 	local_irq_restore(flags);
 
-- 
2.6.4
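
For illustration, below is a minimal userspace sketch of how the
token-pasted offset in __count_zid_vm_events() resolves to the right
per-zone counter, assuming the usual layout where the PGALLOC_* event
items are declared in the same order as the zone indices. The enums,
the __count_vm_events() stand-in and main() are simplified stand-ins,
not the real kernel definitions. The likely source of the saving is
that the caller can now pass a plain zone index (page_zonenum(page),
essentially a bit extraction from page->flags) instead of having the
macro derive one from a struct zone pointer via zone_idx().

/*
 * Simplified, userspace-only sketch: the PGALLOC_* items mirror the
 * zone enum order, so "item##_NORMAL - ZONE_NORMAL + zid" lands on
 * the entry for the zone with index zid.  None of these definitions
 * are the real kernel ones.
 */
#include <stdio.h>

enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_MOVABLE,
		 MAX_NR_ZONES };
enum vm_event_item { PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL,
		     PGALLOC_MOVABLE, NR_VM_EVENT_ITEMS };

static unsigned long vm_events[NR_VM_EVENT_ITEMS];

/* Stand-in for the per-cpu event accounting. */
#define __count_vm_events(item, delta)	(vm_events[item] += (delta))

/* The new form: the caller supplies a zone index directly. */
#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)

int main(void)
{
	/* Account an order-3 allocation (8 pages) against ZONE_DMA32. */
	__count_zid_vm_events(PGALLOC, ZONE_DMA32, 1 << 3);

	/* PGALLOC_NORMAL - ZONE_NORMAL + ZONE_DMA32 == PGALLOC_DMA32,
	 * so this prints 8. */
	printf("PGALLOC_DMA32 = %lu\n", vm_events[PGALLOC_DMA32]);
	return 0;
}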