On Mon, 18 Oct 2010, KOSAKI Motohiro wrote:

> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index 39c24eb..699cdea 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -185,6 +185,7 @@ struct per_cpu_pageset {
>  #ifdef CONFIG_SMP
>  	s8 stat_threshold;
>  	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
> +	s8 vm_stat_drifted[NR_VM_ZONE_STAT_ITEMS];
>  #endif
>  };

Significantly increases cache footprint for per_cpu_pagesets.

> @@ -168,10 +175,14 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
>  				int delta)
>  {
>  	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
> -
>  	s8 *p = pcp->vm_stat_diff + item;
>  	long x;
>
> +	if (unlikely(!vm_stat_drift_take(zone, item))) {
> +		zone_page_state_add(delta, zone, item);
> +		return;
> +	}
> +
>  	x = delta + *p;
>
>  	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
>
> @@ -224,6 +235,11 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
>  	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
>  	s8 *p = pcp->vm_stat_diff + item;
>
> +	if (unlikely(!vm_stat_drift_take(zone, item))) {
> +		zone_page_state_add(1, zone, item);
> +		return;
> +	}
> +
>  	(*p)++;
>
>  	if (unlikely(*p > pcp->stat_threshold)) {
>
> @@ -245,6 +261,11 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
>  	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
>  	s8 *p = pcp->vm_stat_diff + item;
>
> +	if (unlikely(!vm_stat_drift_take(zone, item))) {
> +		zone_page_state_add(-1, zone, item);
> +		return;
> +	}
> +
>  	(*p)--;
>
>  	if (unlikely(*p < - pcp->stat_threshold)) {

Increased overhead for basic VM counter management.

Instead of all of this, why not simply set the stat_threshold to 0 for
select cpus?
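A rough, untested sketch of what I mean, assuming CONFIG_SMP and ignoring
cpu hotplug; zone_zero_stat_thresholds() is just an illustrative name, not
an existing function:

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/percpu.h>

/*
 * Illustrative only: force immediate folding of vmstat deltas on @cpu by
 * dropping its per-cpu stat_threshold to 0.  With a zero threshold,
 * __mod_zone_page_state(), __inc_zone_state() and __dec_zone_state()
 * overflow the per-cpu delta on every call and push the update straight
 * into the zone and global counters, so no drift can accumulate on that
 * cpu and no extra per-item state is needed.
 */
static void zone_zero_stat_thresholds(int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);

		pcp->stat_threshold = 0;
	}
}

refresh_zone_stat_thresholds() would presumably also need to skip such cpus
so the zero value is not overwritten when the thresholds are recalculated.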