On Mon, 15 Feb 2010 12:57:30 +0200 "Kirill A. Shutemov" <kirill@xxxxxxxxxxxxx> wrote: > On Fri, Feb 12, 2010 at 11:09 AM, KAMEZAWA Hiroyuki > <kamezawa.hiroyu@xxxxxxxxxxxxxx> wrote: > > Memcg has 2 event counters which count "the same" event. Just usages are > > different from each other. This patch tries to reduce event counters. > > > > Now logic uses "only increment, no reset" counter and masks for each > > check. Softlimit check was done per 1000 events. So, the similar check > > can be done by !(new_counter & 0x3ff). Threshold check was done per 100 > > events. So, the similar check can be done by !(new_counter & 0x7f) > > > > ALL event checks are done right after EVENT percpu counter is updated. > > > > Changelog: 2010/02/12 > > - fixed to use "inc" rather than "dec" > > - modified to be more unified style of counter handling. > > - taking care of account-move. > > > > Cc: Kirill A. Shutemov <kirill@xxxxxxxxxxxxx> > > Cc: Balbir Singh <balbir@xxxxxxxxxxxxxxxxxx> > > Cc: Daisuke Nishimura <nishimura@xxxxxxxxxxxxxxxxx> > > Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@xxxxxxxxxxxxxx> > > --- > > mm/memcontrol.c | 86 ++++++++++++++++++++++++++------------------------ > > 1 file changed, 41 insertions(+), 45 deletions(-) > > > > Index: mmotm-2.6.33-Feb10/mm/memcontrol.c > > =================================================================== > > --- mmotm-2.6.33-Feb10.orig/mm/memcontrol.c > > +++ mmotm-2.6.33-Feb10/mm/memcontrol.c > > @@ -63,8 +63,15 @@ static int really_do_swap_account __init > > #define do_swap_account (0) > > #endif > > > > -#define SOFTLIMIT_EVENTS_THRESH (1000) > > -#define THRESHOLDS_EVENTS_THRESH (100) > > +/* > > + * Per memcg event counter is incremented at every pagein/pageout. This counter > > + * is used for trigger some periodic events. This is straightforward and better > > + * than using jiffies etc. to handle periodic memcg event. 
> > + * > > + * These values will be used as !((event) & ((1 <<(thresh)) - 1)) > > + */ > > +#define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */ > > +#define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */ > > > > /* > > * Statistics for memory cgroup. > > @@ -79,10 +86,7 @@ enum mem_cgroup_stat_index { > > MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */ > > MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */ > > MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */ > > - MEM_CGROUP_STAT_SOFTLIMIT, /* decrements on each page in/out. > > - used by soft limit implementation */ > > - MEM_CGROUP_STAT_THRESHOLDS, /* decrements on each page in/out. > > - used by threshold implementation */ > > + MEM_CGROUP_EVENTS, /* incremented at every pagein/pageout */ > > > > MEM_CGROUP_STAT_NSTATS, > > }; > > @@ -154,7 +158,6 @@ struct mem_cgroup_threshold_ary { > > struct mem_cgroup_threshold entries[0]; > > }; > > > > -static bool mem_cgroup_threshold_check(struct mem_cgroup *mem); > > static void mem_cgroup_threshold(struct mem_cgroup *mem); > > > > /* > > @@ -392,19 +395,6 @@ mem_cgroup_remove_exceeded(struct mem_cg > > spin_unlock(&mctz->lock); > > } > > > > -static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem) > > -{ > > - bool ret = false; > > - s64 val; > > - > > - val = this_cpu_read(mem->stat->count[MEM_CGROUP_STAT_SOFTLIMIT]); > > - if (unlikely(val < 0)) { > > - this_cpu_write(mem->stat->count[MEM_CGROUP_STAT_SOFTLIMIT], > > - SOFTLIMIT_EVENTS_THRESH); > > - ret = true; > > - } > > - return ret; > > -} > > > > static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page) > > { > > @@ -542,8 +532,7 @@ static void mem_cgroup_charge_statistics > > __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]); > > else > > __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]); > > - __this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_SOFTLIMIT]); > > - __this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_THRESHOLDS]); > > + 
__this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]); > > > > preempt_enable(); > > } > > @@ -563,6 +552,29 @@ static unsigned long mem_cgroup_get_loca > > return total; > > } > > > > +static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift) > > inline? > > > +{ > > + s64 val; > > + > > + val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]); > > + > > + return !(val & ((1 << event_mask_shift) - 1)); > > +} > > + > > +/* > > + * Check events in order. > > + * > > + */ > > +static void memcg_check_events(struct mem_cgroup *mem, struct page *page) > > Ditto. > I'd like to depend on compiler. Thanks, -Kame -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxxx For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@xxxxxxxxx"> email@xxxxxxxxx </a>