On Thu, Nov 17, 2011 at 11:25:56AM +0100, Thomas Gleixner wrote:
> On Thu, 17 Nov 2011, Yong Zhang wrote:
> 
> > On Wed, Nov 16, 2011 at 06:02:42PM +0100, Thomas Gleixner wrote:
> > > On Wed, 16 Nov 2011, Steven Rostedt wrote:
> > > > On Wed, 2011-11-16 at 17:16 +0800, Yong Zhang wrote:
> > > > > Looking at commit 4799401f [memcg: Fix race condition in
> > > > > memcg_check_events() with this_cpu usage], we just want
> > > > > to disable migration. So use the right API in -rt. This
> > > > > will cure the warning below.
> > > > 
> > > > No, this won't work. Not even for -rt. If we disable migration but not
> > > > preemption, then two tasks can take this path. And the checks in
> > > > __memcg_event_check() will be corrupted because nothing is protecting
> > > > the updates from two tasks going into the same path.
> > > > 
> > > > Perhaps a local_lock would work.
> > > 
> > > Yes, that's the only sensible option for now. Untested patch below.
> > 
> > Works for me.
> 
> Johannes came up with a different solution. Could you please give it a try?

Works too :)

Thanks,
Yong

> 
> Thanks,
> 
> 	tglx
> 
> ------------->
> Subject: [patch] mm: memcg: shorten preempt-disabled section around event checks
> 
> Only the ratelimit checks themselves have to run with preemption
> disabled; the resulting actions - checking for usage thresholds,
> updating the soft limit tree - can and should run with preemption
> enabled.
> 
> Signed-off-by: Johannes Weiner <jweiner@xxxxxxxxxx>
> ---
>  mm/memcontrol.c |   73 ++++++++++++++++++++++++++----------------------
>  1 files changed, 35 insertions(+), 38 deletions(-)
> 
> Thomas, HTH and it is probably interesting for upstream as well.
> Unfortunately, I'm in the middle of moving right now, so this is
> untested except for compiling.
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 6aff93c..8e62d3e 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -683,37 +683,32 @@ static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
>  	return total;
>  }
>  
> -static bool __memcg_event_check(struct mem_cgroup *memcg, int target)
> +static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
> +				       enum mem_cgroup_events_target target)
>  {
>  	unsigned long val, next;
>  
>  	val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
>  	next = __this_cpu_read(memcg->stat->targets[target]);
>  	/* from time_after() in jiffies.h */
> -	return ((long)next - (long)val < 0);
> -}
> -
> -static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target)
> -{
> -	unsigned long val, next;
> -
> -	val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
> -
> -	switch (target) {
> -	case MEM_CGROUP_TARGET_THRESH:
> -		next = val + THRESHOLDS_EVENTS_TARGET;
> -		break;
> -	case MEM_CGROUP_TARGET_SOFTLIMIT:
> -		next = val + SOFTLIMIT_EVENTS_TARGET;
> -		break;
> -	case MEM_CGROUP_TARGET_NUMAINFO:
> -		next = val + NUMAINFO_EVENTS_TARGET;
> -		break;
> -	default:
> -		return;
> +	if ((long)next - (long)val < 0) {
> +		switch (target) {
> +		case MEM_CGROUP_TARGET_THRESH:
> +			next = val + THRESHOLDS_EVENTS_TARGET;
> +			break;
> +		case MEM_CGROUP_TARGET_SOFTLIMIT:
> +			next = val + SOFTLIMIT_EVENTS_TARGET;
> +			break;
> +		case MEM_CGROUP_TARGET_NUMAINFO:
> +			next = val + NUMAINFO_EVENTS_TARGET;
> +			break;
> +		default:
> +			break;
> +		}
> +		__this_cpu_write(memcg->stat->targets[target], next);
> +		return true;
>  	}
> -
> -	__this_cpu_write(memcg->stat->targets[target], next);
> +	return false;
>  }
>  
>  /*
> @@ -724,25 +719,27 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
>  {
>  	preempt_disable();
>  	/* threshold event is triggered in finer grain than soft limit */
> -	if (unlikely(__memcg_event_check(memcg, MEM_CGROUP_TARGET_THRESH))) {
> +	if (unlikely(mem_cgroup_event_ratelimit(memcg,
> +						MEM_CGROUP_TARGET_THRESH))) {
> +		bool do_softlimit, do_numainfo;
> +
> +		do_softlimit = mem_cgroup_event_ratelimit(memcg,
> +						MEM_CGROUP_TARGET_SOFTLIMIT);
> +#if MAX_NUMNODES > 1
> +		do_numainfo = mem_cgroup_event_ratelimit(memcg,
> +						MEM_CGROUP_TARGET_NUMAINFO);
> +#endif
> +		preempt_enable();
> +
>  		mem_cgroup_threshold(memcg);
> -		__mem_cgroup_target_update(memcg, MEM_CGROUP_TARGET_THRESH);
> -		if (unlikely(__memcg_event_check(memcg,
> -			     MEM_CGROUP_TARGET_SOFTLIMIT))) {
> +		if (unlikely(do_softlimit))
>  			mem_cgroup_update_tree(memcg, page);
> -			__mem_cgroup_target_update(memcg,
> -						   MEM_CGROUP_TARGET_SOFTLIMIT);
> -		}
>  #if MAX_NUMNODES > 1
> -		if (unlikely(__memcg_event_check(memcg,
> -			     MEM_CGROUP_TARGET_NUMAINFO))) {
> +		if (unlikely(do_numainfo))
>  			atomic_inc(&memcg->numainfo_events);
> -			__mem_cgroup_target_update(memcg,
> -						   MEM_CGROUP_TARGET_NUMAINFO);
> -		}
>  #endif
> -	}
> -	preempt_enable();
> +	} else
> +		preempt_enable();
>  }
>  
>  static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
> -- 
> 1.7.6.4

-- 
Only stand for myself
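
For reference, the heart of the patch is folding the old
__memcg_event_check()/__mem_cgroup_target_update() pair into a single
mem_cgroup_event_ratelimit(), so the per-CPU read, the time_after()-style
comparison, and the target update all happen inside one short
preempt-disabled section, while the expensive follow-up work (thresholds,
soft limit tree, NUMA info) runs preemptible. Below is a minimal,
stand-alone user-space C sketch of that merged check-and-update pattern;
the plain variables and the period of 128 are illustrative stand-ins for
the kernel's per-CPU counters and THRESHOLDS_EVENTS_TARGET, not the real
implementation.

#include <stdbool.h>
#include <stdio.h>

/* Plain variables standing in for the kernel's per-CPU counters. */
static unsigned long events_count;	/* events seen so far */
static unsigned long thresh_target;	/* count at which the next event fires */

#define EVENTS_TARGET	128	/* assumed period, like THRESHOLDS_EVENTS_TARGET */

/*
 * Merged check-and-update: returns true at most once per EVENTS_TARGET
 * events and advances the target in the same step. In the kernel this
 * runs under preempt_disable(), so no second task on the same CPU can
 * slip in between the read and the write and fire a duplicate event.
 */
static bool event_ratelimit(void)
{
	unsigned long val = events_count;
	unsigned long next = thresh_target;

	/* Signed subtraction: the time_after() idiom, safe across wraparound. */
	if ((long)next - (long)val < 0) {
		thresh_target = val + EVENTS_TARGET;
		return true;
	}
	return false;
}

int main(void)
{
	for (int i = 0; i < 1000; i++) {
		events_count++;
		if (event_ratelimit())
			printf("threshold event fired after %d events\n", i + 1);
	}
	return 0;
}

The signed subtraction is why the check keeps working when the event
counter wraps around, the same trick time_after() uses for jiffies.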