From 528f5f2667da17c26e40d271b24691412e1cbe81 Mon Sep 17 00:00:00 2001
From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@xxxxxxxxxxxxxx>
Date: Thu, 15 Dec 2011 11:41:18 +0900
Subject: [PATCH 1/5] memcg: simplify account moving check

Currently, the per-cpu variable MEM_CGROUP_ON_MOVE is used to indicate
that a memcg is under move_account() and that pc->mem_cgroup under it
may be overwritten.  But this value is almost read-only and is not
worth being per-cpu.  Use atomic_t instead.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@xxxxxxxxxxxxxx>
---
 mm/memcontrol.c |   44 ++++++++------------------------------------
 1 files changed, 8 insertions(+), 36 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 996f8f0..d183e1b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -89,7 +89,6 @@ enum mem_cgroup_stat_index {
 	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
 	MEM_CGROUP_STAT_SWAPOUT,	/* # of pages, swapped out */
 	MEM_CGROUP_STAT_DATA,	/* end of data requires synchronization */
-	MEM_CGROUP_ON_MOVE,	/* someone is moving account between groups */
 	MEM_CGROUP_STAT_NSTATS,
 };
 
@@ -289,6 +288,10 @@ struct mem_cgroup {
 	 */
 	int kmem_independent_accounting;
 	/*
+	 * > 0 if a page in this cgroup is under moving account.
+	 */
+	atomic_t	moving_account;
+	/*
 	 * percpu counter.
 	 */
 	struct mem_cgroup_stat_cpu *stat;
@@ -1247,32 +1250,15 @@ int mem_cgroup_swappiness(struct mem_cgroup *memcg)
 
 static void mem_cgroup_start_move(struct mem_cgroup *memcg)
 {
-	int cpu;
-
-	get_online_cpus();
-	spin_lock(&memcg->pcp_counter_lock);
-	for_each_online_cpu(cpu)
-		per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
-	memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
-	spin_unlock(&memcg->pcp_counter_lock);
-	put_online_cpus();
-
+	atomic_inc(&memcg->moving_account);
 	synchronize_rcu();
 }
 
 static void mem_cgroup_end_move(struct mem_cgroup *memcg)
 {
-	int cpu;
-
 	if (!memcg)
 		return;
-	get_online_cpus();
-	spin_lock(&memcg->pcp_counter_lock);
-	for_each_online_cpu(cpu)
-		per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
-	memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
-	spin_unlock(&memcg->pcp_counter_lock);
-	put_online_cpus();
+	atomic_dec(&memcg->moving_account);
 }
 /*
  * 2 routines for checking "mem" is under move_account() or not.
@@ -1289,7 +1275,7 @@ static void mem_cgroup_end_move(struct mem_cgroup *memcg)
 static bool mem_cgroup_stealed(struct mem_cgroup *memcg)
 {
 	VM_BUG_ON(!rcu_read_lock_held());
-	return this_cpu_read(memcg->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
+	return atomic_read(&memcg->moving_account);
 }
 
 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
@@ -2058,17 +2044,6 @@ static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
 		per_cpu(memcg->stat->events[i], cpu) = 0;
 		memcg->nocpu_base.events[i] += x;
 	}
-	/* need to clear ON_MOVE value, works as a kind of lock. */
-	per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
-	spin_unlock(&memcg->pcp_counter_lock);
-}
-
-static void synchronize_mem_cgroup_on_move(struct mem_cgroup *memcg, int cpu)
-{
-	int idx = MEM_CGROUP_ON_MOVE;
-
-	spin_lock(&memcg->pcp_counter_lock);
-	per_cpu(memcg->stat->count[idx], cpu) = memcg->nocpu_base.count[idx];
 	spin_unlock(&memcg->pcp_counter_lock);
 }
 
@@ -2080,11 +2055,8 @@ static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
 	struct memcg_stock_pcp *stock;
 	struct mem_cgroup *iter;
 
-	if ((action == CPU_ONLINE)) {
-		for_each_mem_cgroup(iter)
-			synchronize_mem_cgroup_on_move(iter, cpu);
+	if ((action == CPU_ONLINE))
 		return NOTIFY_OK;
-	}
 
 	if ((action != CPU_DEAD) || action != CPU_DEAD_FROZEN)
 		return NOTIFY_OK;
-- 
1.7.4.1
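
For reference, a minimal sketch (not part of the patch; the foo_* names
are hypothetical, not kernel symbols) of the coordination pattern the
patch switches to: the mover bumps an atomic counter and then waits for
an RCU grace period, so any reader that sampled the counter as zero
under rcu_read_lock() has finished before pc->mem_cgroup is actually
rewritten, while readers only need a single atomic_read().

	/* Illustrative only -- foo_* names are made up, not kernel symbols. */
	#include <linux/atomic.h>
	#include <linux/rcupdate.h>
	#include <linux/types.h>

	struct foo_group {
		atomic_t moving_account;	/* > 0 while an account move is in flight */
	};

	/* Writer side: raise the flag, then wait out existing RCU readers. */
	static void foo_start_move(struct foo_group *grp)
	{
		atomic_inc(&grp->moving_account);
		synchronize_rcu();
	}

	static void foo_end_move(struct foo_group *grp)
	{
		atomic_dec(&grp->moving_account);
	}

	/* Reader side: must be called under rcu_read_lock(). */
	static bool foo_account_is_moving(struct foo_group *grp)
	{
		return atomic_read(&grp->moving_account) > 0;
	}

Compared with the per-cpu ON_MOVE counters, this drops the
get_online_cpus()/pcp_counter_lock work on the move path and the CPU
hotplug resynchronization; since the counter is read-mostly, the shared
cache line stays clean except while a move is actually in progress.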