memcg_stock is currently initialized during the root cgroup allocation, which is OK but pointlessly pollutes the memcg allocation code with something that can be done when the memcg subsystem is initialized by mem_cgroup_init along with other controller specific parts. This patch wraps the current memcg_stock initialization code into a helper and calls it from the controller subsystem initialization code.

Signed-off-by: Michal Hocko <mhocko@xxxxxxx>
---
 mm/memcontrol.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b0d3339..e9c1690 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2362,6 +2362,17 @@ static void drain_local_stock(struct work_struct *dummy)
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 }
 
+static void __init memcg_stock_init(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct memcg_stock_pcp *stock =
+					&per_cpu(memcg_stock, cpu);
+		INIT_WORK(&stock->work, drain_local_stock);
+	}
+}
+
 /*
  * Cache charges(val) which is from res_counter, to local per_cpu area.
  * This will be consumed by consume_stock() function, later.
@@ -6268,15 +6279,7 @@ mem_cgroup_css_alloc(struct cgroup *cont)
 
 	/* root ? */
 	if (cont->parent == NULL) {
-		int cpu;
-
 		root_mem_cgroup = memcg;
-		for_each_possible_cpu(cpu) {
-			struct memcg_stock_pcp *stock =
-					&per_cpu(memcg_stock, cpu);
-			INIT_WORK(&stock->work, drain_local_stock);
-		}
-
 		res_counter_init(&memcg->res, NULL);
 		res_counter_init(&memcg->memsw, NULL);
 		res_counter_init(&memcg->kmem, NULL);
@@ -7014,6 +7017,7 @@ static int __init mem_cgroup_init(void)
 	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
 	enable_swap_cgroup();
 	mem_cgroup_soft_limit_tree_init();
+	memcg_stock_init();
 	return 0;
 }
 subsys_initcall(mem_cgroup_init);
-- 
1.7.10.4
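
For illustration only, not part of the patch: the pattern this change ends up with, reduced to a minimal self-contained sketch. All demo_* names are hypothetical; the kernel APIs used (DEFINE_PER_CPU, for_each_possible_cpu, INIT_WORK, subsys_initcall) are the same ones the patch relies on.

#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

/* Hypothetical per-cpu cache, analogous to memcg_stock_pcp. */
struct demo_stock_pcp {
	struct work_struct work;
};
static DEFINE_PER_CPU(struct demo_stock_pcp, demo_stock);

static void demo_drain_local_stock(struct work_struct *dummy)
{
	/* draining of the local per-cpu cache would go here */
}

/* Wire up every possible CPU's work item once, at subsystem init time. */
static int __init demo_subsys_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct demo_stock_pcp *stock = &per_cpu(demo_stock, cpu);

		INIT_WORK(&stock->work, demo_drain_local_stock);
	}
	return 0;
}
subsys_initcall(demo_subsys_init);

Doing the one-time per-cpu setup from a subsys_initcall keeps it out of the css allocation path, next to the other controller initialization done by mem_cgroup_init.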