[RFC][PATCH 1/2] memcg: coalescing css_get() at charge

This is based on a cleanup patch I sent earlier.
==
From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@xxxxxxxxxxxxxx>

Coalesce multiple css_get() calls into a single __css_get(count), in the same
way res_counter batches a whole charge at once. This reduces memcg's overhead
and cache-line ping-pong considerably.
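
As a rough illustration of the idea (a userspace sketch only, not the kernel
API: the names below and the plain C11 atomics are stand-ins for css->refcnt
and __css_get()), taking one reference per page costs one atomic RMW per page,
while coalescing costs a single atomic RMW for the whole batch:

/* Userspace sketch of coalesced reference counting (illustrative only).
 * refcnt stands in for the css reference count. */
#include <stdatomic.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static atomic_long refcnt;

/* per-page path: one atomic op per page -> cache-line ping-pong under load */
static void get_per_page(unsigned long bytes)
{
	unsigned long i;

	for (i = 0; i < bytes / PAGE_SIZE; i++)
		atomic_fetch_add(&refcnt, 1);
}

/* coalesced path: one atomic op for the whole charge, like __css_get(count) */
static void get_batched(unsigned long bytes)
{
	atomic_fetch_add(&refcnt, (long)(bytes / PAGE_SIZE));
}

int main(void)
{
	get_per_page(32 * PAGE_SIZE);
	get_batched(32 * PAGE_SIZE);
	printf("refcnt = %ld\n", atomic_load(&refcnt)); /* both paths add 32 */
	return 0;
}

Either path leaves the count the same; the batched one just touches the shared
counter far less often, which is where the memcg saving comes from.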

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@xxxxxxxxxxxxxx>
---
 mm/memcontrol.c |   35 ++++++++++++++++++++++++++++-------
 1 file changed, 28 insertions(+), 7 deletions(-)

Index: mmotm-2.6.34-May21/mm/memcontrol.c
===================================================================
--- mmotm-2.6.34-May21.orig/mm/memcontrol.c
+++ mmotm-2.6.34-May21/mm/memcontrol.c
@@ -1542,6 +1542,7 @@ static void drain_stock(struct memcg_sto
 		res_counter_uncharge(&old->res, stock->charge);
 		if (do_swap_account)
 			res_counter_uncharge(&old->memsw, stock->charge);
+		__css_put(&old->css, stock->charge/PAGE_SIZE);
 	}
 	stock->cached = NULL;
 	stock->charge = 0;
@@ -1570,6 +1571,7 @@ static void refill_stock(struct mem_cgro
 		stock->cached = mem;
 	}
 	stock->charge += val;
+	__css_get(&mem->css, val/PAGE_SIZE);
 	put_cpu_var(memcg_stock);
 }
 
@@ -1710,6 +1712,7 @@ static int __mem_cgroup_try_charge(struc
 	 * in system level. So, allow to go ahead dying process in addition to
 	 * MEMDIE process.
 	 */
+again:
 	if (unlikely(test_thread_flag(TIF_MEMDIE)
 		     || fatal_signal_pending(current)))
 		goto bypass;
@@ -1720,25 +1723,42 @@ static int __mem_cgroup_try_charge(struc
 	 * thread group leader migrates. It's possible that mm is not
 	 * set, if so charge the init_mm (happens for pagecache usage).
 	 */
+
+	rcu_read_lock();
 	if (*memcg) {
 		mem = *memcg;
-		css_get(&mem->css);
 	} else {
-		mem = try_get_mem_cgroup_from_mm(mm);
+		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
 		if (unlikely(!mem))
 			return 0;
 		*memcg = mem;
 	}
 
-	VM_BUG_ON(css_is_removed(&mem->css));
-	if (mem_cgroup_is_root(mem))
+	/* racy? (but this seems to never happen in usual cases) */
+	if (unlikely(css_is_removed(&mem->css))) {
+		rcu_read_unlock();
+		mem = NULL;
+		goto bypass;
+	}
+
+	if (mem_cgroup_is_root(mem)) {
+		rcu_read_unlock();
 		goto done;
+	}
 
+	if (consume_stock(mem)) {
+		rcu_read_unlock();
+		goto done;
+	}
+	if (!css_tryget(&mem->css)) {
+		rcu_read_unlock();
+		goto again;
+	}
+	rcu_read_unlock();
+	/* Enter memory reclaim loop */
 	do {
 		bool oom_check;
 
-		if (consume_stock(mem))
-			goto done; /* don't need to fill stock */
 		/* If killed, bypass charge */
 		if (fatal_signal_pending(current))
 			goto bypass;
@@ -1756,7 +1776,8 @@ static int __mem_cgroup_try_charge(struc
 			break;
 		case CHARGE_RETRY: /* not in OOM situation but retry */
 			csize = PAGE_SIZE;
-			break;
+			css_put(&mem->css);
+			goto again;
 		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
 			goto nomem;
 		case CHARGE_NOMEM: /* OOM routine works */

--