Tests the cgroup cpuset (CPU affinity) functionality. It spawns no_cpus + 1
CPU-intensive processes, limits their CPU affinity to cpu0 using cgroups and
verifies the resulting per-CPU load. Then it does the same on all CPUs except
cpu0.

Signed-off-by: Lukas Doktor <ldoktor@xxxxxxxxxx>
---
(Standalone sketches of the /proc/stat sampling and of the cpuset setup this
test relies on are appended after the patch.)

 client/tests/cgroup/cgroup.py |  161 ++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 160 insertions(+), 1 deletions(-)

diff --git a/client/tests/cgroup/cgroup.py b/client/tests/cgroup/cgroup.py
index 45636f2..dc5a1b3 100755
--- a/client/tests/cgroup/cgroup.py
+++ b/client/tests/cgroup/cgroup.py
@@ -22,7 +22,7 @@ class cgroup(test.test):
         err = ""
         # Run available tests
-        for i in ['memory']:
+        for i in ['memory', 'cpuset']:
             logging.info("---< 'test_%s' START >---", i)
             try:
                 if not self.modules.get_pwd(i):
@@ -241,3 +241,162 @@ class cgroup(test.test):
+    def test_cpuset(self):
+        """
+        Cpuset test
+        1) Initiate CPU load on CPU0, then spread onto CPU* - CPU0
+        """
+        class per_cpu_load:
+            """
+            Handles the per_cpu_load stats
+            self.values [cpus, cpu0, cpu1, ...]
+            """
+            def __init__(self):
+                """
+                Init
+                """
+                self.values = []
+                self.f = open('/proc/stat', 'r')
+                line = self.f.readline()
+                while line:
+                    if line.startswith('cpu'):
+                        self.values.append(int(line.split()[1]))
+                    else:
+                        break
+                    line = self.f.readline()
+
+            def reload(self):
+                """
+                Reload current values
+                """
+                self.values = self.get()
+
+            def get(self):
+                """
+                Get the current values
+                @return vals: array of current values [cpus, cpu0, cpu1..]
+                """
+                self.f.seek(0)
+                self.f.flush()
+                vals = []
+                for i in range(len(self.values)):
+                    vals.append(int(self.f.readline().split()[1]))
+                return vals
+
+            def tick(self):
+                """
+                Reload values and return the load since the last tick/reload
+                @return vals: array of load between ticks/reloads
+                              [cpus, cpu0, cpu1..]
+                """
+                vals = self.get()
+                ret = []
+                for i in range(len(self.values)):
+                    ret.append(vals[i] - self.values[i])
+                self.values = vals
+                return ret
+
+        def cleanup(suppress=False):
+            # cleanup
+            logging.debug("test_cpuset: Cleanup")
+            err = ""
+            try:
+                for task in tasks:
+                    for i in range(10):
+                        task.terminate()
+                        if task.poll() != None:
+                            break
+                        time.sleep(1)
+                    if i >= 9:
+                        logging.error("test_cpuset: Subprocess didn't finish")
+            except Exception, inst:
+                err += "\nCan't terminate tasks: %s" % inst
+            if item.rm_cgroup(pwd):
+                err += "\nCan't remove cgroup directory"
+            if err:
+                if suppress:
+                    logging.warn("Some parts of cleanup failed%s" % err)
+                else:
+                    raise error.TestFail("Some parts of cleanup failed%s" % err)
+
+        # Preparation
+        item = CG('cpuset', self._client)
+        if item.initialize(self.modules):
+            raise error.TestFail("cgroup init failed")
+
+        # FIXME: new cpuset cgroup doesn't have any mems and cpus assigned
+        # thus smoke_test won't work
+        #if item.smoke_test():
+        #    raise error.TestFail("smoke_test failed")
+
+        try:
+            # Available cpus: cpuset.cpus = "0-$CPUS\n"
+            no_cpus = int(item.get_prop("cpuset.cpus").split('-')[1]) + 1
+        except:
+            raise error.TestFail("Failed to get no_cpus or no_cpus = 1")
+
+        pwd = item.mk_cgroup()
+        if pwd == None:
+            raise error.TestFail("Can't create cgroup")
+        # FIXME: new cpuset cgroup doesn't have any mems and cpus assigned
+        try:
+            tmp = item.get_prop("cpuset.cpus")
+            item.set_property("cpuset.cpus", tmp, pwd)
+            tmp = item.get_prop("cpuset.mems")
+            item.set_property("cpuset.mems", tmp, pwd)
+        except:
+            cleanup(True)
+            raise error.TestFail("Failed to set cpus and mems of"
+                                 " a new cgroup")
+
+        ################################################
+        # Cpu allocation test
+        # Use cpu0 and verify, then all cpu* - cpu0 and verify
+        ################################################
+        logging.debug("test_cpuset: Cpu allocation test")
+
+        tasks = []
+        # Run no_cpus + 1 jobs
+        for i in range(no_cpus + 1):
+            tasks.append(item.test("cpu"))
+            if item.set_cgroup(tasks[i].pid, pwd):
+                cleanup(True)
+                raise error.TestFail("Failed to set cgroup")
+            tasks[i].stdin.write('\n')
+        stats = per_cpu_load()
+        # Use only the first CPU
+        item.set_property("cpuset.cpus", 0, pwd)
+        stats.reload()
+        time.sleep(10)
+        # [0] = all cpus
+        s1 = stats.tick()[1:]
+        s2 = s1[1:]
+        s1 = s1[0]
+        for _s in s2:
+            if s1 < _s:
+                cleanup(True)
+                raise error.TestFail("Unused processor had higher utilization\n"
+                                     "used cpu: %s, remaining cpus: %s"
+                                     % (s1, s2))
+
+        if no_cpus == 2:
+            item.set_property("cpuset.cpus", "1", pwd)
+        else:
+            item.set_property("cpuset.cpus", "1-%d" % (no_cpus - 1), pwd)
+        stats.reload()
+        time.sleep(10)
+        s1 = stats.tick()[1:]
+        s2 = s1[0]
+        s1 = s1[1:]
+        for _s in s1:
+            if s2 > _s:
+                cleanup(True)
+                raise error.TestFail("Unused processor had higher utilization\n"
+                                     "used cpus: %s, remaining cpu: %s"
+                                     % (s1, s2))
+        logging.debug("test_cpuset: Cpu allocation test passed")
+
+        ################################################
+        # CLEANUP
+        ################################################
+        cleanup()
-- 
1.7.6