[PATCH 1/2] tests.cgroup: Add cpuset.mems test

This test verifies memory pinning via the cpuset.mems cgroup feature.
While the guest stresses its memory, the VM is repeatedly switched
between cgroups pinned to different memory nodes.

Signed-off-by: Lukas Doktor <ldoktor@xxxxxxxxxx>
---
 client/tests/kvm/tests/cgroup.py |   99 ++++++++++++++++++++++++++++++++++++++
 client/virt/subtests.cfg.sample  |    3 +
 2 files changed, 102 insertions(+), 0 deletions(-)
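
A minimal, illustrative sketch (not part of the patch) of the pinning
mechanism the test exercises, assuming a cgroup v1 cpuset hierarchy
mounted at /cgroup/cpuset; the mount point, group name and helper name
below are made up for the example:

    import os

    CPUSET_ROOT = "/cgroup/cpuset"   # assumed mount point

    def pin_pid_to_node(pid, node):
        """Restrict memory allocations of `pid` to one NUMA node."""
        group = os.path.join(CPUSET_ROOT, "node%d" % node)
        if not os.path.isdir(group):
            os.mkdir(group)
        # cpuset.cpus and cpuset.mems must be set before tasks can be added
        all_cpus = open(os.path.join(CPUSET_ROOT, "cpuset.cpus")).read()
        open(os.path.join(group, "cpuset.cpus"), "w").write(all_cpus)
        open(os.path.join(group, "cpuset.mems"), "w").write(str(node))
        # memory_migrate=1 moves pages already allocated on other nodes
        # when the task's cpuset.mems changes
        open(os.path.join(group, "cpuset.memory_migrate"), "w").write("1")
        open(os.path.join(group, "tasks"), "w").write(str(pid))

The test below does the same through the Cgroup helper class, re-attaching
the qemu process to a cgroup pinned to a different node on every iteration
while dd stresses the guest memory.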

diff --git a/client/tests/kvm/tests/cgroup.py b/client/tests/kvm/tests/cgroup.py
index 5972ccb..8b11865 100644
--- a/client/tests/kvm/tests/cgroup.py
+++ b/client/tests/kvm/tests/cgroup.py
@@ -1293,6 +1293,105 @@ def run_cgroup(test, params, env):
             return ("VM survived %d cgroup switches" % i)
 
     @error.context_aware
+    def cpuset_mems_switching():
+        """
+        Tests cpuset.mems pinning. While the guest stresses its memory, the
+        VM is switched between cgroups pinned to different memory nodes.
+        @param cfg: cgroup_test_time - test duration '10'
+        @param cfg: cgroup_cpuset_mems_mb - override the size of the memory
+                    blocks 'by default 1/2 of VM memory'
+        """
+        error.context("Init")
+        test_time = int(params.get('cgroup_test_time', 10))
+        vm = env.get_all_vms()[0]
+
+        error.context("Prepare")
+        modules = CgroupModules()
+        if (modules.init(['cpuset']) != 1):
+            raise error.TestFail("Can't mount cpuset cgroup modules")
+        cgroup = Cgroup('cpuset', '')
+        cgroup.initialize(modules)
+
+        mems = cgroup.get_property("cpuset.mems")[0]
+        mems = mems.split('-')
+        mems = range(int(mems[0]), int(mems[-1]) + 1)
+        no_mems = len(mems)
+        if no_mems < 2:
+            raise error.TestError("This test needs at least 2 memory nodes, "
+                                  "detected %s" % mems)
+        # Create one cgroup per memory node
+        all_cpus = cgroup.get_property("cpuset.cpus")[0]
+        for i in range(no_mems):
+            cgroup.mk_cgroup()
+            cgroup.set_property('cpuset.mems', mems[i], -1)
+            cgroup.set_property('cpuset.cpus', all_cpus, -1)
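+            # memory_migrate=1 makes pages that are already allocated on
+            # other nodes follow the task when its cpuset.mems changes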
+            cgroup.set_property('cpuset.memory_migrate', 1)
+
+        timeout = int(params.get("login_timeout", 360))
+        sessions = []
+        sessions.append(vm.wait_for_login(timeout=timeout))
+        sessions.append(vm.wait_for_login(timeout=30))
+
+        # Use at most 1/2 of the VM's memory for the dd block size
+        size = int(params.get('mem', 1024)) / 2
+        if params.get('cgroup_cpuset_mems_mb') is not None:
+            size = min(size, int(params.get('cgroup_cpuset_mems_mb')))
+
+        error.context("Test")
+        err = ""
+        try:
+            logging.info("Some harmless IOError messages about non-existing "
+                         "processes might occur.")
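+            # Each dd block is `size` MB, so every iteration touches a
+            # large chunk of the guest's memory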
+            sessions[0].sendline('dd if=/dev/zero of=/dev/null bs=%dM '
+                                 'iflag=fullblock' % size)
+
+            i = 0
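+            # SIGUSR1 makes dd print its "N+M records in/out" statistics;
+            # a snapshot is taken now and another one after the switching
+            # loop so the counters can be compared for progress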
+            sessions[1].cmd('killall -SIGUSR1 dd')
+            t_stop = time.time() + test_time
+            while time.time() < t_stop:
+                i += 1
+                assign_vm_into_cgroup(vm, cgroup, i % no_mems)
+            sessions[1].cmd('killall -SIGUSR1 dd; true')
+            try:
+                out = sessions[0].read_until_output_matches(
+                                                ['(\d+)\+\d records out'])[1]
+                if len(re.findall(r'(\d+)\+\d records out', out)) < 2:
+                    out += sessions[0].read_until_output_matches(
+                                                ['(\d+)\+\d records out'])[1]
+            except ExpectTimeoutError:
+                err = "dd didn't produce the expected 'records out' output"
+
+            if not err:
+                sessions[1].cmd('killall dd; true')
+                dd_res = re.findall(r'(\d+)\+(\d+) records in', out)
+                dd_res += re.findall(r'(\d+)\+(\d+) records out', out)
+                dd_res = [int(_[0]) + int(_[1]) for _ in dd_res]
+                if dd_res[1] <= dd_res[0] or dd_res[3] <= dd_res[2]:
+                    err = ("dd stopped copying data: %s..%s, %s..%s" %
+                           (dd_res[0], dd_res[1], dd_res[2], dd_res[3]))
+            if err:
+                logging.error(err)
+            else:
+                out = ("Guest moved %s times in %s seconds, dd copied %d "
+                       "blocks of %dMB each" % (i, test_time, dd_res[3], size))
+                logging.info(out)
+        finally:
+            error.context("Cleanup")
+            del(cgroup)
+            del(modules)
+
+            for session in sessions:
+                # check the session is still responsive before closing it
+                session.cmd("true")
+                session.close()
+
+        error.context("Results")
+        if err:
+            raise error.TestFail(err)
+        else:
+            return ("VM survived %d cgroup switches" % i)
+
+    @error.context_aware
     def devices_access():
         """
         Tests devices.list capability. It tries hot-adding disk with different
diff --git a/client/virt/subtests.cfg.sample b/client/virt/subtests.cfg.sample
index a5eac01..d96711d 100644
--- a/client/virt/subtests.cfg.sample
+++ b/client/virt/subtests.cfg.sample
@@ -1349,6 +1349,9 @@ variants:
             - cpuset_cpus_switching:
                 cgroup_test = "cpuset_cpus_switching"
                 # cgroup_test_time
+            - cpuset_mems_switching:
+                cgroup_test = "cpuset_mems_switching"
+                # cgroup_test_time, cgroup_cpuset_mems_mb
             - devices_access:
                 cgroup_test = "devices_access"
             - freezer:
-- 
1.7.7.6
