[Autotest][PATCH 4/4] virt: Allow multihost tests to use fd migration and add new tests

Multi-host migration tests can now run over fd migration as well as tcp,
selected with the mig_protocol parameter ("tcp" remains the default).

New tests:
  1) Change the migration downtime after the migration starts.
  2) Change the migration speed after the migration starts.
  3) Cancel the migration after it starts.

Signed-off-by: Jiří Župka <jzupka@xxxxxxxxxx>
---
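
Note: the tests converted to fd migration all pick their migration backend
the same way, based on the mig_protocol parameter. A minimal sketch of that
pattern follows; run_example_multi_host is a hypothetical entry point, and
utils_test.MultihostMigrationFd is assumed to be available, as the tests in
this patch reference it.

    from virttest import utils_test


    def run_example_multi_host(test, params, env):
        # "tcp" stays the default protocol; "fd" switches to the fd variant.
        mig_protocol = params.get("mig_protocol", "tcp")
        base_class = utils_test.MultihostMigration
        if mig_protocol == "fd":
            base_class = utils_test.MultihostMigrationFd

        class TestMultihostMigration(base_class):
            def __init__(self, test, params, env):
                super(TestMultihostMigration, self).__init__(test, params, env)
                self.srchost = self.params.get("hosts")[0]
                self.dsthost = self.params.get("hosts")[1]
                self.vms = params.get("vms").split()

            def migration_scenario(self, worker=None):
                # migrate_wait() blocks until every host involved finishes
                # migrating self.vms from srchost to dsthost.
                self.migrate_wait(self.vms, self.srchost, self.dsthost,
                                  start_work=worker)

        TestMultihostMigration(test, params, env).run()
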
 kvm/tests/migration_multi_host.py                  |   75 ++---------
 kvm/tests/migration_multi_host_cancel.py           |   91 ++++++++++++
 .../migration_multi_host_downtime_and_speed.py     |  145 ++++++++++++++++++++
 .../migration_multi_host_with_file_transfer.py     |    8 +-
 .../migration_multi_host_with_speed_measurement.py |   12 +-
 shared/cfg/subtests.cfg.sample                     |   77 +++++++----
 6 files changed, 309 insertions(+), 99 deletions(-)
 create mode 100644 kvm/tests/migration_multi_host_cancel.py
 create mode 100644 kvm/tests/migration_multi_host_downtime_and_speed.py

diff --git a/kvm/tests/migration_multi_host.py b/kvm/tests/migration_multi_host.py
index b8fe032..122ee9d 100644
--- a/kvm/tests/migration_multi_host.py
+++ b/kvm/tests/migration_multi_host.py
@@ -3,6 +3,7 @@ from autotest.client.shared import error
 from virttest import utils_test, remote, virt_vm, utils_misc
 
 
+@error.context_aware
 def run_migration_multi_host(test, params, env):
     """
     KVM multi-host migration test:
@@ -14,7 +15,12 @@ def run_migration_multi_host(test, params, env):
     @param params: Dictionary with test parameters.
     @param env: Dictionary with the test environment.
     """
-    class TestMultihostMigration(utils_test.MultihostMigration):
+    mig_protocol = params.get("mig_protocol", "tcp")
+    base_class = utils_test.MultihostMigration
+    if mig_protocol == "fd":
+        base_class = utils_test.MultihostMigrationFd
+
+    class TestMultihostMigration(base_class):
         def __init__(self, test, params, env):
             super(TestMultihostMigration, self).__init__(test, params, env)
             self.srchost = self.params.get("hosts")[0]
@@ -23,71 +29,12 @@ def run_migration_multi_host(test, params, env):
 
 
         def migration_scenario(self, worker=None):
+            error.context("Migration from %s to %s over protocol %s." %
+                          (self.srchost, self.dsthost, mig_protocol),
+                          logging.info)
             self.migrate_wait(self.vms, self.srchost, self.dsthost,
                               start_work=worker)
 
 
-    class TestMultihostMigrationCancel(TestMultihostMigration):
-        def __init__(self, test, params, env):
-            super(TestMultihostMigrationCancel, self).__init__(test, params,
-                                                               env)
-            self.install_path = params.get("cpuflags_install_path", "/tmp")
-            self.vm_mem = int(params.get("mem", "512"))
-            self.srchost = self.params.get("hosts")[0]
-            self.dsthost = self.params.get("hosts")[1]
-            self.vms = params.get("vms").split()
-            self.id = {'src': self.srchost,
-                       'dst': self.dsthost,
-                       "type": "cancel_migration"}
-
-        def check_guest(self):
-            broken_vms = []
-            for vm in self.vms:
-                try:
-                    vm = env.get_vm(vm)
-                    session = vm.wait_for_login(timeout=self.login_timeout)
-                    session.sendline("killall -9 cpuflags-test")
-                except (remote.LoginError, virt_vm.VMError):
-                    broken_vms.append(vm)
-            if broken_vms:
-                raise error.TestError("VMs %s should work on src"
-                                      " host after canceling of"
-                                      " migration." % (broken_vms))
-            # Try migration again without cancel.
-
-        def migration_scenario(self):
-            def worker(mig_data):
-                vm = mig_data.vms[0]
-                session = vm.wait_for_login(timeout=self.login_timeout)
-
-                utils_misc.install_cpuflags_util_on_vm(test, vm,
-                                                       self.install_path,
-                                                   extra_flags="-msse3 -msse2")
-
-                cmd = ("%s/cpuflags-test --stressmem %d %%" %
-                           (os.path.join(self.install_path, "test_cpu_flags"),
-                            self.vm_mem / 2))
-                logging.debug("Sending command: %s" % (cmd))
-                session.sendline(cmd)
-
-            super_cls = super(TestMultihostMigrationCancel, self)
-            super_cls.migration_scenario(worker)
-
-            if params.get("hostid") == self.master_id():
-                self.check_guest()
-
-            self._hosts_barrier(self.hosts, self.id,
-                                'wait_for_cancel', self.login_timeout)
-
-            params["cancel_delay"] = None
-            super(TestMultihostMigrationCancel, self).migration_scenario()
-
-
-    mig = None
-    cancel_delay = params.get("cancel_delay", None)
-    if cancel_delay is None:
-        mig = TestMultihostMigration(test, params, env)
-    else:
-        mig = TestMultihostMigrationCancel(test, params, env)
-
+    mig = TestMultihostMigration(test, params, env)
     mig.run()
diff --git a/kvm/tests/migration_multi_host_cancel.py b/kvm/tests/migration_multi_host_cancel.py
new file mode 100644
index 0000000..0b5c20a
--- /dev/null
+++ b/kvm/tests/migration_multi_host_cancel.py
@@ -0,0 +1,91 @@
+import logging, os
+from virttest import utils_test, utils_misc, remote, virt_vm
+from autotest.client.shared import error
+from autotest.client.shared.syncdata import SyncData
+
+
+@error.context_aware
+def run_migration_multi_host_cancel(test, params, env):
+    """
+    KVM multi-host migration cancel test:
+
+    Start a migration, cancel it and check that the guests still work on the
+    source host, then migrate again without canceling. Only one machine is
+    migrated at a time; see MultihostMigration.migrate for the execution flow.
+
+    @param test: kvm test object.
+    @param params: Dictionary with test parameters.
+    @param env: Dictionary with the test environment.
+    """
+    mig_protocol = params.get("mig_protocol", "tcp")
+    base_class = utils_test.MultihostMigration
+    if mig_protocol == "fd":
+        base_class = utils_test.MultihostMigrationFd
+
+
+    class TestMultihostMigrationCancel(base_class):
+        def __init__(self, test, params, env):
+            super(TestMultihostMigrationCancel, self).__init__(test, params,
+                                                               env)
+            self.install_path = params.get("cpuflags_install_path", "/tmp")
+            self.vm_mem = int(params.get("mem", "512"))
+            self.srchost = self.params.get("hosts")[0]
+            self.dsthost = self.params.get("hosts")[1]
+            self.vms = params.get("vms").split()
+            self.id = {'src': self.srchost,
+                       'dst': self.dsthost,
+                       "type": "cancel_migration"}
+
+        def check_guest(self):
+            broken_vms = []
+            for vm in self.vms:
+                try:
+                    vm = env.get_vm(vm)
+                    session = vm.wait_for_login(timeout=self.login_timeout)
+                    session.sendline("killall -9 cpuflags-test")
+                except (remote.LoginError, virt_vm.VMError):
+                    broken_vms.append(vm)
+            if broken_vms:
+                raise error.TestError("VMs %s should keep working on the"
+                                      " source host after the migration"
+                                      " is canceled." % (broken_vms))
+            # Try migration again without cancel.
+
+        def migration_scenario(self):
+            srchost = self.params.get("hosts")[0]
+            dsthost = self.params.get("hosts")[1]
+
+            def worker(mig_data):
+                vm = mig_data.vms[0]
+                session = vm.wait_for_login(timeout=self.login_timeout)
+
+                utils_misc.install_cpuflags_util_on_vm(test, vm,
+                                                       self.install_path,
+                                                   extra_flags="-msse3 -msse2")
+
+                cmd = ("%s/cpuflags-test --stressmem %d %%" %
+                           (os.path.join(self.install_path, "test_cpu_flags"),
+                            self.vm_mem / 2))
+                logging.debug("Sending command: %s" % (cmd))
+                session.sendline(cmd)
+
+            error.context("Migration from %s to %s over protocol %s with high"
+                          " CPU and memory load." %
+                          (self.srchost, self.dsthost, mig_protocol),
+                          logging.info)
+            self.migrate_wait(["vm1"], srchost, dsthost, worker)
+            if params.get("hostid") == self.master_id():
+                self.check_guest()
+
+            self._hosts_barrier(self.hosts, self.id,
+                                'wait_for_cancel', self.login_timeout)
+
+            params["cancel_delay"] = None
+            error.context("Finish migration from %s to %s over protocol %s." %
+                          (self.srchost, self.dsthost, mig_protocol),
+                          logging.info)
+            self.migrate_wait(["vm1"], srchost, dsthost)
+
+
+    mig = TestMultihostMigrationCancel(test, params, env)
+    mig.run()
diff --git a/kvm/tests/migration_multi_host_downtime_and_speed.py b/kvm/tests/migration_multi_host_downtime_and_speed.py
new file mode 100644
index 0000000..1bba04a
--- /dev/null
+++ b/kvm/tests/migration_multi_host_downtime_and_speed.py
@@ -0,0 +1,145 @@
+import logging, os
+from autotest.client.shared import error
+from virttest import utils_test, remote, virt_vm, utils_misc
+from autotest.client.shared import utils
+
+
+def run_migration_multi_host_downtime_and_speed(test, params, env):
+    """
+    KVM multi-host migration test:
+
+    Migration execution progress is described in documentation
+    for migrate method in class MultihostMigration.
+
+    @param test: kvm test object.
+    @param params: Dictionary with test parameters.
+    @param env: Dictionary with the test environment.
+    """
+    class TestMultihostMigration(utils_test.MultihostMigration):
+        def __init__(self, test, params, env):
+            super(TestMultihostMigration, self).__init__(test, params, env)
+            self.install_path = params.get("cpuflags_install_path", "/tmp")
+            self.vm_mem = int(params.get("mem", "512"))
+            self.srchost = self.params.get("hosts")[0]
+            self.dsthost = self.params.get("hosts")[1]
+            self.vms = params.get("vms").split()
+
+            self.sub_type = self.params.get("sub_type", None)
+            self.max_downtime = int(self.params.get("max_mig_downtime", "10"))
+            self.min_speed = self.params.get("min_migration_speed", "10")
+            self.max_speed = self.params.get("max_migration_speed", "1000")
+            self.ch_speed = int(self.params.get("change_speed_interval", 1))
+            speed_count = float(self.params.get("count_of_change", 5))
+
+            self.min_speed = utils.convert_data_size(self.min_speed, "M")
+            self.max_speed = utils.convert_data_size(self.max_speed, "M")
+            self.speed_step = int((self.max_speed - self.min_speed) /
+                                                                  speed_count)
+
+            if self.sub_type == "downtime":
+                self.post_migration = self.post_migration_downtime
+            elif self.sub_type == "speed":
+                self.post_migration = self.post_migration_speed
+            else:
+                raise error.TestFail("Wrong subtest type selected: %s" %
+                                     (self.sub_type))
+
+
+        def mig_finished(self, vm):
+            ret = True
+            if (vm.params["display"] == "spice" and
+                vm.get_spice_var("spice_seamless_migration") == "on"):
+                s = vm.monitor.info("spice")
+                if isinstance(s, str):
+                    ret = "migrated: true" in s
+                else:
+                    ret = s.get("migrated") == "true"
+            o = vm.monitor.info("migrate")
+            if isinstance(o, str):
+                return ret and (not "status: active" in o)
+            else:
+                return ret and (o.get("status") != "active")
+
+
+        def wait_for_migration(self, vm, timeout):
+            if not utils_misc.wait_for(lambda: self.mig_finished(vm),
+                                       timeout,
+                                       2, 2,
+                                      "Waiting for migration to complete"):
+                raise virt_vm.VMMigrateTimeoutError("Timeout expired while"
+                                                    " waiting for migration"
+                                                    " to finish")
+
+
+        def post_migration_downtime(self, vm, cancel_delay, dsthost, vm_ports,
+                         not_wait_for_migration, fd):
+                downtime = 0
+                for downtime in range(1, self.max_downtime):
+                    try:
+                        self.wait_for_migration(vm, 10)
+                        break
+                    except virt_vm.VMMigrateTimeoutError:
+                        vm.monitor.migrate_set_downtime(downtime)
+                logging.debug("Migration pass with downtime %s" % (downtime))
+
+
+        def post_migration_speed(self, vm, cancel_delay, dsthost, vm_ports,
+                         not_wait_for_migration, fd):
+                # Increase the migration speed from min_speed towards
+                # max_speed in speed_step increments until the migration
+                # completes.
+                mig_speed = None
+                for mig_speed in range(self.min_speed,
+                                      self.max_speed,
+                                      self.speed_step):
+                    try:
+                        self.wait_for_migration(vm, 5)
+                        break
+                    except virt_vm.VMMigrateTimeoutError:
+                        vm.monitor.migrate_set_speed("%sB" % (mig_speed))
+
+                # Check the migration status. If the migration has not
+                # completed yet, kill the program that generates guest load.
+                try:
+                    self.wait_for_migration(vm, 5)
+                except virt_vm.VMMigrateTimeoutError:
+                    try:
+                        session = vm.wait_for_login(timeout=15)
+                        session.sendline("killall -9 cpuflags-test")
+                    except remote.LoginTimeoutError:
+                        try:
+                            self.wait_for_migration(vm, 5)
+                        except virt_vm.VMMigrateTimeoutError:
+                            raise error.TestFail("Migration wasn't successful"
+                                                 " and VM is not accessible.")
+                    self.wait_for_migration(vm, self.mig_timeout)
+                logging.debug("Migration pass with mig_speed %s" % (mig_speed))
+
+
+        def migrate_vms_src(self, mig_data):
+            super_cls = super(TestMultihostMigration, self)
+            super_cls.migrate_vms_src(mig_data)
+
+
+        def migration_scenario(self):
+            def worker(mig_data):
+                vm = mig_data.vms[0]
+                session = vm.wait_for_login(timeout=self.login_timeout)
+
+                utils_misc.install_cpuflags_util_on_vm(test, vm,
+                                                       self.install_path,
+                                                   extra_flags="-msse3 -msse2")
+
+                cmd = ("%s/cpuflags-test --stressmem %d %% &" %
+                           (os.path.join(self.install_path, "test_cpu_flags"),
+                            self.vm_mem / 2))
+                logging.debug("Sending command: %s" % (cmd))
+                session.sendline(cmd)
+
+            self.migrate_wait(self.vms, self.srchost, self.dsthost,
+                              start_work=worker)
+
+
+    mig = TestMultihostMigration(test, params, env)
+
+    mig.run()
diff --git a/kvm/tests/migration_multi_host_with_file_transfer.py b/kvm/tests/migration_multi_host_with_file_transfer.py
index a316c75..a4c7433 100644
--- a/kvm/tests/migration_multi_host_with_file_transfer.py
+++ b/kvm/tests/migration_multi_host_with_file_transfer.py
@@ -45,6 +45,11 @@ def run_migration_multi_host_with_file_transfer(test, params, env):
         transfer_speed: File transfer speed limit.
         guest_path: Path where file is stored on guest.
     """
+    mig_protocol = params.get("mig_protocol", "tcp")
+    base_class = utils_test.MultihostMigration
+    if mig_protocol == "fd":
+        base_class = utils_test.MultihostMigrationFd
+
     guest_root = params.get("guest_root", "root")
     guest_pass = params.get("password", "123456")
 
@@ -66,7 +71,7 @@ def run_migration_multi_host_with_file_transfer(test, params, env):
     #Count of migration during file transfer.
     migrate_count = int(params.get("migrate_count", "3"))
 
-    class TestMultihostMigration(utils_test.MultihostMigration):
+    class TestMultihostMigration(base_class):
         def __init__(self, test, params, env):
             super(TestMultihostMigration, self).__init__(test, params, env)
             self.vm = None
@@ -86,6 +91,7 @@ def run_migration_multi_host_with_file_transfer(test, params, env):
             @param mig_data: object with migration data.
             """
             for vm in mig_data.vms:
+                vm.resume()
                 if not utils_test.guest_active(vm):
                     raise error.TestFail("Guest not active after migration")
 
diff --git a/kvm/tests/migration_multi_host_with_speed_measurement.py b/kvm/tests/migration_multi_host_with_speed_measurement.py
index d929095..033207f 100644
--- a/kvm/tests/migration_multi_host_with_speed_measurement.py
+++ b/kvm/tests/migration_multi_host_with_speed_measurement.py
@@ -25,6 +25,11 @@ def run_migration_multi_host_with_speed_measurement(test, params, env):
     @param params: Dictionary with test parameters.
     @param env: Dictionary with the test environment.
     """
+    mig_protocol = params.get("mig_protocol", "tcp")
+    base_class = utils_test.MultihostMigration
+    if mig_protocol == "fd":
+        base_class = utils_test.MultihostMigrationFd
+
     install_path = params.get("cpuflags_install_path", "/tmp")
 
     vm_mem = int(params.get("mem", "512"))
@@ -71,7 +76,7 @@ def run_migration_multi_host_with_speed_measurement(test, params, env):
 
         return mig_stat
 
-    class TestMultihostMigration(utils_test.MultihostMigration):
+    class TestMultihostMigration(base_class):
         def __init__(self, test, params, env):
             super(TestMultihostMigration, self).__init__(test, params, env)
             self.mig_stat = None
@@ -99,10 +104,9 @@ def run_migration_multi_host_with_speed_measurement(test, params, env):
             For change way how machine migrates is necessary
             re implement this method.
             """
+            super_cls = super(TestMultihostMigration, self)
+            super_cls.migrate_vms_src(mig_data)
             vm = mig_data.vms[0]
-            vm.migrate(dest_host=mig_data.dst,
-                       remote_port=mig_data.vm_ports[vm.name],
-                       not_wait_for_migration=True)
             self.mig_stat = get_migration_statistic(vm)
 
         def migration_scenario(self):
diff --git a/shared/cfg/subtests.cfg.sample b/shared/cfg/subtests.cfg.sample
index 2286690..a4cabaa 100644
--- a/shared/cfg/subtests.cfg.sample
+++ b/shared/cfg/subtests.cfg.sample
@@ -1210,15 +1210,9 @@ variants:
                 variants:
                     #Migration protocol.
                     -tcp:
-                        variants:
-                            - @default:
-                                type = migration_multi_host
-                            - measure_migration_speed:
-                                only Linux
-                                mig_speed = 1G
-                                type = migration_multi_host_with_speed_measurement
+                        mig_protocol = "tcp"
                     -fd:
-                        type = migration_multi_host_fd
+                        mig_protocol = "fd"
 
                 variants:
                     #Time when start migration
@@ -1235,30 +1229,53 @@ variants:
                                 start_migration_timeout = 6
 
                 variants:
-                    # Migration cancel
-                    - @no_cancel:
+                    # Migration properties
+                    - @default:
+                        type = migration_multi_host
                     - cancel_with_delay:
-                        no measure_migration_speed
+                        type = migration_multi_host_cancel
+                        only after_login_vm
                         cancel_delay = 10
-
-            - migration_multi_host_with_file_transfer: install setup image_copy unattended_install.cdrom
-                type = migration_multi_host_with_file_transfer
-                vms = "vm1"
-                start_vm = no
-                kill_vm_on_error = yes
-                used_mem = 1024
-                mig_timeout = 4800
-                disk_prepare_timeout = 360
-                comm_port = 13234
-                #path where file is stored on guest.
-                guest_path = "/tmp/file"
-                #size of generated file.
-                file_size = 500
-                transfer_timeout = 240
-                #Transfer speed in Mb
-                transfer_speed = 100
-                #Count of migration during file transfer.
-                migrate_count = 3
+                    - measure_speed:
+                        only Linux
+                        only after_login_vm
+                        not_wait_for_migration = yes
+                        mig_speed = 1G
+                        type = migration_multi_host_with_speed_measurement
+                    - with_file_transfer:
+                        only Linux
+                        only after_login_vm
+                        type = migration_multi_host_with_file_transfer
+                        comm_port = 13234
+                        # Path where the file is stored on the guest.
+                        guest_path = "/tmp/file"
+                        # Size of the generated file in MB.
+                        file_size = 512
+                        transfer_timeout = 440
+                        # File transfer speed limit in Mb.
+                        transfer_speed = 300
+                        # Number of migrations during the file transfer.
+                        migrate_count = 3
+                    - downtime:
+                        only after_login_vm
+                        sub_type = downtime
+                        # Maximum migration downtime, in seconds.
+                        max_mig_downtime = 10
+                        not_wait_for_migration = yes
+                        type = migration_multi_host_downtime_and_speed
+                    - speed:
+                        only after_login_vm
+                        sub_type = speed
+                        # Migration speed limits in MB.
+                        min_migration_speed = 10M
+                        max_migration_speed = 500M
+                        # Interval, in seconds, between changes of the
+                        # migration speed.
+                        change_speed_interval = 1
+                        # Number of speed steps between min and max speed.
+                        count_of_change = 10
+                        not_wait_for_migration = yes
+                        type = migration_multi_host_downtime_and_speed
 
             - cpuflags_multi_host:
                 type = cpuflags
-- 
1.7.7.6
