On Wed, Jun 17, 2009 at 2:59 AM, Lucas Meneghel Rodrigues<lmr@xxxxxxxxxx> wrote: > Adding iperf network performance test. Basically it tests > networking functionality, stability and performance of guest OSes. > This test is cross-platform -- i.e. it works on both Linux and > Windows VMs. > I have a question here. Why are we adding iperf in a way different from other tests? We have a client/tests/<different_tests> directory for each test, which contains the Python modules and the test tarball. Then why, in the case of iperf, are we putting it under client/tests/kvm and modifying kvm.py instead of making the test suite part of autotest (is run_autotest not enough)? Even if we do not want to touch the existing iperf test in autotest, we can use a separate name like kvm_iperf. Somehow I have a feeling that there was a discussion on the list about keeping tests under a particular directory. But I still feel that should apply only to tests specific to KVM, not to the guest. Is there any disadvantage to the current approach of executing these test suites? 
> Signed-off-by: Alexey Eromenko <aeromenk@xxxxxxxxxx> > --- > client/tests/kvm/kvm.py | 1 + > client/tests/kvm/kvm_iperf.py | 105 +++++++++++++++++++++++++++++++++ > client/tests/kvm/kvm_tests.cfg.sample | 8 +++ > 3 files changed, 114 insertions(+), 0 deletions(-) > create mode 100644 client/tests/kvm/kvm_iperf.py > > diff --git a/client/tests/kvm/kvm.py b/client/tests/kvm/kvm.py > index 9428162..e1a6e27 100644 > --- a/client/tests/kvm/kvm.py > +++ b/client/tests/kvm/kvm.py > @@ -53,6 +53,7 @@ class kvm(test.test): > "autotest": test_routine("kvm_tests", "run_autotest"), > "kvm_install": test_routine("kvm_install", "run_kvm_install"), > "linux_s3": test_routine("kvm_tests", "run_linux_s3"), > + "iperf": test_routine("kvm_iperf", "run_iperf"), > } > > # Make it possible to import modules from the test's bindir > diff --git a/client/tests/kvm/kvm_iperf.py b/client/tests/kvm/kvm_iperf.py > new file mode 100644 > index 0000000..927c9e5 > --- /dev/null > +++ b/client/tests/kvm/kvm_iperf.py > @@ -0,0 +1,105 @@ > +import time, os, logging > +from autotest_lib.client.common_lib import utils, error > +import kvm_utils > + > +def run_iperf(test, params, env): > + """ > + Runs iperf on the guest system and brings back the result. 
> + > + @see: http://sourceforge.net/projects/iperf > + @param test: kvm test object > + @param params: Dictionary with test parameters > + @param env: Test environment > + """ > + vm = kvm_utils.env_get_vm(env, params.get("main_vm")) > + if not vm: > + message = "VM object not found in environment" > + logging.error(message) > + raise error.TestError, message > + if not vm.is_alive(): > + message = "VM seems to be dead; Test requires a living VM" > + logging.error(message) > + raise error.TestError(message) > + > + logging.info("Waiting for guest to be up...") > + > + session = kvm_utils.wait_for(vm.ssh_login, 240, 0, 2) > + if not session: > + message = "Could not log into guest" > + logging.error(message) > + raise error.TestFail, message > + > + logging.info("Logged in") > + > + # Checking for GuestOS-compatible iPerf binary existence on host. > + iperf_binary = params.get("iperf_binary", "misc/iperf") > + iperf_duration = params.get("iperf_duration", 5) > + iperf_parallel_threads = params.get("iperf_parallel_threads", 1) > + iperf_dest_ip = params.get("iperf_dest_ip", "10.0.2.2") > + iperf_binary = os.path.join(test.bindir, iperf_binary) > + if not os.path.exists(iperf_binary): > + message = "iPerf binary: %s was not found on host" % iperf_binary > + logging.error(message) > + raise error.TestError, message > + else: > + logging.info("iPerf binary: %s was found on host" % iperf_binary) > + > + # Starting HostOS-compatible iPerf Server on host > + logging.info('VM is up ... \n starting iPerf Server on host') > + kvm_utils.run_bg("iperf -s", timeout=5) > + > + # Detecting GuestOS > + if iperf_binary.__contains__("exe"): > + vm_type="win32" > + else: > + vm_type="linux32" > + > + # Copying GuestOS-compatible iPerf binary to guest. > + # Starting iPerf Client on guest, plus connect to host. 
> + if vm_type == "win32": > + win_dir = "/cygdrive/c/" > + logging.info('starting copying %s to Windows VM to %s' % (iperf_binary, > + win_dir)) > + if not vm.scp_to_remote(iperf_binary, win_dir): > + message = "Could not copy Win32 iPerf to guest" > + logging.error(message) > + raise error.TestError(message) > + logging.debug("Enabling file permissions of iPerf.exe on Windows VM...") > + session.sendline('cacls C:\iperf.exe /P Administrator:F') > + session.sendline('y') > + session.sendline('') > + time.sleep(2) > + session.sendline('') > + logging.info("starting iPerf client on Windows VM, connecting to host") > + session.sendline('C:\iperf -t %s -c %s -P %s' % (int(iperf_duration), > + iperf_dest_ip, > + int(iperf_parallel_threads))) > + else: > + logging.info('starting copying %s to Linux VM ' % iperf_binary) > + if not vm.scp_to_remote(iperf_binary, "/usr/local/bin"): > + message = "Could not copy Linux iPerf to guest" > + logging.error(message) > + raise error.TestError, message > + print "starting iPerf client on VM, connecting to host" > + session.sendline('iperf -t %s -c %s -P %s' % (int(iperf_duration), > + iperf_dest_ip, > + int(iperf_parallel_threads))) > + > + # Analyzing results > + iperf_result_match, iperf_result = session.read_up_to_prompt() > + logging.debug("iperf_result =", iperf_result) > + > + if iperf_result.__contains__(" 0.00 bits/sec"): > + msg = 'Guest returned 0.00 bits/sec during iperf test.' > + raise error.TestError(msg) > + elif iperf_result.__contains__("No route to host"): > + msg = 'SSH to guest returned: No route to host.' > + raise error.TestError(msg) > + elif iperf_result.__contains__("Access is denied"): > + msg = 'SSH to guest returned: Access is denied.' > + raise error.TestError(msg) > + elif not iperf_result.__contains__("bits/sec"): > + msg = 'SSH result unrecognizeable.' 
> + raise error.TestError(msg) > + > + session.close() > diff --git a/client/tests/kvm/kvm_tests.cfg.sample b/client/tests/kvm/kvm_tests.cfg.sample > index 2c0b321..931f748 100644 > --- a/client/tests/kvm/kvm_tests.cfg.sample > +++ b/client/tests/kvm/kvm_tests.cfg.sample > @@ -82,6 +82,10 @@ variants: > - linux_s3: install setup > type = linux_s3 > > + - iperf: install setup > + type = iperf > + extra_params += " -snapshot" > + > # NICs > variants: > - @rtl8139: > @@ -102,6 +106,8 @@ variants: > ssh_status_test_command = echo $? > username = root > password = 123456 > + iperf: > + iperf_binary = misc/iperf > > variants: > - Fedora: > @@ -292,6 +298,8 @@ variants: > password = 123456 > migrate: > migration_test_command = ver && vol > + iperf: > + iperf_binary = misc/iperf.exe > > variants: > - Win2000: > -- > 1.6.2.2 > > -- > To unsubscribe from this list: send the line "unsubscribe kvm" in > the body of a message to majordomo@xxxxxxxxxxxxxxx > More majordomo info at http://vger.kernel.org/majordomo-info.html > -- Sudhir Kumar -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html