On Mon, Aug 03, 2009 at 02:37:29PM +0300, Dor Laor wrote:
> On 08/03/2009 12:19 PM, Yolkfull Chow wrote:
>> On 06/30/2009 09:58 PM, Dor Laor wrote:
>>> On 06/30/2009 02:11 PM, Yolkfull Chow wrote:
>>>> Signed-off-by: Yolkfull Chow <yzhou@xxxxxxxxxx>
>>>> ---
>>>>  client/tests/kvm/kvm.py               |    1 +
>>>>  client/tests/kvm/kvm_tests.cfg.sample |   56 ++++++++++++++++++++
>>>>  client/tests/kvm/kvm_tests.py         |   93 +++++++++++++++++++++++++++++++++
>>>>  client/tests/kvm/kvm_vm.py            |    2 +
>>>>  4 files changed, 152 insertions(+), 0 deletions(-)
>>>>
>>>> diff --git a/client/tests/kvm/kvm.py b/client/tests/kvm/kvm.py
>>>> index 4c7bae4..4fbce5b 100644
>>>> --- a/client/tests/kvm/kvm.py
>>>> +++ b/client/tests/kvm/kvm.py
>>>> @@ -55,6 +55,7 @@ class kvm(test.test):
>>>>          "kvm_install": test_routine("kvm_install", "run_kvm_install"),
>>>>          "linux_s3": test_routine("kvm_tests", "run_linux_s3"),
>>>>          "stress_boot": test_routine("kvm_tests", "run_stress_boot"),
>>>> +        "pci_hotplug": test_routine("kvm_tests", "run_pci_hotplug"),
>>>
>>> Cool! This is very good to have, since hotplug tends to break.
>>>
>>>>          }
>>>>
>>>>      # Make it possible to import modules from the test's bindir
>>>> diff --git a/client/tests/kvm/kvm_tests.cfg.sample b/client/tests/kvm/kvm_tests.cfg.sample
>>>> index 2f864de..50b5765 100644
>>>> --- a/client/tests/kvm/kvm_tests.cfg.sample
>>>> +++ b/client/tests/kvm/kvm_tests.cfg.sample
>>>> @@ -94,6 +94,52 @@ variants:
>>>>          max_vms = 5
>>>>          alive_test_cmd = ps aux
>>>>
>>>> +
>>>> +    - nic_hotplug:
>>>> +        type = pci_hotplug
>>>> +        pci_type = nic
>>>> +        modprobe_acpiphp = yes
>>>> +        reference_cmd = lspci
>>>> +        find_pci_cmd = 'lspci | tail -n1'
>>>> +        pci_test_cmd = 'nslookup www.redhat.com'
>>>> +        variants:
>>>> +            - @nic_8139:
>>>> +                pci_model = rtl8139
>>>> +                match_string = "8139"
>>>> +            - nic_virtio:
>>>> +                pci_model = virtio
>>>> +                match_string = "Virtio network device"
>>>> +            - nic_e1000:
>>>> +                pci_model = e1000
>>>> +                match_string = "Gigabit Ethernet Controller"
>>>> +
>>>> +    - block_hotplug:
>>>> +        type = pci_hotplug
>>>> +        pci_type = block
>>>> +        modprobe_acpiphp = yes
>>>> +        reference_cmd = lspci
>>>> +        find_pci_cmd = 'lspci | tail -n1'
>>>> +        images += " stg"
>>>> +        boot_drive_stg = no
>>>> +        image_name_stg = storage
>>>> +        image_size = 1G
>>>> +        force_create_image_stg = yes
>>>> +        pci_test_cmd = 'dir'
>>>> +        no Windows
>>>> +        variants:
>>>> +            - block_virtio:
>>>> +                pci_model = virtio
>>>> +                match_string = "Virtio block device"
>>>> +            - block_scsi:
>>>> +                pci_model = scsi
>>>> +                match_string = "SCSI storage controller"
>>>> +        variants:
>>>
>>> There is no need to test qcow2/raw here since the image format shouldn't
>>> matter. Testing qcow2 only is enough.
>>
>> Hi Glauber, following Dor's comment on this I did some testing and got an
>> interesting result for block_hotplug:
>> 1) hotplug of raw + scsi storage always fails on Windows
>> 2) hotplug of raw + virtio storage always fails on Fedora
>> 3) hotplug of raw storage also fails often on RHEL
>>
>> Does block_hotplug depend on the image format? Could you give me any clue
>> on this?
>
> It shouldn't matter. If the test is sensitive to timeouts, it might.
>
> Can you describe what's working and what's not in each combination?
> As for scsi, it is not reliable, so it might be scsi's fault.
>
> Can you provide the fdisk -l output on Fedora when it is not working?
> In the test below, there is no delay/event that lets the guest hook up
> the new block device.
> Maybe you need to do several retries in a loop, or check for a real event
> in the guest (the better option).
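
A minimal sketch of that polling idea, assuming the framework's
kvm_utils.wait_for helper (the same one the patch uses for vm.ssh_login) and
an open session object; the function name, the /proc/partitions probe, and
the 30-second timeout are illustrative, not part of the patch:

    import kvm_utils

    def wait_for_new_block_device(session, devices_before, timeout=30):
        """
        Poll the guest until its partition list differs from the snapshot
        taken before pci_add, instead of sleeping a fixed time.

        devices_before: output of 'cat /proc/partitions' captured in the
        guest just before the hotplug was issued.
        """
        def new_device_present():
            current = session.get_command_output("cat /proc/partitions")
            return current != devices_before

        # kvm_utils.wait_for retries the function until it returns a true
        # value or the timeout expires (func, timeout, start_delay, step).
        return kvm_utils.wait_for(new_device_present, timeout, 0, 2)

If this returns a false value, the test can fail with an explicit timeout
message rather than tripping over a race.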
I can now confirm that there is a bug in the block hotplug feature, since a
segfault showed up in dmesg on my Fedora laptop. Also, both a RHEL-5.3-i386
guest and a Windows 2008 guest have crashed while running the block_hotplug
test. For example, on a Windows 2008-32 guest, issuing the command
'systeminfo' during block_hotplug can sometimes crash the guest.

I have added a loop that waits for the PCI device to be hooked up; before
this, I used sleep(some_seconds) to wait for the module to be installed.
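
For reference, the kind of loop described would slot into run_pci_hotplug
(quoted further below) in place of the fixed time.sleep(5), reusing the
function's own variables (session, ref_cmd, reference) and the error and
kvm_utils modules already imported in kvm_tests.py; the 30-second timeout is
an assumption:

    # Instead of: time.sleep(5)
    # Retry the reference command until its output changes, i.e. until
    # the guest has actually noticed the new PCI device.
    def guest_sees_new_device():
        return session.get_command_output(ref_cmd) != reference

    if not kvm_utils.wait_for(guest_sees_new_device, 30, 0, 2):
        raise error.TestFail("Guest never showed a new device in '%s' "
                             "output" % ref_cmd)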
>>>> + """ >>>> + vm = kvm_utils.env_get_vm(env, params.get("main_vm")) >>>> + if not vm: >>>> + raise error.TestError("VM object not found in environment") >>>> + if not vm.is_alive(): >>>> + raise error.TestError("VM seems to be dead; Test requires a living >>>> VM") >>>> + >>>> + logging.info("Waiting for guest to be up...") >>>> + >>>> + session = kvm_utils.wait_for(vm.ssh_login, 240, 0, 2) >>>> + if not session: >>>> + raise error.TestFail("Could not log into guest") >>>> + >>>> + logging.info("Logged in") >>>> + >>>> + # modprobe the module that enable hotplug >>>> + if params.get("modprobe_acpiphp") == "yes": >>>> + if session.get_command_status("modprobe acpiphp"): >>>> + raise error.TestError("Modprobe module 'acpiphp' failed") >>>> + >>>> + # get reference output >>>> + s, info_pci_ref = vm.send_monitor_cmd("info pci") >>>> + >>>> + # compare the output of `reference_cmd` >>>> + ref_cmd = params.get("reference_cmd") >>>> + reference = session.get_command_output(ref_cmd) >>>> + >>>> + # implement pci hotplug >>>> + tested_model = params.get("pci_model") >>>> + logging.info("Testing hotplug pci device:%s" % tested_model) >>>> + >>>> + test_type = params.get("pci_type") >>>> + if test_type == "nic": >>>> + pci_add_cmd = "pci_add pci_addr=auto nic model=%s" % tested_model >>>> + s, add_output = vm.send_monitor_cmd(pci_add_cmd) >>> >>> You're basing the assumption that the VM was raised with -net tap or >>> -net user with the right vlan. >>> Actually it would be nice to hot add the host back end too (available >>> on newer kvm's). It's not a must. >>> >>> >>>> + >>>> + elif test_type == "block": >>>> + image_name = params.get("image_name_stg") >>>> + image_format = params.get("image_format_stg", "qcow2") >>>> + image_filename = "%s.%s" % (image_name, image_format) >>>> + image_dir = os.path.join(test.bindir, "images") >>>> + storage_name = os.path.join(image_dir, image_filename) >>>> + pci_add_cmd = "pci_add pci_addr=auto storage file=%s,if=%s" % \ >>>> + (storage_name, tested_model) >>>> + s, add_output = vm.send_monitor_cmd(pci_add_cmd) >>>> + >>>> + if not "OK domain" in add_output: >>>> + raise error.TestFail("Command failed: %s" % pci_add_cmd) >>>> + >>>> + # compare the output of 'info pci' >>>> + s, after_add = vm.send_monitor_cmd("info pci") >>>> + if after_add == info_pci_ref: >>>> + raise error.TestFail("No new pci device shown in 'info pci'") >>>> + >>>> + time.sleep(5) >>>> + >>>> + o = session.get_command_output(ref_cmd) >>>> + if reference == o: >>>> + raise error.TestFail("No new device shown in cmd: %s" % ref_cmd) >>>> + >>>> + cmd = params.get("find_pci_cmd") >>>> + output = session.get_command_output(cmd) >>>> + if not params.get("match_string") in output: >>>> + raise error.TestFail("Not found pci model: %s; Command is: %s" % ( \ >>>> + tested_model, cmd)) >>>> + >>>> + # del pci device >>>> + slot_id = "0" + add_output.split(",")[2].split()[1] >>>> + cmd = "pci_del pci_addr=%s" % slot_id >>>> + s, after_del = vm.send_monitor_cmd(cmd) >>>> + if after_del == after_add: >>>> + raise error.TestFail("Failed to hot remove pci device:%s; \ >>>> + Command: %s" % (tested_model, cmd)) >>>> + >>>> + # check whether VM's network& disk work fine >>>> + if session.get_command_status(params.get("pci_test_cmd")): >>>> + raise error.TestFail("VM's %s damaged after pci_hotplug" % test_type) >>> >>> Doesn't it need to fail after the hot delete? 
>>>
>>>> +
>>>> +    session.close()
>>>> diff --git a/client/tests/kvm/kvm_vm.py b/client/tests/kvm/kvm_vm.py
>>>> index 503f636..95b55eb 100644
>>>> --- a/client/tests/kvm/kvm_vm.py
>>>> +++ b/client/tests/kvm/kvm_vm.py
>>>> @@ -239,6 +239,8 @@ class VM:
>>>>
>>>>          for image_name in kvm_utils.get_sub_dict_names(params, "images"):
>>>>              image_params = kvm_utils.get_sub_dict(params, image_name)
>>>> +            if image_params.get("boot_drive") == "no":
>>>> +                continue
>>>>              qemu_cmd += " -drive file=%s" % get_image_filename(image_params, image_dir)
>>>>              if image_params.get("drive_format"):
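
One more note on the slot extraction above: slot_id = "0" +
add_output.split(",")[2].split()[1] silently depends on the exact field
order of the monitor's reply. A regex states the intent and fails loudly
when the format changes; this sketch assumes a reply of the form
'OK domain 0, bus 0, slot 4, function 0' and keeps the patch's leading '0'
('error' is autotest's error module, already imported in kvm_tests.py):

    import re

    def parse_hotplug_slot(add_output):
        """
        Extract the slot number from the monitor's pci_add reply,
        e.g. 'OK domain 0, bus 0, slot 4, function 0'.
        """
        match = re.search(r"slot\s+(\d+)", add_output)
        if not match:
            raise error.TestError("Unexpected pci_add output: %r" % add_output)
        # Keep the original leading '0' so the result can be passed
        # straight to 'pci_del pci_addr=...'.
        return "0" + match.group(1)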