Hello all,
as promised, here is a new version with the modifications you asked
for. It is a complete package based on the newest git version.
[Changelog]
- new structure (tests_base.cfg, ...)
- improved logging
- get_stat() raises an error when accessing a dead VM
- get_stat() split into two functions: _get_stat() returns an int,
  get_stat() returns a log string
- sessions are closed with session.close() instead of
  get_command_status('exit;')
- the VM PID is obtained via the -pidfile option (RFC: it would be nice
  to have this in the framework by default; see the sketch below)
- fixed a possible infinite loop (added the i = i + 1 increment)
- a 32-bit host supports guests up to 3.1GB, 64-bit hosts have no limit;
  detection is based on the image file name
[Not changed]
- we skip merging the serial and parallel init functions, as the result
  would be considerably more complicated (= more room for errors)
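
To illustrate the -pidfile item: a minimal sketch of reading the PID
back. read_qemu_pid() is an illustrative helper, not a framework API;
the patch itself appends " -pidfile <path>" to extra_params and parses
the file after the guest logs in:

    import os, time

    def read_qemu_pid(pidfile, timeout=10):
        # qemu's -pidfile option makes qemu write its PID into the given
        # file; poll until the file appears, then parse it
        end = time.time() + timeout
        while time.time() < end:
            if os.path.exists(pidfile):
                return int(open(pidfile).readline())
            time.sleep(0.1)
        raise OSError("pidfile %s never appeared" % pidfile)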
From 3420916facae18f45617e3c25c365eaa59c0374c Mon Sep 17 00:00:00 2001
From: Lukáš Doktor <medic@xxxxxxxxxxxxxxxx>
Date: Fri, 18 Dec 2009 15:56:31 +0100
Subject: [KSM-autotest] KSM overcommit v2 modification
[Changelog]
- new structure (tests_base.cfg, ...)
- improved logging
- get_stat() raises an error when accessing a dead VM
- get_stat() split into two functions: _get_stat() returns an int, get_stat() returns a log string
- the VM PID is obtained via the -pidfile option (RFC: it would be nice to have this in the framework by default)
- fixed a possible infinite loop (added the i = i + 1 increment)
- a 32-bit host supports guests up to 3.1GB, 64-bit hosts have no limit; detection is based on the image file name
[Not changed]
- we skip merging the serial and parallel init functions, as the result would be considerably more complicated (= more room for errors)
---
client/tests/kvm/tests/ksm_overcommit.py | 616 ++++++++++++++++++++++++++++++
client/tests/kvm/tests_base.cfg.sample | 18 +
client/tests/kvm/unattended/allocator.py | 213 ++++++++++
3 files changed, 847 insertions(+), 0 deletions(-)
create mode 100644 client/tests/kvm/tests/ksm_overcommit.py
create mode 100644 client/tests/kvm/unattended/allocator.py
diff --git a/client/tests/kvm/tests/ksm_overcommit.py b/client/tests/kvm/tests/ksm_overcommit.py
new file mode 100644
index 0000000..a726e1c
--- /dev/null
+++ b/client/tests/kvm/tests/ksm_overcommit.py
@@ -0,0 +1,616 @@
+import logging, time
+from autotest_lib.client.common_lib import error
+import kvm_subprocess, kvm_test_utils, kvm_utils
+import kvm_preprocessing
+import random, string, math, os
+
+def run_ksm_overcommit(test, params, env):
+ """
+ Test how KSM (Kernel Shared Memory) behaves when more memory than the
+ physical is used. The second part also tests how KVM handles the
+ situation when the host runs out of memory (the expected behaviour is
+ to pause the guest, wait until some process returns memory and bring
+ the guest back to life).
+
+ @param test: kvm test object.
+ @param params: Dictionary with test parameters.
+ @param env: Dictionary with the test environment.
+ """
+
+ def parse_meminfo(rowName):
+ """
+ Get a row from /proc/meminfo.
+
+ @param rowName: name of the row in meminfo
+ @return: (name, amount, unit) tuple of the matching row
+ """
+ for line in open('/proc/meminfo').readlines():
+ if line.startswith(rowName+":"):
+ name, amt, unit = line.split()
+ return name, amt, unit
+
+ def parse_meminfo_value(rowName):
+ """
+ Return the amount column of a meminfo row (as a string).
+
+ @param rowName: name of the row in meminfo
+ """
+ name, amt, unit = parse_meminfo(rowName)
+ return amt
+
+ def _get_stat(vm):
+ if vm.is_dead():
+ raise error.TestError("_get_stat: Trying to get information of a"\
+ " dead VM: %s" % vm.name)
+ try:
+ cmd = "cat /proc/%d/statm" % params.get('pid_'+vm.name)
+ shm = int(os.popen(cmd).readline().split()[2])
+ # statm reports sizes in 4kB pages; recalculate to MB
+ shm = shm * 4 / 1024
+ except:
+ raise error.TestError("_get_stat: Could not fetch shmem info from"\
+ "VM: %s" % vm.name)
+ return shm
+
+ def get_stat(lvms):
+ """
+ Get statistics in format:
+ Host: memfree = XXXM; Guests memsh = {XXX,XXX,...}
+
+ @params lvms: List of VMs
+ """
+ if not isinstance(lvms, list):
+ raise error.TestError("get_stat: parameter have to be proper list")
+
+ try:
+ stat = "Host: memfree = "
+ stat += str(int(parse_meminfo_value("MemFree")) / 1024) + "M; "
+ stat += "swapfree = "
+ stat += str(int(parse_meminfo_value("SwapFree")) / 1024) + "M; "
+ except:
+ raise error.TestFail("Could not fetch free memory info")
+
+
+ stat += "Guests memsh = {"
+ for vm in lvms:
+ stat += "%dM; " % (_get_stat(vm))
+ stat = stat[0:-2] + "}"
+ return stat
+
+ def tmp_file(file, ext=None, dir='/tmp/'):
+ while True:
+ file_name = (file + '-' + time.strftime("%Y%m%d-%H%M%S-") +
+ kvm_utils.generate_random_string(4))
+ if ext:
+ file_name += '.' + ext
+ file_name = os.path.join(dir, file_name)
+ if not os.path.exists(file_name):
+ break
+ return file_name
+
+ logging.info("Starting phase 0: Initialization")
+ # host_reserve: memory reserve kept for the host system to run
+ host_reserve = 256
+ # guest_reserve: memory reserve not used by the allocator on the guests
+ guest_reserve = 256
+ max_vms = params.get("max_vms")
+ if max_vms:
+ max_vms = int(max_vms)
+ else:
+ max_vms = 2
+
+
+ overcommit = params.get("ksm_overcommit_ratio")
+ if overcommit:
+ overcommit = float(overcommit)
+ else:
+ overcommit = 2.0
+
+ max_alloc = params.get("ksm_paralel_ratio")
+ if max_alloc:
+ max_alloc = int(max_alloc)
+ else:
+ max_alloc = 1
+
+
+ # vmsc: count of all used VMs
+ vmsc = int(overcommit) + 1
+ vmsc = max(vmsc, max_vms)
+
+ if (params['ksm_test_size'] == "serial"):
+ max_alloc = vmsc
+
+ host_mem = (int(parse_meminfo_value("MemTotal")) / 1024 - host_reserve)
+
+ ksm_swap = False
+ if params.get("ksm_swap") == "yes":
+ ksm_swap = True
+
+ # Performance ratio
+ perf_ratio = params.get("ksm_perf_ratio")
+ if perf_ratio:
+ perf_ratio = float(perf_ratio)
+ else:
+ perf_ratio = 1
+
+ if (params['ksm_test_size'] == "paralel") :
+ vmsc = 1
+ overcommit = 1
+ mem = host_mem
+ # 32bit system adjustment
+ if not params['image_name'].endswith("64"):
+ logging.debug("Probably i386 guest architecture, "\
+ "max allocator mem = 2G")
+ # Guest can have more than 2G but kvm mem + 1MB (allocator itself)
+ # can't
+ if (host_mem > 3100):
+ mem = 3100
+
+
+ if os.popen("uname -i").readline().startswith("i386"):
+ logging.debug("Host is i386 architecture, max guest mem is 2G")
+ # Guest system with qemu overhead (64M) can't have more than 2G
+ if mem > 3100 - 64:
+ mem = 3100 - 64
+
+ else:
+ # mem: Memory of the guest systems. Maximum must be less than amount of
+ # the host's physical ram
+ mem = int(overcommit * host_mem / vmsc)
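+ # e.g. with host_mem = 4096M, overcommit = 2.0 and vmsc = 3 each
+ # guest gets mem = int(2.0 * 4096 / 3) = 2730M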
+
+ # 32bit system adjustment
+ if not params['image_name'].endswith("64"):
+ logging.debug("Probably i386 guest architecture, "\
+ "max allocator mem = 2G")
+ # Guest can have more than 2G but kvm mem + 1MB (allocator itself)
+ # can't
+ if mem-guest_reserve-1 > 3100:
+ vmsc = int(math.ceil((host_mem*overcommit) / \
+ (3100+guest_reserve)))
+ mem = int(math.floor(host_mem*overcommit/vmsc))
+
+ if os.popen("uname -i").readline().startswith("i386"):
+ logging.debug("Host is i386 architecture, max guest mem is 2G")
+ # Guest system with qemu overhead (64M) can't have more than 2G
+ if mem > 3100 - 64:
+ vmsc = int(math.ceil((host_mem*overcommit)/(3100 - 64.0)))
+ mem = int(math.floor(host_mem*overcommit/vmsc))
+
+ logging.debug("Check KSM status...")
+ ksm_flag = 0
+ for line in os.popen('ksmctl info').readlines():
+ if line.startswith('flags'):
+ ksm_flag = int(line.split(' ')[1].split(',')[0])
+ if int(ksm_flag) != 1:
+ logging.info("KSM is not running, trying to start it.")
+ if os.system("modprobe ksm && ksmctl start 5000 100") != 0:
+ raise error.TestFail("Failed to start KSM!")
+ else:
+ logging.info("KSM started successfully.")
+ else:
+ logging.debug("KSM is already running.")
+
+
+ logging.info("overcommit = %f" % (overcommit))
+ logging.info("true overcommit = %f " % (float(vmsc*mem) / float(host_mem)))
+ logging.info("host mem = %dM" % (host_mem))
+ logging.info("mem = %dM" % (mem))
+ logging.info("using swap = %s" % (ksm_swap))
+ logging.info("swap = %dM" %\
+ (int(parse_meminfo_value("SwapTotal")) / 1024))
+ logging.info("max_vms = %d" % (max_vms))
+ logging.info("vmsc = %d" % (vmsc))
+ logging.info("performance_ratio = %f" % (perf_ratio))
+
+ # Generate unique keys for random series
+ skeys = []
+ dkeys = []
+ for i in range(0, max(vmsc, max_alloc)):
+ key = random.randrange(0,255)
+ while key in skeys:
+ key = random.randrange(0,255)
+ skeys.append(key)
+
+ key = random.randrange(0,999)
+ while key in dkeys:
+ key = random.randrange(0,999)
+ dkeys.append(key)
+
+ lvms = []
+ lsessions = []
+
+ # As we don't know the number and memory amount of VMs in advance, we need
+ # to specify and create them here (FIXME: not a nice thing)
+ vm_name = params.get("main_vm")
+ params['mem'] = mem
+ params['vms'] = vm_name
+ # Associate pidfile name
+ params['pid_'+vm_name] = tmp_file(vm_name, 'pid')
+ if not params.get('extra_params'):
+ params['extra_params'] = ' '
+ params['extra_params_'+vm_name] = params.get('extra_params')
+ params['extra_params_'+vm_name] += " -pidfile %s" % \
+ (params.get('pid_'+vm_name))
+ params['extra_params'] = params.get('extra_params_'+vm_name)
+
+ # ksm_size: amount of memory used by allocator
+ ksm_size = mem - guest_reserve
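+ # e.g. mem = 2730M and guest_reserve = 256M give ksm_size = 2474M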
+ logging.info("ksm_size = %dM" % (ksm_size))
+
+ # Create the first guest
+ kvm_preprocessing.preprocess_vm(test, params, env, vm_name)
+ lvms.append(kvm_utils.env_get_vm(env, vm_name))
+ if not lvms[0]:
+ raise error.TestError("VM object not found in environment")
+ if not lvms[0].is_alive():
+ raise error.TestError("VM seems to be dead; Test requires a living VM")
+
+ logging.info("Booting the first guest %s" % lvms[0].name)
+
+ lsessions.append(kvm_utils.wait_for(lvms[0].remote_login, 360, 0, 2))
+ if not lsessions[0]:
+ raise error.TestFail("Could not log into first guest")
+ # Associate vm PID
+ try:
+ tmp = open(params.get('pid_'+vm_name), 'r')
+ params['pid_'+vm_name] = int(tmp.readline())
+ except:
+ raise error.TestFail("Could not get PID of %s" % (vm_name))
+
+ # Create the other guest systems
+ for i in range(1, vmsc):
+ vm_name = "vm" + str(i + 1)
+ params['pid_'+vm_name] = tmp_file(vm_name, 'pid')
+ params['extra_params_'+vm_name] = params.get('extra_params')
+ params['extra_params_'+vm_name] += " -pidfile %s" % \
+ (params.get('pid_'+vm_name))
+ params['extra_params'] = params.get('extra_params_'+vm_name)
+ # Last VM is later used to run more allocators simultaneously
+
+ lvms.append(lvms[0].clone(vm_name, params))
+ kvm_utils.env_register_vm(env, vm_name, lvms[i])
+ params['vms'] += " " + vm_name
+
+ logging.info("Booting guest %s" % lvms[i].name)
+ if not lvms[i].create():
+ raise error.TestFail("Cannot create VM %s" % lvms[i].name)
+ if not lvms[i].is_alive():
+ raise error.TestError("VM %s seems to be dead; Test requires a"\
+ "living VM" % lvms[i].name)
+
+ lsessions.append(kvm_utils.wait_for(lvms[i].remote_login, 360, 0, 2))
+ if not lsessions[i]:
+ raise error.TestFail("Could not log into guest %s" % lvms[i].name)
+ try:
+ tmp = open(params.get('pid_'+vm_name), 'r')
+ params['pid_'+vm_name] = int(tmp.readline())
+ except:
+ raise error.TestFail("Could not get PID of %s" % (vm_name))
+
+ # Let systems take a rest :-)
+ time.sleep(vmsc * 2 * perf_ratio)
+ logging.info(get_stat(lvms))
+
+ # Copy the allocator.py into guests
+ pwd = os.path.join(os.environ['AUTODIR'],'tests/kvm')
+ vksmd_src = os.path.join(pwd, "unattended/allocator.py")
+ dst_dir = "/tmp"
+ for vm in lvms:
+ if not vm.copy_files_to(vksmd_src, dst_dir):
+ raise error.TestFail("Copy_files_to failed %s" % (vm.name))
+ logging.info("Phase 0 => passed")
+
+ def phase_inicialize_guests():
+ """ Inicialize virtual machine """
+ logging.info("Starting phase 1: filling with 0")
+ logging.info("Preparing the guests and fill in pages by zero")
+ for session in lsessions:
+ vm = lvms[lsessions.index(session)]
+
+ ret = session.get_command_status("swapoff -a",\
+ timeout=300)
+ if ret == None or ret:
+ raise error.TestFail("Failed to swapoff in the %s"\
+ % (vm.name))
+
+ # Start the allocator
+ session.sendline("python /tmp/allocator.py")
+ (ret,data) = session.read_until_last_line_matches(\
+ ["PASS:","FAIL:"], 60*perf_ratio)
+ if ret == None or ret == 1:
+ raise error.TestFail("Could not run vksmd in %s errno: %s\n"\
+ "Output:\n%s" % (vm.name, ret, data))
+
+
+ # Set allocator keys
+ for i in range(0, vmsc):
+ vm = lvms[i]
+
+ lsessions[i].sendline("mem = mem_fill(%d,%s,%s)" \
+ % (ksm_size,skeys[i],dkeys[i]))
+
+ (match,data) = lsessions[i].read_until_last_line_matches(\
+ ["PASS:","FAIL:"],60*perf_ratio)
+ if match == 1 or match == None:
+ raise error.TestFail("Could not allocate memory on guest %s\n"\
+ "Output:\n%s" % (vm.name, data))
+
+
+ lsessions[i].sendline("mem.value_fill(%d)"% (skeys[0]))
+ (match,data) = lsessions[i].read_until_last_line_matches(\
+ ["PASS:","FAIL:"],120*perf_ratio)
+ if match == 1 or match == None:
+ raise error.TestFail("Could not allocate memory on guest %s\n"\
+ "Output: %s" % (vm.name, data))
+
+ # Let ksmd work (until the shared memory reaches the expected value)
+ shm = 0
+ i = 0
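+ # Give up after 64 iterations; each sleep lasts ksm_size/200 *
+ # perf_ratio seconds (e.g. ~12s for ksm_size = 2474M)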
+ while shm < ksm_size:
+ if i > 64:
+ logging.info(get_stat(lvms))
+ raise error.TestError("SHM didn't merged the memory until "\
+ "the DL on guest: %s"% (vm.name))
+ logging.debug("Sleep(%d)" % (ksm_size / 200 * perf_ratio))
+ time.sleep(ksm_size / 200 * perf_ratio)
+ shm = _get_stat(vm)
+ i = i + 1
+
+ # Keep some reserve
+ time.sleep(ksm_size / 200 * perf_ratio)
+
+ logging.info(get_stat(lvms))
+ logging.info("Phase 1 => passed")
+
+ def phase_separate_first_guest():
+ """ Separate first guest memory by generate a special random series """
+ logging.info("Starting phase 2: Split the pages on the first guest")
+
+ lsessions[0].sendline("mem.static_random_fill()")
+
+ (match,data) = lsessions[0].read_until_last_line_matches(\
+ ["PASS:","FAIL:"],120*perf_ratio)
+ if match == 1:
+ raise error.TestFail("Could not fill memory with random data on"\
+ " guest %s\nOutput:\n%s" % (lvms[0].name, data))
+ if match == None:
+ raise error.TestFail("Generating random series timeout on guest %s"\
+ % (lvms[0].name))
+
+ data = data.splitlines()[-1]
+ logging.debug("Return message of static_random_fill: %s" % data)
+ out = int(data.split()[4])
+ logging.info("PERFORMANCE: %dMB * 1000 / %dms = %dMB/s"\
+ % (ksm_size, out, (ksm_size * 1000 / out)))
+ logging.info(get_stat(lvms))
+ logging.info("Phase 2 => passed")
+
+ def phase_split_guest():
+ """ Sequentional split of pages on guests up to memory limit """
+ logging.info("Starting phase 3a: Sequentional split of pages on guests"\
+ " up to memory limit")
+ last_vm = 0
+ session = None
+ vm = None
+ for i in range(1, vmsc):
+ vm = lvms[i]
+ session = lsessions[i]
+ session.sendline("mem.static_random_fill()")
+
+ out = ""
+ while not out.startswith("PASS") and not out.startswith("FAIL"):
+ free_mem = int(parse_meminfo_value("MemFree"))
+ if (ksm_swap):
+ free_mem = free_mem + int(parse_meminfo_value("SwapFree"))
+ logging.debug("FreeMem = %d" % (free_mem))
+ # We need to keep some memory for python to run.
+
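+ # (meminfo values are in kB: keep ~64M free, or ~256M with swap)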
+ if (free_mem < 64000) or (ksm_swap and free_mem < 256000):
+ logging.debug("Only %s free memory, killing 0 - %d hosts"\
+ % (free_mem, (i-1)))
+ for j in range(0, i):
+ lvms[j].destroy(gracefully = False)
+ last_vm = i
+ break
+ out = session.read_nonblocking(0.1)
+
+
+ if last_vm != 0:
+ break
+
+ logging.info("Memory filled by the guest %s" % (vm.name))
+ logging.info("Phase 3a => passed")
+
+ """ Check if memory in max loading guest is allright"""
+ logging.info("Starting phase 3b")
+
+ """ Kill rest of machine"""
+ for i in range(last_vm+1, vmsc):
+ lsessions[i].close()
+ if i == (vmsc-1):
+ logging.info(get_stat([lvms[i]]))
+ lvms[i].destroy(gracefully = False)
+
+ """ Verify last machine with random generated memory"""
+ session.sendline("mem.static_random_verify()")
+ (match,data) = session.read_until_last_line_matches(
+ ["PASS:","FAIL:"], (mem / 200 * 50 * perf_ratio))
+ if (match == 1):
+ raise error.TestError("Memory error dump: %s" % data)
+ logging.info(get_stat([lvms[last_vm]]))
+ (status,data) = lsessions[last_vm].get_command_status_output("die()",20)
+ lvms[last_vm].destroy(gracefully = False)
+ logging.info("Phase 3b => passed")
+
+ def phase_paralel():
+ """ Paralel page spliting """
+ logging.info("Phase 1: Paralel page spliting")
+ # We have to wait until allocator is finished (it waits 5 seconds to
+ # clean the socket
+
+ session = lsessions[0]
+ vm = lvms[0]
+ for i in range(1,max_alloc):
+ lsessions.append(kvm_utils.wait_for(vm.remote_login, 360, 0, 2))
+ if not lsessions[i]:
+ raise error.TestFail("Could not log into guest %s" \
+ % lvms[i].name)
+
+ ret = session.get_command_status("swapoff -a", timeout=300)
+ if ret == None or ret:
+ raise error.TestFail("Failed to swapoff in the %s" % (vm.name))
+
+ for i in range(0, max_alloc):
+ lsessions[i].sendline("python /tmp/allocator.py")
+ (ret,data) = lsessions[i].read_until_last_line_matches(
+ ["PASS:","FAIL:"], (60 * perf_ratio))
+ if ret == None:
+ raise error.TestFail("Could not run vksmd in guest %s"\
+ % (vm.name))
+ if ret == 1:
+ raise error.TestFail("Could not run allocator in %s errno: %d"\
+ "\nOutput:\n%s" % (vm.name, ret, data))
+
+
+ logging.info("Phase 4a: Simultaneous merging")
+ for i in range(0, max_alloc):
+ logging.info("Memory to guest allocator = %dMB" % (ksm_size/max_alloc))
+ lsessions[i].sendline("mem = mem_fill(%d,%s,%s)" % \
+ ((ksm_size/max_alloc), skeys[i], dkeys[i]))
+
+ for i in range(0, max_alloc):
+ (match,data) = lsessions[i].read_until_last_line_matches(
+ ["PASS:","FAIL:"], (60 * perf_ratio))
+ if match == 1:
+ raise error.TestFail("Could not allocate memory on guest %s"\
+ "\nOutput:\n%s" % (vm.name, data))
+
+ for i in range(0, max_alloc):
+ lsessions[i].sendline("mem.value_fill(%d)"% (skeys[0]))
+
+ for i in range(0, max_alloc):
+ (match,data) = lsessions[i].read_until_last_line_matches(
+ ["PASS:","FAIL:"], (90 * perf_ratio))
+ if match == 1:
+ raise error.TestFail("Could not allocate memory on guest %s"\
+ "\nOutput:\n%s" % (vm.name, data))
+ # Wait until ksmd merges the pages (up to 64 sleep iterations)
+ shm = 0
+ i = 0
+ while shm < ksm_size:
+ if i > 64:
+ logging.info(get_stat(lvms))
+ raise error.TestError("SHM didn't merged the memory until DL")
+ logging.debug("Sleep(%d)" % (ksm_size / 200 * perf_ratio))
+ time.sleep(ksm_size / 200 * perf_ratio)
+ shm = _get_stat(vm)
+ i = i + 1
+ logging.info(get_stat([vm]))
+
+
+ logging.info("Phases 4b: Simultaneous spliting")
+
+ # Actual splitting
+ for i in range(0, max_alloc):
+ lsessions[i].sendline("mem.static_random_fill()")
+
+ for i in range(0, max_alloc):
+ (match,data) = lsessions[i].read_until_last_line_matches(
+ ["PASS:","FAIL:"], (90 * perf_ratio))
+ if match == 1:
+ raise error.TestFail("Could not fill memory by random on guest"\
+ " %s\nOutput:\n%s" % (vm.name, data))
+
+ if match == None:
+ raise error.TestFail("Generating random series timeout on "\
+ "guest %s" % (vm.name))
+
+ data = data.splitlines()[-1]
+ logging.debug(data)
+ out = int(data.split()[4])
+ logging.info("PERFORMANCE: %dMB * 1000 / %dms = %dMB/s"\
+ % (ksm_size/max_alloc, out, \
+ (ksm_size * 1000 / out / max_alloc)))
+ logging.info(get_stat([vm]))
+
+ logging.info("Phase 4c: Simultaneous verification")
+ for i in range(0, max_alloc):
+ lsessions[i].sendline("mem.static_random_verify()")
+ for i in range(0, max_alloc):
+ (match,data) = lsessions[i].read_until_last_line_matches(\
+ ["PASS:","FAIL:"], (mem / 200 * 50 * perf_ratio))
+ if (match == 1):
+ raise error.TestError("Memory error dump: %s" % data)
+
+ logging.info("Phases 4d: Simultaneous merging")
+
+ # Actual merging (fill with the shared value again)
+ for i in range(0, max_alloc):
+ lsessions[i].sendline("mem.value_fill(%d)" % (skeys[0]))
+ for i in range(0, max_alloc):
+ (match,data) = lsessions[i].read_until_last_line_matches(\
+ ["PASS:","FAIL:"], (120 * perf_ratio))
+ if match == 1:
+ raise error.TestFail("Could not fill memory with the value on"\
+ " guest %s\nOutput:\n%s" % (vm.name, data))
+ logging.info(get_stat([vm]))
+
+ logging.info("Phase 4e: Simultaneous verification")
+ for i in range(0, max_alloc):
+ lsessions[i].sendline("mem.value_fill(%d)" % (skeys[0]))
+ for i in range(0, max_alloc):
+ (match,data) = lsessions[i].read_until_last_line_matches(\
+ ["PASS:","FAIL:"], (mem / 200 * 50 * perf_ratio))
+ if (match == 1):
+ raise error.TestError("Memory error dump: %s" % data)
+
+ logging.info("Phases 4f: Simultaneous spliting last 96B")
+
+ # Actual splitting
+ for i in range(0, max_alloc):
+ lsessions[i].sendline("mem.static_random_fill(96)")
+
+ for i in range(0, max_alloc):
+ (match,data) = lsessions[i].read_until_last_line_matches(\
+ ["PASS:","FAIL:"], (60 * perf_ratio))
+ if match == 1:
+ raise error.TestFail("Could not fill memory with random data"\
+ " on guest %s\nOutput:\n%s" % (vm.name, data))
+
+ if match == None:
+ raise error.TestFail("Generating random series timeout on guest %s"\
+ % vm.name)
+
+ data = data.splitlines()[-1]
+ out = int(data.split()[4])
+ logging.info("PERFORMANCE: %dMB * 1000 / %dms = %dMB/s"\
+ % (ksm_size/max_alloc, out, \
+ (ksm_size * 1000 / out / max_alloc)))
+ logging.info(get_stat([vm]))
+
+ logging.info("Phase 4g: Simultaneous verification last 96B")
+ for i in range(0, max_alloc):
+ lsessions[i].sendline("mem.static_random_verify(96)")
+ for i in range(0, max_alloc):
+ (match,data) = lsessions[i].read_until_last_line_matches(
+ ["PASS:","FAIL:"], (mem / 200 * 50 * perf_ratio))
+ if (match == 1):
+ raise error.TestError("Memory error dump: %s" % data)
+
+ logging.info(get_stat([vm]))
+
+ logging.info("Phase 4 => passed")
+ # Clean-up
+ for i in range(0, max_alloc):
+ lsessions[i].get_command_status_output("die()",20)
+ session.close()
+ vm.destroy(gracefully = False)
+
+ if params['ksm_test_size'] == "paralel":
+ phase_paralel()
+ elif params['ksm_test_size'] == "serial":
+ phase_inicialize_guests()
+ phase_separate_first_guest()
+ phase_split_guest()
+
diff --git a/client/tests/kvm/tests_base.cfg.sample b/client/tests/kvm/tests_base.cfg.sample
index a403399..49ddff2 100644
--- a/client/tests/kvm/tests_base.cfg.sample
+++ b/client/tests/kvm/tests_base.cfg.sample
@@ -270,6 +270,24 @@ variants:
type = physical_resources_check
catch_uuid_cmd = dmidecode | awk -F: '/UUID/ {print $2}'
+ - ksm_overcommit:
+ # Don't preprocess any vms as we need to change their params
+ vms = ''
+ image_snapshot = yes
+ kill_vm_gracefully = no
+ type = ksm_overcommit
+ ksm_swap = yes
+ no hugepages
+ # Overcommit of host memory
+ ksm_overcommit_ratio = 3
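+ # (serial mode runs max(int(ksm_overcommit_ratio) + 1, max_vms) guests)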
+ # Max number of simultaneous allocators in parallel mode
+ ksm_paralel_ratio = 4
+ variants:
+ - ksm_serial
+ ksm_test_size = "serial"
+ - ksm_paralel
+ ksm_test_size = "paralel"
+
# NICs
variants:
- @rtl8139:
diff --git a/client/tests/kvm/unattended/allocator.py b/client/tests/kvm/unattended/allocator.py
new file mode 100644
index 0000000..43a7aab
--- /dev/null
+++ b/client/tests/kvm/unattended/allocator.py
@@ -0,0 +1,213 @@
+import os
+import array
+import sys
+import struct
+import random
+import copy
+import inspect
+import tempfile
+from datetime import datetime
+from datetime import timedelta
+
+"""
+Guest-side memory allocator for the KSM overcommit test.
+
+@copyright: 2008-2009 Red Hat Inc.
+Jiri Zupka <jzupka@xxxxxxxxxx>
+"""
+PAGE_SIZE = 4096 # machine page size in bytes
+
+class mem_fill:
+ """
+ Guest side script to test KSM driver
+ """
+
+ def __init__(self,mem,static_value,random_key):
+ """
+ Constructor of mem_fill class
+
+ @param mem: amount of test memory in MB
+ @param random_key: seed of random series used for fill
+ @param static_value: byte value used to fill the whole memory (0..255)
+ """
+ if (static_value < 0 or static_value > 255):
+ print "FAIL: Initialization static value"+\
+ "can be only in range (0..255)"
+ return
+
+ self.tmpdp = tempfile.mkdtemp()
+ if (not os.system("mount -osize=%dM tmpfs %s -t tmpfs" \
+ % (mem+25,self.tmpdp)) == 0):
+ print "FAIL: Only root can do that"
+ else:
+ self.f = tempfile.TemporaryFile(prefix='mem', dir=self.tmpdp)
+ self.allocate_by = 'L'
+ self.npages = (mem * 1024 * 1024)/PAGE_SIZE
+ self.random_key = random_key
+ self.static_value = static_value
+ print "PASS: Initialization"
+
+ def __del__(self):
+ if (os.path.ismount(self.tmpdp)):
+ self.f.close()
+ os.system("umount %s" % (self.tmpdp))
+
+ def compare_page(self,original,inmem):
+ """
+ Compare two pages and print position and values of any mismatch.
+
+ @param original: data which we expect in memory
+ @param inmem: data actually found in memory
+ """
+ for ip in range(PAGE_SIZE/original.itemsize):
+ if (not original[ip] == inmem[ip]): # find the wrong item
+ originalp = array.array("B")
+ inmemp = array.array("B")
+ originalp.fromstring(original[ip:ip+1].tostring())
+ inmemp.fromstring(inmem[ip:ip+1].tostring())
+ for ib in range(len(originalp)): #find wrong byte in item
+ if not (originalp[ib] == inmemp[ib]):
+ position = self.f.tell()-PAGE_SIZE+(ip)*\
+ (original.itemsize)+ib
+ print "Mem error on position %d wanted 0x%Lx"+\
+ " and is 0x%Lx"\
+ % (position,originalp[ib],inmemp[ib])
+
+ def value_page(self,value):
+ """
+ Create page filled by value
+
+ @param value: byte value to fill the page with (0..255)
+ @return: array of bytes of size PAGE_SIZE
+ """
+ a = array.array("B")
+ for i in range(PAGE_SIZE/a.itemsize):
+ try:
+ a.append(value)
+ except:
+ print "FAIL: Value can be only in range (0..255)"
+ return a
+
+ def random_page(self,seed):
+ """
+ Create page filled by static random series
+
+ @param seed: seed of random series
+ @return: static random array series
+ """
+ random.seed(seed)
+ a = array.array(self.allocate_by)
+ for i in range(PAGE_SIZE/a.itemsize):
+ a.append(random.randrange(0,sys.maxint))
+ return a
+
+
+ def value_fill(self,value = -1):
+ """
+ Fill memory by page generated with value_page
+ """
+ self.f.seek(0)
+ if value == -1:
+ value = self.static_value
+ page = self.value_page(value)
+ for pages in range(self.npages):
+ page.tofile(self.f)
+ print "PASS: Mem value fill"
+
+ def value_check(self):
+ """
+ Check whether the memory contents are correct.
+
+ @return: prints PASS if the data in memory is correct,
+ otherwise prints the mismatching data and FAIL
+ """
+ self.f.seek(0)
+ e = 2 # report at most 2 mismatching pages
+ failure = False
+ page = self.value_page(self.static_value)
+ for pages in range(self.npages):
+ pf = array.array("B")
+ pf.fromfile(self.f,PAGE_SIZE/pf.itemsize)
+ if not (page == pf):
+ failure = True
+ self.compare_page(page,pf)
+ e = e - 1
+ if e == 0:
+ break
+ if failure:
+ print "FAIL: value verification"
+ else:
+ print "PASS: value verification"
+
+ def static_random_fill(self,n_bytes_on_end=PAGE_SIZE):
+ """
+ Fill the memory with a static random series, with the page index
+ written at a random position near the end of each page.
+
+ @param n_bytes_on_end: how many bytes at the end of the page may be
+ changed
+ @return: PASS
+ """
+ self.f.seek(0)
+ page = self.random_page(self.random_key)
+ random.seed(self.random_key)
+ p = copy.copy(page)
+ t_start = datetime.now()
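+ # Writing the page index at a random offset makes every page
+ # unique, forcing KSM to split previously merged pages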
+ for pages in range(self.npages):
+ rand = random.randint(((PAGE_SIZE/page.itemsize)-1)-
+ (n_bytes_on_end/page.itemsize),
+ (PAGE_SIZE/page.itemsize)-1)
+
+ p[rand] = pages
+ p.tofile(self.f)
+ p[rand] = page[rand]
+ t_end = datetime.now()
+ delta = t_end-t_start
+ milisec = delta.microseconds/1e3+delta.seconds*1e3
+ print "PASS: filling duration = %Ld ms" % milisec
+
+ def static_random_verify(self,n_bytes_on_end=PAGE_SIZE):
+ """
+ Check whether the memory matches the static random series.
+
+ @return: prints PASS if the data in memory is correct,
+ otherwise prints the mismatching data and FAIL
+ """
+ self.f.seek(0)
+ e = 2 # report at most 2 mismatching pages
+ page = self.random_page(self.random_key)
+ random.seed(self.random_key)
+ p = copy.copy(page)
+ failure = False
+ for pages in range(self.npages):
+ rand = random.randint(((PAGE_SIZE/page.itemsize)-1)-
+ (n_bytes_on_end/page.itemsize),
+ (PAGE_SIZE/page.itemsize)-1)
+ p[rand] = pages
+ pf = array.array(self.allocate_by)
+ pf.fromfile(self.f,PAGE_SIZE/pf.itemsize)
+ if not (p == pf):
+ failure = True
+ self.compare_page(p,pf)
+ e = e - 1
+ if e == 0:
+ break
+ p[rand] = page[rand]
+ if failure:
+ print "FAIL: Random series verification"
+ else:
+ print "PASS: Random series verification"
+
+
+def die():
+ """ Exit the allocator (called by the host side when done). """
+ exit(0)
+
+
+print "PASS: Start"
+def main():
+ end = False
+ while (not end):
+ # read one command from stdin and execute it; die() ends the loop
+ command = raw_input()
+ exec command
+
+if __name__ == "__main__":
+ main()
--
1.6.2.5