Hi,
we are back with new features for the KSM_overcommit test:
* NEW: guest_reserve and host_reserve are now calculated based on used
memory
* NEW: the tmpfs size now accounts for the per-MB write overhead
* NEW: VM alive check during split_guest()
* FIX: split_guest() used an incorrect session
* MOD: Increased the number of available VNC ports
host_reserve:
* Still possible to set this value in cfg
* Calculation:
1) host_reserve = currently used host memory + minimal guest (128MB)
2) host_reserve += number of guests * 64MB
guest_reserve:
* Still possible to set this value in cfg
* Calculation:
1) guest_reserve = 256MB (minimal system memory)
2) guest_reserve += guest memory * constant
... where the constant (0.055) covers the tmpfs overhead and the OS
memory consumption per MB of guest RAM (see the sketch below)
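To make the arithmetic concrete, here is a rough sketch of the default
calculation; used_host_mem_mb is just a placeholder for
(MemTotal - MemFree) in MB, while vmsc and mem are the guest count and
per-guest memory used in the test:

    import math

    host_reserve = used_host_mem_mb + 128     # used memory + minimal guest
    host_reserve += vmsc * 64                 # 64MB per guest

    guest_reserve = 256                       # minimal system memory
    guest_reserve += math.ceil(mem * 0.055)   # tmpfs + OS overhead per MB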
Tested by ldoktor and jzupka on various HW (2GB-36GB host memory).
Best regards,
Lukáš Doktor
diff --git a/client/tests/kvm/kvm_vm.py b/client/tests/kvm/kvm_vm.py
index 6bc7987..1d83120 100755
--- a/client/tests/kvm/kvm_vm.py
+++ b/client/tests/kvm/kvm_vm.py
@@ -396,7 +396,7 @@ class VM:
# Find available VNC port, if needed
if params.get("display") == "vnc":
- self.vnc_port = kvm_utils.find_free_port(5900, 6000)
+ self.vnc_port = kvm_utils.find_free_port(5900, 6100)
# Find random UUID if specified 'uuid = random' in config file
if params.get("uuid") == "random":
diff --git a/client/tests/kvm/scripts/allocator.py b/client/tests/kvm/scripts/allocator.py
index 1036893..227745a 100755
--- a/client/tests/kvm/scripts/allocator.py
+++ b/client/tests/kvm/scripts/allocator.py
@@ -8,10 +8,12 @@ Auxiliary script used to allocate memory on guests.
"""
-import os, array, sys, struct, random, copy, inspect, tempfile, datetime
+import os, array, sys, struct, random, copy, inspect, tempfile, datetime, math
PAGE_SIZE = 4096 # machine page size
+TMPFS_OVERHEAD = 0.0022 # tmpfs overhead per 1MB of written data
+
class MemFill(object):
"""
@@ -32,7 +34,8 @@ class MemFill(object):
self.tmpdp = tempfile.mkdtemp()
ret_code = os.system("mount -o size=%dM tmpfs %s -t tmpfs" %
- ((mem + 25), self.tmpdp))
+ ((mem + math.ceil(mem * TMPFS_OVERHEAD)),
+ self.tmpdp))
if ret_code != 0:
if os.getuid() != 0:
print ("FAIL: Unable to mount tmpfs "
@@ -42,7 +45,7 @@ class MemFill(object):
else:
self.f = tempfile.TemporaryFile(prefix='mem', dir=self.tmpdp)
self.allocate_by = 'L'
- self.npages = (mem * 1024 * 1024) / PAGE_SIZE
+ self.npages = ((mem * 1024 * 1024) / PAGE_SIZE)
self.random_key = random_key
self.static_value = static_value
print "PASS: Initialization"
@@ -83,7 +86,7 @@ class MemFill(object):
@return: return array of bytes size PAGE_SIZE.
"""
a = array.array("B")
- for i in range(PAGE_SIZE / a.itemsize):
+ for i in range((PAGE_SIZE / a.itemsize)):
try:
a.append(value)
except:
diff --git a/client/tests/kvm/tests/ksm_overcommit.py b/client/tests/kvm/tests/ksm_overcommit.py
index 2dd46c4..31d5c61 100644
--- a/client/tests/kvm/tests/ksm_overcommit.py
+++ b/client/tests/kvm/tests/ksm_overcommit.py
@@ -142,6 +142,10 @@ def run_ksm_overcommit(test, params, env):
session = None
vm = None
for i in range(1, vmsc):
+ # Check that all VMs are still alive
+ for j in range(0, vmsc):
+ if not lvms[j].is_alive():
+ raise error.TestFail("one of the other VMs died")
vm = lvms[i]
session = lsessions[i]
a_cmd = "mem.static_random_fill()"
@@ -154,6 +158,8 @@ def run_ksm_overcommit(test, params, env):
logging.debug("Watching host memory while filling vm %s memory",
vm.name)
while not out.startswith("PASS") and not out.startswith("FAIL"):
+ if not vm.is_alive():
+ raise error.TestFail("VM %s died while its memory was being filled"
+ % vm.name)
free_mem = int(utils.read_from_meminfo("MemFree"))
if (ksm_swap):
free_mem = (free_mem +
@@ -202,7 +208,7 @@ def run_ksm_overcommit(test, params, env):
# Verify last machine with randomly generated memory
a_cmd = "mem.static_random_verify()"
- _execute_allocator(a_cmd, lvms[last_vm], session,
+ _execute_allocator(a_cmd, lvms[last_vm], lsessions[last_vm],
(mem / 200 * 50 * perf_ratio))
logging.debug(kvm_test_utils.get_memory_info([lvms[last_vm]]))
@@ -338,12 +344,29 @@ def run_ksm_overcommit(test, params, env):
# Main test code
logging.info("Starting phase 0: Initialization")
+
# host_reserve: mem reserve kept for the host system to run
- host_reserve = int(params.get("ksm_host_reserve", 512))
+ host_reserve = int(params.get("ksm_host_reserve", -1))
+ if (host_reserve == -1):
+ # default host_reserve = used host memory + one_minimal_guest(128MB)
+ # later we add 64MB per additional guest
+ host_reserve = ((utils.memtotal() - utils.read_from_meminfo("MemFree"))
+ / 1024 + 128)
+ # using default reserve (_host_reserve will later hold the guest count)
+ _host_reserve = 1
+ else:
+ _host_reserve = False
+
# guest_reserve: mem reserve kept to avoid guest OS to kill processes
- guest_reserve = int(params.get("ksm_guest_reserve", 1024))
- logging.debug("Memory reserved for host to run: %d", host_reserve)
- logging.debug("Memory reserved for guest to run: %d", guest_reserve)
+ guest_reserve = int(params.get("ksm_guest_reserve", -1))
+ if (guest_reserve == -1):
+ # default guest_reserve = minimal_system_mem(256MB)
+ # later we add the tmpfs and OS overhead per MB of guest memory
+ guest_reserve = 256
+ # using default reserve
+ _guest_reserve = True
+ else:
+ _guest_reserve = False
max_vms = int(params.get("max_vms", 2))
overcommit = float(params.get("ksm_overcommit_ratio", 2.0))
@@ -355,6 +378,10 @@ def run_ksm_overcommit(test, params, env):
if (params['ksm_mode'] == "serial"):
max_alloc = vmsc
+ if _host_reserve:
+ # First round of additional guest reserves (64MB per guest)
+ host_reserve += vmsc * 64
+ _host_reserve = vmsc
host_mem = (int(utils.memtotal()) / 1024 - host_reserve)
@@ -402,6 +429,10 @@ def run_ksm_overcommit(test, params, env):
if mem - guest_reserve - 1 > 3100:
vmsc = int(math.ceil((host_mem * overcommit) /
(3100 + guest_reserve)))
+ if _host_reserve:
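+ # vmsc has grown: reserve another 64MB per additional guest and
+ # take the same amount out of the memory available to guests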
+ host_reserve += (vmsc - _host_reserve) * 64
+ host_mem -= (vmsc - _host_reserve) * 64
+ _host_reserve = vmsc
mem = int(math.floor(host_mem * overcommit / vmsc))
if os.popen("uname -i").readline().startswith("i386"):
@@ -410,8 +441,19 @@ def run_ksm_overcommit(test, params, env):
if mem > 3100 - 64:
vmsc = int(math.ceil((host_mem * overcommit) /
(3100 - 64.0)))
+ if _host_reserve:
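+ # same adjustment when vmsc grows due to the 32bit host limit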
+ host_reserve += (vmsc - _host_reserve) * 64
+ host_mem -= (vmsc - _host_reserve) * 64
+ _host_reserve = vmsc
mem = int(math.floor(host_mem * overcommit / vmsc))
+ # 0.055 MB of additional reserve (OS + tmpfs overhead) per MB of guest RAM
+ if _guest_reserve:
+ guest_reserve += math.ceil(mem * 0.055)
+
+ logging.debug("Memory reserved for host to run: %d", host_reserve)
+ logging.debug("Memory reserved for guest to run: %d", guest_reserve)
+
logging.debug("Checking KSM status...")
ksm_flag = 0
for line in os.popen('ksmctl info').readlines():
diff --git a/client/tests/kvm/tests_base.cfg.sample b/client/tests/kvm/tests_base.cfg.sample
index ee83ac2..d3a5982 100644
--- a/client/tests/kvm/tests_base.cfg.sample
+++ b/client/tests/kvm/tests_base.cfg.sample
@@ -302,9 +302,9 @@ variants:
ksm_overcommit_ratio = 3
# Max paralel runs machine
ksm_parallel_ratio = 4
- # Host memory reserve
- ksm_host_reserve = 512
- ksm_guest_reserve = 1024
+ # Host memory reserve (by default derived from used host memory)
+ # ksm_host_reserve = 512
+ # ksm_guest_reserve = 1024
variants:
- ksm_serial:
ksm_mode = "serial"