On Thu, Apr 21, 2011 at 4:33 AM, Amos Kong <akong@xxxxxxxxxx> wrote:
> This case is used to test the drift between host and guest.
> Use taskset to make tsc program execute in a single cpu.
> If the drift ratio bigger than 10%, then fail this case.

The calculation of the tsc frequency looks wrong... See comments below.

Also, it'd be great if Glauber or Zach could take a look at this test!

> Signed-off-by: Amos Kong <akong@xxxxxxxxxx>
> ---
>  client/tests/kvm/deps/get_tsc.c        |   27 ++++++++++
>  client/tests/kvm/tests/tsc_drift.py    |   88 ++++++++++++++++++++++++++++++++
>  client/tests/kvm/tests_base.cfg.sample |    5 ++
>  3 files changed, 120 insertions(+), 0 deletions(-)
>  create mode 100644 client/tests/kvm/deps/get_tsc.c
>  create mode 100644 client/tests/kvm/tests/tsc_drift.py
>
> diff --git a/client/tests/kvm/deps/get_tsc.c b/client/tests/kvm/deps/get_tsc.c
> new file mode 100644
> index 0000000..e91a41f
> --- /dev/null
> +++ b/client/tests/kvm/deps/get_tsc.c
> @@ -0,0 +1,27 @@
> +/*
> + * Programme to get cpu's TSC(time stamp counter)
> + * Copyright(C) 2009 Redhat, Inc.
> + * Amos Kong <akong@xxxxxxxxxx>
> + * Dec 9, 2009
> + *
> + */
> +
> +#define _GNU_SOURCE
> +#include <stdio.h>
> +#include <stdint.h>
> +
> +typedef unsigned long long u64;
> +
> +u64 rdtsc(void)
> +{
> +    unsigned tsc_lo, tsc_hi;
> +
> +    asm volatile("rdtsc" : "=a"(tsc_lo), "=d"(tsc_hi));
> +    return tsc_lo | (u64)tsc_hi << 32;
> +}
> +
> +int main(void)
> +{
> +    printf("%lld\n", rdtsc());
> +    return 0;
> +}
> diff --git a/client/tests/kvm/tests/tsc_drift.py b/client/tests/kvm/tests/tsc_drift.py
> new file mode 100644
> index 0000000..de2fb76
> --- /dev/null
> +++ b/client/tests/kvm/tests/tsc_drift.py
> @@ -0,0 +1,88 @@
> +import time, os, logging, commands, re
> +from autotest_lib.client.common_lib import error
> +from autotest_lib.client.bin import local_host
> +import kvm_test_utils
> +
> +
> +def run_tsc_drift(test, params, env):
> +    """
> +    Check the TSC(time stamp counter) frequency of guest and host whether match
> +    or not
> +
> +    1) Computer average tsc frequency of host's cpus by C the program
> +    2) Copy the C code to the guest, complie and run it to get tsc
> +       frequency of guest's vcpus
> +    3) Sleep sometimes and get the TSC of host and guest again
> +    4) Compute the TSC frequency of host and guest
> +    5) Compare the frequency deviation between host and guest with standard
> +
> +    @param test: Kvm test object
> +    @param params: Dictionary with the test parameters.
> +    @param env: Dictionary with test environment.
> +    """
> +    drift_threshold = float(params.get("drift_threshold"))
> +    interval = float(params.get("interval"))
> +    cpu_chk_cmd = params.get("cpu_chk_cmd")
> +    tsc_freq_path = os.path.join(test.bindir, 'deps/get_tsc.c')
> +    host_freq = 0
> +
> +    def get_tsc(machine="host", i=0):
> +        cmd = "taskset -c %s /tmp/get_tsc" % i
> +        if machine == "host":
> +            s, o = commands.getstatusoutput(cmd)
> +        else:
> +            s, o = session.cmd_status_output(cmd)
> +        if s != 0:
> +            logging.debug(o)
> +            raise error.TestError("Fail to get tsc of host, ncpu: %d" % i)
> +        return float(re.findall("(\d+)",o)[0])
> +
> +    vm = env.get_vm(params["main_vm"])
> +    vm.verify_alive()
> +    timeout = float(params.get("login_timeout", 240))
> +    session = vm.wait_for_login(timeout=timeout)
> +
> +    commands.getoutput("gcc %s -o /tmp/get_tsc" % tsc_freq_path)
> +    ncpu = local_host.LocalHost().get_num_cpu()
> +
> +    logging.info("Interval is %s" % interval)
> +    logging.info("Determine the TSC frequency in the host")
> +    for i in range(ncpu):
> +        tsc1 = get_tsc("host", i)
> +        time.sleep(interval)
> +        tsc2 = get_tsc("host", i)
> +
> +        delta = tsc2 - tsc1
> +        logging.info("Host TSC delta for cpu %s is %s" % (i, delta))
> +        if delta < 0:
> +            raise error.TestError("Host TSC for cpu %s warps %s" % (i, delta))

^ Yeah, I don't think this is expected to warp, but still, good to
check.

> +        host_freq += delta / ncpu

Now, I really didn't understand the concept behind the tsc frequency
calculation. We take the difference between 2 timestamps read over an
arbitrary period of time (in this case, it looks like 30s by default)
and divide it by the number of cpus; since this procedure is repeated
once per cpu, what gets accumulated is:

    host_freq = d_tsc1/N + d_tsc2/N + d_tsc3/N + ... + d_tscN/N
              = (d_tsc1 + d_tsc2 + ... + d_tscN) / N

Unless I'm missing something, host_freq ends up being the average
delta across the cpus, so the calculation could be simplified...

And by definition, isn't frequency how many times a phenomenon occurs
(in this case, a change of the timestamp counter) per unit of time?
So don't we have to divide these deltas by the time the program slept?

Also, it looks like the whole logic to get the frequencies can be
factored into a single function that is then called for both guest and
host.
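Something along these lines is what I have in mind (just an untested
sketch, not a drop-in patch). It reuses the get_tsc() helper and the
interval/ncpu variables from the patch; measure_tsc_freq, guest_ncpu
(however the test ends up counting guest vcpus, via cpu_chk_cmd I
suppose) and the final drift computation are only illustrative:

    def measure_tsc_freq(machine, ncpu, interval):
        """Average TSC frequency (ticks per second) across all cpus."""
        freq_sum = 0
        for cpu in range(ncpu):
            tsc1 = get_tsc(machine, cpu)
            time.sleep(interval)
            tsc2 = get_tsc(machine, cpu)
            delta = tsc2 - tsc1
            if delta < 0:
                raise error.TestError("TSC for %s cpu %s warps %s"
                                      % (machine, cpu, delta))
            # Ticks elapsed divided by the time slept is a frequency.
            freq_sum += delta / interval
        return freq_sum / ncpu

    host_freq = measure_tsc_freq("host", ncpu, interval)
    guest_freq = measure_tsc_freq("guest", guest_ncpu, interval)
    # Relative deviation, to be compared against drift_threshold.
    drift = 100.0 * abs(host_freq - guest_freq) / host_freq

That way the host and guest paths share exactly the same measurement
code, and the sleep time is accounted for explicitly.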
> + """ > + drift_threshold = float(params.get("drift_threshold")) > + interval = float(params.get("interval")) > + cpu_chk_cmd = params.get("cpu_chk_cmd") > + tsc_freq_path = os.path.join(test.bindir, 'deps/get_tsc.c') > + host_freq = 0 > + > + def get_tsc(machine="host", i=0): > + cmd = "taskset -c %s /tmp/get_tsc" % i > + if machine == "host": > + s, o = commands.getstatusoutput(cmd) > + else: > + s, o = session.cmd_status_output(cmd) > + if s != 0: > + logging.debug(o) > + raise error.TestError("Fail to get tsc of host, ncpu: %d" % i) > + return float(re.findall("(\d+)",o)[0]) > + > + vm = env.get_vm(params["main_vm"]) > + vm.verify_alive() > + timeout = float(params.get("login_timeout", 240)) > + session = vm.wait_for_login(timeout=timeout) > + > + commands.getoutput("gcc %s -o /tmp/get_tsc" % tsc_freq_path) > + ncpu = local_host.LocalHost().get_num_cpu() > + > + logging.info("Interval is %s" % interval) > + logging.info("Determine the TSC frequency in the host") > + for i in range(ncpu): > + tsc1 = get_tsc("host", i) > + time.sleep(interval) > + tsc2 = get_tsc("host", i) > + > + delta = tsc2 - tsc1 > + logging.info("Host TSC delta for cpu %s is %s" % (i, delta)) > + if delta < 0: > + raise error.TestError("Host TSC for cpu %s warps %s" % (i, delta)) ^ Yeah, I don't think this is expected to warp, but yet, good to check. > + host_freq += delta / ncpu Now, i really didn't understand the concept behind the tsc frequency. So we have a difference between 2 timestamps taken over an arbitrary period of time (in this case, looks 30s by default) and divide by the number of cpus, however we will repeat this procedure by the same amount so: N * ( d_tsc1/N + d_tsc2/N + d_tsc3/N + ... + d_tscn/N) This could be simplified to (d_tsc1 + d_tsc2 + d_tsc3 +.... + d_tscn) Unless I'm missing something... so host_freq = sum(d_tsci) The calculation could be simplified then... And by definition, isn't frequency how many times a phenomena occurs (in this case, change of timestamp counter) per time? So, don't we have to divide this sum by the time the program slept? Also, it looks like the whole logic to get the frequencies can be factored to a single function and just call that function for guest and host. -- Lucas -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html