Re: [RFC PATCH 4/4] virt: Introduce regression testing infrastructure

----- Original Message -----
> Hi akong,
> 
> See the inline comment below.
> 
> On Fri, Dec 23, 2011 at 6:28 PM, Amos Kong <akong@xxxxxxxxxx> wrote:
> > regression.py:
> >  The 'regression' module compares the test results of two
> >  jobs; regression.compare() can be called at the end of the
> >  control file.
> >
> > This script can also be used directly. Example:
> > | # python regression.py ntttcp /ntttcp-result1 /ntttcp-result2 \
> > |   ../../tests/kvm/perf.conf
> > | Fri Dec 23 17:23:08 2011
> > |
> > | 1 - /tmp/netperf-avg-0.385058442362.txt
> > | 2 - /tmp/netperf-avg-0.66384166902.txt
> > |
> > | ========================
> > |      buf(k)| throughput(Mbit/s)
> > | 1         2|  109.548
> > | 2         2|  104.239
> > | %          |     -4.8
> > | 1         4|  209.519
> > | 2         4|  211.633
> > | %          |     +1.0
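
(Each "%" row above is the relative change of run 2 versus run 1,
computed as ((new - old) / old) * 100; e.g. (104.239 - 109.548) /
109.548 * 100 = -4.8.)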
> >
> > analyzer.py:
> >  It compares two test results (in the standard format);
> >  it can also be run directly.
> > | # python analyzer.py /result1.RHS /ntttcp-result2.RHS
> >
> > perf.conf:
> >  configures test-related parameters.
> >
> > It supports comparing the current result with a result stored
> > on the autotest server. The autotest result directory should
> > first be shared via NFS, and its address specified in perf.conf.
> >
> > Signed-off-by: Amos Kong <akong@xxxxxxxxxx>
> > ---
> >  client/tests/kvm/control        |    7 +
> >  client/tests/kvm/perf.conf      |   23 ++++
> >  client/virt/tests/analyzer.py   |  224 +++++++++++++++++++++++++++++++++++++++
> >  client/virt/tests/regression.py |   33 ++++++
> >  4 files changed, 287 insertions(+), 0 deletions(-)
> >  create mode 100644 client/tests/kvm/perf.conf
> >  create mode 100644 client/virt/tests/analyzer.py
> >  create mode 100644 client/virt/tests/regression.py
> >
> > diff --git a/client/tests/kvm/control b/client/tests/kvm/control
> > index 950154c..5cdf506 100644
> > --- a/client/tests/kvm/control
> > +++ b/client/tests/kvm/control
> > @@ -67,3 +67,10 @@ if args:
> >  parser.parse_string(str)
> >
> >  virt_utils.run_tests(parser, job)
> > +
> > +# compare the performance results of the job
> > +# from autotest_lib.client.virt.tests import regression
> > +# regression.compare("ntttcp", "$olddir",
> > +# "%s/results/default/" % os.environ['AUTODIR'],
> > +# config_file="%s/tests/kvm/perf.conf" % os.environ['AUTODIR'],
> > +# output_dir="%s/results/default/" % os.environ['AUTODIR'])
> > diff --git a/client/tests/kvm/perf.conf b/client/tests/kvm/perf.conf
> > new file mode 100644
> > index 0000000..31b72b2
> > --- /dev/null
> > +++ b/client/tests/kvm/perf.conf
> > @@ -0,0 +1,23 @@
> > +# this config file is used to set test related parameters
> > +#
> > +
> > +[server]
> > +result_nfs = kvm-autotest.englab.nay.redhat.com:/usr/local/autotest/results
> Is it suitable to use an internal-only address as the default path
> for the public repo?
> If not, why not use a variable and let the user set it?

I noticed this problem when I sent it out ;) I'll fix it in the next version.
Thanks.

result_nfs = $autotest_server:/usr/local/autotest/results
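
A minimal sketch of how regression.py could expand such a variable
(the $autotest_server name and the AUTOTEST_SERVER environment
variable are just illustrations, not part of this patch):

import os, string

def expand_server(value):
    # replace $autotest_server with a user-supplied value, e.g.
    # AUTOTEST_SERVER=kvm-autotest.example.com set in the environment
    return string.Template(value).safe_substitute(
        autotest_server=os.environ.get("AUTOTEST_SERVER", "localhost"))

# result_nfs = expand_server(config.get("server", "result_nfs"))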

> > +result_mntdir = /results/
> > +
> > +[ntttcp]
> > +result_dir = results
> > +result_file_pattern = .*.RHS
> > +
> > +[netperf]
> > +result_dir = results
> > +result_file_pattern = netperf-result.RHS
> > +
> > +[iozone]
> > +result_dir = guest_test_results
> > +result_file_pattern =
> > +
> > +[ffsb]
> > +result_dir = results
> > +result_file_pattern =
> > +
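
(Each [testname] section tells regression.py where to look for
results: it effectively runs
find <dir> | grep <testname> | grep "<result_dir>/<result_file_pattern>"
over both the old and the new job directory.)
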
> > diff --git a/client/virt/tests/analyzer.py b/client/virt/tests/analyzer.py
> > new file mode 100644
> > index 0000000..9023c77
> > --- /dev/null
> > +++ b/client/virt/tests/analyzer.py
> > @@ -0,0 +1,224 @@
> > +import sys, re, string, time, commands, os, random
> > +
> > +def aton(str):
> > +    substring = re.split("\.", str)
> > +    if len(substring) == 1:
> > +        if substring[0].isdigit():
> > +            return string.atoi(str)
> > +    elif len(substring) == 2:
> > +        if substring[0].isdigit() and substring[1].isdigit():
> > +            return string.atof(str)
> > +    return False
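
Side note: aton() returns False for negative values, since
"-4".isdigit() is False. If signed values ever matter here, a simpler
sketch (illustrative, not part of the patch):

def aton2(s):
    # try int first, then float; return False when not numeric
    try:
        return int(s)
    except ValueError:
        try:
            return float(s)
        except ValueError:
            return False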
> > +
> > +def avg(dict, i):
> > +    linetmp = []
> > +    tmp     = []
> > +    lines   = {}
> > +
> > +    filecounts = len(dict)
> > +    for j in range(len(dict)):
> > +        lines[j] = re.split("\|", dict[j][i])
> > +    for value in range(len(lines[0])):
> > +        avgtmp = 0
> > +        column_calculate = 2
> > +        if value < column_calculate:
> > +            linetmp.append(lines[0][value])
> > +        else:
> > +            space = ""
> > +            strlen = len(lines[0][value])
> > +            for i in range(len(lines)):
> > +                avgtmp += (aton(lines[i][value].strip()))
> > +            if len(re.findall("\.", lines[0][value])) == 0:
> > +                avgtmpstr = "%d" % (avgtmp/filecounts)
> > +            else:
> > +                avgtmpstr = "%.2f" % (avgtmp/filecounts)
> > +
> > +            strlenvalue = len(avgtmpstr)
> > +            tmplen = strlen-strlenvalue
> > +            if value == (len(lines[0])-1):
> > +                for v in range(tmplen-1):
> > +                    space += " "
> > +                avgtmpstr = space + avgtmpstr + "\n"
> > +                linetmp.append(avgtmpstr)
> > +                break
> > +            for v in range(tmplen):
> > +                space += " "
> > +            avgtmpstr = space + avgtmpstr
> > +            linetmp.append(avgtmpstr)
> > +    line = "|".join(linetmp)
> > +    return line
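
(avg() averages one result line across all input files: the first two
columns are copied from file 1 as labels, and each remaining numeric
column is averaged and right-padded so the original column width is
preserved.)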
> > +
> > +def avgfile(filenames):
> > +    """
> > +    caculate the average of namelist
> > +    1)get the data of every file, then put the data into the dict
> > +    2)caculat the average of the file
> > +    """
> > +    filelines = []
> > +    dict = {}
> > +    name = "/tmp/netperf-avg-%s.txt" % random.random()
> > +
> > +    for i in range(len(filenames)):
> > +        fd = open(filenames[i], "r")
> > +        dict[i] = fd.readlines()
> > +        fd.close()
> > +    filenum = len(dict)
> > +    if filenum == 1:
> > +        content = dict[0]
> > +    else:
> > +        for i in range(len(dict[0])):
> > +            if dict[0][i] == dict[1][i]:
> > +                filelines.append(dict[0][i])
> > +            else:
> > +                line = avg(dict, i)
> > +                filelines.append(line)
> > +        content = filelines
> > +    f = open(name, "w")
> > +    f.write(''.join(content))
> > +    f.close()
> > +    return name
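
Note: with more than two input files, only files 1 and 2 are compared
when deciding whether a line needs averaging; a line that differs only
in file 3 or later is copied verbatim from file 1.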
> > +
> > +def record_result(name1, name2, file):
> > +
> > +    def tee(content):
> > +        f = open(file, "a")
> > +        f.write(content + "\n")
> > +        print content
> > +
> > +    result1  = {}
> > +    result2  = {}
> > +    result3  = {}
> > +    row      = 0
> > +    strlen   = 0
> > +    eachLine = ""
> > +    tee(name1)
> > +
> > +    # read the first file
> > +    fd = open(name1, "r")
> > +    for eachLine in fd:
> > +        #eachLine = ''.join(eachLine.split())
> > +        eachLine = eachLine.replace('\r', '')
> > +        eachLine = eachLine.replace('\n', '')
> > +        result1[row] = re.split("\|", eachLine)
> > +        row += 1
> > +
> > +    fd.close()
> > +    row = 0
> > +    # read the second file
> > +    fd = open(name2, "r")
> > +    for eachLine in fd:
> > +        #eachLine = ''.join(eachLine.split())
> > +        eachLine = eachLine.replace('\r', '')
> > +        eachLine = eachLine.replace('\n', '')
> > +        if re.findall("sessions", eachLine):
> > +            strlen = len(eachLine)
> > +        result2[row] = re.split("\|", eachLine)
> > +        row += 1
> > +
> > +    fd.close()
> > +
> > +    name1_list = re.split("/", name1)
> > +    name2_list = re.split("/", name2)
> > +
> > +    len1 = len(name1_list)
> > +    file_name11 = name1_list[len1-1]
> > +    len2 = len(name2_list)
> > +    file_name22 = name2_list[len2-1]
> > +
> > +    #rename the file which will save the result
> > +    name1list = re.split("-", file_name11)
> > +    name2list = re.split("-", file_name22)
> > +    if (len(name1list) > len(name2list)):
> > +        namelen = len(name2list)
> > +    else:
> > +        namelen = len(name1list)
> > +
> > +    resultlist = []
> > +    for i in range(namelen):
> > +        if name1list[i] == name2list[i]:
> > +            resultlist.append(name1list[i])
> > +
> > +    timevalue = time.time()
> > +    timestring = time.ctime(timevalue)
> > +    tee("%s\n" % timestring)
> > +    tee("1 - %s" % name1)
> > +    tee("2 - %s\n" % name2)
> > +
> > +    #calculate the length of the separator line
> > +    eachLine = ""
> > +    for i in range(strlen):
> > +        eachLine += "="
> > +    eachLine += "======"
> > +    tee("%s" % eachLine)
> > +    row = strlen = 0
> > +    for row in result1:
> > +        if result1[row] == result2[row]:
> > +            if len(result1[row]) > 1:
> > +                result1[row][0] = "   %s" % result1[row][0]
> > +                eachLine = "|".join(result1[row])
> > +                tee("%s" % eachLine)
> > +            else:
> > +                eachLine = "|".join(result1[row])
> > +                tee("%s" % eachLine)
> > +        else:
> > +            strlen = len(result1[row][0])
> > +            tmp = result1[row][0].strip()
> > +            tmp = "%s" % tmp
> > +            result1[row][0] = tmp.rjust(strlen, ' ')
> > +            result1[row][0] = "1  %s" % result1[row][0]
> > +            eachLine = "|".join(result1[row])
> > +            tee("%s" % eachLine)
> > +
> > +            strlen = len(result2[row][0])
> > +            tmp = result2[row][0].strip()
> > +            tmp = "%s" % tmp
> > +            result2[row][0] = tmp.rjust(strlen, ' ')
> > +            result2[row][0] = "2  %s" % result2[row][0]
> > +            eachLine = "|".join(result2[row])
> > +            tee("%s" % eachLine)
> > +
> > +            result_tmp = []
> > +            strlen = 0
> > +            result_column = 1
> > +            for i in range(len(result1[row])):
> > +                if i < result_column:
> > +                    tmp_str = ""
> > +                    strlen += len(result1[row][i])
> > +                    tmp_str = tmp_str.rjust(strlen-1, ' ')
> > +                    tmp_str = "%" + tmp_str
> > +                    if i == result_column - 1:
> > +                        result_tmp.append(tmp_str)
> > +                elif i >= result_column:
> > +                    strlen = len(result1[row][i])
> > +                    aa = (result1[row][i]).strip()
> > +                    aa = string.atof(aa)
> > +                    bb = (result2[row][i]).strip()
> > +                    bb = string.atof(bb)
> > +                    if aa != 0:
> > +                        cc = ((bb-aa)/aa)*100
> > +                        if cc > 0:
> > +                            result = "+%.1f" % cc
> > +                        else:
> > +                            result = "%.1f" % cc
> > +                    else:
> > +                        result = "0"
> > +                    result_str = result.rjust(strlen, ' ')
> > +                    result_tmp.append(result_str)
> > +
> > +            eachLine = "|".join(result_tmp)
> > +            tee("%s" % eachLine)
> > +
> > +def analyze(list_files1, list_files2, output_dir=""):
> > +    average1 = avgfile(list_files1.split())
> > +    average2 = avgfile(list_files2.split())
> > +    f = os.path.join(output_dir, "end-report-%s.txt" %
> > +                     time.strftime('%Y-%m-%d-%H.%M.%S'))
> > +    record_result(average1, average2, f)
> > +    commands.getoutput("rm -f /tmp/netperf-avg-*")
> > +
> > +
> > +if __name__ == "__main__":
> > +    if len(sys.argv) < 3:
> > +        print 'Usage: python %s "$results_list1" "$results_list2" [output_dir]' % sys.argv[0]
> > +        sys.exit(1)
> > +    if len(sys.argv) > 3:
> > +        analyze(sys.argv[1], sys.argv[2], sys.argv[3])
> > +    else:
> > +        analyze(sys.argv[1], sys.argv[2])
> > diff --git a/client/virt/tests/regression.py b/client/virt/tests/regression.py
> > new file mode 100644
> > index 0000000..e2588a7
> > --- /dev/null
> > +++ b/client/virt/tests/regression.py
> > @@ -0,0 +1,33 @@
> > +import ConfigParser, sys, commands, os
> > +import analyzer
> > +
> > +def compare(testname, olddir, curdir, config_file='perf.conf', output_dir=""):
> > +    config = ConfigParser.ConfigParser()
> > +    config.read(config_file)
> > +
> > +    result_nfs = config.get("server", "result_nfs")
> > +    result_mntdir = config.get("server", "result_mntdir")
> > +    result_dir = config.get(testname, "result_dir")
> > +    result_file_pattern = config.get(testname, "result_file_pattern")
> > +
> > +    def search_files(dir):
> > +        cmd = 'find %s|grep %s|grep "%s/%s"' % (dir,
> > +           testname, result_dir, result_file_pattern)
> > +        return commands.getoutput(cmd)
> > +
> > +    if not os.path.isdir(result_mntdir):
> > +        os.mkdir(result_mntdir)
> > +    commands.getoutput("mount %s %s" % (result_nfs,
> > result_mntdir))
> > +
> > +    if not os.path.isabs(olddir):
> > +        olddir = result_mntdir + olddir
> > +    oldlist = search_files(olddir)
> > +    newlist = search_files(curdir)
> > +    if oldlist != "" and newlist != "":
> > +        analyzer.analyze(oldlist, newlist, output_dir)
> > +
> > +if __name__ == "__main__":
> > +    if len(sys.argv) != 5:
> > +        print 'Usage: python %s $testname $dir1 $dir2 $configfile' % sys.argv[0]
> > +        sys.exit(1)
> > +    compare(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
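
One more note: compare() mounts result_nfs but never unmounts it. A
possible cleanup at the end of compare() (sketch):

    commands.getoutput("umount %s" % result_mntdir)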
> >
> 
> Thanks,
> Yang
> --
>     """
>     Keep It Simple, Stupid.
>     """
> 
> Chinese Name: 白杨
> Nick Name: Hamo
> Homepage: http://hamobai.com/
> GPG KEY ID: 0xA4691A33
> Key fingerprint = 09D5 2D78 8E2B 0995 CF8E  4331 33C4 3D24 A469 1A33
> 