[PATCH 6/6] Changes to the tko rpc interface

Implement the RPC interface logic for test results comparison.

Signed-off-by: Dror Russo <drusso@xxxxxxxxxx>
---
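A minimal usage sketch for reviewers, showing how the new entry points are
meant to be chained from within the frontend environment (the test name,
owner, attribute names and test ID below are only illustrative):

    from autotest_lib.new_tko.tko import rpc_interface

    # Save the per-user, per-test attribute list used for comparison.
    # attr_token is a comma separated attribute list, testset_token a
    # regular expression selecting which test names to compare against.
    rpc_interface.add_test_comparison_attr(
            name='kvm.migrate', owner='someuser',
            attr_token='kvm_version,guest_os',
            testset_token=r'kvm\..*')

    # Build the comparison report for one execution of that test.
    report = rpc_interface.get_testcase_comparison_data(
            test='12345', attr='kvm_version,guest_os', testset=r'kvm\..*')
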
 new_tko/tko/rpc_interface.py |  375 +++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 373 insertions(+), 2 deletions(-)

diff --git a/new_tko/tko/rpc_interface.py b/new_tko/tko/rpc_interface.py
index ac7e0b7..b8f56ca 100644
--- a/new_tko/tko/rpc_interface.py
+++ b/new_tko/tko/rpc_interface.py
@@ -1,10 +1,11 @@
-import os, pickle, datetime, itertools, operator
+import os, pickle, datetime, itertools, operator, urllib2, re
 from django.db import models as dbmodels
 from autotest_lib.frontend import thread_local
 from autotest_lib.frontend.afe import rpc_utils, model_logic
 from autotest_lib.frontend.afe import readonly_connection
-from autotest_lib.new_tko.tko import models, tko_rpc_utils, graphing_utils
+from autotest_lib.new_tko.tko import models, tko_rpc_utils, graphing_utils, db
 from autotest_lib.new_tko.tko import preconfigs
+from autotest_lib.client.common_lib import global_config
 
 # table/spreadsheet view support
 
@@ -377,6 +378,52 @@ def delete_saved_queries(id_list):
     query.delete()
 
 
+def get_test_comparison_attr(**filter_data):
+    """
+    The attributes list for test comparison is stored in the test comparison
+    attributes db table, per user and per test name. This function returns
+    the database records (if any) that match the user name and test name
+    provided in filter_data.
+
+    @param **filter_data: Dictionary of parameters that will be delegated to
+            TestComparisonAttribute.list_objects.
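+
+            A filter like {'owner': 'someuser', 'testname': 'kvm.migrate'}
+            (illustrative values) returns the record for that user and test.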
+    """
+    return rpc_utils.prepare_for_serialization(
+        models.TestComparisonAttribute.list_objects(filter_data))
+
+
+def add_test_comparison_attr(name, owner, attr_token=None, testset_token=""):
+    """
+    Adds a record to the test comparison attributes db table, or updates the
+    existing record for the same owner and test name.
+
+    @param name: Test name.
+    @param owner: Test owner.
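+    @param attr_token: Comma separated list of attribute names to compare on.
+    @param testset_token: Regular expression that selects the test names to
+            compare against; defaults to the test name itself.
+    @return: ID of the created or updated record.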
+    """
+    testname = name.strip()
+    if attr_token:
+        # If testset is not defined, default to the test name (i.e. compare
+        # against previous executions of the given testname).
+        if not testset_token:
+            testset = testname
+        else:
+            testset = testset_token.strip()
+        existing_list = models.TestComparisonAttribute.objects.filter(
+                owner=owner, testname=name)
+        if existing_list:
+            query_object = existing_list[0]
+            query_object.attributes = attr_token.strip()
+            query_object.testset = testset
+            query_object.save()
+            return query_object.id
+        return models.TestComparisonAttribute.add_object(owner=owner,
+                                                         testname=name,
+                                                         attributes=attr_token,
+                                                         testset=testset).id
+    else:
+        raise model_logic.ValidationError('No attributes defined for '
+                                          'comparison operation')
+
+
 # other
 def get_motd():
     return rpc_utils.get_motd()
@@ -445,3 +492,327 @@ def get_static_data():
     result['motd'] = rpc_utils.get_motd()
 
     return result
+
+
+def _get_test_lists(testID, attr_keys, testset):
+    """
+    A test execution is a list containing header (ID, Name, Status, Reason)
+    and attribute values. This function searches the results database (tko)
+    and returns the data needed for the comparison.
+
+    @param testID: Test ID.
+    @param attr_keys: List of test attribute names (regular expressions are
+            allowed).
+    @param testset: Regular expression that selects the test names to compare
+            against.
+    @return: Tuple with the following elements:
+            * List of all attributes that matched the regular expressions in
+              the attribute keys provided.
+            * Test headers list.
+            * Current test execution, to be compared with past executions.
+            * List of previous test executions whose name matches testset and
+              which define all the comparison attributes.
+    """
+    tko_db = db.db()
+    [(testname, status, reason)] = tko_db.select('test,status_word,reason',
+                                                 'test_view',
+                                                 {'test_idx': long(testID)},
+                                                 distinct=True)
+
+    # From all previous executions, filter those that name matches the regular
+    # expression provided by the user
+    previous_valid_test_executions = []
+    rows = tko_db.select('test_idx, test, status_word, reason', 'test_view', {},
+                         distinct=True)
+    p = re.compile('^%s$' % testset.strip())
+    for row in rows:
+        m = p.match(row[1])
+        if m:
+            previous_valid_test_executions.append(row)
+    if len(previous_valid_test_executions) == 0:
+        raise model_logic.ValidationError('No comparison data available for '
+                                          'this configuration.')
+
+    previous_test_executions = []
+    test_headers = ['ID', 'NAME', 'STATUS', 'REASON']
+    current_test_execution = [testID, str(testname), str(status), str(reason)]
+    current_test_execution_attr = []
+    for tc_attr in tko_db.select('attribute', 'test_attributes',
+                                 {'test_idx': long(testID)}, distinct=True):
+        current_test_execution_attr.append(str(tc_attr[0]))
+
+    # Find all attributes for the comparison (including all matches if
+    # regexp specified)
+    attributes = []
+    for key in attr_keys:
+        valid_key = False
+        p = re.compile('^%s$' % key.strip())
+        for tc_attr in current_test_execution_attr:
+            m = p.match(tc_attr)
+            if m:
+                test_headers.append(tc_attr)
+                attributes.append(tc_attr)
+                valid_key = True
+        if not valid_key:
+            raise model_logic.ValidationError("Attribute '%s' does "
+                                              "not exist in this test "
+                                              "execution." % key)
+    for row in previous_valid_test_executions:
+        if row[0] <= long(testID): # look at historical data only
+                                   # (previous test executions)
+            previous_test_attributes = []
+            for attr in tko_db.select('attribute', 'test_attributes',
+                                      {'test_idx': row[0]}, distinct=True):
+                previous_test_attributes.append(str(attr[0]))
+            # Check that previous test contains all required attributes
+            # for comparison
+            gap = [val for val in attributes if val not in
+                   previous_test_attributes]
+            if len(gap) == 0:
+                if row[0] != long(testID):
+                    test = [int(row[0]), str(row[1]), str(row[2]), str(row[3])]
+                for key in attributes:
+                    for (attr_key, attr_val) in tko_db.select(
+                            'attribute,value', 'test_attributes',
+                            {'test_idx': row[0]},
+                            distinct=True):
+                        if str(attr_key) == key:
+                            if row[0] == long(testID):
+                                current_test_execution.append(str(attr_val))
+                            else:
+                                test.append(str(attr_val))
+                            break
+                if row[0] != long(testID):
+                    previous_test_executions.append(test)
+
+    if len(previous_test_executions) == 0:
+        raise model_logic.ValidationError('No comparison data available '
+                                          'for this configuration.')
+
+    return (attributes, test_headers, current_test_execution,
+            previous_test_executions)
+
+
+def _find_mismatches(headers, orig, new, valid_attr):
+    """
+    Finds the attributes mismatch between two tests provided: orig and new.
+
+    @param headers: Test headers.
+    @param orig: First test to be compared.
+    @param new: Second test to be compared.
+    @param valid_attr: Test attributes to be considered valid.
+    @return: Tuple with the number of mismatching attributes found, a comma
+            separated string of the mismatching attribute names and a comma
+            separated string of the mismatching attribute values.
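+
+    Illustrative example (all values below are hypothetical):
+        headers = ['ID', 'NAME', 'STATUS', 'REASON', 'kvm_version']
+        orig = [100, 'mytest', 'GOOD', '', '84']
+        new = [95, 'mytest', 'FAIL', 'timeout', '83']
+        _find_mismatches(headers, orig, new, ['kvm_version'])
+        => (1, 'kvm_version', '83')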
+    """
+    i = 0
+    mismatched_headers = []
+    mismatched_values = []
+
+    for index, item in enumerate(orig):
+        if item != new[index] and headers[index] in valid_attr:
+            i += 1
+            mismatched_headers.append(headers[index])
+            mismatched_values.append(new[index])
+
+    return (i, ','.join(mismatched_headers), ','.join(mismatched_values))
+
+
+def _prepare_test_analysis_dict(test_headers, previous_test_executions,
+                                current_test_execution, passed, attributes,
+                                max_mismatches_allowed=2):
+    """
+    Prepares and returns a dictionary of comparison analysis data.
+
+    @param test_headers: List of test headers.
+    @param previous_test_executions: List of previous test executions.
+    @param current_test_execution: Test execution being visualized on TKO.
+    @param passed: List of status values considered passed (e.g. ['GOOD']).
+    @param attributes: List of test attributes.
+    @param max_mismatches_allowed: Maximum number of attribute mismatches to
+            be considered for analysis.
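+    @return: Tuple (analysis dict, counter key names). The analysis dict is
+            keyed by mismatch count, then by mismatching attribute names, then
+            by attribute values, with 'total', 'passed' and 'pass_rate'
+            counters at each level and test case lists keyed by status.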
+    """
+    dict = {}
+    counters = ['total', 'passed', 'pass_rate']
+    if len(previous_test_executions) > 0: # At least one test defined
+        for item in previous_test_executions:
+            (mismatches, attr_headers,
+             attr_values) = _find_mismatches(test_headers,
+                                             current_test_execution, item,
+                                             attributes)
+            if mismatches <= max_mismatches_allowed:
+                # Create dictionary keys if they do not exist and
+                # initialize all counters
+                if mismatches not in dict.keys():
+                    dict[mismatches] = {'total': 0, 'passed': 0, 'pass_rate': 0}
+                if attr_headers not in dict[mismatches].keys():
+                    dict[mismatches][attr_headers] = {'total': 0, 'passed': 0,
+                                                      'pass_rate': 0}
+                if attr_values not in dict[mismatches][attr_headers].keys():
+                    dict[mismatches][attr_headers][attr_values] = {'total': 0,
+                                                                   'passed': 0,
+                                                                 'pass_rate': 0}
+                if (item[test_headers.index('STATUS')] not in
+                    dict[mismatches][attr_headers][attr_values].keys()):
+                    s = item[test_headers.index('STATUS')]
+                    dict[mismatches][attr_headers][attr_values][s] = []
+
+                # Update all counters
+                testcase = [item[test_headers.index('ID')],
+                            item[test_headers.index('NAME')],
+                            item[test_headers.index('REASON')]]
+
+                s = item[test_headers.index('STATUS')]
+                dict[mismatches][attr_headers][attr_values][s].append(testcase)
+                dict[mismatches]['total'] += 1
+                dict[mismatches][attr_headers]['total'] += 1
+                dict[mismatches][attr_headers][attr_values]['total'] += 1
+
+                if item[test_headers.index('STATUS')] in passed:
+                    dict[mismatches]['passed'] += 1
+                    dict[mismatches][attr_headers]['passed'] += 1
+                    dict[mismatches][attr_headers][attr_values]['passed'] += 1
+
+                p = float(dict[mismatches]['passed'])
+                t = float(dict[mismatches]['total'])
+                dict[mismatches]['pass_rate'] = int(p/t * 100)
+
+                p = float(dict[mismatches][attr_headers]['passed'])
+                t = float(dict[mismatches][attr_headers]['total'])
+                dict[mismatches][attr_headers]['pass_rate'] = int(p/t * 100)
+
+                p = float(dict[mismatches][attr_headers][attr_values]['passed'])
+                t = float(dict[mismatches][attr_headers][attr_values]['total'])
+                dict[mismatches][attr_headers][attr_values]['pass_rate'] = (
+                                                                int(p/t * 100))
+
+    return (dict, counters)
+
+
+def _make_content(testID, current_test_execution, headers, counters, t_dict,
+                  attributes, max_mismatches_allowed):
+    """
+    Prepares the comparison analysis text to be presented in the frontend
+    GUI.
+
+    @param testID: Test ID.
+    @param current_test_execution: Current test execution.
+    @param headers: Test headers.
+    @param counters: Counter key names to be skipped when walking t_dict.
+    @param t_dict: Dictionary with comparison analysis data, as returned
+            by _prepare_test_analysis_dict.
+    @param attributes: Test attributes.
+    @param max_mismatches_allowed: Maximum number of attribute mismatches.
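+    @return: Multi-line string with the comparison report to be presented.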
+    """
+    mismatches = t_dict.keys()
+    content = ('Test case comparison with the following attributes: %s\n' %
+               str(attributes))
+    content += ('(maximum allowed mismatching attributes = %d)\n' %
+                int(max_mismatches_allowed))
+
+    for mismatch in mismatches:
+        if mismatch not in counters:
+            content += ('\n%d mismatching attributes (%d/%d)\n' %
+                (mismatch, int(t_dict[mismatch]['passed']),
+                int(t_dict[mismatch]['total'])))
+        attribute_headers = t_dict[mismatch].keys()
+        for attr_header in attribute_headers:
+            if attr_header not in counters:
+                if int(mismatch) > 0:
+                    p = int(t_dict[mismatch][attr_header]['passed'])
+                    t = int(t_dict[mismatch][attr_header]['total'])
+                    content += '  %s (%d/%d)\n' % (attr_header, p, t)
+            attribute_values = t_dict[mismatch][attr_header].keys()
+            for attr_value in attribute_values:
+                if attr_value not in counters:
+                    if int(mismatch) > 0:
+                        p = int(
+                            t_dict[mismatch][attr_header][attr_value]['passed'])
+                        t = int(
+                            t_dict[mismatch][attr_header][attr_value]['total'])
+                        content += ('    %s (%d/%d)\n' % (attr_value, p, t))
+                    test_sets = (
+                            t_dict[mismatch][attr_header][attr_value].keys())
+                    for test_set in test_sets:
+                        if test_set not in counters:
+                            tc = (
+                            t_dict[mismatch][attr_header][attr_value][test_set])
+                            if len(tc) > 0:
+                                content += '      %s\n' % (test_set)
+                                for t in tc:
+                                    link = '%d : %s : %s' % (t[0], t[1], t[2])
+                                    content += '       %s\n' % link
+
+    return content
+
+
+def get_testcase_comparison_data(test, attr, testset):
+    """
+    Test case comparison compares a given test execution with other executions
+    of the same test according to a predefined list of attributes. The result
+    is a dictionary of test cases classified by status (GOOD, FAIL, etc.) and
+    attributes mismatching distance (same idea as hamming distance, just
+    implemented on test attributes).
+
+    Distance of 0 refers to tests that have identical attribute values,
+    distance of 1 to tests that have only one different attribute value, so on
+    and so forth.
+
+    The general layout of test case comparison result is:
+
+    number of mismatching attributes (nSuccess/nTotal)
+       differing attributes (nSuccess/nTotal)
+         attribute value (nSuccess/nTotal)
+           Status
+             testID   reason
+             ...
+           Status (different)
+             testID   reason
+             ...
+           ...
+
+    @param test: Test ID that we will get comparison data for.
+    @param attr: String of attributes to use for the comparison, delimited by
+            comma.
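+    @param testset: Regular expression that selects the previous test
+            executions (by test name) to compare against.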
+    @return: Test comparison report prepared for serialization.
+    """
+    result = {}
+    tid = None
+    attr_keys = None
+    attributes = None
+    if test:
+        tid = int(test)
+    if attr:
+        attr_keys = attr.replace(' ', '').split(',')
+
+    # process test comparison analysis
+    try:
+        c = global_config.global_config
+        max_mismatches_allowed = int(c.get_config_value('TKO',
+                                'test_comparison_maximum_attribute_mismatches'))
+        passed = ['GOOD']
+        if not tid:
+            raise model_logic.ValidationError('Test was not specified.')
+        if not attr_keys:
+            raise model_logic.ValidationError('At least one attribute must be '
+                                              'specified.')
+        if not testset:
+            raise model_logic.ValidationError('Previous test executions scope '
+                                              'must be specified.')
+
+        (attributes, test_headers, current_test_execution,
+         previous_test_executions) = _get_test_lists(tid, attr_keys, testset)
+
+        (test_comparison_dict,
+         counters) = _prepare_test_analysis_dict(test_headers,
+                                                 previous_test_executions,
+                                                 current_test_execution,
+                                                 passed, attributes,
+                                                 max_mismatches_allowed)
+
+        result = _make_content(tid, current_test_execution, test_headers,
+                               counters, test_comparison_dict, attributes,
+                               max_mismatches_allowed)
+
+    except urllib2.HTTPError:
+        result = 'Test comparison error!'
+
+    return rpc_utils.prepare_for_serialization(result)
-- 
1.6.2.5
