mirror of
https://git.yoctoproject.org/poky
synced 2026-04-30 03:32:12 +02:00
resulttool: Improvements to allow integration to the autobuilder
This is a combined patch of the various tweaks and improvements I made to resulttool: * Avoid subprocess.run() as it's a python 3.6 feature and we have autobuilder workers with 3.5. * Avoid python keywords as variable names * Simplify dict accesses using .get() * Rename resultsutils -> resultutils to match the resultstool -> resulttool rename * Formalised the handling of "file_name" to "TESTSERIES" which the code will now add into the json configuration data if it's not present, based on the directory name. * When we don't have failed test cases, print something saying so instead of an empty table * Tweak the table headers in the report to be more readable (reference "Test Series" instead of file_id and ID instead of results_id) * Improve/simplify the max string length handling * Merge the counts and percentage data into one table in the report since printing two reports of the same data confuses the user * Removed the confusing header in the regression report * Show matches, then regressions, then unmatched runs in the regression report, also remove chatty unneeded output * Try harder to "pair" up matching configurations to reduce noise in the regressions report * Abstracted the "mapping" table concept used for pairing in the regression code to general code in resultutils * Created multiple mappings for results analysis, results storage and 'flattening' results data in a merge * Simplify the merge command to take a source and a destination, letting the destination be a directory or a file, removing the need for an output directory parameter * Add the 'IMAGE_PKGTYPE' and 'DISTRO' config options to the regression mappings * Have the store command place the testresults files in a layout from the mapping, making commits into the git repo for results storage more useful for simple comparison purposes * Set the oe-git-archive tag format appropriately for oeqa results storage (and simplify the commit messages closer to their defaults) * Fix oe-git-archive to use the
commit/branch data from the results file * Cleaned up the command option help to match other changes * Follow the model of git branch/tag processing used by oe-build-perf-report and use that to read the data using git show to avoid branch change * Add ptest summary to the report command * Update the tests to match the above changes (From OE-Core rev: b4513e75f746a0989b09ee53cb85e489d41e5783) Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
This commit is contained in:
127
scripts/lib/resulttool/resultutils.py
Normal file
127
scripts/lib/resulttool/resultutils.py
Normal file
@@ -0,0 +1,127 @@
|
||||
# resulttool - common library/utility functions
|
||||
#
|
||||
# Copyright (c) 2019, Intel Corporation.
|
||||
# Copyright (c) 2019, Linux Foundation
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify it
|
||||
# under the terms and conditions of the GNU General Public License,
|
||||
# version 2, as published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
# more details.
|
||||
#
|
||||
import os
|
||||
import json
|
||||
import scriptpath
|
||||
scriptpath.add_oe_lib_path()
|
||||
|
||||
# TEST_TYPE -> list of configuration keys used to build the "testpath"
# grouping results, one mapping per processing mode: flattening merged
# results, regression pairing, and on-disk storage layout.

_TEST_TYPES = ("oeselftest", "runtime", "sdk", "sdkext")

# Flattening a merge: no keys, so all results collapse into one bucket.
# (Comprehension keeps each value a distinct list object.)
flatten_map = {ttype: [] for ttype in _TEST_TYPES}

# Regression pairing: configuration keys two runs must share to be compared.
regression_map = {
    "oeselftest": ['TEST_TYPE', 'MACHINE'],
    "runtime": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'IMAGE_PKGTYPE', 'DISTRO'],
    "sdk": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
    "sdkext": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
}

# Storage: keys forming the directory layout used when storing results.
store_map = {
    "oeselftest": ['TEST_TYPE'],
    "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'],
    "sdk": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
    "sdkext": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
}
|
||||
|
||||
#
# Load the json file and append the results data into the provided results dict
#
def append_resultsdata(results, f, configmap=store_map):
    """Merge test results from f into the results dict, in place.

    results:   {testpath: {resultsid: resultdata}}, updated in place
    f:         a json filename, or an already-loaded dict of the same shape
    configmap: TEST_TYPE -> list of configuration keys used to build the
               "testpath" grouping key (defaults to the storage layout)

    Raises ValueError if an entry lacks its configuration/result sections
    or has a TEST_TYPE not present in configmap.
    """
    if isinstance(f, str):
        with open(f, "r") as filedata:
            data = json.load(filedata)
    else:
        data = f
    for res in data:
        if "configuration" not in data[res] or "result" not in data[res]:
            raise ValueError("Test results data without configuration or result section?")
        if "TESTSERIES" not in data[res]["configuration"]:
            # Derive the series name from the containing directory.
            # NOTE(review): when f is a dict rather than a filename this
            # os.path.dirname() call raises TypeError -- confirm callers
            # passing dicts always supply TESTSERIES.
            data[res]["configuration"]["TESTSERIES"] = os.path.basename(os.path.dirname(f))
        testtype = data[res]["configuration"].get("TEST_TYPE")
        if testtype not in configmap:
            raise ValueError("Unknown test type %s" % testtype)
        testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype])
        if testpath not in results:
            results[testpath] = {}
        # Discard raw ptest logs and per-section logs before storing.
        if 'ptestresult.rawlogs' in data[res]['result']:
            del data[res]['result']['ptestresult.rawlogs']
        if 'ptestresult.sections' in data[res]['result']:
            for i in data[res]['result']['ptestresult.sections']:
                # Guard the delete: a section without a 'log' entry used to
                # raise KeyError here.
                if 'log' in data[res]['result']['ptestresult.sections'][i]:
                    del data[res]['result']['ptestresult.sections'][i]['log']
        results[testpath][res] = data[res]
|
||||
|
||||
#
# Walk a directory and find/load results data
# or load directly from a file
#
def load_resultsdata(source, configmap=store_map):
    """Load results from a single file, or from every testresults.json
    found under a directory tree, grouped via configmap."""
    results = {}
    # A plain file is loaded directly.
    if os.path.isfile(source):
        append_resultsdata(results, source, configmap)
        return results
    # Otherwise walk the tree and pick up every testresults.json.
    for root, _, files in os.walk(source):
        for name in files:
            if name != "testresults.json":
                continue
            append_resultsdata(results, os.path.join(root, name), configmap)
    return results
|
||||
|
||||
def filter_resultsdata(results, resultid):
    """Return a copy of results restricted to entries matching resultid.

    results:  {testpath: {resultsid: resultdata}} as built by load_resultsdata
    resultid: the result id to keep

    Testpaths with no matching id are omitted from the returned dict.
    """
    newresults = {}
    for r in results:
        for i in results[r]:
            # Bug fix: the original compared against the undefined name
            # "resultsid" (typo for the "resultid" parameter), so any call
            # that reached this line raised NameError.
            if i == resultid:
                newresults[r] = {}
                newresults[r][i] = results[r][i]
    return newresults
|
||||
|
||||
def save_resultsdata(results, destdir, fn="testresults.json"):
    """Write each result set under destdir, one json file per testpath.

    results: {testpath: resultdata}; a testpath of "" (or any falsy key)
             writes directly into destdir, otherwise testpath becomes a
             subdirectory path (it already uses "/" separators).
    destdir: base output directory, created as needed
    fn:      filename to use for each results file
    """
    for res in results:
        # Deliberate "/" concatenation rather than os.path.join: testpath
        # keys embed "/" separators and may contain empty components that
        # os.path.join would mishandle.
        if res:
            dst = destdir + "/" + res + "/" + fn
        else:
            dst = destdir + "/" + fn
        os.makedirs(os.path.dirname(dst), exist_ok=True)
        with open(dst, 'w') as f:
            # json.dump streams straight to the file instead of building
            # the whole document in memory with dumps() first.
            json.dump(results[res], f, sort_keys=True, indent=4)
|
||||
|
||||
def git_get_result(repo, tags):
    # Read every testresults.json stored under the given git tags and
    # merge them into one results dict.
    # repo: an object exposing run_cmd() that runs git commands and
    #       returns their output as a string (presumably the GitRepo
    #       helper from the oe libs -- confirm against callers)
    # tags: list of git tag/branch names to read from
    git_objs = []
    for tag in tags:
        # List every path stored under the tag and keep the results files,
        # as "tag:path" specs that `git show` can print.
        files = repo.run_cmd(['ls-tree', "--name-only", "-r", tag]).splitlines()
        git_objs.extend([tag + ':' + f for f in files if f.endswith("testresults.json")])

    def parse_json_stream(data):
        """Parse multiple concatenated JSON objects"""
        # The stored files end without a trailing newline (save_resultsdata
        # writes json.dumps() output directly), so when `git show` prints
        # several of them back to back the boundary between two documents
        # appears as a single '}{' line -- split on that.
        objs = []
        json_d = ""
        for line in data.splitlines():
            if line == '}{':
                json_d += '}'
                objs.append(json.loads(json_d))
                json_d = '{'
            else:
                json_d += line
        objs.append(json.loads(json_d))
        return objs

    # Optimize by reading all data with one git command
    results = {}
    for obj in parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--'])):
        append_resultsdata(results, obj)

    return results
|
||||
Reference in New Issue
Block a user