OEQA outputs test results into JSON files, and these files are
archived by the Autobuilder during QA releases. For example, each
oe-selftest run by the Autobuilder on a different host distro
generates a testresults.json file.
These scripts were developed as test result tools to manage
these testresults.json files.
Using the "store" operation, user can store multiple testresults.json
files as well as the pre-configured directories used to hold those files.
Using the "merge" operation, user can merge multiple testresults.json
files to a target file.
Using the "report" operation, user can view the test result summary
for all available testresults.json files inside a ordinary directory
or a git repository.
Using the "regression-file" operation, user can perform regression
analysis on testresults.json files specified. Using the "regression-dir"
and "regression-git" operations, user can perform regression analysis
on directory and git accordingly.
These resulttool operations expect the testresults.json file to use
the JSON format below.
{
    "<testresult_1>": {
        "configuration": {
            "<config_name_1>": "<config_value_1>",
            "<config_name_2>": "<config_value_2>",
            ...
            "<config_name_n>": "<config_value_n>",
        },
        "result": {
            "<testcase_namespace_1>": {
                "status": "<PASSED or FAILED or ERROR or SKIPPED>",
                "log": "<failure or error logging>"
            },
            "<testcase_namespace_2>": {
                "status": "<PASSED or FAILED or ERROR or SKIPPED>",
                "log": "<failure or error logging>"
            },
            ...
            "<testcase_namespace_n>": {
                "status": "<PASSED or FAILED or ERROR or SKIPPED>",
                "log": "<failure or error logging>"
            },
        }
    },
    ...
    "<testresult_n>": {
        "configuration": {
            "<config_name_1>": "<config_value_1>",
            "<config_name_2>": "<config_value_2>",
            ...
            "<config_name_n>": "<config_value_n>",
        },
        "result": {
            "<testcase_namespace_1>": {
                "status": "<PASSED or FAILED or ERROR or SKIPPED>",
                "log": "<failure or error logging>"
            },
            "<testcase_namespace_2>": {
                "status": "<PASSED or FAILED or ERROR or SKIPPED>",
                "log": "<failure or error logging>"
            },
            ...
            "<testcase_namespace_n>": {
                "status": "<PASSED or FAILED or ERROR or SKIPPED>",
                "log": "<failure or error logging>"
            },
        }
    },
}
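For illustration, a minimal Python sketch that writes a testresults.json
file matching this format is shown below; the result id, configuration
keys and test case namespace used here are hypothetical example values,
not names required by the tools:

import json

# Hypothetical example following the format above; "status" must be one
# of PASSED, FAILED, ERROR or SKIPPED.
testresults = {
    "oe-selftest_example-distro": {
        "configuration": {
            "HOST_DISTRO": "example-distro",
            "TEST_TYPE": "oe-selftest",
        },
        "result": {
            "selftest.example.ExampleTests.test_example": {
                "status": "PASSED",
                "log": "",
            },
        },
    },
}

# Write the sample file in the layout the resulttool operations expect.
with open("testresults.json", "w") as f:
    json.dump(testresults, f, indent=4)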
To use these scripts, first source the OE build environment, then run
the entry point script below for help.
$ resulttool
To store test results from OEQA automated tests, execute the below
$ resulttool store <source_dir> <git_branch>
To merge multiple testresults.json files, execute the below
$ resulttool merge <base_result_file> <target_result_file>
To report a test result summary, execute the below
$ resulttool report <source_dir>
To perform regression analysis on files, execute the below
$ resulttool regression-file <base_result_file> <target_result_file>
To perform regression analysis on a directory, execute the below
$ resulttool regression-dir <base_result_dir> <target_result_dir>
To perform regression analysis on a git repository, execute the below
$ resulttool regression-git <source_dir> <base_branch> <target_branch>
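For example, to compare the results stored on two branches of a local
test results git repository, one could run (the directory and branch
names here are hypothetical):
$ resulttool regression-git testresults/ yocto-2.6 yocto-2.7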
[YOCTO# 13012]
[YOCTO# 12654]
(From OE-Core rev: 78a322d7be402a5b9b5abf26ad35670a8535408a)
Signed-off-by: Yeoh Ee Peng <ee.peng.yeoh@intel.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
# test result tool - report text based test results
#
# Copyright (c) 2019, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
import os
import glob
import json
from resulttool.resultsutils import checkout_git_dir, load_json_file, get_dict_value, get_directory_files

class ResultsTextReport(object):

    def get_aggregated_test_result(self, logger, testresult):
        # Tally pass/fail/skip counts for one test result entry, recording the
        # names of failed test cases; ERROR and UNKNOWN statuses count as failed.
        test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
        result_types = {'passed': ['PASSED', 'passed'],
                        'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
                        'skipped': ['SKIPPED', 'skipped']}
        result = get_dict_value(logger, testresult, 'result')
        for k in result:
            test_status = get_dict_value(logger, result[k], 'status')
            for tk in result_types:
                if test_status in result_types[tk]:
                    test_count_report[tk] += 1
            if test_status in result_types['failed']:
                test_count_report['failed_testcases'].append(k)
        return test_count_report

    def get_test_result_percentage(self, test_result_count):
        # Convert the raw counts into percentages of the total tested.
        total_tested = test_result_count['passed'] + test_result_count['failed'] + test_result_count['skipped']
        test_percent_report = {'passed': 0, 'failed': 0, 'skipped': 0}
        if total_tested == 0:
            # Guard against division by zero when a result entry has no test cases.
            return test_percent_report
        for k in test_percent_report:
            test_percent_report[k] = format(test_result_count[k] / total_tested * 100, '.2f')
        return test_percent_report

    def add_test_configurations(self, test_report, source_dir, file, result_id):
        # Attach the source location and result id so each report row can be
        # traced back to the file it came from.
        test_report['file_dir'] = self._get_short_file_dir(source_dir, file)
        test_report['result_id'] = result_id
        test_report['test_file_dir_result_id'] = '%s_%s' % (test_report['file_dir'], test_report['result_id'])

    def _get_short_file_dir(self, source_dir, file):
        # Return the file's directory relative to source_dir, or 'None' when
        # the file sits directly inside source_dir.
        file_dir = os.path.dirname(file)
        source_dir = source_dir[:-1] if source_dir[-1] == '/' else source_dir
        if file_dir == source_dir:
            return 'None'
        return file_dir.replace(source_dir, '')

    def get_max_string_len(self, test_result_list, key, default_max_len):
        # Compute the column width needed to print the given key across all rows.
        max_len = default_max_len
        for test_result in test_result_list:
            value_len = len(test_result[key])
            if value_len > max_len:
                max_len = value_len
        return max_len

    def print_test_report(self, template_file_name, test_count_reports, test_percent_reports,
                          max_len_dir, max_len_result_id):
        # Render the collected counts and percentages through a Jinja2 template
        # shipped in the 'template' directory alongside this module.
        from jinja2 import Environment, FileSystemLoader
        script_path = os.path.dirname(os.path.realpath(__file__))
        file_loader = FileSystemLoader(script_path + '/template')
        env = Environment(loader=file_loader, trim_blocks=True)
        template = env.get_template(template_file_name)
        output = template.render(test_count_reports=test_count_reports,
                                 test_percent_reports=test_percent_reports,
                                 max_len_dir=max_len_dir,
                                 max_len_result_id=max_len_result_id)
        print('Printing text-based test report:')
        print(output)

    def view_test_report(self, logger, source_dir, git_branch):
        # Optionally check out the requested git branch, then aggregate every
        # testresults.json found under source_dir into count and percentage rows.
        if git_branch:
            checkout_git_dir(source_dir, git_branch)
        test_count_reports = []
        test_percent_reports = []
        for file in get_directory_files(source_dir, ['.git'], 'testresults.json'):
            logger.debug('Computing result for test result file: %s' % file)
            testresults = load_json_file(file)
            for k in testresults:
                test_count_report = self.get_aggregated_test_result(logger, testresults[k])
                test_percent_report = self.get_test_result_percentage(test_count_report)
                self.add_test_configurations(test_count_report, source_dir, file, k)
                self.add_test_configurations(test_percent_report, source_dir, file, k)
                test_count_reports.append(test_count_report)
                test_percent_reports.append(test_percent_report)
        max_len_dir = self.get_max_string_len(test_count_reports, 'file_dir', len('file_dir'))
        max_len_result_id = self.get_max_string_len(test_count_reports, 'result_id', len('result_id'))
        self.print_test_report('test_report_full_text.txt', test_count_reports, test_percent_reports,
                               max_len_dir, max_len_result_id)

def report(args, logger):
    # Entry point for the 'report' subcommand.
    report = ResultsTextReport()
    report.view_test_report(logger, args.source_dir, args.git_branch)
    return 0

def register_commands(subparsers):
    """Register subcommands from this plugin"""
    parser_build = subparsers.add_parser('report', help='report test result summary',
                                         description='report text-based test result summary from the source directory',
                                         group='analysis')
    parser_build.set_defaults(func=report)
    parser_build.add_argument('source_dir',
                              help='source directory that contains the test result files for reporting')
    parser_build.add_argument('-b', '--git-branch', default='',
                              help='(optional) git branch to check out in the source directory; by default '
                                   'the source directory is assumed to already contain all the result files '
                                   'for reporting')