mirror of
https://git.yoctoproject.org/poky
synced 2026-02-23 01:49:40 +01:00
Ensure that even if a ptests results section is empty, the log parser adds that empty section. Then ensure that empty sections trigger warnings. This means if a ptest suddenly stops returning any results, we notice and see warnings about it. This has gone unnoticed on the autobuilder far too many times, so it is very much worth highlighting as a regression. We shouldn't have empty ptests. (From OE-Core rev: 5ad0cf57b41ec7f44647a03bc568d0b24906cc8d) Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
121 lines
4.8 KiB
Python
121 lines
4.8 KiB
Python
#
|
|
# Copyright OpenEmbedded Contributors
|
|
#
|
|
# SPDX-License-Identifier: MIT
|
|
#
|
|
|
|
import os
|
|
import unittest
|
|
import pprint
|
|
import datetime
|
|
|
|
from oeqa.runtime.case import OERuntimeTestCase
|
|
from oeqa.core.decorator.depends import OETestDepends
|
|
from oeqa.core.decorator.data import skipIfNotFeature
|
|
from oeqa.runtime.decorator.package import OEHasPackage
|
|
from oeqa.utils.logparser import PtestParser
|
|
|
|
class PtestRunnerTest(OERuntimeTestCase):
    """Run ptest-runner on the target image and record per-test results.

    Two entry points wrap the same runner logic: one expects the ptests to
    pass, the other (marked @unittest.expectedFailure) is used when the
    build sets PTEST_EXPECT_FAILURE. Exactly one of them runs per image;
    the other skips itself based on the PTEST_EXPECT_FAILURE test datum.
    """

    @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['ptest-runner'])
    @unittest.expectedFailure
    def test_ptestrunner_expectfail(self):
        """Run ptests when failures are expected (PTEST_EXPECT_FAILURE set)."""
        if not self.td.get('PTEST_EXPECT_FAILURE'):
            self.skipTest('Cannot run ptests with @expectedFailure as ptests are required to pass')
        self.do_ptestrunner()

    @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['ptest-runner'])
    def test_ptestrunner_expectsuccess(self):
        """Run ptests when all of them are required to pass."""
        if self.td.get('PTEST_EXPECT_FAILURE'):
            self.skipTest('Cannot run ptests without @expectedFailure as ptests are expected to fail')
        self.do_ptestrunner()

    def do_ptestrunner(self):
        """Execute ptest-runner on the target, parse its log and fail on regressions.

        Saves the raw runner log under TEST_LOG_DIR, publishes parsed
        results into self.tc.extraresults, and fails the test if any ptest
        failed, any section produced no results at all, or the OOM killer
        fired on the target.
        """
        status, output = self.target.run('which ptest-runner', 0)
        if status != 0:
            self.skipTest("No -ptest packages are installed in the image")

        test_log_dir = self.td.get('TEST_LOG_DIR', '')
        # The TEST_LOG_DIR maybe NULL when testimage is added after
        # testdata.json is generated.
        if not test_log_dir:
            test_log_dir = os.path.join(self.td.get('WORKDIR', ''), 'testimage')
        # Make the test output path absolute, otherwise the output content will be
        # created relative to current directory
        if not os.path.isabs(test_log_dir):
            test_log_dir = os.path.join(self.td.get('TOPDIR', ''), test_log_dir)
        # Don't use self.td.get('DATETIME'), it's from testdata.json, not
        # up-to-date, and may cause "File exists" when re-run.
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        ptest_log_dir_link = os.path.join(test_log_dir, 'ptest_log')
        ptest_log_dir = '%s.%s' % (ptest_log_dir_link, timestamp)
        ptest_runner_log = os.path.join(ptest_log_dir, 'ptest-runner.log')

        libdir = self.td.get('libdir', '')
        ptest_dirs = [ '/usr/lib' ]
        if libdir not in ptest_dirs:
            ptest_dirs.append(libdir)
        status, output = self.target.run('ptest-runner -t 450 -d \"{}\"'.format(' '.join(ptest_dirs)), 0)
        os.makedirs(ptest_log_dir)
        with open(ptest_runner_log, 'w') as f:
            f.write(output)

        # status != 0 is OK since some ptest tests may fail
        self.assertTrue(status != 127, msg="Cannot execute ptest-runner!")

        if not hasattr(self.tc, "extraresults"):
            self.tc.extraresults = {}
        extras = self.tc.extraresults
        extras['ptestresult.rawlogs'] = {'log': output}

        # Parse and save results
        parser = PtestParser()
        results, sections = parser.parse(ptest_runner_log)
        parser.results_as_files(ptest_log_dir)
        if os.path.exists(ptest_log_dir_link):
            # Remove the old link to create a new one
            os.remove(ptest_log_dir_link)
        os.symlink(os.path.basename(ptest_log_dir), ptest_log_dir_link)

        extras['ptestresult.sections'] = sections

        # Result names must be valid identifiers for the result store: map
        # "()" to "_" and collapse whitespace runs to "_".
        zerolength = []
        trans = str.maketrans("()", "__")
        for section in results:
            for test in results[section]:
                result = results[section][test]
                testname = "ptestresult." + (section or "No-section") + "." + "_".join(test.translate(trans).split())
                extras[testname] = {'status': result}
            # A section that produced no results at all is itself a
            # regression worth warning about (see zerolength handling below).
            if not results[section]:
                zerolength.append(section)

        failed_tests = {}

        # Sections with a recorded 'exitcode' were aborted/timed out; keep
        # their full log for the failure message.
        for section in sections:
            if 'exitcode' in sections[section].keys():
                failed_tests[section] = sections[section]["log"]

        for section in results:
            failed_testcases = [ "_".join(test.translate(trans).split()) for test in results[section] if results[section][test] == 'FAILED' ]
            if failed_testcases:
                failed_tests[section] = failed_testcases

        failmsg = ""
        # OOM kills on the target invalidate ptest results, so surface them.
        status, output = self.target.run('dmesg | grep "Killed process"', 0)
        if output:
            failmsg = "ERROR: Processes were killed by the OOM Killer:\n%s\n" % output

        if failed_tests:
            failmsg = failmsg + "\nFailed ptests:\n%s\n" % pprint.pformat(failed_tests)

        if zerolength:
            failmsg = failmsg + "\nptests which had no test results:\n%s" % pprint.pformat(zerolength)

        if failmsg:
            self.logger.warning("There were failing ptests.")
            self.fail(failmsg)
|