Mirror of https://github.com/AsahiLinux/u-boot
patman: Move test running/reporting to test_util
This code is useful in other tools. Move it into a common file so it can be shared.

Signed-off-by: Simon Glass <sjg@chromium.org>
parent 428e773011
commit ce0dc2edfc
2 changed files with 107 additions and 69 deletions
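For orientation, here is a minimal sketch of how a tool can drive its tests through the shared helpers after this change. The MyToolTest class and the 'mytool' name are illustrative, not part of the commit; the test_util calls mirror the signatures added below:

    import unittest

    import test_util    # the shared helpers from tools/patman/test_util.py

    class MyToolTest(unittest.TestCase):    # illustrative stand-in suite
        def testTrivial(self):
            self.assertEqual(2 + 2, 4)

    # Run all suites into one TestResult, then report under a tool name.
    result = unittest.TestResult()
    test_util.RunTestSuites(result, False, 1, False, None, None, [],
                            [MyToolTest])
    exit_code = test_util.ReportResult('mytool', None, result)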
binman main program:
@@ -11,7 +11,6 @@
 from distutils.sysconfig import get_python_lib
 import glob
-import multiprocessing
 import os
 import site
 import sys
@@ -37,11 +36,6 @@ sys.path.append(get_python_lib())
 
 import cmdline
 import command
-use_concurrent = True
-try:
-    from concurrencytest import ConcurrentTestSuite, fork_for_tests
-except:
-    use_concurrent = False
 import control
 import test_util
 
@@ -71,73 +65,17 @@ def RunTests(debug, verbosity, processes, test_preserve_dirs, args, toolpath):
     import doctest
 
     result = unittest.TestResult()
-    for module in []:
-        suite = doctest.DocTestSuite(module)
-        suite.run(result)
-
-    sys.argv = [sys.argv[0]]
-    if debug:
-        sys.argv.append('-D')
-    if verbosity:
-        sys.argv.append('-v%d' % verbosity)
-    if toolpath:
-        for path in toolpath:
-            sys.argv += ['--toolpath', path]
+    test_name = args and args[0] or None
 
     # Run the entry tests first ,since these need to be the first to import the
     # 'entry' module.
-    test_name = args and args[0] or None
-    suite = unittest.TestSuite()
-    loader = unittest.TestLoader()
-    for module in (entry_test.TestEntry, ftest.TestFunctional, fdt_test.TestFdt,
-                   elf_test.TestElf, image_test.TestImage,
-                   cbfs_util_test.TestCbfs):
-        # Test the test module about our arguments, if it is interested
-        if hasattr(module, 'setup_test_args'):
-            setup_test_args = getattr(module, 'setup_test_args')
-            setup_test_args(preserve_indir=test_preserve_dirs,
-                preserve_outdirs=test_preserve_dirs and test_name is not None,
-                toolpath=toolpath, verbosity=verbosity)
-        if test_name:
-            try:
-                suite.addTests(loader.loadTestsFromName(test_name, module))
-            except AttributeError:
-                continue
-        else:
-            suite.addTests(loader.loadTestsFromTestCase(module))
-    if use_concurrent and processes != 1:
-        concurrent_suite = ConcurrentTestSuite(suite,
-                fork_for_tests(processes or multiprocessing.cpu_count()))
-        concurrent_suite.run(result)
-    else:
-        suite.run(result)
+    test_util.RunTestSuites(
+        result, debug, verbosity, test_preserve_dirs, processes, test_name,
+        toolpath,
+        [entry_test.TestEntry, ftest.TestFunctional, fdt_test.TestFdt,
+         elf_test.TestElf, image_test.TestImage, cbfs_util_test.TestCbfs])
 
-    # Remove errors which just indicate a missing test. Since Python v3.5 If an
-    # ImportError or AttributeError occurs while traversing name then a
-    # synthetic test that raises that error when run will be returned. These
-    # errors are included in the errors accumulated by result.errors.
-    if test_name:
-        errors = []
-        for test, err in result.errors:
-            if ("has no attribute '%s'" % test_name) not in err:
-                errors.append((test, err))
-            result.testsRun -= 1
-        result.errors = errors
-
-    print(result)
-    for test, err in result.errors:
-        print(test.id(), err)
-    for test, err in result.failures:
-        print(err, result.failures)
-    if result.skipped:
-        print('%d binman test%s SKIPPED:' %
-              (len(result.skipped), 's' if len(result.skipped) > 1 else ''))
-        for skip_info in result.skipped:
-            print('%s: %s' % (skip_info[0], skip_info[1]))
-    if result.errors or result.failures:
-        print('binman tests FAILED')
-        return 1
-    return 0
+    return test_util.ReportResult('binman', test_name, result)
 
 
 def GetEntryModules(include_testing=True):
     """Get a set of entry class implementations
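The concurrent path being moved relies on the optional third-party concurrencytest module. As a standalone sketch of that pattern, assuming concurrencytest is installed (the test case is a throwaway example; the API usage matches the lines above):

    import multiprocessing
    import unittest

    from concurrencytest import ConcurrentTestSuite, fork_for_tests

    class ExampleTest(unittest.TestCase):    # throwaway example case
        def testOne(self):
            self.assertTrue(True)

    suite = unittest.TestSuite()
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(ExampleTest))

    # fork_for_tests(n) returns a callable that splits the suite across n
    # forked worker processes; ConcurrentTestSuite runs the parts in
    # parallel and merges every outcome into the single TestResult.
    result = unittest.TestResult()
    concurrent_suite = ConcurrentTestSuite(
        suite, fork_for_tests(multiprocessing.cpu_count()))
    concurrent_suite.run(result)
    print('%d tests run' % result.testsRun)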
tools/patman/test_util.py:
@@ -5,13 +5,21 @@
 
 from contextlib import contextmanager
 import glob
+import multiprocessing
 import os
 import sys
+import unittest
 
 import command
 
 from io import StringIO
 
+use_concurrent = True
+try:
+    from concurrencytest import ConcurrentTestSuite, fork_for_tests
+except:
+    use_concurrent = False
+
 
 def RunTestCoverage(prog, filter_fname, exclude_list, build_dir, required=None):
     """Run tests and check that we get 100% coverage
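Note the guarded import added here: if concurrencytest is not installed, use_concurrent is set to False and RunTestSuites (added below) quietly falls back to running the combined suite serially; passing processes=1 forces the serial path even when the module is present.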
@@ -86,3 +94,95 @@ def capture_sys_output():
         yield capture_out, capture_err
     finally:
         sys.stdout, sys.stderr = old_out, old_err
+
+
+def ReportResult(toolname:str, test_name: str, result: unittest.TestResult):
+    """Report the results from a suite of tests
+
+    Args:
+        toolname: Name of the tool that ran the tests
+        test_name: Name of test that was run, or None for all
+        result: A unittest.TestResult object containing the results
+    """
+    # Remove errors which just indicate a missing test. Since Python v3.5 If an
+    # ImportError or AttributeError occurs while traversing name then a
+    # synthetic test that raises that error when run will be returned. These
+    # errors are included in the errors accumulated by result.errors.
+    if test_name:
+        errors = []
+
+        for test, err in result.errors:
+            if ("has no attribute '%s'" % test_name) not in err:
+                errors.append((test, err))
+            result.testsRun -= 1
+        result.errors = errors
+
+    print(result)
+    for test, err in result.errors:
+        print(test.id(), err)
+    for test, err in result.failures:
+        print(err, result.failures)
+    if result.skipped:
+        print('%d binman test%s SKIPPED:' %
+              (len(result.skipped), 's' if len(result.skipped) > 1 else ''))
+        for skip_info in result.skipped:
+            print('%s: %s' % (skip_info[0], skip_info[1]))
+    if result.errors or result.failures:
+        print('binman tests FAILED')
+        return 1
+    return 0
+
+
+def RunTestSuites(result, debug, verbosity, test_preserve_dirs, processes,
+                  test_name, toolpath, test_class_list):
+    """Run a series of test suites and collect the results
+
+    Args:
+        result: A unittest.TestResult object to add the results to
+        debug: True to enable debugging, which shows a full stack trace on error
+        verbosity: Verbosity level to use (0-4)
+        test_preserve_dirs: True to preserve the input directory used by tests
+            so that it can be examined afterwards (only useful for debugging
+            tests). If a single test is selected (in args[0]) it also preserves
+            the output directory for this test. Both directories are displayed
+            on the command line.
+        processes: Number of processes to use to run tests (None=same as #CPUs)
+        test_name: Name of test to run, or None for all
+        toolpath: List of paths to use for tools
+        test_class_list: List of test classes to run
+    """
+    for module in []:
+        suite = doctest.DocTestSuite(module)
+        suite.run(result)
+
+    sys.argv = [sys.argv[0]]
+    if debug:
+        sys.argv.append('-D')
+    if verbosity:
+        sys.argv.append('-v%d' % verbosity)
+    if toolpath:
+        for path in toolpath:
+            sys.argv += ['--toolpath', path]
+
+    suite = unittest.TestSuite()
+    loader = unittest.TestLoader()
+    for module in test_class_list:
+        # Test the test module about our arguments, if it is interested
+        if hasattr(module, 'setup_test_args'):
+            setup_test_args = getattr(module, 'setup_test_args')
+            setup_test_args(preserve_indir=test_preserve_dirs,
+                preserve_outdirs=test_preserve_dirs and test_name is not None,
+                toolpath=toolpath, verbosity=verbosity)
+        if test_name:
+            try:
+                suite.addTests(loader.loadTestsFromName(test_name, module))
+            except AttributeError:
+                continue
+        else:
+            suite.addTests(loader.loadTestsFromTestCase(module))
+    if use_concurrent and processes != 1:
+        concurrent_suite = ConcurrentTestSuite(suite,
+                fork_for_tests(processes or multiprocessing.cpu_count()))
+        concurrent_suite.run(result)
+    else:
+        suite.run(result)
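RunTestSuites probes each test class for an optional setup_test_args hook before loading it. A minimal sketch of a class opting in (the class name and body are illustrative; the keyword parameters are the ones RunTestSuites passes above):

    import unittest

    class ExampleToolTest(unittest.TestCase):    # illustrative test class
        preserve_indir = False

        @classmethod
        def setup_test_args(cls, preserve_indir=False, preserve_outdirs=False,
                            toolpath=None, verbosity=None):
            # Called via getattr() by RunTestSuites; stash the settings so
            # individual tests can honour them.
            cls.preserve_indir = preserve_indir
            cls.toolpath = toolpath
            cls.verbosity = verbosity

        def testSettingsApplied(self):
            self.assertFalse(self.preserve_indir)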