[v2,10/14] patman: Move test running/reporting to test_util

Message ID 20200417180829.v2.10.I2026431586ddf537ba1edcaa2a2b727fceb2202b@changeid
State Accepted
Commit ce0dc2edfc51dba5134d50a49ac8a18667fb55b2
Series tools: patman: Convert Python tools to use absolute imports

Commit Message

Simon Glass April 18, 2020, 12:09 a.m. UTC
This code is useful in other tools. Move it into a common file so it can
be shared.

Signed-off-by: Simon Glass <sjg@chromium.org>
---

Changes in v2: None

 tools/binman/main.py      |  78 +++--------------------------
 tools/patman/test_util.py | 100 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 108 insertions(+), 70 deletions(-)
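
The two helpers this patch exports are intended to be used as a pair: RunTestSuites() collects results from a list of test classes and ReportResult() prints a summary and returns an exit code. A minimal sketch of how another tool could drive them (the MyToolTest class and the 'mytool' name below are hypothetical stand-ins; only the two test_util functions come from this patch):

    # Minimal sketch of a caller of the shared helpers; MyToolTest and
    # 'mytool' are illustrative stand-ins, not part of this patch.
    import unittest

    import test_util

    class MyToolTest(unittest.TestCase):
        """Stand-in test case for illustration"""
        def test_nothing(self):
            self.assertTrue(True)

    def run_tests(processes=None, test_name=None):
        result = unittest.TestResult()
        # Arguments: result, debug, verbosity, test_preserve_dirs,
        # processes, test_name, toolpath, test_class_list
        test_util.RunTestSuites(result, False, 1, False, processes,
                                test_name, [], [MyToolTest])
        return test_util.ReportResult('mytool', test_name, result)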

Comments

Simon Glass April 27, 2020, 3:06 a.m. UTC | #1
Applied to u-boot-dm, thanks!

Patch

diff --git a/tools/binman/main.py b/tools/binman/main.py
index daff7ae4d3..a17764cdb0 100755
--- a/tools/binman/main.py
+++ b/tools/binman/main.py
@@ -11,7 +11,6 @@ 
 
 from distutils.sysconfig import get_python_lib
 import glob
-import multiprocessing
 import os
 import site
 import sys
@@ -37,11 +36,6 @@  sys.path.append(get_python_lib())
 
 import cmdline
 import command
-use_concurrent = True
-try:
-    from concurrencytest import ConcurrentTestSuite, fork_for_tests
-except:
-    use_concurrent = False
 import control
 import test_util
 
@@ -71,73 +65,17 @@  def RunTests(debug, verbosity, processes, test_preserve_dirs, args, toolpath):
     import doctest
 
     result = unittest.TestResult()
-    for module in []:
-        suite = doctest.DocTestSuite(module)
-        suite.run(result)
-
-    sys.argv = [sys.argv[0]]
-    if debug:
-        sys.argv.append('-D')
-    if verbosity:
-        sys.argv.append('-v%d' % verbosity)
-    if toolpath:
-        for path in toolpath:
-            sys.argv += ['--toolpath', path]
+    test_name = args and args[0] or None
 
     # Run the entry tests first, since these need to be the first to import the
     # 'entry' module.
-    test_name = args and args[0] or None
-    suite = unittest.TestSuite()
-    loader = unittest.TestLoader()
-    for module in (entry_test.TestEntry, ftest.TestFunctional, fdt_test.TestFdt,
-                   elf_test.TestElf, image_test.TestImage,
-                   cbfs_util_test.TestCbfs):
-        # Test the test module about our arguments, if it is interested
-        if hasattr(module, 'setup_test_args'):
-            setup_test_args = getattr(module, 'setup_test_args')
-            setup_test_args(preserve_indir=test_preserve_dirs,
-                preserve_outdirs=test_preserve_dirs and test_name is not None,
-                toolpath=toolpath, verbosity=verbosity)
-        if test_name:
-            try:
-                suite.addTests(loader.loadTestsFromName(test_name, module))
-            except AttributeError:
-                continue
-        else:
-            suite.addTests(loader.loadTestsFromTestCase(module))
-    if use_concurrent and processes != 1:
-        concurrent_suite = ConcurrentTestSuite(suite,
-                fork_for_tests(processes or multiprocessing.cpu_count()))
-        concurrent_suite.run(result)
-    else:
-        suite.run(result)
-
-    # Remove errors which just indicate a missing test. Since Python v3.5 If an
-    # ImportError or AttributeError occurs while traversing name then a
-    # synthetic test that raises that error when run will be returned. These
-    # errors are included in the errors accumulated by result.errors.
-    if test_name:
-        errors = []
-        for test, err in result.errors:
-            if ("has no attribute '%s'" % test_name) not in err:
-                errors.append((test, err))
-            result.testsRun -= 1
-        result.errors = errors
-
-    print(result)
-    for test, err in result.errors:
-        print(test.id(), err)
-    for test, err in result.failures:
-        print(err, result.failures)
-    if result.skipped:
-        print('%d binman test%s SKIPPED:' %
-              (len(result.skipped), 's' if len(result.skipped) > 1 else ''))
-        for skip_info in result.skipped:
-            print('%s: %s' % (skip_info[0], skip_info[1]))
-    if result.errors or result.failures:
-        print('binman tests FAILED')
-        return 1
-    return 0
+    test_util.RunTestSuites(
+        result, debug, verbosity, test_preserve_dirs, processes, test_name,
+        toolpath,
+        [entry_test.TestEntry, ftest.TestFunctional, fdt_test.TestFdt,
+         elf_test.TestElf, image_test.TestImage, cbfs_util_test.TestCbfs])
+
+    return test_util.ReportResult('binman', test_name, result)
 
 def GetEntryModules(include_testing=True):
     """Get a set of entry class implementations
diff --git a/tools/patman/test_util.py b/tools/patman/test_util.py
index 6575b11c1a..bb5a298e39 100644
--- a/tools/patman/test_util.py
+++ b/tools/patman/test_util.py
@@ -5,13 +5,21 @@ 
 
 from contextlib import contextmanager
 import glob
+import multiprocessing
 import os
 import sys
+import unittest
 
 import command
 
 from io import StringIO
 
+use_concurrent = True
+try:
+    from concurrencytest import ConcurrentTestSuite, fork_for_tests
+except ImportError:
+    use_concurrent = False
+
 
 def RunTestCoverage(prog, filter_fname, exclude_list, build_dir, required=None):
     """Run tests and check that we get 100% coverage
@@ -86,3 +94,95 @@  def capture_sys_output():
         yield capture_out, capture_err
     finally:
         sys.stdout, sys.stderr = old_out, old_err
+
+
+def ReportResult(toolname: str, test_name: str, result: unittest.TestResult):
+    """Report the results from a suite of tests
+
+    Args:
+        toolname: Name of the tool that ran the tests
+        test_name: Name of test that was run, or None for all
+        result: A unittest.TestResult object containing the results
+    """
+    # Remove errors which just indicate a missing test. Since Python v3.5, if
+    # an ImportError or AttributeError occurs while traversing the test name,
+    # a synthetic test that raises that error when run is returned. These
+    # errors are included in the errors accumulated by result.errors.
+    if test_name:
+        errors = []
+
+        for test, err in result.errors:
+            if ("has no attribute '%s'" % test_name) not in err:
+                errors.append((test, err))
+            result.testsRun -= 1
+        result.errors = errors
+
+    print(result)
+    for test, err in result.errors:
+        print(test.id(), err)
+    for test, err in result.failures:
+        print(test.id(), err)
+    if result.skipped:
+        print('%d %s test%s SKIPPED:' %
+              (len(result.skipped), toolname,
+               's' if len(result.skipped) > 1 else ''))
+        for skip_info in result.skipped:
+            print('%s: %s' % (skip_info[0], skip_info[1]))
+    if result.errors or result.failures:
+        print('%s tests FAILED' % toolname)
+        return 1
+    return 0
+
+
+def RunTestSuites(result, debug, verbosity, test_preserve_dirs, processes,
+                  test_name, toolpath, test_class_list):
+    """Run a series of test suites and collect the results
+
+    Args:
+        result: A unittest.TestResult object to add the results to
+        debug: True to enable debugging, which shows a full stack trace on error
+        verbosity: Verbosity level to use (0-4)
+        test_preserve_dirs: True to preserve the input directory used by tests
+            so that it can be examined afterwards (only useful for debugging
+            tests). If a single test is selected (in args[0]) it also preserves
+            the output directory for this test. Both directories are displayed
+            on the command line.
+        processes: Number of processes to use to run tests (None=same as #CPUs)
+        test_name: Name of test to run, or None for all
+        toolpath: List of paths to use for tools
+        test_class_list: List of test classes to run
+    """
+    for module in []:    # placeholder: no doctest modules are run yet
+        suite = doctest.DocTestSuite(module)
+        suite.run(result)
+
+    sys.argv = [sys.argv[0]]
+    if debug:
+        sys.argv.append('-D')
+    if verbosity:
+        sys.argv.append('-v%d' % verbosity)
+    if toolpath:
+        for path in toolpath:
+            sys.argv += ['--toolpath', path]
+
+    suite = unittest.TestSuite()
+    loader = unittest.TestLoader()
+    for module in test_class_list:
+        # Tell the test module about our arguments, if it is interested
+        if hasattr(module, 'setup_test_args'):
+            setup_test_args = getattr(module, 'setup_test_args')
+            setup_test_args(preserve_indir=test_preserve_dirs,
+                preserve_outdirs=test_preserve_dirs and test_name is not None,
+                toolpath=toolpath, verbosity=verbosity)
+        if test_name:
+            try:
+                suite.addTests(loader.loadTestsFromName(test_name, module))
+            except AttributeError:
+                continue
+        else:
+            suite.addTests(loader.loadTestsFromTestCase(module))
+    if use_concurrent and processes != 1:
+        concurrent_suite = ConcurrentTestSuite(suite,
+                fork_for_tests(processes or multiprocessing.cpu_count()))
+        concurrent_suite.run(result)
+    else:
+        suite.run(result)
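
A note on the concurrency path moved here: RunTestSuites() runs the combined suite in parallel only when the optional concurrencytest package imports successfully and more than one process is allowed, falling back to a plain serial run otherwise. The pattern in isolation (a sketch; the run_suite() wrapper name is illustrative, not part of the patch):

    # Sketch of the optional-dependency fallback used by RunTestSuites():
    # run concurrently when concurrencytest is available, serially otherwise.
    import multiprocessing
    import unittest

    try:
        from concurrencytest import ConcurrentTestSuite, fork_for_tests
        use_concurrent = True
    except ImportError:
        use_concurrent = False

    def run_suite(suite, result, processes=None):
        if use_concurrent and processes != 1:
            # Fork one worker per CPU unless an explicit count was given
            ConcurrentTestSuite(
                suite,
                fork_for_tests(processes or multiprocessing.cpu_count())
            ).run(result)
        else:
            suite.run(result)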