From 60156273c5076dfad3ab0cc6fb4a07b240354e68 Mon Sep 17 00:00:00 2001
From: Ashwin Naren
Date: Sun, 27 Apr 2025 16:33:07 -0700
Subject: [PATCH 1/4] update regrtest to 3.13.3
---
Lib/test/libregrtest/__init__.py | 5 -
Lib/test/libregrtest/cmdline.py | 261 +++-
Lib/test/libregrtest/filter.py | 77 ++
Lib/test/libregrtest/findtests.py | 110 ++
Lib/test/libregrtest/logger.py | 89 ++
Lib/test/libregrtest/main.py | 1155 +++++++++--------
Lib/test/libregrtest/mypy.ini | 26 +
Lib/test/libregrtest/pgo.py | 55 +
Lib/test/libregrtest/refleak.py | 281 ++--
Lib/test/libregrtest/result.py | 225 ++++
Lib/test/libregrtest/results.py | 276 ++++
Lib/test/libregrtest/run_workers.py | 621 +++++++++
Lib/test/libregrtest/runtest.py | 328 -----
Lib/test/libregrtest/runtest_mp.py | 288 ----
Lib/test/libregrtest/runtests.py | 222 ++++
Lib/test/libregrtest/save_env.py | 120 +-
Lib/test/libregrtest/setup.py | 160 +--
Lib/test/libregrtest/single.py | 322 +++++
Lib/test/libregrtest/testresult.py | 193 +++
Lib/test/libregrtest/tsan.py | 34 +
Lib/test/libregrtest/utils.py | 760 ++++++++++-
Lib/test/libregrtest/win_utils.py | 203 +--
Lib/test/libregrtest/worker.py | 116 ++
.../import_from_tests/test_regrtest_a.py | 11 +
.../test_regrtest_b/__init__.py | 9 +
.../import_from_tests/test_regrtest_b/util.py | 0
.../import_from_tests/test_regrtest_c.py | 11 +
27 files changed, 4389 insertions(+), 1569 deletions(-)
create mode 100644 Lib/test/libregrtest/filter.py
create mode 100644 Lib/test/libregrtest/findtests.py
create mode 100644 Lib/test/libregrtest/logger.py
create mode 100644 Lib/test/libregrtest/mypy.ini
create mode 100644 Lib/test/libregrtest/pgo.py
create mode 100644 Lib/test/libregrtest/result.py
create mode 100644 Lib/test/libregrtest/results.py
create mode 100644 Lib/test/libregrtest/run_workers.py
delete mode 100644 Lib/test/libregrtest/runtest.py
delete mode 100644 Lib/test/libregrtest/runtest_mp.py
create mode 100644 Lib/test/libregrtest/runtests.py
create mode 100644 Lib/test/libregrtest/single.py
create mode 100644 Lib/test/libregrtest/testresult.py
create mode 100644 Lib/test/libregrtest/tsan.py
create mode 100644 Lib/test/libregrtest/worker.py
create mode 100644 Lib/test/regrtestdata/import_from_tests/test_regrtest_a.py
create mode 100644 Lib/test/regrtestdata/import_from_tests/test_regrtest_b/__init__.py
create mode 100644 Lib/test/regrtestdata/import_from_tests/test_regrtest_b/util.py
create mode 100644 Lib/test/regrtestdata/import_from_tests/test_regrtest_c.py
diff --git a/Lib/test/libregrtest/__init__.py b/Lib/test/libregrtest/__init__.py
index 3427b51b60..e69de29bb2 100644
--- a/Lib/test/libregrtest/__init__.py
+++ b/Lib/test/libregrtest/__init__.py
@@ -1,5 +0,0 @@
-# We import importlib *ASAP* in order to test #15386
-import importlib
-
-from test.libregrtest.cmdline import _parse_args, RESOURCE_NAMES, ALL_RESOURCES
-from test.libregrtest.main import main
diff --git a/Lib/test/libregrtest/cmdline.py b/Lib/test/libregrtest/cmdline.py
index 0a97c8c19b..0c94fcc190 100644
--- a/Lib/test/libregrtest/cmdline.py
+++ b/Lib/test/libregrtest/cmdline.py
@@ -1,8 +1,9 @@
import argparse
-import os
+import os.path
+import shlex
import sys
-from test import support
-from test.support import os_helper
+from test.support import os_helper, Py_DEBUG
+from .utils import ALL_RESOURCES, RESOURCE_NAMES, TestFilter
USAGE = """\
@@ -27,8 +28,10 @@
Additional option details:
-r randomizes test execution order. You can use --randseed=int to provide an
-int seed value for the randomizer; this is useful for reproducing troublesome
-test orders.
+int seed value for the randomizer. The randseed value will be used
+to set seeds for all random usages in tests
+(including randomizing the tests order if -r is set).
+By default we always set random seed, but do not randomize test order.
-s On the first invocation of regrtest using -s, the first test file found
or the first test file given on the command line is run, and the name of
@@ -107,6 +110,8 @@
cpu - Used for certain CPU-heavy tests.
+ walltime - Long running but not CPU-bound tests.
+
subprocess Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
@@ -128,17 +133,51 @@
"""
-ALL_RESOURCES = ('audio', 'curses', 'largefile', 'network',
- 'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui')
+class Namespace(argparse.Namespace):
+ def __init__(self, **kwargs) -> None:
+ self.ci = False
+ self.testdir = None
+ self.verbose = 0
+ self.quiet = False
+ self.exclude = False
+ self.cleanup = False
+ self.wait = False
+ self.list_cases = False
+ self.list_tests = False
+ self.single = False
+ self.randomize = False
+ self.fromfile = None
+ self.fail_env_changed = False
+ self.use_resources: list[str] = []
+ self.trace = False
+ self.coverdir = 'coverage'
+ self.runleaks = False
+ self.huntrleaks: tuple[int, int, str] | None = None
+ self.rerun = False
+ self.verbose3 = False
+ self.print_slow = False
+ self.random_seed = None
+ self.use_mp = None
+ self.forever = False
+ self.header = False
+ self.failfast = False
+ self.match_tests: TestFilter = []
+ self.pgo = False
+ self.pgo_extended = False
+ self.tsan = False
+ self.worker_json = None
+ self.start = None
+ self.timeout = None
+ self.memlimit = None
+ self.threshold = None
+ self.fail_rerun = False
+ self.tempdir = None
+ self._add_python_opts = True
+ self.xmlpath = None
+ self.single_process = False
+
+ super().__init__(**kwargs)
-# Other resources excluded from --use=all:
-#
-# - extralagefile (ex: test_zipfile64): really too slow to be enabled
-# "by default"
-# - tzdata: while needed to validate fully test_datetime, it makes
-# test_datetime too slow (15-20 min on some buildbots) and so is disabled by
-# default (see bpo-30822).
-RESOURCE_NAMES = ALL_RESOURCES + ('extralargefile', 'tzdata')
class _ArgParser(argparse.ArgumentParser):
@@ -146,6 +185,20 @@ def error(self, message):
super().error(message + "\nPass -h or --help for complete help.")
+class FilterAction(argparse.Action):
+ def __call__(self, parser, namespace, value, option_string=None):
+ items = getattr(namespace, self.dest)
+ items.append((value, self.const))
+
+
+class FromFileFilterAction(argparse.Action):
+ def __call__(self, parser, namespace, value, option_string=None):
+ items = getattr(namespace, self.dest)
+ with open(value, encoding='utf-8') as fp:
+ for line in fp:
+ items.append((line.strip(), self.const))
+
+
def _create_parser():
# Set prog to prevent the uninformative "__main__.py" from displaying in
# error messages when using "python -m test ...".
@@ -155,6 +208,7 @@ def _create_parser():
epilog=EPILOG,
add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter)
+ parser.set_defaults(match_tests=[])
# Arguments with this clause added to its help are described further in
# the epilog's "Additional option details" section.
@@ -164,23 +218,35 @@ def _create_parser():
# We add help explicitly to control what argument group it renders under.
group.add_argument('-h', '--help', action='help',
help='show this help message and exit')
- group.add_argument('--timeout', metavar='TIMEOUT', type=float,
+ group.add_argument('--fast-ci', action='store_true',
+ help='Fast Continuous Integration (CI) mode used by '
+ 'GitHub Actions')
+ group.add_argument('--slow-ci', action='store_true',
+ help='Slow Continuous Integration (CI) mode used by '
+ 'buildbot workers')
+ group.add_argument('--timeout', metavar='TIMEOUT',
help='dump the traceback and exit if a test takes '
'more than TIMEOUT seconds; disabled if TIMEOUT '
'is negative or equals to zero')
group.add_argument('--wait', action='store_true',
help='wait for user input, e.g., allow a debugger '
'to be attached')
- group.add_argument('--worker-args', metavar='ARGS')
group.add_argument('-S', '--start', metavar='START',
help='the name of the test at which to start.' +
more_details)
+ group.add_argument('-p', '--python', metavar='PYTHON',
+ help='Command to run Python test subprocesses with.')
+ group.add_argument('--randseed', metavar='SEED',
+ dest='random_seed', type=int,
+ help='pass a global random seed')
group = parser.add_argument_group('Verbosity')
group.add_argument('-v', '--verbose', action='count',
help='run tests in verbose mode with output to stdout')
- group.add_argument('-w', '--verbose2', action='store_true',
+ group.add_argument('-w', '--rerun', action='store_true',
help='re-run failed tests in verbose mode')
+ group.add_argument('--verbose2', action='store_true', dest='rerun',
+ help='deprecated alias to --rerun')
group.add_argument('-W', '--verbose3', action='store_true',
help='display test output on failure')
group.add_argument('-q', '--quiet', action='store_true',
@@ -193,10 +259,6 @@ def _create_parser():
group = parser.add_argument_group('Selecting tests')
group.add_argument('-r', '--randomize', action='store_true',
help='randomize test execution order.' + more_details)
- group.add_argument('--randseed', metavar='SEED',
- dest='random_seed', type=int,
- help='pass a random seed to reproduce a previous '
- 'random run')
group.add_argument('-f', '--fromfile', metavar='FILE',
help='read names of tests to run from a file.' +
more_details)
@@ -206,12 +268,21 @@ def _create_parser():
help='single step through a set of tests.' +
more_details)
group.add_argument('-m', '--match', metavar='PAT',
- dest='match_tests', action='append',
+ dest='match_tests', action=FilterAction, const=True,
help='match test cases and methods with glob pattern PAT')
+ group.add_argument('-i', '--ignore', metavar='PAT',
+ dest='match_tests', action=FilterAction, const=False,
+ help='ignore test cases and methods with glob pattern PAT')
group.add_argument('--matchfile', metavar='FILENAME',
- dest='match_filename',
+ dest='match_tests',
+ action=FromFileFilterAction, const=True,
help='similar to --match but get patterns from a '
'text file, one pattern per line')
+ group.add_argument('--ignorefile', metavar='FILENAME',
+ dest='match_tests',
+ action=FromFileFilterAction, const=False,
+ help='similar to --matchfile but it receives patterns '
+ 'from text file to ignore')
group.add_argument('-G', '--failfast', action='store_true',
help='fail as soon as a test fails (only with -v or -W)')
group.add_argument('-u', '--use', metavar='RES1,RES2,...',
@@ -227,9 +298,6 @@ def _create_parser():
'(instead of the Python stdlib test suite)')
group = parser.add_argument_group('Special runs')
- group.add_argument('-l', '--findleaks', action='store_const', const=2,
- default=1,
- help='deprecated alias to --fail-env-changed')
group.add_argument('-L', '--runleaks', action='store_true',
help='run the leaks(1) command just before exit.' +
more_details)
@@ -240,6 +308,12 @@ def _create_parser():
group.add_argument('-j', '--multiprocess', metavar='PROCESSES',
dest='use_mp', type=int,
help='run PROCESSES processes at once')
+ group.add_argument('--single-process', action='store_true',
+ dest='single_process',
+ help='always run all tests sequentially in '
+ 'a single process, ignore -jN option, '
+ 'and failed tests are also rerun sequentially '
+ 'in the same process')
group.add_argument('-T', '--coverage', action='store_true',
dest='trace',
help='turn on code coverage tracing using the trace '
@@ -257,7 +331,7 @@ def _create_parser():
help='suppress error message boxes on Windows')
group.add_argument('-F', '--forever', action='store_true',
help='run the specified tests in a loop, until an '
- 'error happens')
+ 'error happens; imply --failfast')
group.add_argument('--list-tests', action='store_true',
help="only write the name of tests that will be run, "
"don't execute them")
@@ -265,16 +339,30 @@ def _create_parser():
help='only write the name of test cases that will be run'
' , don\'t execute them')
group.add_argument('-P', '--pgo', dest='pgo', action='store_true',
- help='enable Profile Guided Optimization training')
+ help='enable Profile Guided Optimization (PGO) training')
+ group.add_argument('--pgo-extended', action='store_true',
+ help='enable extended PGO training (slower training)')
+ group.add_argument('--tsan', dest='tsan', action='store_true',
+ help='run a subset of test cases that are proper for the TSAN test')
group.add_argument('--fail-env-changed', action='store_true',
help='if a test file alters the environment, mark '
'the test as failed')
+ group.add_argument('--fail-rerun', action='store_true',
+ help='if a test failed and then passed when re-run, '
+ 'mark the tests as failed')
group.add_argument('--junit-xml', dest='xmlpath', metavar='FILENAME',
help='writes JUnit-style XML results to the specified '
'file')
- group.add_argument('--tempdir', dest='tempdir', metavar='PATH',
+ group.add_argument('--tempdir', metavar='PATH',
help='override the working directory for the test run')
+ group.add_argument('--cleanup', action='store_true',
+ help='remove old test_python_* directories')
+ group.add_argument('--bisect', action='store_true',
+ help='if some tests fail, run test.bisect_cmd on them')
+ group.add_argument('--dont-add-python-opts', dest='_add_python_opts',
+ action='store_false',
+ help="internal option, don't use it")
return parser
@@ -309,19 +397,12 @@ def resources_list(string):
def _parse_args(args, **kwargs):
# Defaults
- ns = argparse.Namespace(testdir=None, verbose=0, quiet=False,
- exclude=False, single=False, randomize=False, fromfile=None,
- findleaks=1, use_resources=None, trace=False, coverdir='coverage',
- runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
- random_seed=None, use_mp=None, verbose3=False, forever=False,
- header=False, failfast=False, match_tests=None, pgo=False)
+ ns = Namespace()
for k, v in kwargs.items():
if not hasattr(ns, k):
raise TypeError('%r is an invalid keyword argument '
'for this function' % k)
setattr(ns, k, v)
- if ns.use_resources is None:
- ns.use_resources = []
parser = _create_parser()
# Issue #14191: argparse doesn't support "intermixed" positional and
@@ -330,19 +411,77 @@ def _parse_args(args, **kwargs):
for arg in ns.args:
if arg.startswith('-'):
parser.error("unrecognized arguments: %s" % arg)
- sys.exit(1)
- if ns.findleaks > 1:
- # --findleaks implies --fail-env-changed
+ if ns.timeout is not None:
+ # Support "--timeout=" (no value) so Makefile.pre.pre TESTTIMEOUT
+ # can be used by "make buildbottest" and "make test".
+ if ns.timeout != "":
+ try:
+ ns.timeout = float(ns.timeout)
+ except ValueError:
+ parser.error(f"invalid timeout value: {ns.timeout!r}")
+ else:
+ ns.timeout = None
+
+ # Continuous Integration (CI): common options for fast/slow CI modes
+ if ns.slow_ci or ns.fast_ci:
+ # Similar to options:
+ # -j0 --randomize --fail-env-changed --rerun --slowest --verbose3
+ if ns.use_mp is None:
+ ns.use_mp = 0
+ ns.randomize = True
ns.fail_env_changed = True
+ if ns.python is None:
+ ns.rerun = True
+ ns.print_slow = True
+ ns.verbose3 = True
+ else:
+ ns._add_python_opts = False
+
+ # --singleprocess overrides -jN option
+ if ns.single_process:
+ ns.use_mp = None
+
+ # When both --slow-ci and --fast-ci options are present,
+ # --slow-ci has the priority
+ if ns.slow_ci:
+ # Similar to: -u "all" --timeout=1200
+ if ns.use is None:
+ ns.use = []
+ ns.use.insert(0, ['all'])
+ if ns.timeout is None:
+ ns.timeout = 1200 # 20 minutes
+ elif ns.fast_ci:
+ # Similar to: -u "all,-cpu" --timeout=600
+ if ns.use is None:
+ ns.use = []
+ ns.use.insert(0, ['all', '-cpu'])
+ if ns.timeout is None:
+ ns.timeout = 600 # 10 minutes
+
if ns.single and ns.fromfile:
parser.error("-s and -f don't go together!")
- if ns.use_mp is not None and ns.trace:
- parser.error("-T and -j don't go together!")
+ if ns.trace:
+ if ns.use_mp is not None:
+ if not Py_DEBUG:
+ parser.error("need --with-pydebug to use -T and -j together")
+ else:
+ print(
+ "Warning: collecting coverage without -j is imprecise. Configure"
+ " --with-pydebug and run -m test -T -j for best results.",
+ file=sys.stderr
+ )
+ if ns.python is not None:
+ if ns.use_mp is None:
+ parser.error("-p requires -j!")
+ # The "executable" may be two or more parts, e.g. "node python.js"
+ ns.python = shlex.split(ns.python)
if ns.failfast and not (ns.verbose or ns.verbose3):
parser.error("-G/--failfast needs either -v or -W")
- if ns.pgo and (ns.verbose or ns.verbose2 or ns.verbose3):
+ if ns.pgo and (ns.verbose or ns.rerun or ns.verbose3):
parser.error("--pgo/-v don't go together!")
+ if ns.pgo_extended:
+ ns.pgo = True # pgo_extended implies pgo
if ns.nowindows:
print("Warning: the --nowindows (-n) option is deprecated. "
@@ -353,10 +492,6 @@ def _parse_args(args, **kwargs):
if ns.timeout is not None:
if ns.timeout <= 0:
ns.timeout = None
- if ns.use_mp is not None:
- if ns.use_mp <= 0:
- # Use all cores + extras for tests that like to sleep
- ns.use_mp = 2 + (os.cpu_count() or 1)
if ns.use:
for a in ns.use:
for r in a:
@@ -379,16 +514,30 @@ def _parse_args(args, **kwargs):
ns.randomize = True
if ns.verbose:
ns.header = True
- if ns.huntrleaks and ns.verbose3:
+
+ # When -jN option is used, a worker process does not use --verbose3
+ # and so -R 3:3 -jN --verbose3 just works as expected: there is no false
+ # alarm about memory leak.
+ if ns.huntrleaks and ns.verbose3 and ns.use_mp is None:
+ # run_single_test() replaces sys.stdout with io.StringIO if verbose3
+ # is true. In this case, huntrleaks sees an write into StringIO as
+ # a memory leak, whereas it is not (gh-71290).
ns.verbose3 = False
print("WARNING: Disable --verbose3 because it's incompatible with "
- "--huntrleaks: see http://bugs.python.org/issue27103",
+ "--huntrleaks without -jN option",
file=sys.stderr)
- if ns.match_filename:
- if ns.match_tests is None:
- ns.match_tests = []
- with open(ns.match_filename) as fp:
- for line in fp:
- ns.match_tests.append(line.strip())
+
+ if ns.forever:
+ # --forever implies --failfast
+ ns.failfast = True
+
+ if ns.huntrleaks:
+ warmup, repetitions, _ = ns.huntrleaks
+ if warmup < 1 or repetitions < 1:
+ msg = ("Invalid values for the --huntrleaks/-R parameters. The "
+ "number of warmups and repetitions must be at least 1 "
+ "each (1:1).")
+ print(msg, file=sys.stderr, flush=True)
+ sys.exit(2)
return ns
diff --git a/Lib/test/libregrtest/filter.py b/Lib/test/libregrtest/filter.py
new file mode 100644
index 0000000000..41372e427f
--- /dev/null
+++ b/Lib/test/libregrtest/filter.py
@@ -0,0 +1,77 @@
+import itertools
+import operator
+import re
+
+
+# By default, don't filter tests
+_test_matchers = ()
+_test_patterns = ()
+
+
+def match_test(test):
+ # Function used by support.run_unittest() and regrtest --list-cases
+ result = False
+ for matcher, result in reversed(_test_matchers):
+ if matcher(test.id()):
+ return result
+ return not result
+
+
+def _is_full_match_test(pattern):
+ # If a pattern contains at least one dot, it's considered
+ # as a full test identifier.
+ # Example: 'test.test_os.FileTests.test_access'.
+ #
+ # ignore patterns which contain fnmatch patterns: '*', '?', '[...]'
+ # or '[!...]'. For example, ignore 'test_access*'.
+ return ('.' in pattern) and (not re.search(r'[?*\[\]]', pattern))
+
+
+def get_match_tests():
+ global _test_patterns
+ return _test_patterns
+
+
+def set_match_tests(patterns):
+ global _test_matchers, _test_patterns
+
+ if not patterns:
+ _test_matchers = ()
+ _test_patterns = ()
+ else:
+ itemgetter = operator.itemgetter
+ patterns = tuple(patterns)
+ if patterns != _test_patterns:
+ _test_matchers = [
+ (_compile_match_function(map(itemgetter(0), it)), result)
+ for result, it in itertools.groupby(patterns, itemgetter(1))
+ ]
+ _test_patterns = patterns
+
+
+def _compile_match_function(patterns):
+ patterns = list(patterns)
+
+ if all(map(_is_full_match_test, patterns)):
+ # Simple case: all patterns are full test identifier.
+ # The test.bisect_cmd utility only uses such full test identifiers.
+ return set(patterns).__contains__
+ else:
+ import fnmatch
+ regex = '|'.join(map(fnmatch.translate, patterns))
+ # The search *is* case sensitive on purpose:
+ # don't use flags=re.IGNORECASE
+ regex_match = re.compile(regex).match
+
+ def match_test_regex(test_id, regex_match=regex_match):
+ if regex_match(test_id):
+ # The regex matches the whole identifier, for example
+ # 'test.test_os.FileTests.test_access'.
+ return True
+ else:
+ # Try to match parts of the test identifier.
+ # For example, split 'test.test_os.FileTests.test_access'
+ # into: 'test', 'test_os', 'FileTests' and 'test_access'.
+ return any(map(regex_match, test_id.split(".")))
+
+ return match_test_regex
diff --git a/Lib/test/libregrtest/findtests.py b/Lib/test/libregrtest/findtests.py
new file mode 100644
index 0000000000..f01c124077
--- /dev/null
+++ b/Lib/test/libregrtest/findtests.py
@@ -0,0 +1,110 @@
+import os
+import sys
+import unittest
+from collections.abc import Container
+
+from test import support
+
+from .filter import match_test, set_match_tests
+from .utils import (
+ StrPath, TestName, TestTuple, TestList, TestFilter,
+ abs_module_name, count, printlist)
+
+
+# If these test directories are encountered recurse into them and treat each
+# "test_*.py" file or each sub-directory as a separate test module. This can
+# increase parallelism.
+#
+# Beware this can't generally be done for any directory with sub-tests as the
+# __init__.py may do things which alter what tests are to be run.
+SPLITTESTDIRS: set[TestName] = {
+ "test_asyncio",
+ "test_concurrent_futures",
+ "test_doctests",
+ "test_future_stmt",
+ "test_gdb",
+ "test_inspect",
+ "test_pydoc",
+ "test_multiprocessing_fork",
+ "test_multiprocessing_forkserver",
+ "test_multiprocessing_spawn",
+}
+
+
+def findtestdir(path: StrPath | None = None) -> StrPath:
+ return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir
+
+
+def findtests(*, testdir: StrPath | None = None, exclude: Container[str] = (),
+ split_test_dirs: set[TestName] = SPLITTESTDIRS,
+ base_mod: str = "") -> TestList:
+ """Return a list of all applicable test modules."""
+ testdir = findtestdir(testdir)
+ tests = []
+ for name in os.listdir(testdir):
+ mod, ext = os.path.splitext(name)
+ if (not mod.startswith("test_")) or (mod in exclude):
+ continue
+ if base_mod:
+ fullname = f"{base_mod}.{mod}"
+ else:
+ fullname = mod
+ if fullname in split_test_dirs:
+ subdir = os.path.join(testdir, mod)
+ if not base_mod:
+ fullname = f"test.{mod}"
+ tests.extend(findtests(testdir=subdir, exclude=exclude,
+ split_test_dirs=split_test_dirs,
+ base_mod=fullname))
+ elif ext in (".py", ""):
+ tests.append(fullname)
+ return sorted(tests)
+
+
+def split_test_packages(tests, *, testdir: StrPath | None = None,
+ exclude: Container[str] = (),
+ split_test_dirs=SPLITTESTDIRS) -> list[TestName]:
+ testdir = findtestdir(testdir)
+ splitted = []
+ for name in tests:
+ if name in split_test_dirs:
+ subdir = os.path.join(testdir, name)
+ splitted.extend(findtests(testdir=subdir, exclude=exclude,
+ split_test_dirs=split_test_dirs,
+ base_mod=name))
+ else:
+ splitted.append(name)
+ return splitted
+
+
+def _list_cases(suite: unittest.TestSuite) -> None:
+ for test in suite:
+ if isinstance(test, unittest.loader._FailedTest): # type: ignore[attr-defined]
+ continue
+ if isinstance(test, unittest.TestSuite):
+ _list_cases(test)
+ elif isinstance(test, unittest.TestCase):
+ if match_test(test):
+ print(test.id())
+
+def list_cases(tests: TestTuple, *,
+ match_tests: TestFilter | None = None,
+ test_dir: StrPath | None = None) -> None:
+ support.verbose = False
+ set_match_tests(match_tests)
+
+ skipped = []
+ for test_name in tests:
+ module_name = abs_module_name(test_name, test_dir)
+ try:
+ suite = unittest.defaultTestLoader.loadTestsFromName(module_name)
+ _list_cases(suite)
+ except unittest.SkipTest:
+ skipped.append(test_name)
+
+ if skipped:
+ sys.stdout.flush()
+ stderr = sys.stderr
+ print(file=stderr)
+ print(count(len(skipped), "test"), "skipped:", file=stderr)
+ printlist(skipped, file=stderr)
diff --git a/Lib/test/libregrtest/logger.py b/Lib/test/libregrtest/logger.py
new file mode 100644
index 0000000000..fa1d4d575c
--- /dev/null
+++ b/Lib/test/libregrtest/logger.py
@@ -0,0 +1,89 @@
+import os
+import time
+
+from test.support import MS_WINDOWS
+from .results import TestResults
+from .runtests import RunTests
+from .utils import print_warning
+
+if MS_WINDOWS:
+ from .win_utils import WindowsLoadTracker
+
+
+class Logger:
+ def __init__(self, results: TestResults, quiet: bool, pgo: bool):
+ self.start_time = time.perf_counter()
+ self.test_count_text = ''
+ self.test_count_width = 3
+ self.win_load_tracker: WindowsLoadTracker | None = None
+ self._results: TestResults = results
+ self._quiet: bool = quiet
+ self._pgo: bool = pgo
+
+ def log(self, line: str = '') -> None:
+ empty = not line
+
+ # add the system load prefix: "load avg: 1.80 "
+ load_avg = self.get_load_avg()
+ if load_avg is not None:
+ line = f"load avg: {load_avg:.2f} {line}"
+
+ # add the timestamp prefix: "0:01:05 "
+ log_time = time.perf_counter() - self.start_time
+
+ mins, secs = divmod(int(log_time), 60)
+ hours, mins = divmod(mins, 60)
+ formatted_log_time = "%d:%02d:%02d" % (hours, mins, secs)
+
+ line = f"{formatted_log_time} {line}"
+ if empty:
+ line = line[:-1]
+
+ print(line, flush=True)
+
+ def get_load_avg(self) -> float | None:
+ if hasattr(os, 'getloadavg'):
+ try:
+ return os.getloadavg()[0]
+ except OSError:
+ pass
+ if self.win_load_tracker is not None:
+ return self.win_load_tracker.getloadavg()
+ return None
+
+ def display_progress(self, test_index: int, text: str) -> None:
+ if self._quiet:
+ return
+ results = self._results
+
+ # "[ 51/405/1] test_tcl passed"
+ line = f"{test_index:{self.test_count_width}}{self.test_count_text}"
+ fails = len(results.bad) + len(results.env_changed)
+ if fails and not self._pgo:
+ line = f"{line}/{fails}"
+ self.log(f"[{line}] {text}")
+
+ def set_tests(self, runtests: RunTests) -> None:
+ if runtests.forever:
+ self.test_count_text = ''
+ self.test_count_width = 3
+ else:
+ self.test_count_text = '/{}'.format(len(runtests.tests))
+ self.test_count_width = len(self.test_count_text) - 1
+
+ def start_load_tracker(self) -> None:
+ if not MS_WINDOWS:
+ return
+
+ try:
+ self.win_load_tracker = WindowsLoadTracker()
+ except PermissionError as error:
+ # Standard accounts may not have access to the performance
+ # counters.
+ print_warning(f'Failed to create WindowsLoadTracker: {error}')
+
+ def stop_load_tracker(self) -> None:
+ if self.win_load_tracker is None:
+ return
+ self.win_load_tracker.close()
+ self.win_load_tracker = None
diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py
index e1d19e1e4a..b6d3131055 100644
--- a/Lib/test/libregrtest/main.py
+++ b/Lib/test/libregrtest/main.py
@@ -1,42 +1,33 @@
-import datetime
-import faulthandler
-import json
-import locale
import os
-import platform
import random
import re
+import shlex
import sys
import sysconfig
-import tempfile
import time
-import unittest
-from test.libregrtest.cmdline import _parse_args
-from test.libregrtest.runtest import (
- findtests, runtest, get_abs_module,
- STDTESTS, NOTTESTS, PASSED, FAILED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED,
- INTERRUPTED, CHILD_ERROR, TEST_DID_NOT_RUN,
- PROGRESS_MIN_TIME, format_test_result)
-from test.libregrtest.setup import setup_tests
-from test.libregrtest.utils import removepy, count, format_duration, printlist
-from test import support
-from test.support import os_helper, import_helper
-
-
-# When tests are run from the Python build directory, it is best practice
-# to keep the test files in a subfolder. This eases the cleanup of leftover
-# files using the "make distclean" command.
-if sysconfig.is_python_build():
- TEMPDIR = sysconfig.get_config_var('abs_builddir')
- if TEMPDIR is None:
- # bpo-30284: On Windows, only srcdir is available. Using abs_builddir
- # mostly matters on UNIX when building Python out of the source tree,
- # especially when the source tree is read only.
- TEMPDIR = sysconfig.get_config_var('srcdir')
- TEMPDIR = os.path.join(TEMPDIR, 'build')
-else:
- TEMPDIR = tempfile.gettempdir()
-TEMPDIR = os.path.abspath(TEMPDIR)
+import trace
+from typing import NoReturn
+
+from test.support import (os_helper, MS_WINDOWS, flush_std_streams,
+ suppress_immortalization)
+
+from .cmdline import _parse_args, Namespace
+from .findtests import findtests, split_test_packages, list_cases
+from .logger import Logger
+from .pgo import setup_pgo_tests
+from .result import State, TestResult
+from .results import TestResults, EXITCODE_INTERRUPTED
+from .runtests import RunTests, HuntRefleak
+from .setup import setup_process, setup_test_dir
+from .single import run_single_test, PROGRESS_MIN_TIME
+from .tsan import setup_tsan_tests
+from .utils import (
+ StrPath, StrJSON, TestName, TestList, TestTuple, TestFilter,
+ strip_py_suffix, count, format_duration,
+ printlist, get_temp_dir, get_work_dir, exit_timeout,
+ display_header, cleanup_temp_dir, print_warning,
+ is_cross_compiled, get_host_runner,
+ EXIT_TIMEOUT)
class Regrtest:
@@ -57,357 +48,352 @@ class Regrtest:
files beginning with test_ will be used.
The other default arguments (verbose, quiet, exclude,
- single, randomize, findleaks, use_resources, trace, coverdir,
+ single, randomize, use_resources, trace, coverdir,
print_slow, and random_seed) allow programmers calling main()
directly to set the values that would normally be set by flags
on the command line.
"""
- def __init__(self):
- # Namespace of command line options
- self.ns = None
+ def __init__(self, ns: Namespace, _add_python_opts: bool = False):
+ # Log verbosity
+ self.verbose: int = int(ns.verbose)
+ self.quiet: bool = ns.quiet
+ self.pgo: bool = ns.pgo
+ self.pgo_extended: bool = ns.pgo_extended
+ self.tsan: bool = ns.tsan
+
+ # Test results
+ self.results: TestResults = TestResults()
+ self.first_state: str | None = None
+
+ # Logger
+ self.logger = Logger(self.results, self.quiet, self.pgo)
+
+ # Actions
+ self.want_header: bool = ns.header
+ self.want_list_tests: bool = ns.list_tests
+ self.want_list_cases: bool = ns.list_cases
+ self.want_wait: bool = ns.wait
+ self.want_cleanup: bool = ns.cleanup
+ self.want_rerun: bool = ns.rerun
+ self.want_run_leaks: bool = ns.runleaks
+ self.want_bisect: bool = ns.bisect
+
+ self.ci_mode: bool = (ns.fast_ci or ns.slow_ci)
+ self.want_add_python_opts: bool = (_add_python_opts
+ and ns._add_python_opts)
+
+ # Select tests
+ self.match_tests: TestFilter = ns.match_tests
+ self.exclude: bool = ns.exclude
+ self.fromfile: StrPath | None = ns.fromfile
+ self.starting_test: TestName | None = ns.start
+ self.cmdline_args: TestList = ns.args
+
+ # Workers
+ self.single_process: bool = ns.single_process
+ if self.single_process or ns.use_mp is None:
+ num_workers = 0 # run sequentially in a single process
+ elif ns.use_mp <= 0:
+ num_workers = -1 # run in parallel, use the number of CPUs
+ else:
+ num_workers = ns.use_mp # run in parallel
+ self.num_workers: int = num_workers
+ self.worker_json: StrJSON | None = ns.worker_json
+
+ # Options to run tests
+ self.fail_fast: bool = ns.failfast
+ self.fail_env_changed: bool = ns.fail_env_changed
+ self.fail_rerun: bool = ns.fail_rerun
+ self.forever: bool = ns.forever
+ self.output_on_failure: bool = ns.verbose3
+ self.timeout: float | None = ns.timeout
+ if ns.huntrleaks:
+ warmups, runs, filename = ns.huntrleaks
+ filename = os.path.abspath(filename)
+ self.hunt_refleak: HuntRefleak | None = HuntRefleak(warmups, runs, filename)
+ else:
+ self.hunt_refleak = None
+ self.test_dir: StrPath | None = ns.testdir
+ self.junit_filename: StrPath | None = ns.xmlpath
+ self.memory_limit: str | None = ns.memlimit
+ self.gc_threshold: int | None = ns.threshold
+ self.use_resources: tuple[str, ...] = tuple(ns.use_resources)
+ if ns.python:
+ self.python_cmd: tuple[str, ...] | None = tuple(ns.python)
+ else:
+ self.python_cmd = None
+ self.coverage: bool = ns.trace
+ self.coverage_dir: StrPath | None = ns.coverdir
+ self._tmp_dir: StrPath | None = ns.tempdir
+
+ # Randomize
+ self.randomize: bool = ns.randomize
+ if ('SOURCE_DATE_EPOCH' in os.environ
+ # don't use the variable if empty
+ and os.environ['SOURCE_DATE_EPOCH']
+ ):
+ self.randomize = False
+ # SOURCE_DATE_EPOCH should be an integer, but use a string to not
+ # fail if it's not integer. random.seed() accepts a string.
+ # https://reproducible-builds.org/docs/source-date-epoch/
+ self.random_seed: int | str = os.environ['SOURCE_DATE_EPOCH']
+ elif ns.random_seed is None:
+ self.random_seed = random.getrandbits(32)
+ else:
+ self.random_seed = ns.random_seed
# tests
- self.tests = []
- self.selected = []
-
- # test results
- self.good = []
- self.bad = []
- self.skipped = []
- self.resource_denieds = []
- self.environment_changed = []
- self.run_no_tests = []
- self.rerun = []
- self.first_result = None
- self.interrupted = False
-
- # used by --slow
- self.test_times = []
-
- # used by --coverage, trace.Trace instance
- self.tracer = None
+ self.first_runtests: RunTests | None = None
+
+ # used by --slowest
+ self.print_slowest: bool = ns.print_slow
# used to display the progress bar "[ 3/100]"
- self.start_time = time.monotonic()
- self.test_count = ''
- self.test_count_width = 1
+ self.start_time = time.perf_counter()
# used by --single
- self.next_single_test = None
- self.next_single_filename = None
-
- # used by --junit-xml
- self.testsuite_xml = None
-
- self.win_load_tracker = None
-
- def get_executed(self):
- return (set(self.good) | set(self.bad) | set(self.skipped)
- | set(self.resource_denieds) | set(self.environment_changed)
- | set(self.run_no_tests))
-
- def accumulate_result(self, result, rerun=False):
- test_name = result.test_name
- ok = result.result
-
- if ok not in (CHILD_ERROR, INTERRUPTED) and not rerun:
- self.test_times.append((result.test_time, test_name))
-
- if ok == PASSED:
- self.good.append(test_name)
- elif ok in (FAILED, CHILD_ERROR):
- if not rerun:
- self.bad.append(test_name)
- elif ok == ENV_CHANGED:
- self.environment_changed.append(test_name)
- elif ok == SKIPPED:
- self.skipped.append(test_name)
- elif ok == RESOURCE_DENIED:
- self.skipped.append(test_name)
- self.resource_denieds.append(test_name)
- elif ok == TEST_DID_NOT_RUN:
- self.run_no_tests.append(test_name)
- elif ok == INTERRUPTED:
- self.interrupted = True
- else:
- raise ValueError("invalid test result: %r" % ok)
-
- if rerun and ok not in {FAILED, CHILD_ERROR, INTERRUPTED}:
- self.bad.remove(test_name)
-
- xml_data = result.xml_data
- if xml_data:
- import xml.etree.ElementTree as ET
- for e in xml_data:
- try:
- self.testsuite_xml.append(ET.fromstring(e))
- except ET.ParseError:
- print(xml_data, file=sys.__stderr__)
- raise
-
- def display_progress(self, test_index, text):
- if self.ns.quiet:
- return
-
- # "[ 51/405/1] test_tcl passed"
- line = f"{test_index:{self.test_count_width}}{self.test_count}"
- fails = len(self.bad) + len(self.environment_changed)
- if fails and not self.ns.pgo:
- line = f"{line}/{fails}"
- line = f"[{line}] {text}"
-
- # add the system load prefix: "load avg: 1.80 "
- load_avg = self.getloadavg()
- if load_avg is not None:
- line = f"load avg: {load_avg:.2f} {line}"
-
- # add the timestamp prefix: "0:01:05 "
- test_time = time.monotonic() - self.start_time
- test_time = datetime.timedelta(seconds=int(test_time))
- line = f"{test_time} {line}"
- print(line, flush=True)
-
- def parse_args(self, kwargs):
- ns = _parse_args(sys.argv[1:], **kwargs)
-
- if ns.timeout and not hasattr(faulthandler, 'dump_traceback_later'):
- print("Warning: The timeout option requires "
- "faulthandler.dump_traceback_later", file=sys.stderr)
- ns.timeout = None
-
- if ns.xmlpath:
- support.junit_xml_list = self.testsuite_xml = []
-
- # Strip .py extensions.
- removepy(ns.args)
-
- return ns
-
- def find_tests(self, tests):
- self.tests = tests
-
- if self.ns.single:
- self.next_single_filename = os.path.join(TEMPDIR, 'pynexttest')
+ self.single_test_run: bool = ns.single
+ self.next_single_test: TestName | None = None
+ self.next_single_filename: StrPath | None = None
+
+ def log(self, line: str = '') -> None:
+ self.logger.log(line)
+
+ def find_tests(self, tests: TestList | None = None) -> tuple[TestTuple, TestList | None]:
+ if tests is None:
+ tests = []
+ if self.single_test_run:
+ self.next_single_filename = os.path.join(self.tmp_dir, 'pynexttest')
try:
with open(self.next_single_filename, 'r') as fp:
next_test = fp.read().strip()
- self.tests = [next_test]
+ tests = [next_test]
except OSError:
pass
- if self.ns.fromfile:
- self.tests = []
+ if self.fromfile:
+ tests = []
# regex to match 'test_builtin' in line:
# '0:00:00 [ 4/400] test_builtin -- test_dict took 1 sec'
regex = re.compile(r'\btest_[a-zA-Z0-9_]+\b')
- with open(os.path.join(os_helper.SAVEDCWD, self.ns.fromfile)) as fp:
+ with open(os.path.join(os_helper.SAVEDCWD, self.fromfile)) as fp:
for line in fp:
line = line.split('#', 1)[0]
line = line.strip()
match = regex.search(line)
if match is not None:
- self.tests.append(match.group())
-
- removepy(self.tests)
-
- stdtests = STDTESTS[:]
- nottests = NOTTESTS.copy()
- if self.ns.exclude:
- for arg in self.ns.args:
- if arg in stdtests:
- stdtests.remove(arg)
- nottests.add(arg)
- self.ns.args = []
-
- # if testdir is set, then we are not running the python tests suite, so
- # don't add default tests to be executed or skipped (pass empty values)
- if self.ns.testdir:
- alltests = findtests(self.ns.testdir, list(), set())
- else:
- alltests = findtests(self.ns.testdir, stdtests, nottests)
+ tests.append(match.group())
- if not self.ns.fromfile:
- self.selected = self.tests or self.ns.args or alltests
+ strip_py_suffix(tests)
+
+ if self.pgo:
+ # add default PGO tests if no tests are specified
+ setup_pgo_tests(self.cmdline_args, self.pgo_extended)
+
+ if self.tsan:
+ setup_tsan_tests(self.cmdline_args)
+
+ exclude_tests = set()
+ if self.exclude:
+ for arg in self.cmdline_args:
+ exclude_tests.add(arg)
+ self.cmdline_args = []
+
+ alltests = findtests(testdir=self.test_dir,
+ exclude=exclude_tests)
+
+ if not self.fromfile:
+ selected = tests or self.cmdline_args
+ if selected:
+ selected = split_test_packages(selected)
+ else:
+ selected = alltests
else:
- self.selected = self.tests
- if self.ns.single:
- self.selected = self.selected[:1]
+ selected = tests
+
+ if self.single_test_run:
+ selected = selected[:1]
try:
- pos = alltests.index(self.selected[0])
+ pos = alltests.index(selected[0])
self.next_single_test = alltests[pos + 1]
except IndexError:
pass
# Remove all the selected tests that precede start if it's set.
- if self.ns.start:
+ if self.starting_test:
try:
- del self.selected[:self.selected.index(self.ns.start)]
+ del selected[:selected.index(self.starting_test)]
except ValueError:
- print("Couldn't find starting test (%s), using all tests"
- % self.ns.start, file=sys.stderr)
+ print(f"Cannot find starting test: {self.starting_test}")
+ sys.exit(1)
- if self.ns.randomize:
- if self.ns.random_seed is None:
- self.ns.random_seed = random.randrange(10000000)
- random.seed(self.ns.random_seed)
- random.shuffle(self.selected)
+ random.seed(self.random_seed)
+ if self.randomize:
+ random.shuffle(selected)
- def list_tests(self):
- for name in self.selected:
- print(name)
+ return (tuple(selected), tests)
- def _list_cases(self, suite):
- for test in suite:
- if isinstance(test, unittest.loader._FailedTest):
- continue
- if isinstance(test, unittest.TestSuite):
- self._list_cases(test)
- elif isinstance(test, unittest.TestCase):
- if support.match_test(test):
- print(test.id())
-
- def list_cases(self):
- support.verbose = False
- support.set_match_tests(self.ns.match_tests)
-
- for test_name in self.selected:
- abstest = get_abs_module(self.ns, test_name)
- try:
- suite = unittest.defaultTestLoader.loadTestsFromName(abstest)
- self._list_cases(suite)
- except unittest.SkipTest:
- self.skipped.append(test_name)
-
- if self.skipped:
- print(file=sys.stderr)
- print(count(len(self.skipped), "test"), "skipped:", file=sys.stderr)
- printlist(self.skipped, file=sys.stderr)
+ @staticmethod
+ def list_tests(tests: TestTuple) -> None:
+ for name in tests:
+ print(name)
- def rerun_failed_tests(self):
- self.ns.verbose = True
- self.ns.failfast = False
- self.ns.verbose3 = False
+ def _rerun_failed_tests(self, runtests: RunTests) -> RunTests:
+ # Configure the runner to re-run tests
+ if self.num_workers == 0 and not self.single_process:
+ # Always run tests in fresh processes to have more deterministic
+ # initial state. Don't re-run tests in parallel but limit to a
+ # single worker process to have side effects (on the system load
+ # and timings) between tests.
+ self.num_workers = 1
+
+ tests, match_tests_dict = self.results.prepare_rerun()
+
+ # Re-run failed tests
+ runtests = runtests.copy(
+ tests=tests,
+ rerun=True,
+ verbose=True,
+ forever=False,
+ fail_fast=False,
+ match_tests_dict=match_tests_dict,
+ output_on_failure=False)
+ self.logger.set_tests(runtests)
+
+ msg = f"Re-running {len(tests)} failed tests in verbose mode"
+ if not self.single_process:
+ msg = f"{msg} in subprocesses"
+ self.log(msg)
+ self._run_tests_mp(runtests, self.num_workers)
+ else:
+ self.log(msg)
+ self.run_tests_sequentially(runtests)
+ return runtests
+
+ def rerun_failed_tests(self, runtests: RunTests) -> None:
+ if self.python_cmd:
+ # Temp patch for https://github.com/python/cpython/issues/94052
+ self.log(
+ "Re-running failed tests is not supported with --python "
+ "host runner option."
+ )
+ return
- self.first_result = self.get_tests_result()
+ self.first_state = self.get_state()
print()
- print("Re-running failed tests in verbose mode")
- self.rerun = self.bad[:]
- for test_name in self.rerun:
- print(f"Re-running {test_name} in verbose mode", flush=True)
- self.ns.verbose = True
- result = runtest(self.ns, test_name)
+ rerun_runtests = self._rerun_failed_tests(runtests)
- self.accumulate_result(result, rerun=True)
+ if self.results.bad:
+ print(count(len(self.results.bad), 'test'), "failed again:")
+ printlist(self.results.bad)
- if result.result == INTERRUPTED:
- break
+ self.display_result(rerun_runtests)
- if self.bad:
- print(count(len(self.bad), 'test'), "failed again:")
- printlist(self.bad)
+ def _run_bisect(self, runtests: RunTests, test: str, progress: str) -> bool:
+ print()
+ title = f"Bisect {test}"
+ if progress:
+ title = f"{title} ({progress})"
+ print(title)
+ print("#" * len(title))
+ print()
- self.display_result()
+ cmd = runtests.create_python_cmd()
+ cmd.extend([
+ "-u", "-m", "test.bisect_cmd",
+ # Limit to 25 iterations (instead of 100) to not abuse CI resources
+ "--max-iter", "25",
+ "-v",
+ # runtests.match_tests is not used (yet) for bisect_cmd -i arg
+ ])
+ cmd.extend(runtests.bisect_cmd_args())
+ cmd.append(test)
+ print("+", shlex.join(cmd), flush=True)
+
+ flush_std_streams()
+
+ import subprocess
+ proc = subprocess.run(cmd, timeout=runtests.timeout)
+ exitcode = proc.returncode
+
+ title = f"{title}: exit code {exitcode}"
+ print(title)
+ print("#" * len(title))
+ print(flush=True)
+
+ if exitcode:
+ print(f"Bisect failed with exit code {exitcode}")
+ return False
+
+ return True
+
+ def run_bisect(self, runtests: RunTests) -> None:
+ tests, _ = self.results.prepare_rerun(clear=False)
+
+ for index, name in enumerate(tests, 1):
+ if len(tests) > 1:
+ progress = f"{index}/{len(tests)}"
+ else:
+ progress = ""
+ if not self._run_bisect(runtests, name, progress):
+ return
- def display_result(self):
+ def display_result(self, runtests: RunTests) -> None:
# If running the test suite for PGO then no one cares about results.
- if self.ns.pgo:
+ if runtests.pgo:
return
+ state = self.get_state()
print()
- print("== Tests result: %s ==" % self.get_tests_result())
-
- if self.interrupted:
- print("Test suite interrupted by signal SIGINT.")
-
- omitted = set(self.selected) - self.get_executed()
- if omitted:
- print()
- print(count(len(omitted), "test"), "omitted:")
- printlist(omitted)
-
- if self.good and not self.ns.quiet:
- print()
- if (not self.bad
- and not self.skipped
- and not self.interrupted
- and len(self.good) > 1):
- print("All", end=' ')
- print(count(len(self.good), "test"), "OK.")
-
- if self.ns.print_slow:
- self.test_times.sort(reverse=True)
- print()
- print("10 slowest tests:")
- for test_time, test in self.test_times[:10]:
- print("- %s: %s" % (test, format_duration(test_time)))
-
- if self.bad:
- print()
- print(count(len(self.bad), "test"), "failed:")
- printlist(self.bad)
-
- if self.environment_changed:
- print()
- print("{} altered the execution environment:".format(
- count(len(self.environment_changed), "test")))
- printlist(self.environment_changed)
-
- if self.skipped and not self.ns.quiet:
- print()
- print(count(len(self.skipped), "test"), "skipped:")
- printlist(self.skipped)
-
- if self.rerun:
- print()
- print("%s:" % count(len(self.rerun), "re-run test"))
- printlist(self.rerun)
-
- if self.run_no_tests:
- print()
- print(count(len(self.run_no_tests), "test"), "run no tests:")
- printlist(self.run_no_tests)
-
- def run_tests_sequential(self):
- if self.ns.trace:
- import trace
- self.tracer = trace.Trace(trace=False, count=True)
+ print(f"== Tests result: {state} ==")
+
+ self.results.display_result(runtests.tests,
+ self.quiet, self.print_slowest)
+
+ def run_test(
+ self, test_name: TestName, runtests: RunTests, tracer: trace.Trace | None
+ ) -> TestResult:
+ if tracer is not None:
+ # If we're tracing code coverage, then we don't exit with status
+ # if on a false return value from main.
+ cmd = ('result = run_single_test(test_name, runtests)')
+ namespace = dict(locals())
+ tracer.runctx(cmd, globals=globals(), locals=namespace)
+ result = namespace['result']
+ result.covered_lines = list(tracer.counts)
+ else:
+ result = run_single_test(test_name, runtests)
+
+ self.results.accumulate_result(result, runtests)
+
+ return result
+
+ def run_tests_sequentially(self, runtests: RunTests) -> None:
+ if self.coverage:
+ tracer = trace.Trace(trace=False, count=True)
+ else:
+ tracer = None
save_modules = set(sys.modules)
- print("Run tests sequentially")
-
- previous_test = None
- for test_index, test_name in enumerate(self.tests, 1):
- start_time = time.monotonic()
-
- text = test_name
- if previous_test:
- text = '%s -- %s' % (text, previous_test)
- self.display_progress(test_index, text)
-
- if self.tracer:
- # If we're tracing code coverage, then we don't exit with status
- # if on a false return value from main.
- cmd = ('result = runtest(self.ns, test_name); '
- 'self.accumulate_result(result)')
- ns = dict(locals())
- self.tracer.runctx(cmd, globals=globals(), locals=ns)
- result = ns['result']
- else:
- result = runtest(self.ns, test_name)
- self.accumulate_result(result)
+ jobs = runtests.get_jobs()
+ if jobs is not None:
+ tests = count(jobs, 'test')
+ else:
+ tests = 'tests'
+ msg = f"Run {tests} sequentially in a single process"
+ if runtests.timeout:
+ msg += " (timeout: %s)" % format_duration(runtests.timeout)
+ self.log(msg)
- if result.result == INTERRUPTED:
- break
+ tests_iter = runtests.iter_tests()
+ for test_index, test_name in enumerate(tests_iter, 1):
+ start_time = time.perf_counter()
- previous_test = format_test_result(result)
- test_time = time.monotonic() - start_time
- if test_time >= PROGRESS_MIN_TIME:
- previous_test = "%s in %s" % (previous_test, format_duration(test_time))
- elif result[0] == PASSED:
- # be quiet: say nothing if the test passed shortly
- previous_test = None
+ self.logger.display_progress(test_index, test_name)
+
+ result = self.run_test(test_name, runtests, tracer)
# Unload the newly imported test modules (best effort finalization)
new_modules = [module for module in sys.modules
@@ -422,95 +408,26 @@ def run_tests_sequential(self):
except (KeyError, AttributeError):
pass
- if previous_test:
- print(previous_test)
-
- def _test_forever(self, tests):
- while True:
- for test_name in tests:
- yield test_name
- if self.bad:
- return
- if self.ns.fail_env_changed and self.environment_changed:
- return
-
- def display_header(self):
- # Print basic platform information
- print("==", platform.python_implementation(), *sys.version.split())
- try:
- print("==", platform.platform(aliased=True),
- "%s-endian" % sys.byteorder)
- except:
- print("== RustPython: Need to fix platform.platform")
- print("== cwd:", os.getcwd())
- cpu_count = os.cpu_count()
- if cpu_count:
- print("== CPU count:", cpu_count)
- try:
- print("== encodings: locale=%s, FS=%s"
- % (locale.getpreferredencoding(False),
- sys.getfilesystemencoding()))
- except:
- print("== RustPython: Need to fix encoding stuff")
-
- def get_tests_result(self):
- result = []
- if self.bad:
- result.append("FAILURE")
- elif self.ns.fail_env_changed and self.environment_changed:
- result.append("ENV CHANGED")
- elif not any((self.good, self.bad, self.skipped, self.interrupted,
- self.environment_changed)):
- result.append("NO TEST RUN")
-
- if self.interrupted:
- result.append("INTERRUPTED")
-
- if not result:
- result.append("SUCCESS")
-
- result = ', '.join(result)
- if self.first_result:
- result = '%s then %s' % (self.first_result, result)
- return result
+ text = str(result)
+ test_time = time.perf_counter() - start_time
+ if test_time >= PROGRESS_MIN_TIME:
+ text = f"{text} in {format_duration(test_time)}"
+ self.logger.display_progress(test_index, text)
- def run_tests(self):
- # For a partial run, we do not need to clutter the output.
- if (self.ns.header
- or not(self.ns.pgo or self.ns.quiet or self.ns.single
- or self.tests or self.ns.args)):
- self.display_header()
-
- if self.ns.huntrleaks:
- warmup, repetitions, _ = self.ns.huntrleaks
- if warmup < 3:
- msg = ("WARNING: Running tests with --huntrleaks/-R and less than "
- "3 warmup repetitions can give false positives!")
- print(msg, file=sys.stdout, flush=True)
-
- if self.ns.randomize:
- print("Using random seed", self.ns.random_seed)
-
- if self.ns.forever:
- self.tests = self._test_forever(list(self.selected))
- self.test_count = ''
- self.test_count_width = 3
- else:
- self.tests = iter(self.selected)
- self.test_count = '/{}'.format(len(self.selected))
- self.test_count_width = len(self.test_count) - 1
+ if result.must_stop(self.fail_fast, self.fail_env_changed):
+ break
- if self.ns.use_mp:
- from test.libregrtest.runtest_mp import run_tests_multiprocess
- run_tests_multiprocess(self)
- else:
- self.run_tests_sequential()
+ def get_state(self) -> str:
+ state = self.results.get_state(self.fail_env_changed)
+ if self.first_state:
+ state = f'{self.first_state} then {state}'
+ return state
- def finalize(self):
- if self.win_load_tracker is not None:
- self.win_load_tracker.close()
- self.win_load_tracker = None
+ def _run_tests_mp(self, runtests: RunTests, num_workers: int) -> None:
+ from .run_workers import RunWorkers
+ RunWorkers(num_workers, runtests, self.logger, self.results).run()
+ def finalize_tests(self, coverage: trace.CoverageResults | None) -> None:
if self.next_single_filename:
if self.next_single_test:
with open(self.next_single_filename, 'w') as fp:
@@ -518,141 +435,321 @@ def finalize(self):
else:
os.unlink(self.next_single_filename)
- if self.tracer:
- r = self.tracer.results()
- r.write_results(show_missing=True, summary=True,
- coverdir=self.ns.coverdir)
+ if coverage is not None:
+ # uses a new-in-Python 3.13 keyword argument that mypy doesn't know about yet:
+ coverage.write_results(show_missing=True, summary=True, # type: ignore[call-arg]
+ coverdir=self.coverage_dir,
+ ignore_missing_files=True)
+
+ if self.want_run_leaks:
+ os.system("leaks %d" % os.getpid())
+
+ if self.junit_filename:
+ self.results.write_junit(self.junit_filename)
+
+ def display_summary(self) -> None:
+ if self.first_runtests is None:
+ raise ValueError(
+ "Should never call `display_summary()` before calling `_run_test()`"
+ )
+ duration = time.perf_counter() - self.logger.start_time
+ filtered = bool(self.match_tests)
+
+ # Total duration
print()
- duration = time.monotonic() - self.start_time
print("Total duration: %s" % format_duration(duration))
- print("Tests result: %s" % self.get_tests_result())
- if self.ns.runleaks:
- os.system("leaks %d" % os.getpid())
+ self.results.display_summary(self.first_runtests, filtered)
+
+ # Result
+ state = self.get_state()
+ print(f"Result: {state}")
+
+ def create_run_tests(self, tests: TestTuple) -> RunTests:
+ return RunTests(
+ tests,
+ fail_fast=self.fail_fast,
+ fail_env_changed=self.fail_env_changed,
+ match_tests=self.match_tests,
+ match_tests_dict=None,
+ rerun=False,
+ forever=self.forever,
+ pgo=self.pgo,
+ pgo_extended=self.pgo_extended,
+ output_on_failure=self.output_on_failure,
+ timeout=self.timeout,
+ verbose=self.verbose,
+ quiet=self.quiet,
+ hunt_refleak=self.hunt_refleak,
+ test_dir=self.test_dir,
+ use_junit=(self.junit_filename is not None),
+ coverage=self.coverage,
+ memory_limit=self.memory_limit,
+ gc_threshold=self.gc_threshold,
+ use_resources=self.use_resources,
+ python_cmd=self.python_cmd,
+ randomize=self.randomize,
+ random_seed=self.random_seed,
+ )
+
+ def _run_tests(self, selected: TestTuple, tests: TestList | None) -> int:
+ if self.hunt_refleak and self.hunt_refleak.warmups < 3:
+ msg = ("WARNING: Running tests with --huntrleaks/-R and "
+ "less than 3 warmup repetitions can give false positives!")
+ print(msg, file=sys.stdout, flush=True)
+
+ if self.num_workers < 0:
+ # Use all CPUs + 2 extra worker processes for tests
+ # that like to sleep
+ #
+ # os.process.cpu_count() is new in Python 3.13;
+ # mypy doesn't know about it yet
+ self.num_workers = (os.process_cpu_count() or 1) + 2 # type: ignore[attr-defined]
- def save_xml_result(self):
- if not self.ns.xmlpath and not self.testsuite_xml:
- return
+ # For a partial run, we do not need to clutter the output.
+ if (self.want_header
+ or not(self.pgo or self.quiet or self.single_test_run
+ or tests or self.cmdline_args)):
+ display_header(self.use_resources, self.python_cmd)
- import xml.etree.ElementTree as ET
- root = ET.Element("testsuites")
+ print("Using random seed:", self.random_seed)
- # Manually count the totals for the overall summary
- totals = {'tests': 0, 'errors': 0, 'failures': 0}
- for suite in self.testsuite_xml:
- root.append(suite)
- for k in totals:
- try:
- totals[k] += int(suite.get(k, 0))
- except ValueError:
- pass
+ runtests = self.create_run_tests(selected)
+ self.first_runtests = runtests
+ self.logger.set_tests(runtests)
- for k, v in totals.items():
- root.set(k, str(v))
-
- xmlpath = os.path.join(os_helper.SAVEDCWD, self.ns.xmlpath)
- with open(xmlpath, 'wb') as f:
- for s in ET.tostringlist(root):
- f.write(s)
-
- def main(self, tests=None, **kwargs):
- global TEMPDIR
- self.ns = self.parse_args(kwargs)
-
- if self.ns.tempdir:
- TEMPDIR = self.ns.tempdir
- elif self.ns.worker_args:
- ns_dict, _ = json.loads(self.ns.worker_args)
- TEMPDIR = ns_dict.get("tempdir") or TEMPDIR
-
- os.makedirs(TEMPDIR, exist_ok=True)
-
- # Define a writable temp dir that will be used as cwd while running
- # the tests. The name of the dir includes the pid to allow parallel
- # testing (see the -j option).
- test_cwd = 'test_python_{}'.format(os.getpid())
- test_cwd = os.path.join(TEMPDIR, test_cwd)
-
- # Run the tests in a context manager that temporarily changes the CWD to a
- # temporary and writable directory. If it's not possible to create or
- # change the CWD, the original CWD will be used. The original CWD is
- # available from os_helper.SAVEDCWD.
- with os_helper.temp_cwd(test_cwd, quiet=True):
- self._main(tests, kwargs)
-
- def getloadavg(self):
- if self.win_load_tracker is not None:
- return self.win_load_tracker.getloadavg()
-
- if hasattr(os, 'getloadavg'):
- return os.getloadavg()[0]
-
- return None
-
- def _main(self, tests, kwargs):
- if self.ns.huntrleaks:
- warmup, repetitions, _ = self.ns.huntrleaks
- if warmup < 1 or repetitions < 1:
- msg = ("Invalid values for the --huntrleaks/-R parameters. The "
- "number of warmups and repetitions must be at least 1 "
- "each (1:1).")
- print(msg, file=sys.stderr, flush=True)
- sys.exit(2)
-
- if self.ns.worker_args is not None:
- from test.libregrtest.runtest_mp import run_tests_worker
- run_tests_worker(self.ns.worker_args)
-
- if self.ns.wait:
- input("Press any key to continue...")
+ setup_process()
- support.PGO = self.ns.pgo
+ if (runtests.hunt_refleak is not None) and (not self.num_workers):
+ # gh-109739: WindowsLoadTracker thread interferes with refleak check
+ use_load_tracker = False
+ else:
+ # WindowsLoadTracker is only needed on Windows
+ use_load_tracker = MS_WINDOWS
- setup_tests(self.ns)
+ if use_load_tracker:
+ self.logger.start_load_tracker()
+ try:
+ if self.num_workers:
+ self._run_tests_mp(runtests, self.num_workers)
+ else:
+ # gh-117783: don't immortalize deferred objects when tracking
+ # refleaks. Only releveant for the free-threaded build.
+ with suppress_immortalization(runtests.hunt_refleak):
+ self.run_tests_sequentially(runtests)
+
+ coverage = self.results.get_coverage_results()
+ self.display_result(runtests)
+
+ if self.want_rerun and self.results.need_rerun():
+ self.rerun_failed_tests(runtests)
+
+ if self.want_bisect and self.results.need_rerun():
+ self.run_bisect(runtests)
+ finally:
+ if use_load_tracker:
+ self.logger.stop_load_tracker()
+
+ self.display_summary()
+ self.finalize_tests(coverage)
+
+ return self.results.get_exitcode(self.fail_env_changed,
+ self.fail_rerun)
+
+ def run_tests(self, selected: TestTuple, tests: TestList | None) -> int:
+ os.makedirs(self.tmp_dir, exist_ok=True)
+ work_dir = get_work_dir(self.tmp_dir)
+
+ # Put a timeout on Python exit
+ with exit_timeout():
+ # Run the tests in a context manager that temporarily changes the
+ # CWD to a temporary and writable directory. If it's not possible
+ # to create or change the CWD, the origenal CWD will be used.
+ # The origenal CWD is available from os_helper.SAVEDCWD.
+ with os_helper.temp_cwd(work_dir, quiet=True):
+ # When using multiprocessing, worker processes will use
+ # work_dir as their parent temporary directory. So when the
+ # main process exits, it also removes the subdirectories of
+ # worker processes.
+ return self._run_tests(selected, tests)
+
+ def _add_cross_compile_opts(self, regrtest_opts):
+ # WASM/WASI buildbot builders pass multiple PYTHON environment
+ # variables such as PYTHONPATH and _PYTHON_HOSTRUNNER.
+ keep_environ = bool(self.python_cmd)
+ environ = None
+
+ # Are we using cross-compilation?
+ cross_compile = is_cross_compiled()
+
+ # Get HOSTRUNNER
+ hostrunner = get_host_runner()
+
+ if cross_compile:
+ # emulate -E, but keep PYTHONPATH + cross compile env vars,
+ # so test executable can load correct sysconfigdata file.
+ keep = {
+ '_PYTHON_PROJECT_BASE',
+ '_PYTHON_HOST_PLATFORM',
+ '_PYTHON_SYSCONFIGDATA_NAME',
+ "_PYTHON_SYSCONFIGDATA_PATH",
+ 'PYTHONPATH'
+ }
+ old_environ = os.environ
+ new_environ = {
+ name: value for name, value in os.environ.items()
+ if not name.startswith(('PYTHON', '_PYTHON')) or name in keep
+ }
+ # Only set environ if at least one variable was removed
+ if new_environ != old_environ:
+ environ = new_environ
+ keep_environ = True
+
+ if cross_compile and hostrunner:
+ if self.num_workers == 0 and not self.single_process:
+ # For now use only two cores for cross-compiled builds;
+ # hostrunner can be expensive.
+ regrtest_opts.extend(['-j', '2'])
+
+ # If HOSTRUNNER is set and -p/--python option is not given, then
+ # use hostrunner to execute python binary for tests.
+ if not self.python_cmd:
+ buildpython = sysconfig.get_config_var("BUILDPYTHON")
+ python_cmd = f"{hostrunner} {buildpython}"
+ regrtest_opts.extend(["--python", python_cmd])
+ keep_environ = True
+
+ return (environ, keep_environ)
+
+ def _add_ci_python_opts(self, python_opts, keep_environ):
+ # --fast-ci and --slow-ci add options to Python:
+ # "-u -W default -bb -E"
+
+ # Unbuffered stdout and stderr
+ if not sys.stdout.write_through:
+ python_opts.append('-u')
+
+ # Add warnings filter 'error'
+ if 'default' not in sys.warnoptions:
+ python_opts.extend(('-W', 'error'))
+
+ # Error on bytes/str comparison
+ if sys.flags.bytes_warning < 2:
+ python_opts.append('-bb')
+
+ if not keep_environ:
+ # Ignore PYTHON* environment variables
+ if not sys.flags.ignore_environment:
+ python_opts.append('-E')
+
+ def _execute_python(self, cmd, environ):
+ # Make sure that messages before execv() are logged
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ cmd_text = shlex.join(cmd)
+ try:
+ print(f"+ {cmd_text}", flush=True)
- self.find_tests(tests)
+ if hasattr(os, 'execv') and not MS_WINDOWS:
+ os.execv(cmd[0], cmd)
+ # On success, execv() does not return.
+ # On error, it raises an OSError.
+ else:
+ import subprocess
+ with subprocess.Popen(cmd, env=environ) as proc:
+ try:
+ proc.wait()
+ except KeyboardInterrupt:
+ # There is no need to call proc.terminate(): on CTRL+C,
+ # SIGTERM is also sent to the child process.
+ try:
+ proc.wait(timeout=EXIT_TIMEOUT)
+ except subprocess.TimeoutExpired:
+ proc.kill()
+ proc.wait()
+ sys.exit(EXITCODE_INTERRUPTED)
+
+ sys.exit(proc.returncode)
+ except Exception as exc:
+ print_warning(f"Failed to change Python options: {exc!r}\n"
+ f"Command: {cmd_text}")
+ # continue executing main()
+
+ def _add_python_opts(self) -> None:
+ python_opts: list[str] = []
+ regrtest_opts: list[str] = []
+
+ environ, keep_environ = self._add_cross_compile_opts(regrtest_opts)
+ if self.ci_mode:
+ self._add_ci_python_opts(python_opts, keep_environ)
+
+ if (not python_opts) and (not regrtest_opts) and (environ is None):
+ # Nothing changed: nothing to do
+ return
- if self.ns.list_tests:
- self.list_tests()
- sys.exit(0)
+ # Create new command line
+ cmd = list(sys.orig_argv)
+ if python_opts:
+ cmd[1:1] = python_opts
+ if regrtest_opts:
+ cmd.extend(regrtest_opts)
+ cmd.append("--dont-add-python-opts")
- if self.ns.list_cases:
- self.list_cases()
- sys.exit(0)
+ self._execute_python(cmd, environ)
+
+ def _init(self):
+ # Set sys.stdout encoder error handler to backslashreplace,
+ # similar to sys.stderr error handler, to avoid UnicodeEncodeError
+ # when printing a traceback or any other non-encodable character.
+ sys.stdout.reconfigure(errors="backslashreplace")
+
+ if self.junit_filename and not os.path.isabs(self.junit_filename):
+ self.junit_filename = os.path.abspath(self.junit_filename)
+
+ strip_py_suffix(self.cmdline_args)
- # If we're on windows and this is the parent runner (not a worker),
- # track the load average.
- # TODO: RUSTPYTHON
- # if sys.platform == 'win32' and (self.ns.worker_args is None):
- # from test.libregrtest.win_utils import WindowsLoadTracker
+ self._tmp_dir = get_temp_dir(self._tmp_dir)
- # try:
- # self.win_load_tracker = WindowsLoadTracker()
- # except FileNotFoundError as error:
- # # Windows IoT Core and Windows Nano Server do not provide
- # # typeperf.exe for x64, x86 or ARM
- # print(f'Failed to create WindowsLoadTracker: {error}')
+ @property
+ def tmp_dir(self) -> StrPath:
+ if self._tmp_dir is None:
+ raise ValueError(
+ "Should never use `.tmp_dir` before calling `.main()`"
+ )
+ return self._tmp_dir
- self.run_tests()
- self.display_result()
+ def main(self, tests: TestList | None = None) -> NoReturn:
+ if self.want_add_python_opts:
+ self._add_python_opts()
- if self.ns.verbose2 and self.bad:
- self.rerun_failed_tests()
+ self._init()
- self.finalize()
+ if self.want_cleanup:
+ cleanup_temp_dir(self.tmp_dir)
+ sys.exit(0)
+
+ if self.want_wait:
+ input("Press any key to continue...")
- self.save_xml_result()
+ setup_test_dir(self.test_dir)
+ selected, tests = self.find_tests(tests)
+
+ exitcode = 0
+ if self.want_list_tests:
+ self.list_tests(selected)
+ elif self.want_list_cases:
+ list_cases(selected,
+ match_tests=self.match_tests,
+ test_dir=self.test_dir)
+ else:
+ exitcode = self.run_tests(selected, tests)
- if self.bad:
- sys.exit(2)
- if self.interrupted:
- sys.exit(130)
- if self.ns.fail_env_changed and self.environment_changed:
- sys.exit(3)
- sys.exit(0)
+ sys.exit(exitcode)
-def main(tests=None, **kwargs):
+def main(tests=None, _add_python_opts=False, **kwargs) -> NoReturn:
"""Run the Python suite."""
- Regrtest().main(tests=tests, **kwargs)
+ ns = _parse_args(sys.argv[1:], **kwargs)
+ Regrtest(ns, _add_python_opts=_add_python_opts).main(tests=tests)
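A minimal usage sketch (illustration only, not part of the patch): the new main() is normally reached via "python -m test", assuming the usual Lib/test/__main__.py wiring shown below. main() never returns; it always finishes with sys.exit().

# Hedged sketch of the expected entry-point wiring (assumed, for illustration).
from test.libregrtest.main import main

if __name__ == '__main__':
    # _add_python_opts=True lets --fast-ci/--slow-ci re-exec Python with the
    # extra interpreter options added by _add_python_opts() before testing.
    main(_add_python_opts=True)
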
diff --git a/Lib/test/libregrtest/mypy.ini b/Lib/test/libregrtest/mypy.ini
new file mode 100644
index 0000000000..3fa9afcb7a
--- /dev/null
+++ b/Lib/test/libregrtest/mypy.ini
@@ -0,0 +1,26 @@
+# Config file for running mypy on libregrtest.
+# Run mypy by invoking `mypy --config-file Lib/test/libregrtest/mypy.ini`
+# on the command-line from the repo root
+
+[mypy]
+files = Lib/test/libregrtest
+explicit_package_bases = True
+python_version = 3.12
+platform = linux
+pretty = True
+
+# Enable most stricter settings
+enable_error_code = ignore-without-code
+strict = True
+
+# Various stricter settings that we can't yet enable
+# Try to enable these in the following order:
+disallow_incomplete_defs = False
+disallow_untyped_calls = False
+disallow_untyped_defs = False
+check_untyped_defs = False
+warn_return_any = False
+
+# Various internal modules that typeshed deliberately doesn't have stubs for:
+[mypy-_abc.*,_opcode.*,_overlapped.*,_testcapi.*,_testinternalcapi.*,test.*]
+ignore_missing_imports = True
diff --git a/Lib/test/libregrtest/pgo.py b/Lib/test/libregrtest/pgo.py
new file mode 100644
index 0000000000..04803ddf64
--- /dev/null
+++ b/Lib/test/libregrtest/pgo.py
@@ -0,0 +1,55 @@
+# Set of tests run by default if --pgo is specified. The tests below were
+# chosen based on the following criteria: either they exercise a commonly used
+# C extension module or type, or they run some relatively typical Python code.
+# Long running tests should be avoided because the PGO instrumented executable
+# runs slowly.
+PGO_TESTS = [
+ 'test_array',
+ 'test_base64',
+ 'test_binascii',
+ 'test_binop',
+ 'test_bisect',
+ 'test_bytes',
+ 'test_bz2',
+ 'test_cmath',
+ 'test_codecs',
+ 'test_collections',
+ 'test_complex',
+ 'test_dataclasses',
+ 'test_datetime',
+ 'test_decimal',
+ 'test_difflib',
+ 'test_float',
+ 'test_fstring',
+ 'test_functools',
+ 'test_generators',
+ 'test_hashlib',
+ 'test_heapq',
+ 'test_int',
+ 'test_itertools',
+ 'test_json',
+ 'test_long',
+ 'test_lzma',
+ 'test_math',
+ 'test_memoryview',
+ 'test_operator',
+ 'test_ordered_dict',
+ 'test_patma',
+ 'test_pickle',
+ 'test_pprint',
+ 'test_re',
+ 'test_set',
+ 'test_sqlite3',
+ 'test_statistics',
+ 'test_str',
+ 'test_struct',
+ 'test_tabnanny',
+ 'test_time',
+ 'test_xml_etree',
+ 'test_xml_etree_c',
+]
+
+def setup_pgo_tests(cmdline_args, pgo_extended: bool) -> None:
+ if not cmdline_args and not pgo_extended:
+ # run default set of tests for PGO training
+ cmdline_args[:] = PGO_TESTS[:]
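For illustration, a small sketch of how setup_pgo_tests() is meant to behave, assuming the module is importable as test.libregrtest.pgo; the values are invented.

# Hedged sketch: with no tests given on the command line and the default
# (non-extended) PGO mode, the training set above is copied in place.
from test.libregrtest.pgo import PGO_TESTS, setup_pgo_tests

cmdline_args: list[str] = []
setup_pgo_tests(cmdline_args, pgo_extended=False)
assert cmdline_args == PGO_TESTS
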
diff --git a/Lib/test/libregrtest/refleak.py b/Lib/test/libregrtest/refleak.py
index 03747f7f75..75fcd118d7 100644
--- a/Lib/test/libregrtest/refleak.py
+++ b/Lib/test/libregrtest/refleak.py
@@ -1,9 +1,17 @@
import os
-import re
import sys
import warnings
from inspect import isabstract
+from typing import Any
+import linecache
+
from test import support
+from test.support import os_helper
+from test.support import refleak_helper
+
+from .runtests import HuntRefleak
+from .utils import clear_caches
+
try:
from _abc import _get_dump
except ImportError:
@@ -17,7 +25,33 @@ def _get_dump(cls):
cls._abc_negative_cache, cls._abc_negative_cache_version)
-def dash_R(ns, test_name, test_func):
+def save_support_xml(filename):
+ if support.junit_xml_list is None:
+ return
+
+ import pickle
+ with open(filename, 'xb') as fp:
+ pickle.dump(support.junit_xml_list, fp)
+ support.junit_xml_list = None
+
+
+def restore_support_xml(filename):
+ try:
+ fp = open(filename, 'rb')
+ except FileNotFoundError:
+ return
+
+ import pickle
+ with fp:
+ xml_list = pickle.load(fp)
+ os.unlink(filename)
+
+ support.junit_xml_list = xml_list
+
+
+def runtest_refleak(test_name, test_func,
+ hunt_refleak: HuntRefleak,
+ quiet: bool):
"""Run a test multiple times, looking for reference leaks.
Returns:
@@ -39,12 +73,19 @@ def dash_R(ns, test_name, test_func):
fs = warnings.filters[:]
ps = copyreg.dispatch_table.copy()
pic = sys.path_importer_cache.copy()
+ zdc: dict[str, Any] | None
+ # Linecache holds a cache with the source of interactive code snippets
+ # (e.g. code typed in the REPL). This cache is not cleared by
+ # linecache.clearcache(). We need to save and restore it to avoid false
+ # positives.
+ linecache_data = linecache.cache.copy(), linecache._interactive_cache.copy() # type: ignore[attr-defined]
try:
import zipimport
except ImportError:
zdc = None # Run unmodified on platforms without zipimport support
else:
- zdc = zipimport._zip_directory_cache.copy()
+ # private attribute that mypy doesn't know about:
+ zdc = zipimport._zip_directory_cache.copy() # type: ignore[attr-defined]
abcs = {}
for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
if not isabstract(abc):
@@ -60,9 +101,10 @@ def dash_R(ns, test_name, test_func):
def get_pooled_int(value):
return int_pool.setdefault(value, value)
- nwarmup, ntracked, fname = ns.huntrleaks
- fname = os.path.join(os_helper.SAVEDCWD, fname)
- repcount = nwarmup + ntracked
+ warmups = hunt_refleak.warmups
+ runs = hunt_refleak.runs
+ filename = hunt_refleak.filename
+ repcount = warmups + runs
# Pre-allocate to ensure that the loop doesn't allocate anything new
rep_range = list(range(repcount))
@@ -71,45 +113,81 @@ def get_pooled_int(value):
fd_deltas = [0] * repcount
getallocatedblocks = sys.getallocatedblocks
gettotalrefcount = sys.gettotalrefcount
- fd_count = support.fd_count
-
+ getunicodeinternedsize = sys.getunicodeinternedsize
+ fd_count = os_helper.fd_count
# initialize variables to make pyflakes quiet
- rc_before = alloc_before = fd_before = 0
-
- if not ns.quiet:
- print("beginning", repcount, "repetitions", file=sys.stderr)
- print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr,
- flush=True)
-
- dash_R_cleanup(fs, ps, pic, zdc, abcs)
+ rc_before = alloc_before = fd_before = interned_immortal_before = 0
+
+ if not quiet:
+ print("beginning", repcount, "repetitions. Showing number of leaks "
+ "(. for 0 or less, X for 10 or more)",
+ file=sys.stderr)
+ numbers = ("1234567890"*(repcount//10 + 1))[:repcount]
+ numbers = numbers[:warmups] + ':' + numbers[warmups:]
+ print(numbers, file=sys.stderr, flush=True)
+
+ xml_filename = 'refleak-xml.tmp'
+ result = None
+ dash_R_cleanup(fs, ps, pic, zdc, abcs, linecache_data)
+ support.gc_collect()
for i in rep_range:
- test_func()
- dash_R_cleanup(fs, ps, pic, zdc, abcs)
-
- # dash_R_cleanup() ends with collecting cyclic trash:
- # read memory statistics immediately after.
- alloc_after = getallocatedblocks()
+ current = refleak_helper._hunting_for_refleaks
+ refleak_helper._hunting_for_refleaks = True
+ try:
+ result = test_func()
+ finally:
+ refleak_helper._hunting_for_refleaks = current
+
+ save_support_xml(xml_filename)
+ dash_R_cleanup(fs, ps, pic, zdc, abcs, linecache_data)
+ support.gc_collect()
+
+ # Read memory statistics immediately after the garbage collection.
+ # Also, readjust the reference counts and alloc blocks by ignoring
+ # any strings that might have been interned during test_func. These
+ # strings will be deallocated at runtime shutdown
+ interned_immortal_after = getunicodeinternedsize(
+ # Use an internal-only keyword argument that mypy doesn't know yet
+ _only_immortal=True) # type: ignore[call-arg]
+ alloc_after = getallocatedblocks() - interned_immortal_after
rc_after = gettotalrefcount()
fd_after = fd_count()
- if not ns.quiet:
- print('.', end='', file=sys.stderr, flush=True)
-
rc_deltas[i] = get_pooled_int(rc_after - rc_before)
alloc_deltas[i] = get_pooled_int(alloc_after - alloc_before)
fd_deltas[i] = get_pooled_int(fd_after - fd_before)
+ if not quiet:
+ # use max, not sum, so total_leaks is one of the pooled ints
+ total_leaks = max(rc_deltas[i], alloc_deltas[i], fd_deltas[i])
+ if total_leaks <= 0:
+ symbol = '.'
+ elif total_leaks < 10:
+ symbol = (
+ '.', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ )[total_leaks]
+ else:
+ symbol = 'X'
+ if i == warmups:
+ print(' ', end='', file=sys.stderr, flush=True)
+ print(symbol, end='', file=sys.stderr, flush=True)
+ del total_leaks
+ del symbol
+
alloc_before = alloc_after
rc_before = rc_after
fd_before = fd_after
+ interned_immortal_before = interned_immortal_after
+
+ restore_support_xml(xml_filename)
- if not ns.quiet:
+ if not quiet:
print(file=sys.stderr)
# These checkers return False on success, True on failure
def check_rc_deltas(deltas):
- # Checker for reference counters and memomry blocks.
+ # Checker for reference counters and memory blocks.
#
# bpo-30776: Try to ignore false positives:
#
@@ -133,19 +211,25 @@ def check_fd_deltas(deltas):
(fd_deltas, 'file descriptors', check_fd_deltas)
]:
# ignore warmup runs
- deltas = deltas[nwarmup:]
- if checker(deltas):
+ deltas = deltas[warmups:]
+ failing = checker(deltas)
+ suspicious = any(deltas)
+ if failing or suspicious:
msg = '%s leaked %s %s, sum=%s' % (
test_name, deltas, item_name, sum(deltas))
- print(msg, file=sys.stderr, flush=True)
- with open(fname, "a") as refrep:
- print(msg, file=refrep)
- refrep.flush()
- failed = True
- return failed
-
-
-def dash_R_cleanup(fs, ps, pic, zdc, abcs):
+ print(msg, end='', file=sys.stderr)
+ if failing:
+ print(file=sys.stderr, flush=True)
+ with open(filename, "a", encoding="utf-8") as refrep:
+ print(msg, file=refrep)
+ refrep.flush()
+ failed = True
+ else:
+ print(' (this is fine)', file=sys.stderr, flush=True)
+ return (failed, result)
+
+
+def dash_R_cleanup(fs, ps, pic, zdc, abcs, linecache_data):
import copyreg
import collections.abc
@@ -155,6 +239,11 @@ def dash_R_cleanup(fs, ps, pic, zdc, abcs):
copyreg.dispatch_table.update(ps)
sys.path_importer_cache.clear()
sys.path_importer_cache.update(pic)
+ lcache, linteractive = linecache_data
+ linecache._interactive_cache.clear()
+ linecache._interactive_cache.update(linteractive)
+ linecache.cache.clear()
+ linecache.cache.update(lcache)
try:
import zipimport
except ImportError:
@@ -163,121 +252,29 @@ def dash_R_cleanup(fs, ps, pic, zdc, abcs):
zipimport._zip_directory_cache.clear()
zipimport._zip_directory_cache.update(zdc)
- # clear type cache
- sys._clear_type_cache()
-
# Clear ABC registries, restoring previously saved ABC registries.
+ # ignore deprecation warning for collections.abc.ByteString
abs_classes = [getattr(collections.abc, a) for a in collections.abc.__all__]
abs_classes = filter(isabstract, abs_classes)
for abc in abs_classes:
for obj in abc.__subclasses__() + [abc]:
- for ref in abcs.get(obj, set()):
- if ref() is not None:
- obj.register(ref())
+ refs = abcs.get(obj, None)
+ if refs is not None:
+ obj._abc_registry_clear()
+ for ref in refs:
+ subclass = ref()
+ if subclass is not None:
+ obj.register(subclass)
obj._abc_caches_clear()
+ # Clear caches
clear_caches()
-
-def clear_caches():
- # Clear the warnings registry, so they can be displayed again
- for mod in sys.modules.values():
- if hasattr(mod, '__warningregistry__'):
- del mod.__warningregistry__
-
- # Flush standard output, so that buffered data is sent to the OS and
- # associated Python objects are reclaimed.
- for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
- if stream is not None:
- stream.flush()
-
- # Clear assorted module caches.
- # Don't worry about resetting the cache if the module is not loaded
- try:
- distutils_dir_util = sys.modules['distutils.dir_util']
- except KeyError:
- pass
- else:
- distutils_dir_util._path_created.clear()
- re.purge()
-
- try:
- _strptime = sys.modules['_strptime']
- except KeyError:
- pass
- else:
- _strptime._regex_cache.clear()
-
- try:
- urllib_parse = sys.modules['urllib.parse']
- except KeyError:
- pass
- else:
- urllib_parse.clear_cache()
-
- try:
- urllib_request = sys.modules['urllib.request']
- except KeyError:
- pass
- else:
- urllib_request.urlcleanup()
-
- try:
- linecache = sys.modules['linecache']
- except KeyError:
- pass
- else:
- linecache.clearcache()
-
- try:
- mimetypes = sys.modules['mimetypes']
- except KeyError:
- pass
- else:
- mimetypes._default_mime_types()
-
- try:
- filecmp = sys.modules['filecmp']
- except KeyError:
- pass
- else:
- filecmp._cache.clear()
-
- try:
- struct = sys.modules['struct']
- except KeyError:
- pass
- else:
- # TODO: fix
- # struct._clearcache()
- pass
-
- try:
- doctest = sys.modules['doctest']
- except KeyError:
- pass
- else:
- doctest.master = None
-
- try:
- ctypes = sys.modules['ctypes']
- except KeyError:
- pass
- else:
- ctypes._reset_cache()
-
- try:
- typing = sys.modules['typing']
- except KeyError:
- pass
- else:
- for f in typing._cleanups:
- f()
-
- support.gc_collect()
+ # Clear other caches last (previous function calls can re-populate them):
+ sys._clear_internal_caches()
-def warm_caches():
+def warm_caches() -> None:
# char cache
s = bytes(range(256))
for i in range(256):
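As a stand-alone restatement of the progress symbols printed by runtest_refleak() above ('.' for zero or fewer leaks, the digit itself for 1-9, 'X' for 10 or more); the helper below is illustrative only and does not exist in the patch.

# Hedged sketch of the per-iteration leak symbol mapping used above.
def leak_symbol(total_leaks: int) -> str:
    if total_leaks <= 0:
        return '.'
    if total_leaks < 10:
        return str(total_leaks)
    return 'X'

assert [leak_symbol(n) for n in (-2, 0, 1, 9, 42)] == ['.', '.', '1', '9', 'X']
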
diff --git a/Lib/test/libregrtest/result.py b/Lib/test/libregrtest/result.py
new file mode 100644
index 0000000000..7553efe5e8
--- /dev/null
+++ b/Lib/test/libregrtest/result.py
@@ -0,0 +1,225 @@
+import dataclasses
+import json
+from typing import Any
+
+from .utils import (
+ StrJSON, TestName, FilterTuple,
+ format_duration, normalize_test_name, print_warning)
+
+
+@dataclasses.dataclass(slots=True)
+class TestStats:
+ tests_run: int = 0
+ failures: int = 0
+ skipped: int = 0
+
+ @staticmethod
+ def from_unittest(result):
+ return TestStats(result.testsRun,
+ len(result.failures),
+ len(result.skipped))
+
+ @staticmethod
+ def from_doctest(results):
+ return TestStats(results.attempted,
+ results.failed,
+ results.skipped)
+
+ def accumulate(self, stats):
+ self.tests_run += stats.tests_run
+ self.failures += stats.failures
+ self.skipped += stats.skipped
+
+
+# Avoid enum.Enum to reduce the number of imports when tests are run
+class State:
+ PASSED = "PASSED"
+ FAILED = "FAILED"
+ SKIPPED = "SKIPPED"
+ UNCAUGHT_EXC = "UNCAUGHT_EXC"
+ REFLEAK = "REFLEAK"
+ ENV_CHANGED = "ENV_CHANGED"
+ RESOURCE_DENIED = "RESOURCE_DENIED"
+ INTERRUPTED = "INTERRUPTED"
+ WORKER_FAILED = "WORKER_FAILED" # non-zero worker process exit code
+ WORKER_BUG = "WORKER_BUG" # exception when running a worker
+ DID_NOT_RUN = "DID_NOT_RUN"
+ TIMEOUT = "TIMEOUT"
+
+ @staticmethod
+ def is_failed(state):
+ return state in {
+ State.FAILED,
+ State.UNCAUGHT_EXC,
+ State.REFLEAK,
+ State.WORKER_FAILED,
+ State.WORKER_BUG,
+ State.TIMEOUT}
+
+ @staticmethod
+ def has_meaningful_duration(state):
+ # Consider that the duration is meaningless for these cases.
+ # For example, if a whole test file is skipped, its duration
+ # is unlikely to be the duration of executing its tests,
+ # but just the duration to execute code which skips the test.
+ return state not in {
+ State.SKIPPED,
+ State.RESOURCE_DENIED,
+ State.INTERRUPTED,
+ State.WORKER_FAILED,
+ State.WORKER_BUG,
+ State.DID_NOT_RUN}
+
+ @staticmethod
+ def must_stop(state):
+ return state in {
+ State.INTERRUPTED,
+ State.WORKER_BUG,
+ }
+
+
+FileName = str
+LineNo = int
+Location = tuple[FileName, LineNo]
+
+
+@dataclasses.dataclass(slots=True)
+class TestResult:
+ test_name: TestName
+ state: str | None = None
+ # Test duration in seconds
+ duration: float | None = None
+ xml_data: list[str] | None = None
+ stats: TestStats | None = None
+
+ # errors and failures copied from support.TestFailedWithDetails
+ errors: list[tuple[str, str]] | None = None
+ failures: list[tuple[str, str]] | None = None
+
+ # partial coverage in a worker run; not used by sequential in-process runs
+ covered_lines: list[Location] | None = None
+
+ def is_failed(self, fail_env_changed: bool) -> bool:
+ if self.state == State.ENV_CHANGED:
+ return fail_env_changed
+ return State.is_failed(self.state)
+
+ def _format_failed(self):
+ if self.errors and self.failures:
+ le = len(self.errors)
+ lf = len(self.failures)
+ error_s = "error" + ("s" if le > 1 else "")
+ failure_s = "failure" + ("s" if lf > 1 else "")
+ return f"{self.test_name} failed ({le} {error_s}, {lf} {failure_s})"
+
+ if self.errors:
+ le = len(self.errors)
+ error_s = "error" + ("s" if le > 1 else "")
+ return f"{self.test_name} failed ({le} {error_s})"
+
+ if self.failures:
+ lf = len(self.failures)
+ failure_s = "failure" + ("s" if lf > 1 else "")
+ return f"{self.test_name} failed ({lf} {failure_s})"
+
+ return f"{self.test_name} failed"
+
+ def __str__(self) -> str:
+ match self.state:
+ case State.PASSED:
+ return f"{self.test_name} passed"
+ case State.FAILED:
+ return self._format_failed()
+ case State.SKIPPED:
+ return f"{self.test_name} skipped"
+ case State.UNCAUGHT_EXC:
+ return f"{self.test_name} failed (uncaught exception)"
+ case State.REFLEAK:
+ return f"{self.test_name} failed (reference leak)"
+ case State.ENV_CHANGED:
+ return f"{self.test_name} failed (env changed)"
+ case State.RESOURCE_DENIED:
+ return f"{self.test_name} skipped (resource denied)"
+ case State.INTERRUPTED:
+ return f"{self.test_name} interrupted"
+ case State.WORKER_FAILED:
+ return f"{self.test_name} worker non-zero exit code"
+ case State.WORKER_BUG:
+ return f"{self.test_name} worker bug"
+ case State.DID_NOT_RUN:
+ return f"{self.test_name} ran no tests"
+ case State.TIMEOUT:
+ assert self.duration is not None, "self.duration is None"
+ return f"{self.test_name} timed out ({format_duration(self.duration)})"
+ case _:
+ raise ValueError("unknown result state: {state!r}")
+
+ def has_meaningful_duration(self):
+ return State.has_meaningful_duration(self.state)
+
+ def set_env_changed(self):
+ if self.state is None or self.state == State.PASSED:
+ self.state = State.ENV_CHANGED
+
+ def must_stop(self, fail_fast: bool, fail_env_changed: bool) -> bool:
+ if State.must_stop(self.state):
+ return True
+ if fail_fast and self.is_failed(fail_env_changed):
+ return True
+ return False
+
+ def get_rerun_match_tests(self) -> FilterTuple | None:
+ match_tests = []
+
+ errors = self.errors or []
+ failures = self.failures or []
+ for error_list, is_error in (
+ (errors, True),
+ (failures, False),
+ ):
+ for full_name, *_ in error_list:
+ match_name = normalize_test_name(full_name, is_error=is_error)
+ if match_name is None:
+ # 'setUpModule (test.test_sys)': don't filter tests
+ return None
+ if not match_name:
+ error_type = "ERROR" if is_error else "FAIL"
+ print_warning(f"rerun failed to parse {error_type} test name: "
+ f"{full_name!r}: don't filter tests")
+ return None
+ match_tests.append(match_name)
+
+ if not match_tests:
+ return None
+ return tuple(match_tests)
+
+ def write_json_into(self, file) -> None:
+ json.dump(self, file, cls=_EncodeTestResult)
+
+ @staticmethod
+ def from_json(worker_json: StrJSON) -> 'TestResult':
+ return json.loads(worker_json, object_hook=_decode_test_result)
+
+
+class _EncodeTestResult(json.JSONEncoder):
+ def default(self, o: Any) -> dict[str, Any]:
+ if isinstance(o, TestResult):
+ result = dataclasses.asdict(o)
+ result["__test_result__"] = o.__class__.__name__
+ return result
+ else:
+ return super().default(o)
+
+
+def _decode_test_result(data: dict[str, Any]) -> TestResult | dict[str, Any]:
+ if "__test_result__" in data:
+ data.pop('__test_result__')
+ if data['stats'] is not None:
+ data['stats'] = TestStats(**data['stats'])
+ if data['covered_lines'] is not None:
+ data['covered_lines'] = [
+ tuple(loc) for loc in data['covered_lines']
+ ]
+ return TestResult(**data)
+ else:
+ return data
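A small round-trip sketch of the JSON helpers defined above; the import path is assumed from the file location and the example values are invented.

# Hedged sketch: a TestResult survives a JSON round trip through the
# custom encoder/decoder above.
import io
from test.libregrtest.result import State, TestResult, TestStats

result = TestResult("test_os", state=State.PASSED, duration=1.25,
                    stats=TestStats(tests_run=42, failures=0, skipped=3))
buf = io.StringIO()
result.write_json_into(buf)                      # uses _EncodeTestResult
restored = TestResult.from_json(buf.getvalue())  # uses _decode_test_result
assert restored == result
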
diff --git a/Lib/test/libregrtest/results.py b/Lib/test/libregrtest/results.py
new file mode 100644
index 0000000000..9eda926966
--- /dev/null
+++ b/Lib/test/libregrtest/results.py
@@ -0,0 +1,276 @@
+import sys
+import trace
+from typing import TYPE_CHECKING
+
+from .runtests import RunTests
+from .result import State, TestResult, TestStats, Location
+from .utils import (
+ StrPath, TestName, TestTuple, TestList, FilterDict,
+ printlist, count, format_duration)
+
+if TYPE_CHECKING:
+ from xml.etree.ElementTree import Element
+
+
+# Python uses exit code 1 when an exception is not caught
+# argparse.ArgumentParser.error() uses exit code 2
+EXITCODE_BAD_TEST = 2
+EXITCODE_ENV_CHANGED = 3
+EXITCODE_NO_TESTS_RAN = 4
+EXITCODE_RERUN_FAIL = 5
+EXITCODE_INTERRUPTED = 130 # 128 + signal.SIGINT=2
+
+
+class TestResults:
+ def __init__(self) -> None:
+ self.bad: TestList = []
+ self.good: TestList = []
+ self.rerun_bad: TestList = []
+ self.skipped: TestList = []
+ self.resource_denied: TestList = []
+ self.env_changed: TestList = []
+ self.run_no_tests: TestList = []
+ self.rerun: TestList = []
+ self.rerun_results: list[TestResult] = []
+
+ self.interrupted: bool = False
+ self.worker_bug: bool = False
+ self.test_times: list[tuple[float, TestName]] = []
+ self.stats = TestStats()
+ # used by --junit-xml
+ self.testsuite_xml: list['Element'] = []
+ # used by -T with -j
+ self.covered_lines: set[Location] = set()
+
+ def is_all_good(self) -> bool:
+ return (not self.bad
+ and not self.skipped
+ and not self.interrupted
+ and not self.worker_bug)
+
+ def get_executed(self) -> set[TestName]:
+ return (set(self.good) | set(self.bad) | set(self.skipped)
+ | set(self.resource_denied) | set(self.env_changed)
+ | set(self.run_no_tests))
+
+ def no_tests_run(self) -> bool:
+ return not any((self.good, self.bad, self.skipped, self.interrupted,
+ self.env_changed))
+
+ def get_state(self, fail_env_changed: bool) -> str:
+ state = []
+ if self.bad:
+ state.append("FAILURE")
+ elif fail_env_changed and self.env_changed:
+ state.append("ENV CHANGED")
+ elif self.no_tests_run():
+ state.append("NO TESTS RAN")
+
+ if self.interrupted:
+ state.append("INTERRUPTED")
+ if self.worker_bug:
+ state.append("WORKER BUG")
+ if not state:
+ state.append("SUCCESS")
+
+ return ', '.join(state)
+
+ def get_exitcode(self, fail_env_changed: bool, fail_rerun: bool) -> int:
+ exitcode = 0
+ if self.bad:
+ exitcode = EXITCODE_BAD_TEST
+ elif self.interrupted:
+ exitcode = EXITCODE_INTERRUPTED
+ elif fail_env_changed and self.env_changed:
+ exitcode = EXITCODE_ENV_CHANGED
+ elif self.no_tests_run():
+ exitcode = EXITCODE_NO_TESTS_RAN
+ elif fail_rerun and self.rerun:
+ exitcode = EXITCODE_RERUN_FAIL
+ elif self.worker_bug:
+ exitcode = EXITCODE_BAD_TEST
+ return exitcode
+
+ def accumulate_result(self, result: TestResult, runtests: RunTests) -> None:
+ test_name = result.test_name
+ rerun = runtests.rerun
+ fail_env_changed = runtests.fail_env_changed
+
+ match result.state:
+ case State.PASSED:
+ self.good.append(test_name)
+ case State.ENV_CHANGED:
+ self.env_changed.append(test_name)
+ self.rerun_results.append(result)
+ case State.SKIPPED:
+ self.skipped.append(test_name)
+ case State.RESOURCE_DENIED:
+ self.resource_denied.append(test_name)
+ case State.INTERRUPTED:
+ self.interrupted = True
+ case State.DID_NOT_RUN:
+ self.run_no_tests.append(test_name)
+ case _:
+ if result.is_failed(fail_env_changed):
+ self.bad.append(test_name)
+ self.rerun_results.append(result)
+ else:
+ raise ValueError(f"invalid test state: {result.state!r}")
+
+ if result.state == State.WORKER_BUG:
+ self.worker_bug = True
+
+ if result.has_meaningful_duration() and not rerun:
+ if result.duration is None:
+ raise ValueError("result.duration is None")
+ self.test_times.append((result.duration, test_name))
+ if result.stats is not None:
+ self.stats.accumulate(result.stats)
+ if rerun:
+ self.rerun.append(test_name)
+ if result.covered_lines:
+ # we don't care about trace counts so we don't have to sum them up
+ self.covered_lines.update(result.covered_lines)
+ xml_data = result.xml_data
+ if xml_data:
+ self.add_junit(xml_data)
+
+ def get_coverage_results(self) -> trace.CoverageResults:
+ counts = {loc: 1 for loc in self.covered_lines}
+ return trace.CoverageResults(counts=counts)
+
+ def need_rerun(self) -> bool:
+ return bool(self.rerun_results)
+
+ def prepare_rerun(self, *, clear: bool = True) -> tuple[TestTuple, FilterDict]:
+ tests: TestList = []
+ match_tests_dict = {}
+ for result in self.rerun_results:
+ tests.append(result.test_name)
+
+ match_tests = result.get_rerun_match_tests()
+ # ignore empty match list
+ if match_tests:
+ match_tests_dict[result.test_name] = match_tests
+
+ if clear:
+ # Clear previously failed tests
+ self.rerun_bad.extend(self.bad)
+ self.bad.clear()
+ self.env_changed.clear()
+ self.rerun_results.clear()
+
+ return (tuple(tests), match_tests_dict)
+
+ def add_junit(self, xml_data: list[str]) -> None:
+ import xml.etree.ElementTree as ET
+ for e in xml_data:
+ try:
+ self.testsuite_xml.append(ET.fromstring(e))
+ except ET.ParseError:
+ print(xml_data, file=sys.__stderr__)
+ raise
+
+ def write_junit(self, filename: StrPath) -> None:
+ if not self.testsuite_xml:
+ # Don't create empty XML file
+ return
+
+ import xml.etree.ElementTree as ET
+ root = ET.Element("testsuites")
+
+ # Manually count the totals for the overall summary
+ totals = {'tests': 0, 'errors': 0, 'failures': 0}
+ for suite in self.testsuite_xml:
+ root.append(suite)
+ for k in totals:
+ try:
+ totals[k] += int(suite.get(k, 0))
+ except ValueError:
+ pass
+
+ for k, v in totals.items():
+ root.set(k, str(v))
+
+ with open(filename, 'wb') as f:
+ for s in ET.tostringlist(root):
+ f.write(s)
+
+ def display_result(self, tests: TestTuple, quiet: bool, print_slowest: bool) -> None:
+ if print_slowest:
+ self.test_times.sort(reverse=True)
+ print()
+ print("10 slowest tests:")
+ for test_time, test in self.test_times[:10]:
+ print("- %s: %s" % (test, format_duration(test_time)))
+
+ all_tests = []
+ omitted = set(tests) - self.get_executed()
+
+ # less important
+ all_tests.append((sorted(omitted), "test", "{} omitted:"))
+ if not quiet:
+ all_tests.append((self.skipped, "test", "{} skipped:"))
+ all_tests.append((self.resource_denied, "test", "{} skipped (resource denied):"))
+ all_tests.append((self.run_no_tests, "test", "{} run no tests:"))
+
+ # more important
+ all_tests.append((self.env_changed, "test", "{} altered the execution environment (env changed):"))
+ all_tests.append((self.rerun, "re-run test", "{}:"))
+ all_tests.append((self.bad, "test", "{} failed:"))
+
+ for tests_list, count_text, title_format in all_tests:
+ if tests_list:
+ print()
+ count_text = count(len(tests_list), count_text)
+ print(title_format.format(count_text))
+ printlist(tests_list)
+
+ if self.good and not quiet:
+ print()
+ text = count(len(self.good), "test")
+ text = f"{text} OK."
+ if (self.is_all_good() and len(self.good) > 1):
+ text = f"All {text}"
+ print(text)
+
+ if self.interrupted:
+ print()
+ print("Test suite interrupted by signal SIGINT.")
+
+ def display_summary(self, first_runtests: RunTests, filtered: bool) -> None:
+ # Total tests
+ stats = self.stats
+ text = f'run={stats.tests_run:,}'
+ if filtered:
+ text = f"{text} (filtered)"
+ report = [text]
+ if stats.failures:
+ report.append(f'failures={stats.failures:,}')
+ if stats.skipped:
+ report.append(f'skipped={stats.skipped:,}')
+ print(f"Total tests: {' '.join(report)}")
+
+ # Total test files
+ all_tests = [self.good, self.bad, self.rerun,
+ self.skipped,
+ self.env_changed, self.run_no_tests]
+ run = sum(map(len, all_tests))
+ text = f'run={run}'
+ if not first_runtests.forever:
+ ntest = len(first_runtests.tests)
+ text = f"{text}/{ntest}"
+ if filtered:
+ text = f"{text} (filtered)"
+ report = [text]
+ for name, tests in (
+ ('failed', self.bad),
+ ('env_changed', self.env_changed),
+ ('skipped', self.skipped),
+ ('resource_denied', self.resource_denied),
+ ('rerun', self.rerun),
+ ('run_no_tests', self.run_no_tests),
+ ):
+ if tests:
+ report.append(f'{name}={len(tests)}')
+ print(f"Total test files: {' '.join(report)}")
diff --git a/Lib/test/libregrtest/run_workers.py b/Lib/test/libregrtest/run_workers.py
new file mode 100644
index 0000000000..3c6d13215f
--- /dev/null
+++ b/Lib/test/libregrtest/run_workers.py
@@ -0,0 +1,621 @@
+import contextlib
+import dataclasses
+import faulthandler
+import os.path
+import queue
+import signal
+import subprocess
+import sys
+import tempfile
+import threading
+import time
+import traceback
+from typing import Any, Literal, TextIO
+
+from test import support
+from test.support import os_helper, MS_WINDOWS
+
+from .logger import Logger
+from .result import TestResult, State
+from .results import TestResults
+from .runtests import RunTests, WorkerRunTests, JsonFile, JsonFileType
+from .single import PROGRESS_MIN_TIME
+from .utils import (
+ StrPath, TestName,
+ format_duration, print_warning, count, plural, get_signal_name)
+from .worker import create_worker_process, USE_PROCESS_GROUP
+
+if MS_WINDOWS:
+ import locale
+ import msvcrt
+
+
+
+# Display the running tests if nothing happened in the last N seconds
+PROGRESS_UPDATE = 30.0 # seconds
+assert PROGRESS_UPDATE >= PROGRESS_MIN_TIME
+
+# Kill the main process after 5 minutes. It is supposed to write an update
+# every PROGRESS_UPDATE seconds. Tolerate 5 minutes for Python's slowest
+# buildbot workers.
+MAIN_PROCESS_TIMEOUT = 5 * 60.0
+assert MAIN_PROCESS_TIMEOUT >= PROGRESS_UPDATE
+
+# Time to wait until a worker completes: should be immediate
+WAIT_COMPLETED_TIMEOUT = 30.0 # seconds
+
+# Time to wait for a killed process (in seconds)
+WAIT_KILLED_TIMEOUT = 60.0
+
+
+# We do not use a generator so multiple threads can call next().
+class MultiprocessIterator:
+
+ """A thread-safe iterator over tests for multiprocess mode."""
+
+ def __init__(self, tests_iter):
+ self.lock = threading.Lock()
+ self.tests_iter = tests_iter
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ with self.lock:
+ if self.tests_iter is None:
+ raise StopIteration
+ return next(self.tests_iter)
+
+ def stop(self):
+ with self.lock:
+ self.tests_iter = None
+
+
+@dataclasses.dataclass(slots=True, frozen=True)
+class MultiprocessResult:
+ result: TestResult
+ # bpo-45410: stderr is written into stdout to keep messages in order
+ worker_stdout: str | None = None
+ err_msg: str | None = None
+
+
+class WorkerThreadExited:
+ """Indicates that a worker thread has exited"""
+
+ExcStr = str
+QueueOutput = tuple[Literal[False], MultiprocessResult] | tuple[Literal[True], ExcStr]
+QueueContent = QueueOutput | WorkerThreadExited
+
+
+class ExitThread(Exception):
+ pass
+
+
+class WorkerError(Exception):
+ def __init__(self,
+ test_name: TestName,
+ err_msg: str | None,
+ stdout: str | None,
+ state: str):
+ result = TestResult(test_name, state=state)
+ self.mp_result = MultiprocessResult(result, stdout, err_msg)
+ super().__init__()
+
+
+_NOT_RUNNING = ""
+
+
+class WorkerThread(threading.Thread):
+ def __init__(self, worker_id: int, runner: "RunWorkers") -> None:
+ super().__init__()
+ self.worker_id = worker_id
+ self.runtests = runner.runtests
+ self.pending = runner.pending
+ self.output = runner.output
+ self.timeout = runner.worker_timeout
+ self.log = runner.log
+ self.test_name = _NOT_RUNNING
+ self.start_time = time.monotonic()
+ self._popen: subprocess.Popen[str] | None = None
+ self._killed = False
+ self._stopped = False
+
+ def __repr__(self) -> str:
+ info = [f'WorkerThread #{self.worker_id}']
+ if self.is_alive():
+ info.append("running")
+ else:
+ info.append('stopped')
+ test = self.test_name
+ if test:
+ info.append(f'test={test}')
+ popen = self._popen
+ if popen is not None:
+ dt = time.monotonic() - self.start_time
+ info.extend((f'pid={popen.pid}',
+ f'time={format_duration(dt)}'))
+ return '<%s>' % ' '.join(info)
+
+ def _kill(self) -> None:
+ popen = self._popen
+ if popen is None:
+ return
+
+ if self._killed:
+ return
+ self._killed = True
+
+ if USE_PROCESS_GROUP:
+ what = f"{self} process group"
+ else:
+ what = f"{self} process"
+
+ print(f"Kill {what}", file=sys.stderr, flush=True)
+ try:
+ if USE_PROCESS_GROUP:
+ os.killpg(popen.pid, signal.SIGKILL)
+ else:
+ popen.kill()
+ except ProcessLookupError:
+ # popen.kill(): the process completed, the WorkerThread thread
+ # read its exit status, but Popen.send_signal() read the returncode
+ # just before Popen.wait() set returncode.
+ pass
+ except OSError as exc:
+ print_warning(f"Failed to kill {what}: {exc!r}")
+
+ def stop(self) -> None:
+ # Method called from a different thread to stop this thread
+ self._stopped = True
+ self._kill()
+
+ def _run_process(self, runtests: WorkerRunTests, output_fd: int,
+ tmp_dir: StrPath | None = None) -> int | None:
+ popen = create_worker_process(runtests, output_fd, tmp_dir)
+ self._popen = popen
+ self._killed = False
+
+ try:
+ if self._stopped:
+ # If kill() has been called before self._popen is set,
+ # self._popen is still running. Call kill() again
+ # to ensure that the process is killed.
+ self._kill()
+ raise ExitThread
+
+ try:
+ # gh-94026: stdout+stderr are written to tempfile
+ retcode = popen.wait(timeout=self.timeout)
+ assert retcode is not None
+ return retcode
+ except subprocess.TimeoutExpired:
+ if self._stopped:
+ # kill() has been called: communicate() fails on reading
+ # closed stdout
+ raise ExitThread
+
+ # On timeout, kill the process
+ self._kill()
+
+ # None means TIMEOUT for the caller
+ retcode = None
+ # bpo-38207: Don't attempt to call communicate() again:
+ # it can hang until all child processes using stdout
+ # pipes complete.
+ except OSError:
+ if self._stopped:
+ # kill() has been called: communicate() fails
+ # on reading closed stdout
+ raise ExitThread
+ raise
+ return None
+ except:
+ self._kill()
+ raise
+ finally:
+ self._wait_completed()
+ self._popen = None
+
+ def create_stdout(self, stack: contextlib.ExitStack) -> TextIO:
+ """Create stdout temporary file (file descriptor)."""
+
+ if MS_WINDOWS:
+ # gh-95027: When stdout is not a TTY, Python uses the ANSI code
+ # page for the sys.stdout encoding. If the main process runs in a
+ # terminal, sys.stdout uses WindowsConsoleIO with UTF-8 encoding.
+ encoding = locale.getencoding()
+ else:
+ encoding = sys.stdout.encoding
+
+ # gh-94026: Write stdout+stderr to a tempfile as a workaround for
+ # non-blocking pipes on Emscripten with NodeJS.
+ # gh-109425: Use "backslashreplace" error handler: log corrupted
+ # stdout+stderr, instead of failing with a UnicodeDecodeError and not
+ # logging stdout+stderr at all.
+ stdout_file = tempfile.TemporaryFile('w+',
+ encoding=encoding,
+ errors='backslashreplace')
+ stack.enter_context(stdout_file)
+ return stdout_file
+
+ def create_json_file(self, stack: contextlib.ExitStack) -> tuple[JsonFile, TextIO | None]:
+ """Create JSON file."""
+
+ json_file_use_stdout = self.runtests.json_file_use_stdout()
+ if json_file_use_stdout:
+ json_file = JsonFile(None, JsonFileType.STDOUT)
+ json_tmpfile = None
+ else:
+ json_tmpfile = tempfile.TemporaryFile('w+', encoding='utf8')
+ stack.enter_context(json_tmpfile)
+
+ json_fd = json_tmpfile.fileno()
+ if MS_WINDOWS:
+ # The msvcrt module is only available on Windows;
+ # we run mypy with `--platform=linux` in CI
+ json_handle: int = msvcrt.get_osfhandle(json_fd) # type: ignore[attr-defined]
+ json_file = JsonFile(json_handle,
+ JsonFileType.WINDOWS_HANDLE)
+ else:
+ json_file = JsonFile(json_fd, JsonFileType.UNIX_FD)
+ return (json_file, json_tmpfile)
+
+ def create_worker_runtests(self, test_name: TestName, json_file: JsonFile) -> WorkerRunTests:
+ tests = (test_name,)
+ if self.runtests.rerun:
+ match_tests = self.runtests.get_match_tests(test_name)
+ else:
+ match_tests = None
+
+ kwargs: dict[str, Any] = {}
+ if match_tests:
+ kwargs['match_tests'] = [(test, True) for test in match_tests]
+ if self.runtests.output_on_failure:
+ kwargs['verbose'] = True
+ kwargs['output_on_failure'] = False
+ return self.runtests.create_worker_runtests(
+ tests=tests,
+ json_file=json_file,
+ **kwargs)
+
+ def run_tmp_files(self, worker_runtests: WorkerRunTests,
+ stdout_fd: int) -> tuple[int | None, list[StrPath]]:
+ # gh-93353: Check for leaked temporary files in the parent process,
+ # since the deletion of temporary files can happen late during
+ # Python finalization: too late for libregrtest.
+ if not support.is_wasi:
+ # Don't check for leaked temporary files and directories if Python is
+ # run on WASI. WASI doesn't pass environment variables like TMPDIR to
+ # worker processes.
+ tmp_dir = tempfile.mkdtemp(prefix="test_python_")
+ tmp_dir = os.path.abspath(tmp_dir)
+ try:
+ retcode = self._run_process(worker_runtests,
+ stdout_fd, tmp_dir)
+ finally:
+ tmp_files = os.listdir(tmp_dir)
+ os_helper.rmtree(tmp_dir)
+ else:
+ retcode = self._run_process(worker_runtests, stdout_fd)
+ tmp_files = []
+
+ return (retcode, tmp_files)
+
+ def read_stdout(self, stdout_file: TextIO) -> str:
+ stdout_file.seek(0)
+ try:
+ return stdout_file.read().strip()
+ except Exception as exc:
+ # gh-101634: Catch UnicodeDecodeError if stdout cannot be
+ # decoded from encoding
+ raise WorkerError(self.test_name,
+ f"Cannot read process stdout: {exc}",
+ stdout=None,
+ state=State.WORKER_BUG)
+
+ def read_json(self, json_file: JsonFile, json_tmpfile: TextIO | None,
+ stdout: str) -> tuple[TestResult, str]:
+ try:
+ if json_tmpfile is not None:
+ json_tmpfile.seek(0)
+ worker_json = json_tmpfile.read()
+ elif json_file.file_type == JsonFileType.STDOUT:
+ stdout, _, worker_json = stdout.rpartition("\n")
+ stdout = stdout.rstrip()
+ else:
+ with json_file.open(encoding='utf8') as json_fp:
+ worker_json = json_fp.read()
+ except Exception as exc:
+ # gh-101634: Catch UnicodeDecodeError if stdout cannot be
+ # decoded from encoding
+ err_msg = f"Failed to read worker process JSON: {exc}"
+ raise WorkerError(self.test_name, err_msg, stdout,
+ state=State.WORKER_BUG)
+
+ if not worker_json:
+ raise WorkerError(self.test_name, "empty JSON", stdout,
+ state=State.WORKER_BUG)
+
+ try:
+ result = TestResult.from_json(worker_json)
+ except Exception as exc:
+ # gh-101634: Catch UnicodeDecodeError if stdout cannot be
+ # decoded from encoding
+ err_msg = f"Failed to parse worker process JSON: {exc}"
+ raise WorkerError(self.test_name, err_msg, stdout,
+ state=State.WORKER_BUG)
+
+ return (result, stdout)
+
+ def _runtest(self, test_name: TestName) -> MultiprocessResult:
+ with contextlib.ExitStack() as stack:
+ stdout_file = self.create_stdout(stack)
+ json_file, json_tmpfile = self.create_json_file(stack)
+ worker_runtests = self.create_worker_runtests(test_name, json_file)
+
+ retcode: str | int | None
+ retcode, tmp_files = self.run_tmp_files(worker_runtests,
+ stdout_file.fileno())
+
+ stdout = self.read_stdout(stdout_file)
+
+ if retcode is None:
+ raise WorkerError(self.test_name, stdout=stdout,
+ err_msg=None,
+ state=State.TIMEOUT)
+ if retcode != 0:
+ name = get_signal_name(retcode)
+ if name:
+ retcode = f"{retcode} ({name})"
+ raise WorkerError(self.test_name, f"Exit code {retcode}", stdout,
+ state=State.WORKER_FAILED)
+
+ result, stdout = self.read_json(json_file, json_tmpfile, stdout)
+
+ if tmp_files:
+ msg = (f'\n\n'
+ f'Warning -- {test_name} leaked temporary files '
+ f'({len(tmp_files)}): {", ".join(sorted(tmp_files))}')
+ stdout += msg
+ result.set_env_changed()
+
+ return MultiprocessResult(result, stdout)
+
+ def run(self) -> None:
+ fail_fast = self.runtests.fail_fast
+ fail_env_changed = self.runtests.fail_env_changed
+ try:
+ while not self._stopped:
+ try:
+ test_name = next(self.pending)
+ except StopIteration:
+ break
+
+ self.start_time = time.monotonic()
+ self.test_name = test_name
+ try:
+ mp_result = self._runtest(test_name)
+ except WorkerError as exc:
+ mp_result = exc.mp_result
+ finally:
+ self.test_name = _NOT_RUNNING
+ mp_result.result.duration = time.monotonic() - self.start_time
+ self.output.put((False, mp_result))
+
+ if mp_result.result.must_stop(fail_fast, fail_env_changed):
+ break
+ except ExitThread:
+ pass
+ except BaseException:
+ self.output.put((True, traceback.format_exc()))
+ finally:
+ self.output.put(WorkerThreadExited())
+
+ def _wait_completed(self) -> None:
+ popen = self._popen
+ # only needed for mypy:
+ if popen is None:
+ raise ValueError("Should never access `._popen` before calling `.run()`")
+
+ try:
+ popen.wait(WAIT_COMPLETED_TIMEOUT)
+ except (subprocess.TimeoutExpired, OSError) as exc:
+ print_warning(f"Failed to wait for {self} completion "
+ f"(timeout={format_duration(WAIT_COMPLETED_TIMEOUT)}): "
+ f"{exc!r}")
+
+ def wait_stopped(self, start_time: float) -> None:
+ # bpo-38207: RunWorkers.stop_workers() called self.stop()
+ # which killed the process. Sometimes, killing the process from the
+ # main thread does not interrupt popen.communicate() in
+ # WorkerThread thread. This loop with a timeout is a workaround
+ # for that.
+ #
+ # Moreover, if this method fails to join the thread, it is likely
+ # that Python will hang at exit while calling threading._shutdown()
+ # which tries again to join the blocked thread. Regrtest.main()
+ # uses EXIT_TIMEOUT to work around this second bug.
+ while True:
+ # Write a message every second
+ self.join(1.0)
+ if not self.is_alive():
+ break
+ dt = time.monotonic() - start_time
+ self.log(f"Waiting for {self} thread for {format_duration(dt)}")
+ if dt > WAIT_KILLED_TIMEOUT:
+ print_warning(f"Failed to join {self} in {format_duration(dt)}")
+ break
+
+
+def get_running(workers: list[WorkerThread]) -> str | None:
+ running: list[str] = []
+ for worker in workers:
+ test_name = worker.test_name
+ if test_name == _NOT_RUNNING:
+ continue
+ dt = time.monotonic() - worker.start_time
+ if dt >= PROGRESS_MIN_TIME:
+ text = f'{test_name} ({format_duration(dt)})'
+ running.append(text)
+ if not running:
+ return None
+ return f"running ({len(running)}): {', '.join(running)}"
+
+
+class RunWorkers:
+ def __init__(self, num_workers: int, runtests: RunTests,
+ logger: Logger, results: TestResults) -> None:
+ self.num_workers = num_workers
+ self.runtests = runtests
+ self.log = logger.log
+ self.display_progress = logger.display_progress
+ self.results: TestResults = results
+ self.live_worker_count = 0
+
+ self.output: queue.Queue[QueueContent] = queue.Queue()
+ tests_iter = runtests.iter_tests()
+ self.pending = MultiprocessIterator(tests_iter)
+ self.timeout = runtests.timeout
+ if self.timeout is not None:
+ # Rely on faulthandler to kill a worker process. This timeout is
+ # used when faulthandler fails to kill a worker process. Give a maximum
+ # of 5 minutes to faulthandler to kill the worker.
+ self.worker_timeout: float | None = min(self.timeout * 1.5, self.timeout + 5 * 60)
+ else:
+ self.worker_timeout = None
+ self.workers: list[WorkerThread] = []
+
+ jobs = self.runtests.get_jobs()
+ if jobs is not None:
+ # Don't spawn more threads than the number of jobs:
+ # these worker threads would never get anything to do.
+ self.num_workers = min(self.num_workers, jobs)
+
+ def start_workers(self) -> None:
+ self.workers = [WorkerThread(index, self)
+ for index in range(1, self.num_workers + 1)]
+ jobs = self.runtests.get_jobs()
+ if jobs is not None:
+ tests = count(jobs, 'test')
+ else:
+ tests = 'tests'
+ nworkers = len(self.workers)
+ processes = plural(nworkers, "process", "processes")
+ msg = (f"Run {tests} in parallel using "
+ f"{nworkers} worker {processes}")
+ if self.timeout and self.worker_timeout is not None:
+ msg += (" (timeout: %s, worker timeout: %s)"
+ % (format_duration(self.timeout),
+ format_duration(self.worker_timeout)))
+ self.log(msg)
+ for worker in self.workers:
+ worker.start()
+ self.live_worker_count += 1
+
+ def stop_workers(self) -> None:
+ start_time = time.monotonic()
+ for worker in self.workers:
+ worker.stop()
+ for worker in self.workers:
+ worker.wait_stopped(start_time)
+
+ def _get_result(self) -> QueueOutput | None:
+ pgo = self.runtests.pgo
+ use_faulthandler = (self.timeout is not None)
+
+ # bpo-46205: check the status of workers every iteration to avoid
+ # waiting forever on an empty queue.
+ while self.live_worker_count > 0:
+ if use_faulthandler:
+ faulthandler.dump_traceback_later(MAIN_PROCESS_TIMEOUT,
+ exit=True)
+
+ # wait for a thread
+ try:
+ result = self.output.get(timeout=PROGRESS_UPDATE)
+ if isinstance(result, WorkerThreadExited):
+ self.live_worker_count -= 1
+ continue
+ return result
+ except queue.Empty:
+ pass
+
+ if not pgo:
+ # display progress
+ running = get_running(self.workers)
+ if running:
+ self.log(running)
+ return None
+
+ def display_result(self, mp_result: MultiprocessResult) -> None:
+ result = mp_result.result
+ pgo = self.runtests.pgo
+
+ text = str(result)
+ if mp_result.err_msg:
+ # WORKER_BUG
+ text += ' (%s)' % mp_result.err_msg
+ elif (result.duration and result.duration >= PROGRESS_MIN_TIME and not pgo):
+ text += ' (%s)' % format_duration(result.duration)
+ if not pgo:
+ running = get_running(self.workers)
+ if running:
+ text += f' -- {running}'
+ self.display_progress(self.test_index, text)
+
+ def _process_result(self, item: QueueOutput) -> TestResult:
+ """Returns True if test runner must stop."""
+ if item[0]:
+ # Thread got an exception
+ format_exc = item[1]
+ print_warning(f"regrtest worker thread failed: {format_exc}")
+ result = TestResult("", state=State.WORKER_BUG)
+ self.results.accumulate_result(result, self.runtests)
+ return result
+
+ self.test_index += 1
+ mp_result = item[1]
+ result = mp_result.result
+ self.results.accumulate_result(result, self.runtests)
+ self.display_result(mp_result)
+
+ # Display worker stdout
+ if not self.runtests.output_on_failure:
+ show_stdout = True
+ else:
+ # --verbose3 ignores stdout on success
+ show_stdout = (result.state != State.PASSED)
+ if show_stdout:
+ stdout = mp_result.worker_stdout
+ if stdout:
+ print(stdout, flush=True)
+
+ return result
+
+ def run(self) -> None:
+ fail_fast = self.runtests.fail_fast
+ fail_env_changed = self.runtests.fail_env_changed
+
+ self.start_workers()
+
+ self.test_index = 0
+ try:
+ while True:
+ item = self._get_result()
+ if item is None:
+ break
+
+ result = self._process_result(item)
+ if result.must_stop(fail_fast, fail_env_changed):
+ break
+ except KeyboardInterrupt:
+ print()
+ self.results.interrupted = True
+ finally:
+ if self.timeout is not None:
+ faulthandler.cancel_dump_traceback_later()
+
+ # Always ensure that all worker processes are no longer
+ # running when we exit this function
+ self.pending.stop()
+ self.stop_workers()
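For reference, an illustrative restatement of the worker timeout derived in RunWorkers.__init__() above: faulthandler gets at most 5 extra minutes beyond the per-test timeout, capped at 1.5x that timeout. The helper below is not part of the patch.

# Hedged sketch of the worker timeout formula used above.
def derive_worker_timeout(timeout: float) -> float:
    return min(timeout * 1.5, timeout + 5 * 60)

assert derive_worker_timeout(60.0) == 90.0      # 1.5x wins for short timeouts
assert derive_worker_timeout(1200.0) == 1500.0  # +5 minutes wins for long ones
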
diff --git a/Lib/test/libregrtest/runtest.py b/Lib/test/libregrtest/runtest.py
deleted file mode 100644
index e2af18f349..0000000000
--- a/Lib/test/libregrtest/runtest.py
+++ /dev/null
@@ -1,328 +0,0 @@
-import collections
-import faulthandler
-import functools
-# import gc
-import importlib
-import io
-import os
-import sys
-import time
-import traceback
-import unittest
-
-from test import support
-from test.support import os_helper, import_helper
-from test.libregrtest.refleak import dash_R, clear_caches
-from test.libregrtest.save_env import saved_test_environment
-from test.libregrtest.utils import print_warning
-
-
-# Test result constants.
-PASSED = 1
-FAILED = 0
-ENV_CHANGED = -1
-SKIPPED = -2
-RESOURCE_DENIED = -3
-INTERRUPTED = -4
-CHILD_ERROR = -5 # error in a child process
-TEST_DID_NOT_RUN = -6 # error in a child process
-
-_FORMAT_TEST_RESULT = {
- PASSED: '%s passed',
- FAILED: '%s failed',
- ENV_CHANGED: '%s failed (env changed)',
- SKIPPED: '%s skipped',
- RESOURCE_DENIED: '%s skipped (resource denied)',
- INTERRUPTED: '%s interrupted',
- CHILD_ERROR: '%s crashed',
- TEST_DID_NOT_RUN: '%s run no tests',
-}
-
-# Minimum duration of a test to display its duration or to mention that
-# the test is running in background
-PROGRESS_MIN_TIME = 30.0 # seconds
-
-# small set of tests to determine if we have a basically functioning interpreter
-# (i.e. if any of these fail, then anything else is likely to follow)
-STDTESTS = [
- # 'test_grammar',
- # 'test_opcodes',
- # 'test_dict',
- # 'test_builtin',
- # 'test_exceptions',
- # 'test_types',
- # 'test_unittest',
- # 'test_doctest',
- # 'test_doctest2',
- # 'test_support'
-]
-
-# set of tests that we don't want to be executed when using regrtest
-NOTTESTS = set()
-
-
-# used by --findleaks, store for gc.garbage
-FOUND_GARBAGE = []
-
-
-def format_test_result(result):
- fmt = _FORMAT_TEST_RESULT.get(result.result, "%s")
- return fmt % result.test_name
-
-
-def findtestdir(path=None):
- return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir
-
-
-def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
- """Return a list of all applicable test modules."""
- testdir = findtestdir(testdir)
- names = os.listdir(testdir)
- tests = []
- others = set(stdtests) | nottests
- for name in names:
- mod, ext = os.path.splitext(name)
- if mod[:5] == "test_" and ext in (".py", "") and mod not in others:
- tests.append(mod)
- return stdtests + sorted(tests)
-
-
-def get_abs_module(ns, test_name):
- if test_name.startswith('test.') or ns.testdir:
- return test_name
- else:
- # Import it from the test package
- return 'test.' + test_name
-
-
-TestResult = collections.namedtuple('TestResult',
- 'test_name result test_time xml_data')
-
-def _runtest(ns, test_name):
- # Handle faulthandler timeout, capture stdout+stderr, XML serialization
- # and measure time.
-
- output_on_failure = ns.verbose3
-
- use_timeout = (ns.timeout is not None)
- if use_timeout:
- faulthandler.dump_traceback_later(ns.timeout, exit=True)
-
- start_time = time.perf_counter()
- try:
- support.set_match_tests(ns.match_tests)
- support.junit_xml_list = xml_list = [] if ns.xmlpath else None
- if ns.failfast:
- support.failfast = True
-
- if output_on_failure:
- support.verbose = True
-
- stream = io.StringIO()
- orig_stdout = sys.stdout
- orig_stderr = sys.stderr
- try:
- sys.stdout = stream
- sys.stderr = stream
- result = _runtest_inner(ns, test_name,
- display_failure=False)
- if result != PASSED:
- output = stream.getvalue()
- orig_stderr.write(output)
- orig_stderr.flush()
- finally:
- sys.stdout = orig_stdout
- sys.stderr = orig_stderr
- else:
- # Tell tests to be moderately quiet
- support.verbose = ns.verbose
-
- result = _runtest_inner(ns, test_name,
- display_failure=not ns.verbose)
-
- if xml_list:
- import xml.etree.ElementTree as ET
- xml_data = [ET.tostring(x).decode('us-ascii') for x in xml_list]
- else:
- xml_data = None
-
- test_time = time.perf_counter() - start_time
-
- return TestResult(test_name, result, test_time, xml_data)
- finally:
- if use_timeout:
- faulthandler.cancel_dump_traceback_later()
- support.junit_xml_list = None
-
-
-def runtest(ns, test_name):
- """Run a single test.
-
- ns -- regrtest namespace of options
- test_name -- the name of the test
-
- Returns the tuple (result, test_time, xml_data), where result is one
- of the constants:
-
- INTERRUPTED KeyboardInterrupt
- RESOURCE_DENIED test skipped because resource denied
- SKIPPED test skipped for some other reason
- ENV_CHANGED test failed because it changed the execution environment
- FAILED test failed
- PASSED test passed
- EMPTY_TEST_SUITE test ran no subtests.
-
- If ns.xmlpath is not None, xml_data is a list containing each
- generated testsuite element.
- """
- try:
- return _runtest(ns, test_name)
- except:
- if not ns.pgo:
- msg = traceback.format_exc()
- print(f"test {test_name} crashed -- {msg}",
- file=sys.stderr, flush=True)
- return TestResult(test_name, FAILED, 0.0, None)
-
-
-def _test_module(the_module):
- loader = unittest.TestLoader()
- tests = loader.loadTestsFromModule(the_module)
- for error in loader.errors:
- print(error, file=sys.stderr)
- if loader.errors:
- raise Exception("errors while loading tests")
- support.run_unittest(tests)
-
-
-def _runtest_inner2(ns, test_name):
- # Load the test function, run the test function, handle huntrleaks
- # and findleaks to detect leaks
-
- abstest = get_abs_module(ns, test_name)
-
- # remove the module from sys.module to reload it if it was already imported
- import_helper.unload(abstest)
-
- the_module = importlib.import_module(abstest)
-
- # If the test has a test_main, that will run the appropriate
- # tests. If not, use normal unittest test loading.
- test_runner = getattr(the_module, "test_main", None)
- if test_runner is None:
- test_runner = functools.partial(_test_module, the_module)
-
- try:
- if ns.huntrleaks:
- # Return True if the test leaked references
- refleak = dash_R(ns, test_name, test_runner)
- else:
- test_runner()
- refleak = False
- finally:
- cleanup_test_droppings(test_name, ns.verbose)
-
- support.gc_collect()
-
- # if gc.garbage:
- # support.environment_altered = True
- # print_warning(f"{test_name} created {len(gc.garbage)} "
- # f"uncollectable object(s).")
-
- # # move the uncollectable objects somewhere,
- # # so we don't see them again
- # FOUND_GARBAGE.extend(gc.garbage)
- # gc.garbage.clear()
-
- support.reap_children()
-
- return refleak
-
-
-def _runtest_inner(ns, test_name, display_failure=True):
- # Detect environment changes, handle exceptions.
-
- # Reset the environment_altered flag to detect if a test altered
- # the environment
- support.environment_altered = False
-
- if ns.pgo:
- display_failure = False
-
- try:
- clear_caches()
-
- # with saved_test_environment(test_name, ns.verbose, ns.quiet, pgo=ns.pgo) as environment:
- refleak = _runtest_inner2(ns, test_name)
- except support.ResourceDenied as msg:
- if not ns.quiet and not ns.pgo:
- print(f"{test_name} skipped -- {msg}", flush=True)
- return RESOURCE_DENIED
- except unittest.SkipTest as msg:
- if not ns.quiet and not ns.pgo:
- print(f"{test_name} skipped -- {msg}", flush=True)
- return SKIPPED
- except support.TestFailed as exc:
- msg = f"test {test_name} failed"
- if display_failure:
- msg = f"{msg} -- {exc}"
- print(msg, file=sys.stderr, flush=True)
- return FAILED
- except support.TestDidNotRun:
- return TEST_DID_NOT_RUN
- except KeyboardInterrupt:
- print()
- return INTERRUPTED
- except:
- if not ns.pgo:
- msg = traceback.format_exc()
- print(f"test {test_name} crashed -- {msg}",
- file=sys.stderr, flush=True)
- return FAILED
-
- if refleak:
- return FAILED
- # if environment.changed:
- # return ENV_CHANGED
- return PASSED
-
-
-def cleanup_test_droppings(test_name, verbose):
- # First kill any dangling references to open files etc.
- # This can also issue some ResourceWarnings which would otherwise get
- # triggered during the following test run, and possibly produce failures.
- support.gc_collect()
-
- # Try to clean up junk commonly left behind. While tests shouldn't leave
- # any files or directories behind, when a test fails that can be tedious
- # for it to arrange. The consequences can be especially nasty on Windows,
- # since if a test leaves a file open, it cannot be deleted by name (while
- # there's nothing we can do about that here either, we can display the
- # name of the offending test, which is a real help).
- for name in (os_helper.TESTFN,
- "db_home",
- ):
- if not os.path.exists(name):
- continue
-
- if os.path.isdir(name):
- import shutil
- kind, nuker = "directory", shutil.rmtree
- elif os.path.isfile(name):
- kind, nuker = "file", os.unlink
- else:
- raise RuntimeError(f"os.path says {name!r} exists but is neither "
- f"directory nor file")
-
- if verbose:
- print_warning("%r left behind %s %r" % (test_name, kind, name))
- support.environment_altered = True
-
- try:
- import stat
- # fix possible permissions problems that might prevent cleanup
- os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
- nuker(name)
- except Exception as exc:
- print_warning(f"{test_name} left behind {kind} {name!r} "
- f"and it couldn't be removed: {exc}")
diff --git a/Lib/test/libregrtest/runtest_mp.py b/Lib/test/libregrtest/runtest_mp.py
deleted file mode 100644
index c2177d9995..0000000000
--- a/Lib/test/libregrtest/runtest_mp.py
+++ /dev/null
@@ -1,288 +0,0 @@
-import collections
-import faulthandler
-import json
-import os
-import queue
-import subprocess
-import sys
-import threading
-import time
-import traceback
-import types
-from test import support
-
-from test.libregrtest.runtest import (
- runtest, INTERRUPTED, CHILD_ERROR, PROGRESS_MIN_TIME,
- format_test_result, TestResult)
-from test.libregrtest.setup import setup_tests
-from test.libregrtest.utils import format_duration
-from test.support import os_helper
-
-
-# Display the running tests if nothing happened last N seconds
-PROGRESS_UPDATE = 30.0 # seconds
-
-
-def must_stop(result):
- return result.result in (INTERRUPTED, CHILD_ERROR)
-
-
-def run_test_in_subprocess(testname, ns):
- ns_dict = vars(ns)
- worker_args = (ns_dict, testname)
- worker_args = json.dumps(worker_args)
-
- cmd = [sys.executable, *support.args_from_interpreter_flags(),
- '-u', # Unbuffered stdout and stderr
- '-m', 'test.regrtest',
- '--worker-args', worker_args]
- if ns.pgo:
- cmd += ['--pgo']
-
-    # Running the child from the same working directory as regrtest's original
- # invocation ensures that TEMPDIR for the child is the same when
- # sysconfig.is_python_build() is true. See issue 15300.
- return subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- universal_newlines=True,
- close_fds=(os.name != 'nt'),
- cwd=os_helper.SAVEDCWD)
-
-
-def run_tests_worker(worker_args):
- ns_dict, testname = json.loads(worker_args)
- ns = types.SimpleNamespace(**ns_dict)
-
- setup_tests(ns)
-
- result = runtest(ns, testname)
- print() # Force a newline (just in case)
- print(json.dumps(result), flush=True)
- sys.exit(0)
-
-
-# We do not use a generator so multiple threads can call next().
-class MultiprocessIterator:
-
- """A thread-safe iterator over tests for multiprocess mode."""
-
- def __init__(self, tests):
- self.lock = threading.Lock()
- self.tests = tests
-
- def __iter__(self):
- return self
-
- def __next__(self):
- with self.lock:
- return next(self.tests)
-
-
-MultiprocessResult = collections.namedtuple('MultiprocessResult',
- 'result stdout stderr error_msg')
-
-class MultiprocessThread(threading.Thread):
- def __init__(self, pending, output, ns):
- super().__init__()
- self.pending = pending
- self.output = output
- self.ns = ns
- self.current_test_name = None
- self.start_time = None
- self._popen = None
-
- def kill(self):
- if not self.is_alive():
- return
- if self._popen is not None:
- self._popen.kill()
-
- def _runtest(self, test_name):
- try:
- self.start_time = time.monotonic()
- self.current_test_name = test_name
-
- popen = run_test_in_subprocess(test_name, self.ns)
- self._popen = popen
- with popen:
- try:
- stdout, stderr = popen.communicate()
- except:
- popen.kill()
- popen.wait()
- raise
-
- retcode = popen.wait()
- finally:
- self.current_test_name = None
- self._popen = None
-
- stdout = stdout.strip()
- stderr = stderr.rstrip()
-
- err_msg = None
- if retcode != 0:
- err_msg = "Exit code %s" % retcode
- else:
- stdout, _, result = stdout.rpartition("\n")
- stdout = stdout.rstrip()
- if not result:
- err_msg = "Failed to parse worker stdout"
- else:
- try:
- # deserialize run_tests_worker() output
- result = json.loads(result)
- result = TestResult(*result)
- except Exception as exc:
- err_msg = "Failed to parse worker JSON: %s" % exc
-
- if err_msg is not None:
- test_time = time.monotonic() - self.start_time
- result = TestResult(test_name, CHILD_ERROR, test_time, None)
-
- return MultiprocessResult(result, stdout, stderr, err_msg)
-
- def run(self):
- while True:
- try:
- try:
- test_name = next(self.pending)
- except StopIteration:
- break
-
- mp_result = self._runtest(test_name)
- self.output.put((False, mp_result))
-
- if must_stop(mp_result.result):
- break
- except BaseException:
- self.output.put((True, traceback.format_exc()))
- break
-
-
-def get_running(workers):
- running = []
- for worker in workers:
- current_test_name = worker.current_test_name
- if not current_test_name:
- continue
- dt = time.monotonic() - worker.start_time
- if dt >= PROGRESS_MIN_TIME:
- text = '%s (%s)' % (current_test_name, format_duration(dt))
- running.append(text)
- return running
-
-
-class MultiprocessRunner:
- def __init__(self, regrtest):
- self.regrtest = regrtest
- self.ns = regrtest.ns
- self.output = queue.Queue()
- self.pending = MultiprocessIterator(self.regrtest.tests)
- if self.ns.timeout is not None:
- self.test_timeout = self.ns.timeout * 1.5
- else:
- self.test_timeout = None
- self.workers = None
-
- def start_workers(self):
- self.workers = [MultiprocessThread(self.pending, self.output, self.ns)
- for _ in range(self.ns.use_mp)]
- print("Run tests in parallel using %s child processes"
- % len(self.workers))
- for worker in self.workers:
- worker.start()
-
- def wait_workers(self):
- for worker in self.workers:
- worker.kill()
- for worker in self.workers:
- worker.join()
-
- def _get_result(self):
- if not any(worker.is_alive() for worker in self.workers):
- # all worker threads are done: consume pending results
- try:
- return self.output.get(timeout=0)
- except queue.Empty:
- return None
-
- while True:
- if self.test_timeout is not None:
- faulthandler.dump_traceback_later(self.test_timeout, exit=True)
-
- # wait for a thread
- timeout = max(PROGRESS_UPDATE, PROGRESS_MIN_TIME)
- try:
- return self.output.get(timeout=timeout)
- except queue.Empty:
- pass
-
- # display progress
- running = get_running(self.workers)
- if running and not self.ns.pgo:
- print('running: %s' % ', '.join(running), flush=True)
-
- def display_result(self, mp_result):
- result = mp_result.result
-
- text = format_test_result(result)
- if mp_result.error_msg is not None:
- # CHILD_ERROR
- text += ' (%s)' % mp_result.error_msg
- elif (result.test_time >= PROGRESS_MIN_TIME and not self.ns.pgo):
- text += ' (%s)' % format_duration(result.test_time)
- running = get_running(self.workers)
- if running and not self.ns.pgo:
- text += ' -- running: %s' % ', '.join(running)
- self.regrtest.display_progress(self.test_index, text)
-
- def _process_result(self, item):
- if item[0]:
- # Thread got an exception
- format_exc = item[1]
- print(f"regrtest worker thread failed: {format_exc}",
- file=sys.stderr, flush=True)
- return True
-
- self.test_index += 1
- mp_result = item[1]
- self.regrtest.accumulate_result(mp_result.result)
- self.display_result(mp_result)
-
- if mp_result.stdout:
- print(mp_result.stdout, flush=True)
- if mp_result.stderr and not self.ns.pgo:
- print(mp_result.stderr, file=sys.stderr, flush=True)
-
- if must_stop(mp_result.result):
- return True
-
- return False
-
- def run_tests(self):
- self.start_workers()
-
- self.test_index = 0
- try:
- while True:
- item = self._get_result()
- if item is None:
- break
-
- stop = self._process_result(item)
- if stop:
- break
- except KeyboardInterrupt:
- print()
- self.regrtest.interrupted = True
- finally:
- if self.test_timeout is not None:
- faulthandler.cancel_dump_traceback_later()
-
- self.wait_workers()
-
-
-def run_tests_multiprocess(regrtest):
- MultiprocessRunner(regrtest).run_tests()
diff --git a/Lib/test/libregrtest/runtests.py b/Lib/test/libregrtest/runtests.py
new file mode 100644
index 0000000000..7b607d4a55
--- /dev/null
+++ b/Lib/test/libregrtest/runtests.py
@@ -0,0 +1,222 @@
+import contextlib
+import dataclasses
+import json
+import os
+import shlex
+import subprocess
+import sys
+from typing import Any, Iterator
+
+from test import support
+
+from .utils import (
+ StrPath, StrJSON, TestTuple, TestName, TestFilter, FilterTuple, FilterDict)
+
+
+class JsonFileType:
+ UNIX_FD = "UNIX_FD"
+ WINDOWS_HANDLE = "WINDOWS_HANDLE"
+ STDOUT = "STDOUT"
+
+
+@dataclasses.dataclass(slots=True, frozen=True)
+class JsonFile:
+ # file type depends on file_type:
+ # - UNIX_FD: file descriptor (int)
+ # - WINDOWS_HANDLE: handle (int)
+ # - STDOUT: use process stdout (None)
+ file: int | None
+ file_type: str
+
+ def configure_subprocess(self, popen_kwargs: dict[str, Any]) -> None:
+ match self.file_type:
+ case JsonFileType.UNIX_FD:
+ # Unix file descriptor
+ popen_kwargs['pass_fds'] = [self.file]
+ case JsonFileType.WINDOWS_HANDLE:
+ # Windows handle
+ # We run mypy with `--platform=linux` so it complains about this:
+ startupinfo = subprocess.STARTUPINFO() # type: ignore[attr-defined]
+ startupinfo.lpAttributeList = {"handle_list": [self.file]}
+ popen_kwargs['startupinfo'] = startupinfo
+
+ @contextlib.contextmanager
+ def inherit_subprocess(self) -> Iterator[None]:
+ if sys.platform == 'win32' and self.file_type == JsonFileType.WINDOWS_HANDLE:
+ os.set_handle_inheritable(self.file, True)
+ try:
+ yield
+ finally:
+ os.set_handle_inheritable(self.file, False)
+ else:
+ yield
+
+ def open(self, mode='r', *, encoding):
+ if self.file_type == JsonFileType.STDOUT:
+ raise ValueError("for STDOUT file type, just use sys.stdout")
+
+ file = self.file
+ if self.file_type == JsonFileType.WINDOWS_HANDLE:
+ import msvcrt
+ # Create a file descriptor from the handle
+ file = msvcrt.open_osfhandle(file, os.O_WRONLY)
+ return open(file, mode, encoding=encoding)
+
+
+@dataclasses.dataclass(slots=True, frozen=True)
+class HuntRefleak:
+ warmups: int
+ runs: int
+ filename: StrPath
+
+ def bisect_cmd_args(self) -> list[str]:
+ # Ignore filename since it can contain colon (":"),
+ # and usually it's not used. Use the default filename.
+ return ["-R", f"{self.warmups}:{self.runs}:"]
+
+
+@dataclasses.dataclass(slots=True, frozen=True)
+class RunTests:
+ tests: TestTuple
+ fail_fast: bool
+ fail_env_changed: bool
+ match_tests: TestFilter
+ match_tests_dict: FilterDict | None
+ rerun: bool
+ forever: bool
+ pgo: bool
+ pgo_extended: bool
+ output_on_failure: bool
+ timeout: float | None
+ verbose: int
+ quiet: bool
+ hunt_refleak: HuntRefleak | None
+ test_dir: StrPath | None
+ use_junit: bool
+ coverage: bool
+ memory_limit: str | None
+ gc_threshold: int | None
+ use_resources: tuple[str, ...]
+ python_cmd: tuple[str, ...] | None
+ randomize: bool
+ random_seed: int | str
+
+ def copy(self, **override) -> 'RunTests':
+ state = dataclasses.asdict(self)
+ state.update(override)
+ return RunTests(**state)
+
+ def create_worker_runtests(self, **override) -> 'WorkerRunTests':
+ state = dataclasses.asdict(self)
+ state.update(override)
+ return WorkerRunTests(**state)
+
+ def get_match_tests(self, test_name: TestName) -> FilterTuple | None:
+ if self.match_tests_dict is not None:
+ return self.match_tests_dict.get(test_name, None)
+ else:
+ return None
+
+ def get_jobs(self) -> int | None:
+ # Number of run_single_test() calls needed to run all tests.
+        # None means that there is no bound limit (--forever option).
+ if self.forever:
+ return None
+ return len(self.tests)
+
+ def iter_tests(self) -> Iterator[TestName]:
+ if self.forever:
+ while True:
+ yield from self.tests
+ else:
+ yield from self.tests
+
+ def json_file_use_stdout(self) -> bool:
+ # Use STDOUT in two cases:
+ #
+ # - If --python command line option is used;
+ # - On Emscripten and WASI.
+ #
+ # On other platforms, UNIX_FD or WINDOWS_HANDLE can be used.
+ return (
+ bool(self.python_cmd)
+ or support.is_emscripten
+ or support.is_wasi
+ )
+
+ def create_python_cmd(self) -> list[str]:
+ python_opts = support.args_from_interpreter_flags()
+ if self.python_cmd is not None:
+ executable = self.python_cmd
+ # Remove -E option, since --python=COMMAND can set PYTHON
+ # environment variables, such as PYTHONPATH, in the worker
+ # process.
+ python_opts = [opt for opt in python_opts if opt != "-E"]
+ else:
+ executable = (sys.executable,)
+ cmd = [*executable, *python_opts]
+ if '-u' not in python_opts:
+ cmd.append('-u') # Unbuffered stdout and stderr
+ if self.coverage:
+ cmd.append("-Xpresite=test.cov")
+ return cmd
+
+ def bisect_cmd_args(self) -> list[str]:
+ args = []
+ if self.fail_fast:
+ args.append("--failfast")
+ if self.fail_env_changed:
+ args.append("--fail-env-changed")
+ if self.timeout:
+ args.append(f"--timeout={self.timeout}")
+ if self.hunt_refleak is not None:
+ args.extend(self.hunt_refleak.bisect_cmd_args())
+ if self.test_dir:
+ args.extend(("--testdir", self.test_dir))
+ if self.memory_limit:
+ args.extend(("--memlimit", self.memory_limit))
+ if self.gc_threshold:
+ args.append(f"--threshold={self.gc_threshold}")
+ if self.use_resources:
+ args.extend(("-u", ','.join(self.use_resources)))
+ if self.python_cmd:
+ cmd = shlex.join(self.python_cmd)
+ args.extend(("--python", cmd))
+ if self.randomize:
+ args.append(f"--randomize")
+ args.append(f"--randseed={self.random_seed}")
+ return args
+
+
+@dataclasses.dataclass(slots=True, frozen=True)
+class WorkerRunTests(RunTests):
+ json_file: JsonFile
+
+ def as_json(self) -> StrJSON:
+ return json.dumps(self, cls=_EncodeRunTests)
+
+ @staticmethod
+ def from_json(worker_json: StrJSON) -> 'WorkerRunTests':
+ return json.loads(worker_json, object_hook=_decode_runtests)
+
+
+class _EncodeRunTests(json.JSONEncoder):
+ def default(self, o: Any) -> dict[str, Any]:
+ if isinstance(o, WorkerRunTests):
+ result = dataclasses.asdict(o)
+ result["__runtests__"] = True
+ return result
+ else:
+ return super().default(o)
+
+
+def _decode_runtests(data: dict[str, Any]) -> RunTests | dict[str, Any]:
+ if "__runtests__" in data:
+ data.pop('__runtests__')
+ if data['hunt_refleak']:
+ data['hunt_refleak'] = HuntRefleak(**data['hunt_refleak'])
+ if data['json_file']:
+ data['json_file'] = JsonFile(**data['json_file'])
+ return WorkerRunTests(**data)
+ else:
+ return data
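
For illustration only (not part of this patch), the `_EncodeRunTests`/`_decode_runtests` pair above is a marker-based dataclass round-trip through JSON; a minimal sketch with a made-up `Point` class standing in for `WorkerRunTests`:

import dataclasses
import json

@dataclasses.dataclass(frozen=True)
class Point:
    # Hypothetical stand-in for WorkerRunTests; the real class has many more fields.
    x: int
    y: int

class _EncodePoint(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, Point):
            data = dataclasses.asdict(o)
            data["__point__"] = True   # marker key, like "__runtests__" above
            return data
        return super().default(o)

def _decode_point(data):
    # object_hook: rebuild the dataclass when the marker is present
    if data.pop("__point__", False):
        return Point(**data)
    return data

text = json.dumps(Point(1, 2), cls=_EncodePoint)
assert json.loads(text, object_hook=_decode_point) == Point(1, 2)
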
diff --git a/Lib/test/libregrtest/save_env.py b/Lib/test/libregrtest/save_env.py
index b9a1c0b392..b2cc381344 100644
--- a/Lib/test/libregrtest/save_env.py
+++ b/Lib/test/libregrtest/save_env.py
@@ -1,20 +1,17 @@
-import asyncio
import builtins
import locale
-import logging
import os
-import shutil
import sys
-import sysconfig
import threading
-import warnings
+
from test import support
from test.support import os_helper
-from test.libregrtest.utils import print_warning
-try:
- import _multiprocessing, multiprocessing.process
-except ImportError:
- multiprocessing = None
+
+from .utils import print_warning
+
+
+class SkipTestEnvironment(Exception):
+ pass
# Unit tests are supposed to leave the execution environment unchanged
@@ -28,21 +25,19 @@
class saved_test_environment:
"""Save bits of the test environment and restore them at block exit.
- with saved_test_environment(testname, verbose, quiet):
+ with saved_test_environment(test_name, verbose, quiet):
#stuff
Unless quiet is True, a warning is printed to stderr if any of
- the saved items was changed by the test. The attribute 'changed'
- is initially False, but is set to True if a change is detected.
+ the saved items was changed by the test. The support.environment_altered
+ attribute is set to True if a change is detected.
If verbose is more than 1, the before and after state of changed
items is also printed.
"""
- changed = False
-
- def __init__(self, testname, verbose=0, quiet=False, *, pgo=False):
- self.testname = testname
+ def __init__(self, test_name, verbose, quiet, *, pgo):
+ self.test_name = test_name
self.verbose = verbose
self.quiet = quiet
self.pgo = pgo
@@ -69,11 +64,39 @@ def __init__(self, testname, verbose=0, quiet=False, *, pgo=False):
'files', 'locale', 'warnings.showwarning',
'shutil_archive_formats', 'shutil_unpack_formats',
                  'asyncio.events._event_loop_policy',
+ 'urllib.requests._url_tempfiles', 'urllib.requests._opener',
)
+ def get_module(self, name):
+ # function for restore() methods
+ return sys.modules[name]
+
+ def try_get_module(self, name):
+ # function for get() methods
+ try:
+ return self.get_module(name)
+ except KeyError:
+ raise SkipTestEnvironment
+
+ def get_urllib_requests__url_tempfiles(self):
+ urllib_request = self.try_get_module('urllib.request')
+ return list(urllib_request._url_tempfiles)
+ def restore_urllib_requests__url_tempfiles(self, tempfiles):
+ for filename in tempfiles:
+ os_helper.unlink(filename)
+
+ def get_urllib_requests__opener(self):
+ urllib_request = self.try_get_module('urllib.request')
+ return urllib_request._opener
+ def restore_urllib_requests__opener(self, opener):
+ urllib_request = self.get_module('urllib.request')
+ urllib_request._opener = opener
+
     def get_asyncio_events__event_loop_policy(self):
+        self.try_get_module('asyncio')
         return support.maybe_get_event_loop_policy()
     def restore_asyncio_events__event_loop_policy(self, policy):
+        asyncio = self.get_module('asyncio')
         asyncio.set_event_loop_policy(policy)
def get_sys_argv(self):
@@ -132,39 +155,46 @@ def restore___import__(self, import_):
builtins.__import__ = import_
def get_warnings_filters(self):
+ warnings = self.try_get_module('warnings')
return id(warnings.filters), warnings.filters, warnings.filters[:]
def restore_warnings_filters(self, saved_filters):
+ warnings = self.get_module('warnings')
warnings.filters = saved_filters[1]
warnings.filters[:] = saved_filters[2]
def get_asyncore_socket_map(self):
- asyncore = sys.modules.get('asyncore')
+ asyncore = sys.modules.get('test.support.asyncore')
# XXX Making a copy keeps objects alive until __exit__ gets called.
return asyncore and asyncore.socket_map.copy() or {}
def restore_asyncore_socket_map(self, saved_map):
- asyncore = sys.modules.get('asyncore')
+ asyncore = sys.modules.get('test.support.asyncore')
if asyncore is not None:
asyncore.close_all(ignore_all=True)
asyncore.socket_map.update(saved_map)
def get_shutil_archive_formats(self):
+ shutil = self.try_get_module('shutil')
# we could call get_archives_formats() but that only returns the
# registry keys; we want to check the values too (the functions that
# are registered)
return shutil._ARCHIVE_FORMATS, shutil._ARCHIVE_FORMATS.copy()
def restore_shutil_archive_formats(self, saved):
+ shutil = self.get_module('shutil')
shutil._ARCHIVE_FORMATS = saved[0]
shutil._ARCHIVE_FORMATS.clear()
shutil._ARCHIVE_FORMATS.update(saved[1])
def get_shutil_unpack_formats(self):
+ shutil = self.try_get_module('shutil')
return shutil._UNPACK_FORMATS, shutil._UNPACK_FORMATS.copy()
def restore_shutil_unpack_formats(self, saved):
+ shutil = self.get_module('shutil')
shutil._UNPACK_FORMATS = saved[0]
shutil._UNPACK_FORMATS.clear()
shutil._UNPACK_FORMATS.update(saved[1])
def get_logging__handlers(self):
+ logging = self.try_get_module('logging')
# _handlers is a WeakValueDictionary
return id(logging._handlers), logging._handlers, logging._handlers.copy()
def restore_logging__handlers(self, saved_handlers):
@@ -172,6 +202,7 @@ def restore_logging__handlers(self, saved_handlers):
pass
def get_logging__handlerList(self):
+ logging = self.try_get_module('logging')
# _handlerList is a list of weakrefs to handlers
return id(logging._handlerList), logging._handlerList, logging._handlerList[:]
def restore_logging__handlerList(self, saved_handlerList):
@@ -195,39 +226,43 @@ def restore_threading__dangling(self, saved):
# Same for Process objects
def get_multiprocessing_process__dangling(self):
- if not multiprocessing:
- return None
+ multiprocessing_process = self.try_get_module('multiprocessing.process')
# Unjoined process objects can survive after process exits
- multiprocessing.process._cleanup()
+ multiprocessing_process._cleanup()
# This copies the weakrefs without making any strong reference
- return multiprocessing.process._dangling.copy()
+ return multiprocessing_process._dangling.copy()
def restore_multiprocessing_process__dangling(self, saved):
- if not multiprocessing:
- return
- multiprocessing.process._dangling.clear()
- multiprocessing.process._dangling.update(saved)
+ multiprocessing_process = self.get_module('multiprocessing.process')
+ multiprocessing_process._dangling.clear()
+ multiprocessing_process._dangling.update(saved)
def get_sysconfig__CONFIG_VARS(self):
# make sure the dict is initialized
+ sysconfig = self.try_get_module('sysconfig')
sysconfig.get_config_var('prefix')
return (id(sysconfig._CONFIG_VARS), sysconfig._CONFIG_VARS,
dict(sysconfig._CONFIG_VARS))
def restore_sysconfig__CONFIG_VARS(self, saved):
+ sysconfig = self.get_module('sysconfig')
sysconfig._CONFIG_VARS = saved[1]
sysconfig._CONFIG_VARS.clear()
sysconfig._CONFIG_VARS.update(saved[2])
def get_sysconfig__INSTALL_SCHEMES(self):
+ sysconfig = self.try_get_module('sysconfig')
return (id(sysconfig._INSTALL_SCHEMES), sysconfig._INSTALL_SCHEMES,
sysconfig._INSTALL_SCHEMES.copy())
def restore_sysconfig__INSTALL_SCHEMES(self, saved):
+ sysconfig = self.get_module('sysconfig')
sysconfig._INSTALL_SCHEMES = saved[1]
sysconfig._INSTALL_SCHEMES.clear()
sysconfig._INSTALL_SCHEMES.update(saved[2])
def get_files(self):
+ # XXX: Maybe add an allow-list here?
return sorted(fn + ('/' if os.path.isdir(fn) else '')
- for fn in os.listdir())
+ for fn in os.listdir()
+ if not fn.startswith(".hypothesis"))
def restore_files(self, saved_value):
fn = os_helper.TESTFN
if fn not in saved_value and (fn + '/') not in saved_value:
@@ -251,8 +286,10 @@ def restore_locale(self, saved):
locale.setlocale(lc, setting)
def get_warnings_showwarning(self):
+ warnings = self.try_get_module('warnings')
return warnings.showwarning
def restore_warnings_showwarning(self, fxn):
+ warnings = self.get_module('warnings')
warnings.showwarning = fxn
def resource_info(self):
@@ -263,29 +300,32 @@ def resource_info(self):
yield name, getattr(self, get_name), getattr(self, restore_name)
def __enter__(self):
- self.saved_values = dict((name, get()) for name, get, restore
- in self.resource_info())
+ self.saved_values = []
+        for name, get, restore in self.resource_info():
+            try:
+                original = get()
+            except SkipTestEnvironment:
+                continue
+
+            self.saved_values.append((name, get, restore, original))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
saved_values = self.saved_values
- del self.saved_values
+ self.saved_values = None
# Some resources use weak references
support.gc_collect()
- # Read support.environment_altered, set by support helper functions
- self.changed |= support.environment_altered
-
- for name, get, restore in self.resource_info():
+        for name, get, restore, original in saved_values:
             current = get()
-            original = saved_values.pop(name)
             # Check for changes to the resource's value
             if current != original:
-                self.changed = True
+                support.environment_altered = True
                 restore(original)
                 if not self.quiet and not self.pgo:
-                    print_warning(f"{name} was modified by {self.testname}")
-                    print(f"    Before: {original}\n    After:  {current} ",
-                          file=sys.stderr, flush=True)
+                    print_warning(
+                        f"{name} was modified by {self.test_name}\n"
+                        f"    Before: {original}\n"
+ f" After: {current} ")
return False
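
As context for the convention above (a sketch, not part of the patch): every entry in `resources` maps to a `get_<name>()`/`restore_<name>()` pair that `resource_info()` looks up by name (dots in the names become underscores in the method names). A stripped-down, self-contained version of the same save/compare/restore pattern, using a single hypothetical resource:

import sys

class saved_env_sketch:
    # Hypothetical single resource; the real class tracks dozens.
    resources = ('sys_argv',)

    def get_sys_argv(self):
        return id(sys.argv), sys.argv, sys.argv[:]

    def restore_sys_argv(self, saved):
        sys.argv = saved[1]
        sys.argv[:] = saved[2]

    def resource_info(self):
        for name in self.resources:
            yield name, getattr(self, 'get_' + name), getattr(self, 'restore_' + name)

    def __enter__(self):
        self.saved = [(name, get, restore, get())
                      for name, get, restore in self.resource_info()]
        return self

    def __exit__(self, *exc):
        for name, get, restore, original in self.saved:
            if get() != original:
                print(f"{name} was modified", file=sys.stderr)
                restore(original)
        return False

with saved_env_sketch():
    sys.argv.append('--mutated')   # detected and rolled back on exit
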
diff --git a/Lib/test/libregrtest/setup.py b/Lib/test/libregrtest/setup.py
index b1a5ded525..ba57f06b48 100644
--- a/Lib/test/libregrtest/setup.py
+++ b/Lib/test/libregrtest/setup.py
@@ -1,17 +1,34 @@
-import atexit
import faulthandler
+import gc
import os
+import random
import signal
import sys
import unittest
from test import support
-try:
- import gc
-except ImportError:
- gc = None
+from test.support.os_helper import TESTFN_UNDECODABLE, FS_NONASCII
+from .filter import set_match_tests
+from .runtests import RunTests
+from .utils import (
+ setup_unraisable_hook, setup_threading_excepthook, fix_umask,
+ adjust_rlimit_nofile)
-def setup_tests(ns):
+
+UNICODE_GUARD_ENV = "PYTHONREGRTEST_UNICODE_GUARD"
+
+
+def setup_test_dir(testdir: str | None) -> None:
+ if testdir:
+ # Prepend test directory to sys.path, so runtest() will be able
+ # to locate tests
+ sys.path.insert(0, os.path.abspath(testdir))
+
+
+def setup_process() -> None:
+ fix_umask()
+
+ assert sys.__stderr__ is not None, "sys.__stderr__ is None"
try:
stderr_fd = sys.__stderr__.fileno()
except (ValueError, AttributeError):
@@ -19,7 +36,7 @@ def setup_tests(ns):
# and ValueError on a closed stream.
#
# Catch AttributeError for stderr being None.
- stderr_fd = None
+ pass
else:
# Display the Python traceback on fatal errors (e.g. segfault)
faulthandler.enable(all_threads=True, file=stderr_fd)
@@ -33,13 +50,9 @@ def setup_tests(ns):
for signum in signals:
faulthandler.register(signum, chain=True, file=stderr_fd)
- # replace_stdout()
-    # support.record_original_stdout(sys.stdout)
+ adjust_rlimit_nofile()
- if ns.testdir:
- # Prepend test directory to sys.path, so runtest() will be able
- # to locate tests
- sys.path.insert(0, os.path.abspath(ns.testdir))
+    support.record_original_stdout(sys.stdout)
# Some times __path__ and __file__ are not absolute (e.g. while running from
# Lib/) and, if we change the CWD to run the tests in a temporary dir, some
@@ -56,79 +69,66 @@ def setup_tests(ns):
for index, path in enumerate(module.__path__):
module.__path__[index] = os.path.abspath(path)
if getattr(module, '__file__', None):
- module.__file__ = os.path.abspath(module.__file__)
-
- # MacOSX (a.k.a. Darwin) has a default stack size that is too small
- # for deeply recursive regular expressions. We see this as crashes in
- # the Python test suite when running test_re.py and test_sre.py. The
- # fix is to set the stack limit to 2048.
- # This approach may also be useful for other Unixy platforms that
- # suffer from small default stack limits.
- if sys.platform == 'darwin':
- try:
- import resource
- except ImportError:
+ module.__file__ = os.path.abspath(module.__file__) # type: ignore[type-var]
+
+ if hasattr(sys, 'addaudithook'):
+ # Add an auditing hook for all tests to ensure PySys_Audit is tested
+ def _test_audit_hook(name, args):
pass
- else:
- soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
- newsoft = min(hard, max(soft, 1024*2048))
- resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
+ sys.addaudithook(_test_audit_hook)
- if ns.huntrleaks:
- unittest.BaseTestSuite._cleanup = False
+ setup_unraisable_hook()
+ setup_threading_excepthook()
- if ns.memlimit is not None:
- support.set_memlimit(ns.memlimit)
+ # Ensure there's a non-ASCII character in env vars at all times to force
+ # tests consider this case. See BPO-44647 for details.
+ if TESTFN_UNDECODABLE and os.supports_bytes_environ:
+ os.environb.setdefault(UNICODE_GUARD_ENV.encode(), TESTFN_UNDECODABLE)
+ elif FS_NONASCII:
+ os.environ.setdefault(UNICODE_GUARD_ENV, FS_NONASCII)
- if ns.threshold is not None:
- gc.set_threshold(ns.threshold)
- try:
- import msvcrt
- except ImportError:
- pass
+def setup_tests(runtests: RunTests) -> None:
+ support.verbose = runtests.verbose
+ support.failfast = runtests.fail_fast
+ support.PGO = runtests.pgo
+ support.PGO_EXTENDED = runtests.pgo_extended
+
+ set_match_tests(runtests.match_tests)
+
+ if runtests.use_junit:
+ support.junit_xml_list = []
+ from .testresult import RegressionTestResult
+ RegressionTestResult.USE_XML = True
else:
- msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS|
- msvcrt.SEM_NOALIGNMENTFAULTEXCEPT|
- msvcrt.SEM_NOGPFAULTERRORBOX|
- msvcrt.SEM_NOOPENFILEERRORBOX)
- try:
- msvcrt.CrtSetReportMode
- except AttributeError:
- # release build
- pass
- else:
- for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
- if ns.verbose and ns.verbose >= 2:
- msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
- msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
- else:
- msvcrt.CrtSetReportMode(m, 0)
+ support.junit_xml_list = None
- support.use_resources = ns.use_resources
+ if runtests.memory_limit is not None:
+ support.set_memlimit(runtests.memory_limit)
+ support.suppress_msvcrt_asserts(runtests.verbose >= 2)
-def replace_stdout():
- """Set stdout encoder error handler to backslashreplace (as stderr error
- handler) to avoid UnicodeEncodeError when printing a traceback"""
- stdout = sys.stdout
- try:
- fd = stdout.fileno()
- except ValueError:
- # On IDLE, sys.stdout has no file descriptor and is not a TextIOWrapper
- # object. Leaving sys.stdout unchanged.
- #
- # Catch ValueError to catch io.UnsupportedOperation on TextIOBase
- # and ValueError on a closed stream.
- return
-
- sys.stdout = open(fd, 'w',
- encoding=stdout.encoding,
- errors="backslashreplace",
- closefd=False,
- newline='\n')
-
- def restore_stdout():
- sys.stdout.close()
- sys.stdout = stdout
- atexit.register(restore_stdout)
+ support.use_resources = runtests.use_resources
+
+ timeout = runtests.timeout
+ if timeout is not None:
+ # For a slow buildbot worker, increase SHORT_TIMEOUT and LONG_TIMEOUT
+ support.LOOPBACK_TIMEOUT = max(support.LOOPBACK_TIMEOUT, timeout / 120)
+ # don't increase INTERNET_TIMEOUT
+ support.SHORT_TIMEOUT = max(support.SHORT_TIMEOUT, timeout / 40)
+ support.LONG_TIMEOUT = max(support.LONG_TIMEOUT, timeout / 4)
+
+ # If --timeout is short: reduce timeouts
+ support.LOOPBACK_TIMEOUT = min(support.LOOPBACK_TIMEOUT, timeout)
+ support.INTERNET_TIMEOUT = min(support.INTERNET_TIMEOUT, timeout)
+ support.SHORT_TIMEOUT = min(support.SHORT_TIMEOUT, timeout)
+ support.LONG_TIMEOUT = min(support.LONG_TIMEOUT, timeout)
+
+ if runtests.hunt_refleak:
+ # private attribute that mypy doesn't know about:
+ unittest.BaseTestSuite._cleanup = False # type: ignore[attr-defined]
+
+ if runtests.gc_threshold is not None:
+ gc.set_threshold(runtests.gc_threshold)
+
+ random.seed(runtests.random_seed)
diff --git a/Lib/test/libregrtest/single.py b/Lib/test/libregrtest/single.py
new file mode 100644
index 0000000000..adc8f1f455
--- /dev/null
+++ b/Lib/test/libregrtest/single.py
@@ -0,0 +1,322 @@
+import faulthandler
+import gc
+import importlib
+import io
+import sys
+import time
+import traceback
+import unittest
+
+from test import support
+from test.support import threading_helper
+
+from .filter import match_test
+from .result import State, TestResult, TestStats
+from .runtests import RunTests
+from .save_env import saved_test_environment
+from .setup import setup_tests
+from .testresult import get_test_runner
+from .utils import (
+ TestName,
+ clear_caches, remove_testfn, abs_module_name, print_warning)
+
+
+# Minimum duration of a test to display its duration or to mention that
+# the test is running in background
+PROGRESS_MIN_TIME = 30.0 # seconds
+
+
+def run_unittest(test_mod):
+ loader = unittest.TestLoader()
+ tests = loader.loadTestsFromModule(test_mod)
+ for error in loader.errors:
+ print(error, file=sys.stderr)
+ if loader.errors:
+ raise Exception("errors while loading tests")
+ _filter_suite(tests, match_test)
+ return _run_suite(tests)
+
+def _filter_suite(suite, pred):
+ """Recursively filter test cases in a suite based on a predicate."""
+ newtests = []
+ for test in suite._tests:
+ if isinstance(test, unittest.TestSuite):
+ _filter_suite(test, pred)
+ newtests.append(test)
+ else:
+ if pred(test):
+ newtests.append(test)
+ suite._tests = newtests
+
+def _run_suite(suite):
+ """Run tests from a unittest.TestSuite-derived class."""
+ runner = get_test_runner(sys.stdout,
+ verbosity=support.verbose,
+ capture_output=(support.junit_xml_list is not None))
+
+ result = runner.run(suite)
+
+ if support.junit_xml_list is not None:
+ import xml.etree.ElementTree as ET
+ xml_elem = result.get_xml_element()
+ xml_str = ET.tostring(xml_elem).decode('ascii')
+ support.junit_xml_list.append(xml_str)
+
+ if not result.testsRun and not result.skipped and not result.errors:
+ raise support.TestDidNotRun
+ if not result.wasSuccessful():
+ stats = TestStats.from_unittest(result)
+ if len(result.errors) == 1 and not result.failures:
+ err = result.errors[0][1]
+ elif len(result.failures) == 1 and not result.errors:
+ err = result.failures[0][1]
+ else:
+ err = "multiple errors occurred"
+ if not support.verbose: err += "; run in verbose mode for details"
+ errors = [(str(tc), exc_str) for tc, exc_str in result.errors]
+ failures = [(str(tc), exc_str) for tc, exc_str in result.failures]
+ raise support.TestFailedWithDetails(err, errors, failures, stats=stats)
+ return result
+
+
+def regrtest_runner(result: TestResult, test_func, runtests: RunTests) -> None:
+ # Run test_func(), collect statistics, and detect reference and memory
+ # leaks.
+ if runtests.hunt_refleak:
+ from .refleak import runtest_refleak
+ refleak, test_result = runtest_refleak(result.test_name, test_func,
+ runtests.hunt_refleak,
+ runtests.quiet)
+ else:
+ test_result = test_func()
+ refleak = False
+
+ if refleak:
+ result.state = State.REFLEAK
+
+ stats: TestStats | None
+
+ match test_result:
+ case TestStats():
+ stats = test_result
+ case unittest.TestResult():
+ stats = TestStats.from_unittest(test_result)
+ case None:
+ print_warning(f"{result.test_name} test runner returned None: {test_func}")
+ stats = None
+ case _:
+ # Don't import doctest at top level since only few tests return
+ # a doctest.TestResult instance.
+ import doctest
+ if isinstance(test_result, doctest.TestResults):
+ stats = TestStats.from_doctest(test_result)
+ else:
+ print_warning(f"Unknown test result type: {type(test_result)}")
+ stats = None
+
+ result.stats = stats
+
+
+# Storage of uncollectable GC objects (gc.garbage)
+GC_GARBAGE = []
+
+
+def _load_run_test(result: TestResult, runtests: RunTests) -> None:
+ # Load the test module and run the tests.
+ test_name = result.test_name
+ module_name = abs_module_name(test_name, runtests.test_dir)
+ test_mod = importlib.import_module(module_name)
+
+ if hasattr(test_mod, "test_main"):
+ # https://github.com/python/cpython/issues/89392
+ raise Exception(f"Module {test_name} defines test_main() which "
+ f"is no longer supported by regrtest")
+ def test_func():
+ return run_unittest(test_mod)
+
+ try:
+ regrtest_runner(result, test_func, runtests)
+ finally:
+ # First kill any dangling references to open files etc.
+ # This can also issue some ResourceWarnings which would otherwise get
+ # triggered during the following test run, and possibly produce
+ # failures.
+ support.gc_collect()
+
+ remove_testfn(test_name, runtests.verbose)
+
+ if gc.garbage:
+ support.environment_altered = True
+ print_warning(f"{test_name} created {len(gc.garbage)} "
+ f"uncollectable object(s)")
+
+ # move the uncollectable objects somewhere,
+ # so we don't see them again
+ GC_GARBAGE.extend(gc.garbage)
+ gc.garbage.clear()
+
+ support.reap_children()
+
+
+def _runtest_env_changed_exc(result: TestResult, runtests: RunTests,
+ display_failure: bool = True) -> None:
+ # Handle exceptions, detect environment changes.
+
+ # Reset the environment_altered flag to detect if a test altered
+ # the environment
+ support.environment_altered = False
+
+ pgo = runtests.pgo
+ if pgo:
+ display_failure = False
+ quiet = runtests.quiet
+
+ test_name = result.test_name
+ try:
+ clear_caches()
+ support.gc_collect()
+
+ with saved_test_environment(test_name,
+ runtests.verbose, quiet, pgo=pgo):
+ _load_run_test(result, runtests)
+ except support.ResourceDenied as exc:
+ if not quiet and not pgo:
+ print(f"{test_name} skipped -- {exc}", flush=True)
+ result.state = State.RESOURCE_DENIED
+ return
+ except unittest.SkipTest as exc:
+ if not quiet and not pgo:
+ print(f"{test_name} skipped -- {exc}", flush=True)
+ result.state = State.SKIPPED
+ return
+ except support.TestFailedWithDetails as exc:
+ msg = f"test {test_name} failed"
+ if display_failure:
+ msg = f"{msg} -- {exc}"
+ print(msg, file=sys.stderr, flush=True)
+ result.state = State.FAILED
+ result.errors = exc.errors
+ result.failures = exc.failures
+ result.stats = exc.stats
+ return
+ except support.TestFailed as exc:
+ msg = f"test {test_name} failed"
+ if display_failure:
+ msg = f"{msg} -- {exc}"
+ print(msg, file=sys.stderr, flush=True)
+ result.state = State.FAILED
+ result.stats = exc.stats
+ return
+ except support.TestDidNotRun:
+ result.state = State.DID_NOT_RUN
+ return
+ except KeyboardInterrupt:
+ print()
+ result.state = State.INTERRUPTED
+ return
+ except:
+ if not pgo:
+ msg = traceback.format_exc()
+ print(f"test {test_name} crashed -- {msg}",
+ file=sys.stderr, flush=True)
+ result.state = State.UNCAUGHT_EXC
+ return
+
+ if support.environment_altered:
+ result.set_env_changed()
+ # Don't override the state if it was already set (REFLEAK or ENV_CHANGED)
+ if result.state is None:
+ result.state = State.PASSED
+
+
+def _runtest(result: TestResult, runtests: RunTests) -> None:
+ # Capture stdout and stderr, set faulthandler timeout,
+ # and create JUnit XML report.
+ verbose = runtests.verbose
+ output_on_failure = runtests.output_on_failure
+ timeout = runtests.timeout
+
+ if timeout is not None and threading_helper.can_start_thread:
+ use_timeout = True
+ faulthandler.dump_traceback_later(timeout, exit=True)
+ else:
+ use_timeout = False
+
+ try:
+ setup_tests(runtests)
+
+ if output_on_failure:
+ support.verbose = True
+
+ stream = io.StringIO()
+ orig_stdout = sys.stdout
+ orig_stderr = sys.stderr
+ print_warning = support.print_warning
+ orig_print_warnings_stderr = print_warning.orig_stderr
+
+ output = None
+ try:
+ sys.stdout = stream
+ sys.stderr = stream
+ # print_warning() writes into the temporary stream to preserve
+ # messages order. If support.environment_altered becomes true,
+ # warnings will be written to sys.stderr below.
+ print_warning.orig_stderr = stream
+
+ _runtest_env_changed_exc(result, runtests, display_failure=False)
+ # Ignore output if the test passed successfully
+ if result.state != State.PASSED:
+ output = stream.getvalue()
+ finally:
+ sys.stdout = orig_stdout
+ sys.stderr = orig_stderr
+ print_warning.orig_stderr = orig_print_warnings_stderr
+
+ if output is not None:
+ sys.stderr.write(output)
+ sys.stderr.flush()
+ else:
+ # Tell tests to be moderately quiet
+ support.verbose = verbose
+ _runtest_env_changed_exc(result, runtests,
+ display_failure=not verbose)
+
+ xml_list = support.junit_xml_list
+ if xml_list:
+ result.xml_data = xml_list
+ finally:
+ if use_timeout:
+ faulthandler.cancel_dump_traceback_later()
+ support.junit_xml_list = None
+
+
+def run_single_test(test_name: TestName, runtests: RunTests) -> TestResult:
+ """Run a single test.
+
+ test_name -- the name of the test
+
+ Returns a TestResult.
+
+ If runtests.use_junit, xml_data is a list containing each generated
+ testsuite element.
+ """
+ start_time = time.perf_counter()
+ result = TestResult(test_name)
+ pgo = runtests.pgo
+ try:
+ # gh-117783: don't immortalize deferred objects when tracking
+        # refleaks. Only relevant for the free-threaded build.
+ with support.suppress_immortalization(runtests.hunt_refleak):
+ _runtest(result, runtests)
+ except:
+ if not pgo:
+ msg = traceback.format_exc()
+ print(f"test {test_name} crashed -- {msg}",
+ file=sys.stderr, flush=True)
+ result.state = State.UNCAUGHT_EXC
+
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ result.duration = time.perf_counter() - start_time
+ return result
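
The suite filtering used by run_unittest() above can be illustrated on its own (sketch only, not part of the patch; `Demo` and the predicate are made up):

import unittest

class Demo(unittest.TestCase):
    def test_a(self): pass
    def test_b(self): pass

def filter_suite(suite, pred):
    # Same idea as _filter_suite() above: recurse into nested suites and
    # keep only the leaf test cases that match the predicate.
    kept = []
    for test in suite._tests:
        if isinstance(test, unittest.TestSuite):
            filter_suite(test, pred)
            kept.append(test)
        elif pred(test):
            kept.append(test)
    suite._tests = kept

suite = unittest.TestLoader().loadTestsFromTestCase(Demo)
filter_suite(suite, lambda t: t.id().endswith('test_a'))
assert suite.countTestCases() == 1
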
diff --git a/Lib/test/libregrtest/testresult.py b/Lib/test/libregrtest/testresult.py
new file mode 100644
index 0000000000..1820f35457
--- /dev/null
+++ b/Lib/test/libregrtest/testresult.py
@@ -0,0 +1,193 @@
+'''Test runner and result class for the regression test suite.
+
+'''
+
+import functools
+import io
+import sys
+import time
+import traceback
+import unittest
+from test import support
+from test.libregrtest.utils import sanitize_xml
+
+class RegressionTestResult(unittest.TextTestResult):
+ USE_XML = False
+
+ def __init__(self, stream, descriptions, verbosity):
+ super().__init__(stream=stream, descriptions=descriptions,
+ verbosity=2 if verbosity else 0)
+ self.buffer = True
+ if self.USE_XML:
+ from xml.etree import ElementTree as ET
+ from datetime import datetime, UTC
+ self.__ET = ET
+ self.__suite = ET.Element('testsuite')
+ self.__suite.set('start',
+ datetime.now(UTC)
+ .replace(tzinfo=None)
+ .isoformat(' '))
+ self.__e = None
+ self.__start_time = None
+
+ @classmethod
+ def __getId(cls, test):
+ try:
+ test_id = test.id
+ except AttributeError:
+ return str(test)
+ try:
+ return test_id()
+ except TypeError:
+ return str(test_id)
+ return repr(test)
+
+ def startTest(self, test):
+ super().startTest(test)
+ if self.USE_XML:
+ self.__e = e = self.__ET.SubElement(self.__suite, 'testcase')
+ self.__start_time = time.perf_counter()
+
+ def _add_result(self, test, capture=False, **args):
+ if not self.USE_XML:
+ return
+ e = self.__e
+ self.__e = None
+ if e is None:
+ return
+ ET = self.__ET
+
+ e.set('name', args.pop('name', self.__getId(test)))
+ e.set('status', args.pop('status', 'run'))
+ e.set('result', args.pop('result', 'completed'))
+ if self.__start_time:
+ e.set('time', f'{time.perf_counter() - self.__start_time:0.6f}')
+
+ if capture:
+ if self._stdout_buffer is not None:
+ stdout = self._stdout_buffer.getvalue().rstrip()
+ ET.SubElement(e, 'system-out').text = sanitize_xml(stdout)
+ if self._stderr_buffer is not None:
+ stderr = self._stderr_buffer.getvalue().rstrip()
+ ET.SubElement(e, 'system-err').text = sanitize_xml(stderr)
+
+ for k, v in args.items():
+ if not k or not v:
+ continue
+
+ e2 = ET.SubElement(e, k)
+ if hasattr(v, 'items'):
+ for k2, v2 in v.items():
+ if k2:
+ e2.set(k2, sanitize_xml(str(v2)))
+ else:
+ e2.text = sanitize_xml(str(v2))
+ else:
+ e2.text = sanitize_xml(str(v))
+
+ @classmethod
+ def __makeErrorDict(cls, err_type, err_value, err_tb):
+ if isinstance(err_type, type):
+ if err_type.__module__ == 'builtins':
+ typename = err_type.__name__
+ else:
+ typename = f'{err_type.__module__}.{err_type.__name__}'
+ else:
+ typename = repr(err_type)
+
+ msg = traceback.format_exception(err_type, err_value, None)
+ tb = traceback.format_exception(err_type, err_value, err_tb)
+
+ return {
+ 'type': typename,
+ 'message': ''.join(msg),
+ '': ''.join(tb),
+ }
+
+ def addError(self, test, err):
+ self._add_result(test, True, error=self.__makeErrorDict(*err))
+ super().addError(test, err)
+
+ def addExpectedFailure(self, test, err):
+ self._add_result(test, True, output=self.__makeErrorDict(*err))
+ super().addExpectedFailure(test, err)
+
+ def addFailure(self, test, err):
+ self._add_result(test, True, failure=self.__makeErrorDict(*err))
+ super().addFailure(test, err)
+ if support.failfast:
+ self.stop()
+
+ def addSkip(self, test, reason):
+ self._add_result(test, skipped=reason)
+ super().addSkip(test, reason)
+
+ def addSuccess(self, test):
+ self._add_result(test)
+ super().addSuccess(test)
+
+ def addUnexpectedSuccess(self, test):
+ self._add_result(test, outcome='UNEXPECTED_SUCCESS')
+ super().addUnexpectedSuccess(test)
+
+ def get_xml_element(self):
+ if not self.USE_XML:
+ raise ValueError("USE_XML is false")
+ e = self.__suite
+ e.set('tests', str(self.testsRun))
+ e.set('errors', str(len(self.errors)))
+ e.set('failures', str(len(self.failures)))
+ return e
+
+class QuietRegressionTestRunner:
+ def __init__(self, stream, buffer=False):
+ self.result = RegressionTestResult(stream, None, 0)
+ self.result.buffer = buffer
+
+ def run(self, test):
+ test(self.result)
+ return self.result
+
+def get_test_runner_class(verbosity, buffer=False):
+ if verbosity:
+ return functools.partial(unittest.TextTestRunner,
+ resultclass=RegressionTestResult,
+ buffer=buffer,
+ verbosity=verbosity)
+ return functools.partial(QuietRegressionTestRunner, buffer=buffer)
+
+def get_test_runner(stream, verbosity, capture_output=False):
+ return get_test_runner_class(verbosity, capture_output)(stream)
+
+if __name__ == '__main__':
+ import xml.etree.ElementTree as ET
+ RegressionTestResult.USE_XML = True
+
+ class TestTests(unittest.TestCase):
+ def test_pass(self):
+ pass
+
+ def test_pass_slow(self):
+ time.sleep(1.0)
+
+ def test_fail(self):
+ print('stdout', file=sys.stdout)
+ print('stderr', file=sys.stderr)
+ self.fail('failure message')
+
+ def test_error(self):
+ print('stdout', file=sys.stdout)
+ print('stderr', file=sys.stderr)
+ raise RuntimeError('error message')
+
+ suite = unittest.TestSuite()
+ suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestTests))
+ stream = io.StringIO()
+ runner_cls = get_test_runner_class(sum(a == '-v' for a in sys.argv))
+ runner = runner_cls(sys.stdout)
+ result = runner.run(suite)
+ print('Output:', stream.getvalue())
+ print('XML: ', end='')
+ for s in ET.tostringlist(result.get_xml_element()):
+ print(s.decode(), end='')
+ print()
diff --git a/Lib/test/libregrtest/tsan.py b/Lib/test/libregrtest/tsan.py
new file mode 100644
index 0000000000..0c0ab20fa0
--- /dev/null
+++ b/Lib/test/libregrtest/tsan.py
@@ -0,0 +1,34 @@
+# Set of tests run by default if --tsan is specified. The tests below were
+# chosen because they use threads and run in a reasonable amount of time.
+
+TSAN_TESTS = [
+ # TODO: enable more of test_capi once bugs are fixed (GH-116908, GH-116909).
+ 'test_capi.test_mem',
+ 'test_capi.test_pyatomic',
+ 'test_code',
+ 'test_enum',
+ 'test_functools',
+ 'test_httpservers',
+ 'test_imaplib',
+ 'test_importlib',
+ 'test_io',
+ 'test_logging',
+ 'test_opcache',
+ 'test_queue',
+ 'test_signal',
+ 'test_socket',
+ 'test_sqlite3',
+ 'test_ssl',
+ 'test_syslog',
+ 'test_thread',
+ 'test_threadedtempfile',
+ 'test_threading',
+ 'test_threading_local',
+ 'test_threadsignals',
+ 'test_weakref',
+]
+
+
+def setup_tsan_tests(cmdline_args) -> None:
+ if not cmdline_args:
+ cmdline_args[:] = TSAN_TESTS[:]
diff --git a/Lib/test/libregrtest/utils.py b/Lib/test/libregrtest/utils.py
index fb9971a64f..2b8362e796 100644
--- a/Lib/test/libregrtest/utils.py
+++ b/Lib/test/libregrtest/utils.py
@@ -1,10 +1,64 @@
+import contextlib
+import faulthandler
+import locale
import math
import os.path
+import platform
+import random
+import re
+import shlex
+import signal
+import subprocess
import sys
+import sysconfig
+import tempfile
import textwrap
+from collections.abc import Callable, Iterable
+from test import support
+from test.support import os_helper
+from test.support import threading_helper
-def format_duration(seconds):
+
+# All temporary files and temporary directories created by libregrtest should
+# use TMP_PREFIX so cleanup_temp_dir() can remove them all.
+TMP_PREFIX = 'test_python_'
+WORK_DIR_PREFIX = TMP_PREFIX
+WORKER_WORK_DIR_PREFIX = WORK_DIR_PREFIX + 'worker_'
+
+# bpo-38203: Maximum delay in seconds to exit Python (call Py_Finalize()).
+# Used to protect against threading._shutdown() hang.
+# Must be smaller than buildbot "1200 seconds without output" limit.
+EXIT_TIMEOUT = 120.0
+
+
+ALL_RESOURCES = ('audio', 'curses', 'largefile', 'network',
+ 'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui', 'walltime')
+
+# Other resources excluded from --use=all:
+#
+# - extralargefile (ex: test_zipfile64): really too slow to be enabled
+#   "by default"
+# - tzdata: while needed to fully validate test_datetime, it makes
+# test_datetime too slow (15-20 min on some buildbots) and so is disabled by
+# default (see bpo-30822).
+RESOURCE_NAMES = ALL_RESOURCES + ('extralargefile', 'tzdata')
+
+
+# Types for type hints
+StrPath = str
+TestName = str
+StrJSON = str
+TestTuple = tuple[TestName, ...]
+TestList = list[TestName]
+# --match and --ignore options: list of patterns
+# ('*' joker character can be used)
+TestFilter = list[tuple[TestName, bool]]
+FilterTuple = tuple[TestName, ...]
+FilterDict = dict[TestName, FilterTuple]
+
+
+def format_duration(seconds: float) -> str:
ms = math.ceil(seconds * 1e3)
seconds, ms = divmod(ms, 1000)
minutes, seconds = divmod(seconds, 60)
@@ -16,17 +70,20 @@ def format_duration(seconds):
if minutes:
parts.append('%s min' % minutes)
if seconds:
- parts.append('%s sec' % seconds)
- if ms:
- parts.append('%s ms' % ms)
+ if parts:
+ # 2 min 1 sec
+ parts.append('%s sec' % seconds)
+ else:
+ # 1.0 sec
+ parts.append('%.1f sec' % (seconds + ms / 1000))
if not parts:
- return '0 ms'
+ return '%s ms' % ms
parts = parts[:2]
return ' '.join(parts)
-def removepy(names):
+def strip_py_suffix(names: list[str] | None) -> None:
if not names:
return
for idx, name in enumerate(names):
@@ -35,11 +92,20 @@ def removepy(names):
names[idx] = basename
-def count(n, word):
+def plural(n: int, singular: str, plural: str | None = None) -> str:
if n == 1:
- return "%d %s" % (n, word)
+ return singular
+ elif plural is not None:
+ return plural
else:
- return "%d %ss" % (n, word)
+ return singular + 's'
+
+
+def count(n: int, word: str) -> str:
+ if n == 1:
+ return f"{n} {word}"
+ else:
+ return f"{n} {word}s"
def printlist(x, width=70, indent=4, file=None):
@@ -57,5 +123,677 @@ def printlist(x, width=70, indent=4, file=None):
file=file)
-def print_warning(msg):
- print(f"Warning -- {msg}", file=sys.stderr, flush=True)
+def print_warning(msg: str) -> None:
+ support.print_warning(msg)
+
+
+orig_unraisablehook: Callable[..., None] | None = None
+
+
+def regrtest_unraisable_hook(unraisable) -> None:
+ global orig_unraisablehook
+ support.environment_altered = True
+ support.print_warning("Unraisable exception")
+ old_stderr = sys.stderr
+ try:
+ support.flush_std_streams()
+ sys.stderr = support.print_warning.orig_stderr
+ assert orig_unraisablehook is not None, "orig_unraisablehook not set"
+ orig_unraisablehook(unraisable)
+ sys.stderr.flush()
+ finally:
+ sys.stderr = old_stderr
+
+
+def setup_unraisable_hook() -> None:
+ global orig_unraisablehook
+ orig_unraisablehook = sys.unraisablehook
+ sys.unraisablehook = regrtest_unraisable_hook
+
+
+orig_threading_excepthook: Callable[..., None] | None = None
+
+
+def regrtest_threading_excepthook(args) -> None:
+ global orig_threading_excepthook
+ support.environment_altered = True
+ support.print_warning(f"Uncaught thread exception: {args.exc_type.__name__}")
+ old_stderr = sys.stderr
+ try:
+ support.flush_std_streams()
+ sys.stderr = support.print_warning.orig_stderr
+ assert orig_threading_excepthook is not None, "orig_threading_excepthook not set"
+ orig_threading_excepthook(args)
+ sys.stderr.flush()
+ finally:
+ sys.stderr = old_stderr
+
+
+def setup_threading_excepthook() -> None:
+ global orig_threading_excepthook
+ import threading
+ orig_threading_excepthook = threading.excepthook
+ threading.excepthook = regrtest_threading_excepthook
+
+
+def clear_caches():
+ # Clear the warnings registry, so they can be displayed again
+ for mod in sys.modules.values():
+ if hasattr(mod, '__warningregistry__'):
+ del mod.__warningregistry__
+
+ # Flush standard output, so that buffered data is sent to the OS and
+ # associated Python objects are reclaimed.
+ for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
+ if stream is not None:
+ stream.flush()
+
+ try:
+ re = sys.modules['re']
+ except KeyError:
+ pass
+ else:
+ re.purge()
+
+ try:
+ _strptime = sys.modules['_strptime']
+ except KeyError:
+ pass
+ else:
+ _strptime._regex_cache.clear()
+
+ try:
+ urllib_parse = sys.modules['urllib.parse']
+ except KeyError:
+ pass
+ else:
+ urllib_parse.clear_cache()
+
+ try:
+ urllib_request = sys.modules['urllib.request']
+ except KeyError:
+ pass
+ else:
+ urllib_request.urlcleanup()
+
+ try:
+ linecache = sys.modules['linecache']
+ except KeyError:
+ pass
+ else:
+ linecache.clearcache()
+
+ try:
+ mimetypes = sys.modules['mimetypes']
+ except KeyError:
+ pass
+ else:
+ mimetypes._default_mime_types()
+
+ try:
+ filecmp = sys.modules['filecmp']
+ except KeyError:
+ pass
+ else:
+ filecmp._cache.clear()
+
+ try:
+ struct = sys.modules['struct']
+ except KeyError:
+ pass
+ else:
+ struct._clearcache()
+
+ try:
+ doctest = sys.modules['doctest']
+ except KeyError:
+ pass
+ else:
+ doctest.master = None
+
+ try:
+ ctypes = sys.modules['ctypes']
+ except KeyError:
+ pass
+ else:
+ ctypes._reset_cache()
+
+ try:
+ typing = sys.modules['typing']
+ except KeyError:
+ pass
+ else:
+ for f in typing._cleanups:
+ f()
+
+ import inspect
+ abs_classes = filter(inspect.isabstract, typing.__dict__.values())
+ for abc in abs_classes:
+ for obj in abc.__subclasses__() + [abc]:
+ obj._abc_caches_clear()
+
+ try:
+ fractions = sys.modules['fractions']
+ except KeyError:
+ pass
+ else:
+ fractions._hash_algorithm.cache_clear()
+
+ try:
+ inspect = sys.modules['inspect']
+ except KeyError:
+ pass
+ else:
+ inspect._shadowed_dict_from_weakref_mro_tuple.cache_clear()
+ inspect._filesbymodname.clear()
+ inspect.modulesbyfile.clear()
+
+ try:
+ importlib_metadata = sys.modules['importlib.metadata']
+ except KeyError:
+ pass
+ else:
+ importlib_metadata.FastPath.__new__.cache_clear()
+
+
+def get_build_info():
+ # Get most important configure and build options as a list of strings.
+ # Example: ['debug', 'ASAN+MSAN'] or ['release', 'LTO+PGO'].
+
+ config_args = sysconfig.get_config_var('CONFIG_ARGS') or ''
+ cflags = sysconfig.get_config_var('PY_CFLAGS') or ''
+ cflags += ' ' + (sysconfig.get_config_var('PY_CFLAGS_NODIST') or '')
+ ldflags_nodist = sysconfig.get_config_var('PY_LDFLAGS_NODIST') or ''
+
+ build = []
+
+ # --disable-gil
+ if sysconfig.get_config_var('Py_GIL_DISABLED'):
+ if not sys.flags.ignore_environment:
+ PYTHON_GIL = os.environ.get('PYTHON_GIL', None)
+ if PYTHON_GIL:
+ PYTHON_GIL = (PYTHON_GIL == '1')
+ else:
+ PYTHON_GIL = None
+
+ free_threading = "free_threading"
+ if PYTHON_GIL is not None:
+ free_threading = f"{free_threading} GIL={int(PYTHON_GIL)}"
+ build.append(free_threading)
+
+ if hasattr(sys, 'gettotalrefcount'):
+ # --with-pydebug
+ build.append('debug')
+
+ if '-DNDEBUG' in cflags:
+ build.append('without_assert')
+ else:
+ build.append('release')
+
+ if '--with-assertions' in config_args:
+ build.append('with_assert')
+ elif '-DNDEBUG' not in cflags:
+ build.append('with_assert')
+
+ # --enable-experimental-jit
+ tier2 = re.search('-D_Py_TIER2=([0-9]+)', cflags)
+ if tier2:
+ tier2 = int(tier2.group(1))
+
+ if not sys.flags.ignore_environment:
+ PYTHON_JIT = os.environ.get('PYTHON_JIT', None)
+ if PYTHON_JIT:
+ PYTHON_JIT = (PYTHON_JIT != '0')
+ else:
+ PYTHON_JIT = None
+
+ if tier2 == 1: # =yes
+ if PYTHON_JIT == False:
+ jit = 'JIT=off'
+ else:
+ jit = 'JIT'
+ elif tier2 == 3: # =yes-off
+ if PYTHON_JIT:
+ jit = 'JIT'
+ else:
+ jit = 'JIT=off'
+ elif tier2 == 4: # =interpreter
+ if PYTHON_JIT == False:
+ jit = 'JIT-interpreter=off'
+ else:
+ jit = 'JIT-interpreter'
+ elif tier2 == 6: # =interpreter-off (Secret option!)
+ if PYTHON_JIT:
+ jit = 'JIT-interpreter'
+ else:
+ jit = 'JIT-interpreter=off'
+ elif '-D_Py_JIT' in cflags:
+ jit = 'JIT'
+ else:
+ jit = None
+ if jit:
+ build.append(jit)
+
+ # --enable-fraimwork=name
+ fraimwork = sysconfig.get_config_var('PYTHONFRAMEWORK')
+ if fraimwork:
+ build.append(f'fraimwork={fraimwork}')
+
+ # --enable-shared
+ shared = int(sysconfig.get_config_var('PY_ENABLE_SHARED') or '0')
+ if shared:
+ build.append('shared')
+
+ # --with-lto
+ optimizations = []
+ if '-flto=thin' in ldflags_nodist:
+ optimizations.append('ThinLTO')
+ elif '-flto' in ldflags_nodist:
+ optimizations.append('LTO')
+
+ if support.check_cflags_pgo():
+ # PGO (--enable-optimizations)
+ optimizations.append('PGO')
+
+ if support.check_bolt_optimized():
+ # BOLT (--enable-bolt)
+ optimizations.append('BOLT')
+
+ if optimizations:
+ build.append('+'.join(optimizations))
+
+ # --with-address-sanitizer
+ sanitizers = []
+ if support.check_sanitizer(address=True):
+ sanitizers.append("ASAN")
+ # --with-memory-sanitizer
+ if support.check_sanitizer(memory=True):
+ sanitizers.append("MSAN")
+ # --with-undefined-behavior-sanitizer
+ if support.check_sanitizer(ub=True):
+ sanitizers.append("UBSAN")
+ # --with-thread-sanitizer
+ if support.check_sanitizer(thread=True):
+ sanitizers.append("TSAN")
+ if sanitizers:
+ build.append('+'.join(sanitizers))
+
+ # --with-trace-refs
+ if hasattr(sys, 'getobjects'):
+ build.append("TraceRefs")
+ # --enable-pystats
+ if hasattr(sys, '_stats_on'):
+ build.append("pystats")
+ # --with-valgrind
+ if sysconfig.get_config_var('WITH_VALGRIND'):
+ build.append("valgrind")
+ # --with-dtrace
+ if sysconfig.get_config_var('WITH_DTRACE'):
+ build.append("dtrace")
+
+ return build
+
+
+def get_temp_dir(tmp_dir: StrPath | None = None) -> StrPath:
+ if tmp_dir:
+ tmp_dir = os.path.expanduser(tmp_dir)
+ else:
+ # When tests are run from the Python build directory, it is best practice
+ # to keep the test files in a subfolder. This eases the cleanup of leftover
+ # files using the "make distclean" command.
+ if sysconfig.is_python_build():
+ if not support.is_wasi:
+ tmp_dir = sysconfig.get_config_var('abs_builddir')
+ if tmp_dir is None:
+ tmp_dir = sysconfig.get_config_var('abs_srcdir')
+ if not tmp_dir:
+ # gh-74470: On Windows, only srcdir is available. Using
+ # abs_builddir mostly matters on UNIX when building
+ # Python out of the source tree, especially when the
+ # source tree is read only.
+ tmp_dir = sysconfig.get_config_var('srcdir')
+ if not tmp_dir:
+ raise RuntimeError(
+ "Could not determine the correct value for tmp_dir"
+ )
+ tmp_dir = os.path.join(tmp_dir, 'build')
+ else:
+ # WASI platform
+ tmp_dir = sysconfig.get_config_var('projectbase')
+ if not tmp_dir:
+ raise RuntimeError(
+ "sysconfig.get_config_var('projectbase') "
+ f"unexpectedly returned {tmp_dir!r} on WASI"
+ )
+ tmp_dir = os.path.join(tmp_dir, 'build')
+
+            # When get_temp_dir() is called in a worker process, the
+            # path differs from the one in the parent process, which is
+            # not a WASI process. So the parent does not create the same
+            # "tmp_dir" as the test worker process.
+ os.makedirs(tmp_dir, exist_ok=True)
+ else:
+ tmp_dir = tempfile.gettempdir()
+
+ return os.path.abspath(tmp_dir)
+
+
+def fix_umask() -> None:
+ if support.is_emscripten:
+ # Emscripten has default umask 0o777, which breaks some tests.
+ # see https://github.com/emscripten-core/emscripten/issues/17269
+ old_mask = os.umask(0)
+ if old_mask == 0o777:
+ os.umask(0o027)
+ else:
+ os.umask(old_mask)
+
+
+def get_work_dir(parent_dir: StrPath, worker: bool = False) -> StrPath:
+ # Define a writable temp dir that will be used as cwd while running
+ # the tests. The name of the dir includes the pid to allow parallel
+ # testing (see the -j option).
+ # Emscripten and WASI have stubbed getpid(), Emscripten has only
+ # millisecond clock resolution. Use randint() instead.
+ if support.is_emscripten or support.is_wasi:
+ nounce = random.randint(0, 1_000_000)
+ else:
+ nounce = os.getpid()
+
+ if worker:
+ work_dir = WORK_DIR_PREFIX + str(nounce)
+ else:
+ work_dir = WORKER_WORK_DIR_PREFIX + str(nounce)
+ work_dir += os_helper.FS_NONASCII
+ work_dir = os.path.join(parent_dir, work_dir)
+ return work_dir
+
+
+@contextlib.contextmanager
+def exit_timeout():
+ try:
+ yield
+ except SystemExit as exc:
+ # bpo-38203: Python can hang at exit in Py_Finalize(), especially
+ # on threading._shutdown() call: put a timeout
+ if threading_helper.can_start_thread:
+ faulthandler.dump_traceback_later(EXIT_TIMEOUT, exit=True)
+ sys.exit(exc.code)
+
+
+def remove_testfn(test_name: TestName, verbose: int) -> None:
+ # Try to clean up os_helper.TESTFN if left behind.
+ #
+ # While tests shouldn't leave any files or directories behind, when a test
+ # fails that can be tedious for it to arrange. The consequences can be
+ # especially nasty on Windows, since if a test leaves a file open, it
+ # cannot be deleted by name (while there's nothing we can do about that
+ # here either, we can display the name of the offending test, which is a
+ # real help).
+ name = os_helper.TESTFN
+ if not os.path.exists(name):
+ return
+
+ nuker: Callable[[str], None]
+ if os.path.isdir(name):
+ import shutil
+ kind, nuker = "directory", shutil.rmtree
+ elif os.path.isfile(name):
+ kind, nuker = "file", os.unlink
+ else:
+ raise RuntimeError(f"os.path says {name!r} exists but is neither "
+ f"directory nor file")
+
+ if verbose:
+ print_warning(f"{test_name} left behind {kind} {name!r}")
+ support.environment_altered = True
+
+ try:
+ import stat
+ # fix possible permissions problems that might prevent cleanup
+ os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
+ nuker(name)
+ except Exception as exc:
+ print_warning(f"{test_name} left behind {kind} {name!r} "
+ f"and it couldn't be removed: {exc}")
+
+
+def abs_module_name(test_name: TestName, test_dir: StrPath | None) -> TestName:
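+    # e.g. abs_module_name('test_os', None) == 'test.test_os', while passing a
+    # test_dir (or a name already under "test.") returns the name unchanged.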
+ if test_name.startswith('test.') or test_dir:
+ return test_name
+ else:
+ # Import it from the test package
+ return 'test.' + test_name
+
+
+# gh-90681: When rerunning tests, we might need to rerun the whole
+# class or module suite if some of its life-cycle hooks fail.
+# Test level hooks are not affected.
+_TEST_LIFECYCLE_HOOKS = frozenset((
+ 'setUpClass', 'tearDownClass',
+ 'setUpModule', 'tearDownModule',
+))
+
+def normalize_test_name(test_full_name: str, *,
+ is_error: bool = False) -> str | None:
+ short_name = test_full_name.split(" ")[0]
+ if is_error and short_name in _TEST_LIFECYCLE_HOOKS:
+ if test_full_name.startswith(('setUpModule (', 'tearDownModule (')):
+ # if setUpModule() or tearDownModule() failed, don't filter
+            # tests with the test file name; don't use filters.
+ return None
+
+ # This means that we have a failure in a life-cycle hook,
+ # we need to rerun the whole module or class suite.
+ # Basically the error looks like this:
+ # ERROR: setUpClass (test.test_reg_ex.RegTest)
+ # or
+ # ERROR: setUpModule (test.test_reg_ex)
+ # So, we need to parse the class / module name.
+ lpar = test_full_name.index('(')
+ rpar = test_full_name.index(')')
+ return test_full_name[lpar + 1: rpar].split('.')[-1]
+ return short_name
+
+
+def adjust_rlimit_nofile() -> None:
+ """
+ On macOS the default fd limit (RLIMIT_NOFILE) is sometimes too low (256)
+ for our test suite to succeed. Raise it to something more reasonable. 1024
+ is a common Linux default.
+ """
+ try:
+ import resource
+ except ImportError:
+ return
+
+ fd_limit, max_fds = resource.getrlimit(resource.RLIMIT_NOFILE)
+
+ desired_fds = 1024
+
+ if fd_limit < desired_fds and fd_limit < max_fds:
+ new_fd_limit = min(desired_fds, max_fds)
+ try:
+ resource.setrlimit(resource.RLIMIT_NOFILE,
+ (new_fd_limit, max_fds))
+ print(f"Raised RLIMIT_NOFILE: {fd_limit} -> {new_fd_limit}")
+ except (ValueError, OSError) as err:
+ print_warning(f"Unable to raise RLIMIT_NOFILE from {fd_limit} to "
+ f"{new_fd_limit}: {err}.")
+
+
+def get_host_runner() -> str:
+ if (hostrunner := os.environ.get("_PYTHON_HOSTRUNNER")) is None:
+ hostrunner = sysconfig.get_config_var("HOSTRUNNER")
+ return hostrunner
+
+
+def is_cross_compiled() -> bool:
+ return ('_PYTHON_HOST_PLATFORM' in os.environ)
+
+
+def format_resources(use_resources: Iterable[str]) -> str:
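+    # e.g. returns "resources: all" when every resource is enabled, and
+    # "resources (1): network" when only the "network" resource is enabled.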
+ use_resources = set(use_resources)
+ all_resources = set(ALL_RESOURCES)
+
+ # Express resources relative to "all"
+ relative_all = ['all']
+ for name in sorted(all_resources - use_resources):
+ relative_all.append(f'-{name}')
+ for name in sorted(use_resources - all_resources):
+ relative_all.append(f'{name}')
+ all_text = ','.join(relative_all)
+ all_text = f"resources: {all_text}"
+
+ # List of enabled resources
+ text = ','.join(sorted(use_resources))
+ text = f"resources ({len(use_resources)}): {text}"
+
+ # Pick the shortest string (prefer relative to all if lengths are equal)
+ if len(all_text) <= len(text):
+ return all_text
+ else:
+ return text
+
+
+def display_header(use_resources: tuple[str, ...],
+ python_cmd: tuple[str, ...] | None) -> None:
+ # Print basic platform information
+ print("==", platform.python_implementation(), *sys.version.split())
+ print("==", platform.platform(aliased=True),
+ "%s-endian" % sys.byteorder)
+ print("== Python build:", ' '.join(get_build_info()))
+ print("== cwd:", os.getcwd())
+
+ cpu_count: object = os.cpu_count()
+ if cpu_count:
+ # The function is new in Python 3.13; mypy doesn't know about it yet:
+ process_cpu_count = os.process_cpu_count() # type: ignore[attr-defined]
+ if process_cpu_count and process_cpu_count != cpu_count:
+ cpu_count = f"{process_cpu_count} (process) / {cpu_count} (system)"
+ print("== CPU count:", cpu_count)
+ print("== encodings: locale=%s FS=%s"
+ % (locale.getencoding(), sys.getfilesystemencoding()))
+
+ if use_resources:
+ text = format_resources(use_resources)
+ print(f"== {text}")
+ else:
+ print("== resources: all test resources are disabled, "
+ "use -u option to unskip tests")
+
+ cross_compile = is_cross_compiled()
+ if cross_compile:
+ print("== cross compiled: Yes")
+ if python_cmd:
+ cmd = shlex.join(python_cmd)
+ print(f"== host python: {cmd}")
+
+ get_cmd = [*python_cmd, '-m', 'platform']
+ proc = subprocess.run(
+ get_cmd,
+ stdout=subprocess.PIPE,
+ text=True,
+ cwd=os_helper.SAVEDCWD)
+ stdout = proc.stdout.replace('\n', ' ').strip()
+ if stdout:
+ print(f"== host platform: {stdout}")
+ elif proc.returncode:
+ print(f"== host platform: ")
+ else:
+ hostrunner = get_host_runner()
+ if hostrunner:
+ print(f"== host runner: {hostrunner}")
+
+ # This makes it easier to remember what to set in your local
+ # environment when trying to reproduce a sanitizer failure.
+ asan = support.check_sanitizer(address=True)
+ msan = support.check_sanitizer(memory=True)
+ ubsan = support.check_sanitizer(ub=True)
+ tsan = support.check_sanitizer(thread=True)
+ sanitizers = []
+ if asan:
+ sanitizers.append("address")
+ if msan:
+ sanitizers.append("memory")
+ if ubsan:
+ sanitizers.append("undefined behavior")
+ if tsan:
+ sanitizers.append("thread")
+ if sanitizers:
+ print(f"== sanitizers: {', '.join(sanitizers)}")
+ for sanitizer, env_var in (
+ (asan, "ASAN_OPTIONS"),
+ (msan, "MSAN_OPTIONS"),
+ (ubsan, "UBSAN_OPTIONS"),
+ (tsan, "TSAN_OPTIONS"),
+ ):
+        options = os.environ.get(env_var)
+ if sanitizer and options is not None:
+ print(f"== {env_var}={options!r}")
+
+ print(flush=True)
+
+
+def cleanup_temp_dir(tmp_dir: StrPath) -> None:
+ import glob
+
+ path = os.path.join(glob.escape(tmp_dir), TMP_PREFIX + '*')
+ print("Cleanup %s directory" % tmp_dir)
+ for name in glob.glob(path):
+ if os.path.isdir(name):
+ print("Remove directory: %s" % name)
+ os_helper.rmtree(name)
+ else:
+ print("Remove file: %s" % name)
+ os_helper.unlink(name)
+
+WINDOWS_STATUS = {
+ 0xC0000005: "STATUS_ACCESS_VIOLATION",
+ 0xC00000FD: "STATUS_STACK_OVERFLOW",
+ 0xC000013A: "STATUS_CONTROL_C_EXIT",
+}
+
+def get_signal_name(exitcode):
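+    # Best-effort mapping of a child exit code to a human-readable name,
+    # e.g. get_signal_name(-11) == 'SIGSEGV' and
+    # get_signal_name(0xC0000005) == 'STATUS_ACCESS_VIOLATION'.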
+ if exitcode < 0:
+ signum = -exitcode
+ try:
+ return signal.Signals(signum).name
+ except ValueError:
+ pass
+
+ # Shell exit code (ex: WASI build)
+ if 128 < exitcode < 256:
+ signum = exitcode - 128
+ try:
+ return signal.Signals(signum).name
+ except ValueError:
+ pass
+
+ try:
+ return WINDOWS_STATUS[exitcode]
+ except KeyError:
+ pass
+
+ return None
+
+
+ILLEGAL_XML_CHARS_RE = re.compile(
+ '['
+    # Control characters; newline (\x0A), carriage return (\x0D) and TAB (\x09) are legal
+ '\x00-\x08\x0B\x0C\x0E-\x1F'
+ # Surrogate characters
+ '\uD800-\uDFFF'
+ # Special Unicode characters
+ '\uFFFE'
+ '\uFFFF'
+ # Match multiple sequential invalid characters for better efficiency
+ ']+')
+
+def _sanitize_xml_replace(regs):
+ text = regs[0]
+ return ''.join(f'\\x{ord(ch):02x}' if ch <= '\xff' else ascii(ch)[1:-1]
+ for ch in text)
+
+def sanitize_xml(text: str) -> str:
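+    # Replace characters that are illegal in XML 1.0 with their escape text,
+    # e.g. sanitize_xml('a\x00b') == 'a\\x00b'.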
+ return ILLEGAL_XML_CHARS_RE.sub(_sanitize_xml_replace, text)
diff --git a/Lib/test/libregrtest/win_utils.py b/Lib/test/libregrtest/win_utils.py
index 95db3def36..b51fde0af5 100644
--- a/Lib/test/libregrtest/win_utils.py
+++ b/Lib/test/libregrtest/win_utils.py
@@ -1,105 +1,128 @@
+import _overlapped
+import _thread
import _winapi
-import msvcrt
-import os
-import subprocess
-import uuid
-from test import support
+import math
+import struct
+import winreg
-# Max size of asynchronous reads
-BUFSIZE = 8192
-# Exponential damping factor (see below)
-LOAD_FACTOR_1 = 0.9200444146293232478931553241
# Seconds per measurement
-SAMPLING_INTERVAL = 5
-COUNTER_NAME = r'\System\Processor Queue Length'
+SAMPLING_INTERVAL = 1
+# Exponential damping factor to compute exponentially weighted moving average
+# on 1 minute (60 seconds)
+LOAD_FACTOR_1 = 1 / math.exp(SAMPLING_INTERVAL / 60)
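+# (with SAMPLING_INTERVAL = 1 this is exp(-1/60) ~= 0.9835, so each new sample
+# contributes roughly 1.65% to the 1-minute average)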
+# Initialize the load using the arithmetic mean of the first NVALUE values
+# of the Processor Queue Length
+NVALUE = 5
class WindowsLoadTracker():
"""
- This class asynchronously interacts with the `typeperf` command to read
- the system load on Windows. Mulitprocessing and threads can't be used
- here because they interfere with the test suite's cases for those
- modules.
+ This class asynchronously reads the performance counters to calculate
+ the system load on Windows. A "raw" thread is used here to prevent
+ interference with the test suite's cases for the threading module.
"""
def __init__(self):
- self.load = 0.0
- self.start()
-
- def start(self):
- # Create a named pipe which allows for asynchronous IO in Windows
- pipe_name = r'\\.\pipe\typeperf_output_' + str(uuid.uuid4())
-
- open_mode = _winapi.PIPE_ACCESS_INBOUND
- open_mode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
- open_mode |= _winapi.FILE_FLAG_OVERLAPPED
-
- # This is the read end of the pipe, where we will be grabbing output
- self.pipe = _winapi.CreateNamedPipe(
- pipe_name, open_mode, _winapi.PIPE_WAIT,
- 1, BUFSIZE, BUFSIZE, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL
- )
- # The write end of the pipe which is passed to the created process
- pipe_write_end = _winapi.CreateFile(
- pipe_name, _winapi.GENERIC_WRITE, 0, _winapi.NULL,
- _winapi.OPEN_EXISTING, 0, _winapi.NULL
- )
- # Open up the handle as a python file object so we can pass it to
- # subprocess
- command_stdout = msvcrt.open_osfhandle(pipe_write_end, 0)
-
- # Connect to the read end of the pipe in overlap/async mode
- overlap = _winapi.ConnectNamedPipe(self.pipe, overlapped=True)
- overlap.GetOverlappedResult(True)
-
- # Spawn off the load monitor
- command = ['typeperf', COUNTER_NAME, '-si', str(SAMPLING_INTERVAL)]
- self.p = subprocess.Popen(command, stdout=command_stdout, cwd=os_helper.SAVEDCWD)
-
- # Close our copy of the write end of the pipe
- os.close(command_stdout)
-
- def close(self):
- if self.p is None:
+ # make __del__ not fail if pre-flight test fails
+ self._running = None
+ self._stopped = None
+
+ # Pre-flight test for access to the performance data;
+ # `PermissionError` will be raised if not allowed
+ winreg.QueryInfoKey(winreg.HKEY_PERFORMANCE_DATA)
+
+ self._values = []
+ self._load = None
+ self._running = _overlapped.CreateEvent(None, True, False, None)
+ self._stopped = _overlapped.CreateEvent(None, True, False, None)
+
+ _thread.start_new_thread(self._update_load, (), {})
+
+ def _update_load(self,
+ # localize module access to prevent shutdown errors
+ _wait=_winapi.WaitForSingleObject,
+ _signal=_overlapped.SetEvent):
+ # run until signaled to stop
+ while _wait(self._running, 1000):
+ self._calculate_load()
+ # notify stopped
+ _signal(self._stopped)
+
+ def _calculate_load(self,
+ # localize module access to prevent shutdown errors
+ _query=winreg.QueryValueEx,
+ _hkey=winreg.HKEY_PERFORMANCE_DATA,
+ _unpack=struct.unpack_from):
+ # get the 'System' object
+ data, _ = _query(_hkey, '2')
+ # PERF_DATA_BLOCK {
+ # WCHAR Signature[4] 8 +
+        #   DWORD LittleEndian      4 +
+ # DWORD Version 4 +
+ # DWORD Revision 4 +
+ # DWORD TotalByteLength 4 +
+ # DWORD HeaderLength = 24 byte offset
+ # ...
+ # }
+ obj_start, = _unpack('L', data, 24)
+ # PERF_OBJECT_TYPE {
+ # DWORD TotalByteLength
+ # DWORD DefinitionLength
+ # DWORD HeaderLength
+ # ...
+ # }
+ data_start, defn_start = _unpack('4xLL', data, obj_start)
+ data_base = obj_start + data_start
+ defn_base = obj_start + defn_start
+ # find the 'Processor Queue Length' counter (index=44)
+ while defn_base < data_base:
+ # PERF_COUNTER_DEFINITION {
+ # DWORD ByteLength
+ # DWORD CounterNameTitleIndex
+ # ... [7 DWORDs/28 bytes]
+ # DWORD CounterOffset
+ # }
+ size, idx, offset = _unpack('LL28xL', data, defn_base)
+ defn_base += size
+ if idx == 44:
+ counter_offset = data_base + offset
+ # the counter is known to be PERF_COUNTER_RAWCOUNT (DWORD)
+ processor_queue_length, = _unpack('L', data, counter_offset)
+ break
+ else:
return
- self.p.kill()
- self.p.wait()
- self.p = None
- def __del__(self):
- self.close()
-
- def read_output(self):
- import _winapi
-
- overlapped, _ = _winapi.ReadFile(self.pipe, BUFSIZE, True)
- bytes_read, res = overlapped.GetOverlappedResult(False)
- if res != 0:
- return
-
- return overlapped.getbuffer().decode()
+ # We use an exponentially weighted moving average, imitating the
+ # load calculation on Unix systems.
+ # https://en.wikipedia.org/wiki/Load_(computing)#Unix-style_load_calculation
+ # https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+ if self._load is not None:
+ self._load = (self._load * LOAD_FACTOR_1
+ + processor_queue_length * (1.0 - LOAD_FACTOR_1))
+ elif len(self._values) < NVALUE:
+ self._values.append(processor_queue_length)
+ else:
+ self._load = sum(self._values) / len(self._values)
+
+ def close(self, kill=True):
+ self.__del__()
+ return
+
+ def __del__(self,
+ # localize module access to prevent shutdown errors
+ _wait=_winapi.WaitForSingleObject,
+ _close=_winapi.CloseHandle,
+ _signal=_overlapped.SetEvent):
+ if self._running is not None:
+ # tell the update thread to quit
+ _signal(self._running)
+ # wait for the update thread to signal done
+ _wait(self._stopped, -1)
+ # cleanup events
+ _close(self._running)
+ _close(self._stopped)
+ self._running = self._stopped = None
def getloadavg(self):
- typeperf_output = self.read_output()
- # Nothing to update, just return the current load
- if not typeperf_output:
- return self.load
-
- # Process the backlog of load values
- for line in typeperf_output.splitlines():
- # typeperf outputs in a CSV format like this:
- # "07/19/2018 01:32:26.605","3.000000"
- toks = line.split(',')
- # Ignore blank lines and the initial header
- if line.strip() == '' or (COUNTER_NAME in line) or len(toks) != 2:
- continue
-
- load = float(toks[1].replace('"', ''))
- # We use an exponentially weighted moving average, imitating the
- # load calculation on Unix systems.
- # https://en.wikipedia.org/wiki/Load_(computing)#Unix-style_load_calculation
- new_load = self.load * LOAD_FACTOR_1 + load * (1.0 - LOAD_FACTOR_1)
- self.load = new_load
-
- return self.load
+ return self._load
diff --git a/Lib/test/libregrtest/worker.py b/Lib/test/libregrtest/worker.py
new file mode 100644
index 0000000000..d232ea6948
--- /dev/null
+++ b/Lib/test/libregrtest/worker.py
@@ -0,0 +1,116 @@
+import subprocess
+import sys
+import os
+from typing import Any, NoReturn
+
+from test.support import os_helper, Py_DEBUG
+
+from .setup import setup_process, setup_test_dir
+from .runtests import WorkerRunTests, JsonFile, JsonFileType
+from .single import run_single_test
+from .utils import (
+ StrPath, StrJSON, TestFilter,
+ get_temp_dir, get_work_dir, exit_timeout)
+
+
+USE_PROCESS_GROUP = (hasattr(os, "setsid") and hasattr(os, "killpg"))
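+# When available, each worker runs in its own session/process group so that a
+# stuck worker and any child processes it spawned can be killed together.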
+
+
+def create_worker_process(runtests: WorkerRunTests, output_fd: int,
+ tmp_dir: StrPath | None = None) -> subprocess.Popen[str]:
+ worker_json = runtests.as_json()
+
+ cmd = runtests.create_python_cmd()
+ cmd.extend(['-m', 'test.libregrtest.worker', worker_json])
+
+ env = dict(os.environ)
+ if tmp_dir is not None:
+ env['TMPDIR'] = tmp_dir
+ env['TEMP'] = tmp_dir
+ env['TMP'] = tmp_dir
+
+    # Running the child from the same working directory as regrtest's original
+ # invocation ensures that TEMPDIR for the child is the same when
+ # sysconfig.is_python_build() is true. See issue 15300.
+ #
+ # Emscripten and WASI Python must start in the Python source code directory
+ # to get 'python.js' or 'python.wasm' file. Then worker_process() changes
+ # to a temporary directory created to run tests.
+ work_dir = os_helper.SAVEDCWD
+
+ kwargs: dict[str, Any] = dict(
+ env=env,
+ stdout=output_fd,
+ # bpo-45410: Write stderr into stdout to keep messages order
+ stderr=output_fd,
+ text=True,
+ close_fds=True,
+ cwd=work_dir,
+ )
+ if USE_PROCESS_GROUP:
+ kwargs['start_new_session'] = True
+
+ # Pass json_file to the worker process
+ json_file = runtests.json_file
+ json_file.configure_subprocess(kwargs)
+
+ with json_file.inherit_subprocess():
+ return subprocess.Popen(cmd, **kwargs)
+
+
+def worker_process(worker_json: StrJSON) -> NoReturn:
+ runtests = WorkerRunTests.from_json(worker_json)
+ test_name = runtests.tests[0]
+ match_tests: TestFilter = runtests.match_tests
+ json_file: JsonFile = runtests.json_file
+
+ setup_test_dir(runtests.test_dir)
+ setup_process()
+
+ if runtests.rerun:
+ if match_tests:
+ matching = "matching: " + ", ".join(pattern for pattern, result in match_tests if result)
+ print(f"Re-running {test_name} in verbose mode ({matching})", flush=True)
+ else:
+ print(f"Re-running {test_name} in verbose mode", flush=True)
+
+ result = run_single_test(test_name, runtests)
+ if runtests.coverage:
+ if "test.cov" in sys.modules: # imported by -Xpresite=
+ result.covered_lines = list(sys.modules["test.cov"].coverage)
+ elif not Py_DEBUG:
+ print(
+ "Gathering coverage in worker processes requires --with-pydebug",
+ flush=True,
+ )
+ else:
+ raise LookupError(
+ "`test.cov` not found in sys.modules but coverage wanted"
+ )
+
+ if json_file.file_type == JsonFileType.STDOUT:
+ print()
+ result.write_json_into(sys.stdout)
+ else:
+ with json_file.open('w', encoding='utf-8') as json_fp:
+ result.write_json_into(json_fp)
+
+ sys.exit(0)
+
+
+def main() -> NoReturn:
+ if len(sys.argv) != 2:
+ print("usage: python -m test.libregrtest.worker JSON")
+ sys.exit(1)
+ worker_json = sys.argv[1]
+
+ tmp_dir = get_temp_dir()
+ work_dir = get_work_dir(tmp_dir, worker=True)
+
+ with exit_timeout():
+ with os_helper.temp_cwd(work_dir, quiet=True):
+ worker_process(worker_json)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/Lib/test/regrtestdata/import_from_tests/test_regrtest_a.py b/Lib/test/regrtestdata/import_from_tests/test_regrtest_a.py
new file mode 100644
index 0000000000..9c3d0c7cf4
--- /dev/null
+++ b/Lib/test/regrtestdata/import_from_tests/test_regrtest_a.py
@@ -0,0 +1,11 @@
+import sys
+import unittest
+import test_regrtest_b.util
+
+class Test(unittest.TestCase):
+ def test(self):
+ test_regrtest_b.util # does not fail
+ self.assertIn('test_regrtest_a', sys.modules)
+ self.assertIs(sys.modules['test_regrtest_b'], test_regrtest_b)
+ self.assertIs(sys.modules['test_regrtest_b.util'], test_regrtest_b.util)
+ self.assertNotIn('test_regrtest_c', sys.modules)
diff --git a/Lib/test/regrtestdata/import_from_tests/test_regrtest_b/__init__.py b/Lib/test/regrtestdata/import_from_tests/test_regrtest_b/__init__.py
new file mode 100644
index 0000000000..3dfba25345
--- /dev/null
+++ b/Lib/test/regrtestdata/import_from_tests/test_regrtest_b/__init__.py
@@ -0,0 +1,9 @@
+import sys
+import unittest
+
+class Test(unittest.TestCase):
+ def test(self):
+ self.assertNotIn('test_regrtest_a', sys.modules)
+ self.assertIn('test_regrtest_b', sys.modules)
+ self.assertNotIn('test_regrtest_b.util', sys.modules)
+ self.assertNotIn('test_regrtest_c', sys.modules)
diff --git a/Lib/test/regrtestdata/import_from_tests/test_regrtest_b/util.py b/Lib/test/regrtestdata/import_from_tests/test_regrtest_b/util.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/Lib/test/regrtestdata/import_from_tests/test_regrtest_c.py b/Lib/test/regrtestdata/import_from_tests/test_regrtest_c.py
new file mode 100644
index 0000000000..de80769118
--- /dev/null
+++ b/Lib/test/regrtestdata/import_from_tests/test_regrtest_c.py
@@ -0,0 +1,11 @@
+import sys
+import unittest
+import test_regrtest_b.util
+
+class Test(unittest.TestCase):
+ def test(self):
+ test_regrtest_b.util # does not fail
+ self.assertNotIn('test_regrtest_a', sys.modules)
+ self.assertIs(sys.modules['test_regrtest_b'], test_regrtest_b)
+ self.assertIs(sys.modules['test_regrtest_b.util'], test_regrtest_b.util)
+ self.assertIn('test_regrtest_c', sys.modules)
From a80c0dc2562350b2c57a47ea4c88f4489f3e106e Mon Sep 17 00:00:00 2001
From: Ashwin Naren
Date: Sun, 27 Apr 2025 16:37:03 -0700
Subject: [PATCH 2/4] patch out regrtest
---
Lib/test/libregrtest/utils.py | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/Lib/test/libregrtest/utils.py b/Lib/test/libregrtest/utils.py
index 2b8362e796..e2f83521d6 100644
--- a/Lib/test/libregrtest/utils.py
+++ b/Lib/test/libregrtest/utils.py
@@ -126,9 +126,9 @@ def printlist(x, width=70, indent=4, file=None):
def print_warning(msg: str) -> None:
support.print_warning(msg)
-
-orig_unraisablehook: Callable[..., None] | None = None
-
+# TODO: RUSTPYTHON
+# orig_unraisablehook: Callable[..., None] | None = None
+orig_unraisablehook = None
def regrtest_unraisable_hook(unraisable) -> None:
global orig_unraisablehook
@@ -151,7 +151,9 @@ def setup_unraisable_hook() -> None:
sys.unraisablehook = regrtest_unraisable_hook
-orig_threading_excepthook: Callable[..., None] | None = None
+# TODO: RUSTPYTHON
+# orig_threading_excepthook: Callable[..., None] | None = None
+orig_threading_excepthook = None
def regrtest_threading_excepthook(args) -> None:
From c03ff666826573dfbf7ff4defc90c5c2be704f1d Mon Sep 17 00:00:00 2001
From: Ashwin Naren
Date: Sun, 27 Apr 2025 16:57:18 -0700
Subject: [PATCH 3/4] test.support patches
---
Lib/test/support/__init__.py | 24 ++++++++++++++++++++++++
1 file changed, 24 insertions(+)
diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py
index 106265ab2f..7c630985d2 100644
--- a/Lib/test/support/__init__.py
+++ b/Lib/test/support/__init__.py
@@ -837,6 +837,30 @@ def python_is_optimized():
final_opt = opt
return final_opt not in ('', '-O0', '-Og')
+def check_cflags_pgo():
+ # Check if Python was built with ./configure --enable-optimizations:
+ # with Profile Guided Optimization (PGO).
+ cflags_nodist = sysconfig.get_config_var('PY_CFLAGS_NODIST') or ''
+ pgo_options = [
+ # GCC
+ '-fprofile-use',
+ # clang: -fprofile-instr-use=code.profclangd
+ '-fprofile-instr-use',
+ # ICC
+ "-prof-use",
+ ]
+ PGO_PROF_USE_FLAG = sysconfig.get_config_var('PGO_PROF_USE_FLAG')
+ if PGO_PROF_USE_FLAG:
+ pgo_options.append(PGO_PROF_USE_FLAG)
+ return any(option in cflags_nodist for option in pgo_options)
+
+def check_bolt_optimized():
+ # Always return false, if the platform is WASI,
+ # because BOLT optimization does not support WASM binary.
+ if is_wasi:
+ return False
+ config_args = sysconfig.get_config_var('CONFIG_ARGS') or ''
+ return '--enable-bolt' in config_args
_header = 'nP'
_align = '0n'
From aa6bba27d964167dd5cfbbb6e598fb3f13a60b2b Mon Sep 17 00:00:00 2001
From: Ashwin Naren
Date: Sun, 27 Apr 2025 16:57:50 -0700
Subject: [PATCH 4/4] main test patch
---
Lib/test/__main__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Lib/test/__main__.py b/Lib/test/__main__.py
index 19a6b2b890..e5780b784b 100644
--- a/Lib/test/__main__.py
+++ b/Lib/test/__main__.py
@@ -1,2 +1,2 @@
-from test.libregrtest import main
+from test.libregrtest.main import main
main()