Removed the Requirement to Install Python and NodeJS (Now Bundled with Borealis)

2025-04-24 00:42:19 -06:00
parent 785265d3e7
commit 9c68cdea84
7786 changed files with 2386458 additions and 217 deletions


@@ -0,0 +1,543 @@
import argparse
import os.path
import shlex
import sys
from test.support import os_helper, Py_DEBUG
from .utils import ALL_RESOURCES, RESOURCE_NAMES, TestFilter
USAGE = """\
python -m test [options] [test_name1 [test_name2 ...]]
python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]]
"""
DESCRIPTION = """\
Run Python regression tests.
If no arguments or options are provided, finds all files matching
the pattern "test_*" in the Lib/test subdirectory and runs
them in alphabetical order (but see -M and -u, below, for exceptions).
For more rigorous testing, it is useful to use the following
command line:
python -E -Wd -m test [options] [test_name1 ...]
"""
EPILOG = """\
Additional option details:
-r randomizes test execution order. You can use --randseed=int to provide an
int seed value for the randomizer. The randseed value will be used
to set seeds for all random usages in tests
(including randomizing the test order if -r is set).
By default, a random seed is always set, but the test order is not randomized.
-s On the first invocation of regrtest using -s, the first test file found
or the first test file given on the command line is run, and the name of
the next test is recorded in a file named pynexttest. If run from the
Python build directory, pynexttest is located in the 'build' subdirectory,
otherwise it is located in tempfile.gettempdir(). On subsequent runs,
the test in pynexttest is run, and the next test is written to pynexttest.
When the last test has been run, pynexttest is deleted. In this way it
is possible to single step through the test files. This is useful when
doing memory analysis on the Python interpreter, a process that tends to
consume too many resources to run the full regression test non-stop.
-S is used to continue running tests after an aborted run. It will
maintain the order of a standard run (i.e., this assumes -r is not used).
This is useful after the tests have prematurely stopped for some external
reason and you want to start running from where you left off rather
than starting from the beginning.
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on macOS and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of additional times it is run, and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), and the minimal invocation is '-R :'.
-M runs tests that require an exorbitant amount of memory. These tests
typically try to ascertain that containers keep working when holding more
than 2 billion objects, which only works on 64-bit systems. There are also
some tests that try to exhaust the address space of the process, which only
makes sense on 32-bit systems with at least 2 GB of memory. The passed-in
memlimit, which is a string in the form of '2.5Gb', determines how much
memory the tests will limit themselves to (but they may go slightly over).
The limit shouldn't exceed the memory the machine has (including swap
memory). Keep in mind that swap memory is generally much, much slower than
RAM, and setting memlimit to all available RAM or higher will heavily tax
the machine. On the other hand, there is no use running these tests with a
limit of less than 2.5 GB, and many require more than 20 GB. Tests that
expect to use more than memlimit memory will be skipped. The big-memory
tests generally run very, very long.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
none - Disable all special resources (this is the default).
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some tests that may create huge
files. These tests can take a long time and may
consume >2 GiB of disk space temporarily.
network - It is okay to run tests that use external network
resources, e.g. testing SSL support for sockets.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
cpu - Used for certain CPU-heavy tests.
walltime - Long running but not CPU-bound tests.
subprocess - Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
gui - Run tests that require a running GUI.
tzdata - Run tests that require timezone data.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the gui tests, give the
option '-uall,-gui'.
--matchfile filters tests using a text file, one pattern per line.
Pattern examples:
- test method: test_stat_attributes
- test class: FileTests
- test identifier: test_os.FileTests.test_stat_attributes
"""
class Namespace(argparse.Namespace):
def __init__(self, **kwargs) -> None:
self.ci = False
self.testdir = None
self.verbose = 0
self.quiet = False
self.exclude = False
self.cleanup = False
self.wait = False
self.list_cases = False
self.list_tests = False
self.single = False
self.randomize = False
self.fromfile = None
self.fail_env_changed = False
self.use_resources: list[str] = []
self.trace = False
self.coverdir = 'coverage'
self.runleaks = False
self.huntrleaks: tuple[int, int, str] | None = None
self.rerun = False
self.verbose3 = False
self.print_slow = False
self.random_seed = None
self.use_mp = None
self.forever = False
self.header = False
self.failfast = False
self.match_tests: TestFilter = []
self.pgo = False
self.pgo_extended = False
self.tsan = False
self.worker_json = None
self.start = None
self.timeout = None
self.memlimit = None
self.threshold = None
self.fail_rerun = False
self.tempdir = None
self._add_python_opts = True
self.xmlpath = None
self.single_process = False
super().__init__(**kwargs)
class _ArgParser(argparse.ArgumentParser):
def error(self, message):
super().error(message + "\nPass -h or --help for complete help.")
class FilterAction(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
items = getattr(namespace, self.dest)
items.append((value, self.const))
class FromFileFilterAction(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
items = getattr(namespace, self.dest)
with open(value, encoding='utf-8') as fp:
for line in fp:
items.append((line.strip(), self.const))
def _create_parser():
# Set prog to prevent the uninformative "__main__.py" from displaying in
# error messages when using "python -m test ...".
parser = _ArgParser(prog='regrtest.py',
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG,
add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.set_defaults(match_tests=[])
# Arguments that have this clause appended to their help text are described
# further in the epilog's "Additional option details" section.
more_details = ' See the section at bottom for more details.'
group = parser.add_argument_group('General options')
# We add help explicitly to control what argument group it renders under.
group.add_argument('-h', '--help', action='help',
help='show this help message and exit')
group.add_argument('--fast-ci', action='store_true',
help='Fast Continuous Integration (CI) mode used by '
'GitHub Actions')
group.add_argument('--slow-ci', action='store_true',
help='Slow Continuous Integration (CI) mode used by '
'buildbot workers')
group.add_argument('--timeout', metavar='TIMEOUT',
help='dump the traceback and exit if a test takes '
'more than TIMEOUT seconds; disabled if TIMEOUT '
'is negative or equal to zero')
group.add_argument('--wait', action='store_true',
help='wait for user input, e.g., allow a debugger '
'to be attached')
group.add_argument('-S', '--start', metavar='START',
help='the name of the test at which to start.' +
more_details)
group.add_argument('-p', '--python', metavar='PYTHON',
help='Command to run Python test subprocesses with.')
group.add_argument('--randseed', metavar='SEED',
dest='random_seed', type=int,
help='pass a global random seed')
group = parser.add_argument_group('Verbosity')
group.add_argument('-v', '--verbose', action='count',
help='run tests in verbose mode with output to stdout')
group.add_argument('-w', '--rerun', action='store_true',
help='re-run failed tests in verbose mode')
group.add_argument('--verbose2', action='store_true', dest='rerun',
help='deprecated alias to --rerun')
group.add_argument('-W', '--verbose3', action='store_true',
help='display test output on failure')
group.add_argument('-q', '--quiet', action='store_true',
help='no output unless one or more tests fail')
group.add_argument('-o', '--slowest', action='store_true', dest='print_slow',
help='print the slowest 10 tests')
group.add_argument('--header', action='store_true',
help='print header with interpreter info')
group = parser.add_argument_group('Selecting tests')
group.add_argument('-r', '--randomize', action='store_true',
help='randomize test execution order.' + more_details)
group.add_argument('-f', '--fromfile', metavar='FILE',
help='read names of tests to run from a file.' +
more_details)
group.add_argument('-x', '--exclude', action='store_true',
help='arguments are tests to *exclude*')
group.add_argument('-s', '--single', action='store_true',
help='single step through a set of tests.' +
more_details)
group.add_argument('-m', '--match', metavar='PAT',
dest='match_tests', action=FilterAction, const=True,
help='match test cases and methods with glob pattern PAT')
group.add_argument('-i', '--ignore', metavar='PAT',
dest='match_tests', action=FilterAction, const=False,
help='ignore test cases and methods with glob pattern PAT')
group.add_argument('--matchfile', metavar='FILENAME',
dest='match_tests',
action=FromFileFilterAction, const=True,
help='similar to --match but get patterns from a '
'text file, one pattern per line')
group.add_argument('--ignorefile', metavar='FILENAME',
dest='match_tests',
action=FromFileFilterAction, const=False,
help='similar to --matchfile but the patterns read '
'from the text file are ignored')
group.add_argument('-G', '--failfast', action='store_true',
help='fail as soon as a test fails (only with -v or -W)')
group.add_argument('-u', '--use', metavar='RES1,RES2,...',
action='append', type=resources_list,
help='specify which special resource intensive tests '
'to run.' + more_details)
group.add_argument('-M', '--memlimit', metavar='LIMIT',
help='run very large memory-consuming tests.' +
more_details)
group.add_argument('--testdir', metavar='DIR',
type=relative_filename,
help='execute test files in the specified directory '
'(instead of the Python stdlib test suite)')
group = parser.add_argument_group('Special runs')
group.add_argument('-L', '--runleaks', action='store_true',
help='run the leaks(1) command just before exit.' +
more_details)
group.add_argument('-R', '--huntrleaks', metavar='RUNCOUNTS',
type=huntrleaks,
help='search for reference leaks (needs debug build, '
'very slow).' + more_details)
group.add_argument('-j', '--multiprocess', metavar='PROCESSES',
dest='use_mp', type=int,
help='run PROCESSES processes at once')
group.add_argument('--single-process', action='store_true',
dest='single_process',
help='always run all tests sequentially in '
'a single process, ignoring the -jN option; '
'failed tests are also re-run sequentially '
'in the same process')
group.add_argument('-T', '--coverage', action='store_true',
dest='trace',
help='turn on code coverage tracing using the trace '
'module')
group.add_argument('-D', '--coverdir', metavar='DIR',
type=relative_filename,
help='directory where coverage files are put')
group.add_argument('-N', '--nocoverdir',
action='store_const', const=None, dest='coverdir',
help='put coverage files alongside modules')
group.add_argument('-t', '--threshold', metavar='THRESHOLD',
type=int,
help='call gc.set_threshold(THRESHOLD)')
group.add_argument('-n', '--nowindows', action='store_true',
help='suppress error message boxes on Windows')
group.add_argument('-F', '--forever', action='store_true',
help='run the specified tests in a loop, until an '
'error happens; implies --failfast')
group.add_argument('--list-tests', action='store_true',
help="only write the name of tests that will be run, "
"don't execute them")
group.add_argument('--list-cases', action='store_true',
help='only write the name of test cases that will be '
"run, don't execute them")
group.add_argument('-P', '--pgo', dest='pgo', action='store_true',
help='enable Profile Guided Optimization (PGO) training')
group.add_argument('--pgo-extended', action='store_true',
help='enable extended PGO training (slower training)')
group.add_argument('--tsan', dest='tsan', action='store_true',
help='run a subset of test cases that are suitable for the TSAN test')
group.add_argument('--fail-env-changed', action='store_true',
help='if a test file alters the environment, mark '
'the test as failed')
group.add_argument('--fail-rerun', action='store_true',
help='if a test failed and then passed when re-run, '
'mark the test as failed')
group.add_argument('--junit-xml', dest='xmlpath', metavar='FILENAME',
help='writes JUnit-style XML results to the specified '
'file')
group.add_argument('--tempdir', metavar='PATH',
help='override the working directory for the test run')
group.add_argument('--cleanup', action='store_true',
help='remove old test_python_* directories')
group.add_argument('--bisect', action='store_true',
help='if some tests fail, run test.bisect_cmd on them')
group.add_argument('--dont-add-python-opts', dest='_add_python_opts',
action='store_false',
help="internal option, don't use it")
return parser
def relative_filename(string):
# CWD is replaced with a temporary dir before calling main(), so we
# join it with the saved CWD so it ends up where the user expects.
return os.path.join(os_helper.SAVEDCWD, string)
def huntrleaks(string):
args = string.split(':')
if len(args) not in (2, 3):
raise argparse.ArgumentTypeError(
'needs 2 or 3 colon-separated arguments')
nwarmup = int(args[0]) if args[0] else 5
ntracked = int(args[1]) if args[1] else 4
fname = args[2] if len(args) > 2 and args[2] else 'reflog.txt'
return nwarmup, ntracked, fname
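# Illustrative sketch (editorial note, not part of the module): how
# huntrleaks() maps '-R' argument strings to (nwarmup, ntracked, fname):
#     huntrleaks(':')             -> (5, 4, 'reflog.txt')   # all defaults
#     huntrleaks('6:3')           -> (6, 3, 'reflog.txt')
#     huntrleaks('6:3:leaks.log') -> (6, 3, 'leaks.log')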
def resources_list(string):
u = [x.lower() for x in string.split(',')]
for r in u:
if r == 'all' or r == 'none':
continue
if r[0] == '-':
r = r[1:]
if r not in RESOURCE_NAMES:
raise argparse.ArgumentTypeError('invalid resource: ' + r)
return u
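# Sketch: resources_list() only lowercases and validates; the add/remove
# semantics of entries such as '-uall,-gui' are applied later in
# _parse_args():
#     resources_list('ALL,-gui')  -> ['all', '-gui']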
def _parse_args(args, **kwargs):
# Defaults
ns = Namespace()
for k, v in kwargs.items():
if not hasattr(ns, k):
raise TypeError('%r is an invalid keyword argument '
'for this function' % k)
setattr(ns, k, v)
parser = _create_parser()
# Issue #14191: argparse doesn't support "intermixed" positional and
# optional arguments. Use parse_known_args() as a workaround.
ns.args = parser.parse_known_args(args=args, namespace=ns)[1]
for arg in ns.args:
if arg.startswith('-'):
parser.error("unrecognized arguments: %s" % arg)
if ns.timeout is not None:
# Support "--timeout=" (no value) so Makefile.pre.pre TESTTIMEOUT
# can be used by "make buildbottest" and "make test".
if ns.timeout != "":
try:
ns.timeout = float(ns.timeout)
except ValueError:
parser.error(f"invalid timeout value: {ns.timeout!r}")
else:
ns.timeout = None
# Continuous Integration (CI): common options for fast/slow CI modes
if ns.slow_ci or ns.fast_ci:
# Similar to options:
# -j0 --randomize --fail-env-changed --rerun --slowest --verbose3
if ns.use_mp is None:
ns.use_mp = 0
ns.randomize = True
ns.fail_env_changed = True
if ns.python is None:
ns.rerun = True
ns.print_slow = True
ns.verbose3 = True
else:
ns._add_python_opts = False
# --single-process overrides the -jN option
if ns.single_process:
ns.use_mp = None
# When both --slow-ci and --fast-ci options are present,
# --slow-ci has the priority
if ns.slow_ci:
# Similar to: -u "all" --timeout=1200
if ns.use is None:
ns.use = []
ns.use.insert(0, ['all'])
if ns.timeout is None:
ns.timeout = 1200 # 20 minutes
elif ns.fast_ci:
# Similar to: -u "all,-cpu" --timeout=600
if ns.use is None:
ns.use = []
ns.use.insert(0, ['all', '-cpu'])
if ns.timeout is None:
ns.timeout = 600 # 10 minutes
if ns.single and ns.fromfile:
parser.error("-s and -f don't go together!")
if ns.trace:
if ns.use_mp is not None:
if not Py_DEBUG:
parser.error("need --with-pydebug to use -T and -j together")
else:
print(
"Warning: collecting coverage without -j is imprecise. Configure"
" --with-pydebug and run -m test -T -j for best results.",
file=sys.stderr
)
if ns.python is not None:
if ns.use_mp is None:
parser.error("-p requires -j!")
# The "executable" may be two or more parts, e.g. "node python.js"
ns.python = shlex.split(ns.python)
if ns.failfast and not (ns.verbose or ns.verbose3):
parser.error("-G/--failfast needs either -v or -W")
if ns.pgo and (ns.verbose or ns.rerun or ns.verbose3):
parser.error("--pgo/-v don't go together!")
if ns.pgo_extended:
ns.pgo = True # pgo_extended implies pgo
if ns.nowindows:
print("Warning: the --nowindows (-n) option is deprecated. "
"Use -vv to display assertions in stderr.", file=sys.stderr)
if ns.quiet:
ns.verbose = 0
if ns.timeout is not None:
if ns.timeout <= 0:
ns.timeout = None
if ns.use:
for a in ns.use:
for r in a:
if r == 'all':
ns.use_resources[:] = ALL_RESOURCES
continue
if r == 'none':
del ns.use_resources[:]
continue
remove = False
if r[0] == '-':
remove = True
r = r[1:]
if remove:
if r in ns.use_resources:
ns.use_resources.remove(r)
elif r not in ns.use_resources:
ns.use_resources.append(r)
if ns.random_seed is not None:
ns.randomize = True
if ns.verbose:
ns.header = True
# When -jN option is used, a worker process does not use --verbose3
# and so -R 3:3 -jN --verbose3 just works as expected: there is no false
# alarm about memory leak.
if ns.huntrleaks and ns.verbose3 and ns.use_mp is None:
# run_single_test() replaces sys.stdout with io.StringIO if verbose3
# is true. In this case, huntrleaks sees a write into StringIO as
# a memory leak, even though it is not one (gh-71290).
ns.verbose3 = False
print("WARNING: Disable --verbose3 because it's incompatible with "
"--huntrleaks without -jN option",
file=sys.stderr)
if ns.forever:
# --forever implies --failfast
ns.failfast = True
if ns.huntrleaks:
warmup, repetitions, _ = ns.huntrleaks
if warmup < 1 or repetitions < 1:
msg = ("Invalid values for the --huntrleaks/-R parameters. The "
"number of warmups and repetitions must be at least 1 "
"each (1:1).")
print(msg, file=sys.stderr, flush=True)
sys.exit(2)
return ns
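# A minimal usage sketch (assumes the package is importable as
# test.libregrtest; values follow the CI branches above):
#     ns = _parse_args(['--fast-ci'])
#     # ns.use_mp == 0, ns.timeout == 600, 'cpu' not in ns.use_resources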


@@ -0,0 +1,77 @@
import itertools
import operator
import re
# By default, don't filter tests
_test_matchers = ()
_test_patterns = ()
def match_test(test):
# Function used by support.run_unittest() and regrtest --list-cases
result = False
for matcher, result in reversed(_test_matchers):
if matcher(test.id()):
return result
return not result
def _is_full_match_test(pattern):
# If a pattern contains at least one dot, it's considered
# as a full test identifier.
# Example: 'test.test_os.FileTests.test_access'.
#
# ignore patterns which contain fnmatch patterns: '*', '?', '[...]'
# or '[!...]'. For example, ignore 'test_access*'.
return ('.' in pattern) and (not re.search(r'[?*\[\]]', pattern))
def get_match_tests():
global _test_patterns
return _test_patterns
def set_match_tests(patterns):
global _test_matchers, _test_patterns
if not patterns:
_test_matchers = ()
_test_patterns = ()
else:
itemgetter = operator.itemgetter
patterns = tuple(patterns)
if patterns != _test_patterns:
_test_matchers = [
(_compile_match_function(map(itemgetter(0), it)), result)
for result, it in itertools.groupby(patterns, itemgetter(1))
]
_test_patterns = patterns
def _compile_match_function(patterns):
patterns = list(patterns)
if all(map(_is_full_match_test, patterns)):
# Simple case: all patterns are full test identifiers.
# The test.bisect_cmd utility only uses such full test identifiers.
return set(patterns).__contains__
else:
import fnmatch
regex = '|'.join(map(fnmatch.translate, patterns))
# The search *is* case sensitive on purpose:
# don't use flags=re.IGNORECASE
regex_match = re.compile(regex).match
def match_test_regex(test_id, regex_match=regex_match):
if regex_match(test_id):
# The regex matches the whole identifier, for example
# 'test.test_os.FileTests.test_access'.
return True
else:
# Try to match parts of the test identifier.
# For example, split 'test.test_os.FileTests.test_access'
# into: 'test', 'test_os', 'FileTests' and 'test_access'.
return any(map(regex_match, test_id.split(".")))
return match_test_regex
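# Behavior sketch (hypothetical test id, illustrative only):
#     set_match_tests([('test.test_os.*', True)])
#     class _Fake:
#         def id(self): return 'test.test_os.FileTests.test_access'
#     match_test(_Fake())  # -> True (fnmatch path, since '*' is present)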


@@ -0,0 +1,110 @@
import os
import sys
import unittest
from collections.abc import Container
from test import support
from .filter import match_test, set_match_tests
from .utils import (
StrPath, TestName, TestTuple, TestList, TestFilter,
abs_module_name, count, printlist)
# If these test directories are encountered, recurse into them and treat each
# "test_*.py" file or each sub-directory as a separate test module. This can
# increase parallelism.
#
# Beware: this can't generally be done for any directory with sub-tests, as
# the __init__.py may do things which alter what tests are to be run.
SPLITTESTDIRS: set[TestName] = {
"test_asyncio",
"test_concurrent_futures",
"test_doctests",
"test_future_stmt",
"test_gdb",
"test_inspect",
"test_pydoc",
"test_multiprocessing_fork",
"test_multiprocessing_forkserver",
"test_multiprocessing_spawn",
}
def findtestdir(path: StrPath | None = None) -> StrPath:
return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir
def findtests(*, testdir: StrPath | None = None, exclude: Container[str] = (),
split_test_dirs: set[TestName] = SPLITTESTDIRS,
base_mod: str = "") -> TestList:
"""Return a list of all applicable test modules."""
testdir = findtestdir(testdir)
tests = []
for name in os.listdir(testdir):
mod, ext = os.path.splitext(name)
if (not mod.startswith("test_")) or (mod in exclude):
continue
if base_mod:
fullname = f"{base_mod}.{mod}"
else:
fullname = mod
if fullname in split_test_dirs:
subdir = os.path.join(testdir, mod)
if not base_mod:
fullname = f"test.{mod}"
tests.extend(findtests(testdir=subdir, exclude=exclude,
split_test_dirs=split_test_dirs,
base_mod=fullname))
elif ext in (".py", ""):
tests.append(fullname)
return sorted(tests)
def split_test_packages(tests, *, testdir: StrPath | None = None,
exclude: Container[str] = (),
split_test_dirs=SPLITTESTDIRS) -> list[TestName]:
testdir = findtestdir(testdir)
splitted = []
for name in tests:
if name in split_test_dirs:
subdir = os.path.join(testdir, name)
splitted.extend(findtests(testdir=subdir, exclude=exclude,
split_test_dirs=split_test_dirs,
base_mod=name))
else:
splitted.append(name)
return splitted
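# Sketch: packages listed in SPLITTESTDIRS are expanded into one entry per
# sub-module so that they can run in parallel (module names illustrative):
#     split_test_packages(['test_os', 'test_asyncio'])
#     # -> ['test_os', 'test_asyncio.test_base_events', ...]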
def _list_cases(suite: unittest.TestSuite) -> None:
for test in suite:
if isinstance(test, unittest.loader._FailedTest): # type: ignore[attr-defined]
continue
if isinstance(test, unittest.TestSuite):
_list_cases(test)
elif isinstance(test, unittest.TestCase):
if match_test(test):
print(test.id())
def list_cases(tests: TestTuple, *,
match_tests: TestFilter | None = None,
test_dir: StrPath | None = None) -> None:
support.verbose = False
set_match_tests(match_tests)
skipped = []
for test_name in tests:
module_name = abs_module_name(test_name, test_dir)
try:
suite = unittest.defaultTestLoader.loadTestsFromName(module_name)
_list_cases(suite)
except unittest.SkipTest:
skipped.append(test_name)
if skipped:
sys.stdout.flush()
stderr = sys.stderr
print(file=stderr)
print(count(len(skipped), "test"), "skipped:", file=stderr)
printlist(skipped, file=stderr)


@@ -0,0 +1,89 @@
import os
import time
from test.support import MS_WINDOWS
from .results import TestResults
from .runtests import RunTests
from .utils import print_warning
if MS_WINDOWS:
from .win_utils import WindowsLoadTracker
class Logger:
def __init__(self, results: TestResults, quiet: bool, pgo: bool):
self.start_time = time.perf_counter()
self.test_count_text = ''
self.test_count_width = 3
self.win_load_tracker: WindowsLoadTracker | None = None
self._results: TestResults = results
self._quiet: bool = quiet
self._pgo: bool = pgo
def log(self, line: str = '') -> None:
empty = not line
# add the system load prefix: "load avg: 1.80 "
load_avg = self.get_load_avg()
if load_avg is not None:
line = f"load avg: {load_avg:.2f} {line}"
# add the timestamp prefix: "0:01:05 "
log_time = time.perf_counter() - self.start_time
mins, secs = divmod(int(log_time), 60)
hours, mins = divmod(mins, 60)
formatted_log_time = "%d:%02d:%02d" % (hours, mins, secs)
line = f"{formatted_log_time} {line}"
if empty:
line = line[:-1]
print(line, flush=True)
def get_load_avg(self) -> float | None:
if hasattr(os, 'getloadavg'):
try:
return os.getloadavg()[0]
except OSError:
pass
if self.win_load_tracker is not None:
return self.win_load_tracker.getloadavg()
return None
def display_progress(self, test_index: int, text: str) -> None:
if self._quiet:
return
results = self._results
# "[ 51/405/1] test_tcl passed"
line = f"{test_index:{self.test_count_width}}{self.test_count_text}"
fails = len(results.bad) + len(results.env_changed)
if fails and not self._pgo:
line = f"{line}/{fails}"
self.log(f"[{line}] {text}")
def set_tests(self, runtests: RunTests) -> None:
if runtests.forever:
self.test_count_text = ''
self.test_count_width = 3
else:
self.test_count_text = '/{}'.format(len(runtests.tests))
self.test_count_width = len(self.test_count_text) - 1
def start_load_tracker(self) -> None:
if not MS_WINDOWS:
return
try:
self.win_load_tracker = WindowsLoadTracker()
except PermissionError as error:
# Standard accounts may not have access to the performance
# counters.
print_warning(f'Failed to create WindowsLoadTracker: {error}')
def stop_load_tracker(self) -> None:
if self.win_load_tracker is None:
return
self.win_load_tracker.close()
self.win_load_tracker = None
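# Sketch of a fully composed progress line (illustrative values):
#     "0:01:05 load avg: 1.80 [ 51/405/1] test_tcl passed"
# i.e. elapsed time, system load, then test index/total/failed-so-far.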


@@ -0,0 +1,755 @@
import os
import random
import re
import shlex
import sys
import sysconfig
import time
import trace
from typing import NoReturn
from test.support import (os_helper, MS_WINDOWS, flush_std_streams,
suppress_immortalization)
from .cmdline import _parse_args, Namespace
from .findtests import findtests, split_test_packages, list_cases
from .logger import Logger
from .pgo import setup_pgo_tests
from .result import State, TestResult
from .results import TestResults, EXITCODE_INTERRUPTED
from .runtests import RunTests, HuntRefleak
from .setup import setup_process, setup_test_dir
from .single import run_single_test, PROGRESS_MIN_TIME
from .tsan import setup_tsan_tests
from .utils import (
StrPath, StrJSON, TestName, TestList, TestTuple, TestFilter,
strip_py_suffix, count, format_duration,
printlist, get_temp_dir, get_work_dir, exit_timeout,
display_header, cleanup_temp_dir, print_warning,
is_cross_compiled, get_host_runner,
EXIT_TIMEOUT)
class Regrtest:
"""Execute a test suite.
This also parses command-line options and modifies its behavior
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is located and used.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
The other default arguments (verbose, quiet, exclude,
single, randomize, use_resources, trace, coverdir,
print_slow, and random_seed) allow programmers calling main()
directly to set the values that would normally be set by flags
on the command line.
"""
def __init__(self, ns: Namespace, _add_python_opts: bool = False):
# Log verbosity
self.verbose: int = int(ns.verbose)
self.quiet: bool = ns.quiet
self.pgo: bool = ns.pgo
self.pgo_extended: bool = ns.pgo_extended
self.tsan: bool = ns.tsan
# Test results
self.results: TestResults = TestResults()
self.first_state: str | None = None
# Logger
self.logger = Logger(self.results, self.quiet, self.pgo)
# Actions
self.want_header: bool = ns.header
self.want_list_tests: bool = ns.list_tests
self.want_list_cases: bool = ns.list_cases
self.want_wait: bool = ns.wait
self.want_cleanup: bool = ns.cleanup
self.want_rerun: bool = ns.rerun
self.want_run_leaks: bool = ns.runleaks
self.want_bisect: bool = ns.bisect
self.ci_mode: bool = (ns.fast_ci or ns.slow_ci)
self.want_add_python_opts: bool = (_add_python_opts
and ns._add_python_opts)
# Select tests
self.match_tests: TestFilter = ns.match_tests
self.exclude: bool = ns.exclude
self.fromfile: StrPath | None = ns.fromfile
self.starting_test: TestName | None = ns.start
self.cmdline_args: TestList = ns.args
# Workers
self.single_process: bool = ns.single_process
if self.single_process or ns.use_mp is None:
num_workers = 0 # run sequentially in a single process
elif ns.use_mp <= 0:
num_workers = -1 # run in parallel, use the number of CPUs
else:
num_workers = ns.use_mp # run in parallel
self.num_workers: int = num_workers
self.worker_json: StrJSON | None = ns.worker_json
# Options to run tests
self.fail_fast: bool = ns.failfast
self.fail_env_changed: bool = ns.fail_env_changed
self.fail_rerun: bool = ns.fail_rerun
self.forever: bool = ns.forever
self.output_on_failure: bool = ns.verbose3
self.timeout: float | None = ns.timeout
if ns.huntrleaks:
warmups, runs, filename = ns.huntrleaks
filename = os.path.abspath(filename)
self.hunt_refleak: HuntRefleak | None = HuntRefleak(warmups, runs, filename)
else:
self.hunt_refleak = None
self.test_dir: StrPath | None = ns.testdir
self.junit_filename: StrPath | None = ns.xmlpath
self.memory_limit: str | None = ns.memlimit
self.gc_threshold: int | None = ns.threshold
self.use_resources: tuple[str, ...] = tuple(ns.use_resources)
if ns.python:
self.python_cmd: tuple[str, ...] | None = tuple(ns.python)
else:
self.python_cmd = None
self.coverage: bool = ns.trace
self.coverage_dir: StrPath | None = ns.coverdir
self._tmp_dir: StrPath | None = ns.tempdir
# Randomize
self.randomize: bool = ns.randomize
if ('SOURCE_DATE_EPOCH' in os.environ
# don't use the variable if empty
and os.environ['SOURCE_DATE_EPOCH']
):
self.randomize = False
# SOURCE_DATE_EPOCH should be an integer, but use a string to not
# fail if it's not integer. random.seed() accepts a string.
# https://reproducible-builds.org/docs/source-date-epoch/
self.random_seed: int | str = os.environ['SOURCE_DATE_EPOCH']
elif ns.random_seed is None:
self.random_seed = random.getrandbits(32)
else:
self.random_seed = ns.random_seed
# tests
self.first_runtests: RunTests | None = None
# used by --slowest
self.print_slowest: bool = ns.print_slow
# used to display the progress bar "[ 3/100]"
self.start_time = time.perf_counter()
# used by --single
self.single_test_run: bool = ns.single
self.next_single_test: TestName | None = None
self.next_single_filename: StrPath | None = None
def log(self, line: str = '') -> None:
self.logger.log(line)
def find_tests(self, tests: TestList | None = None) -> tuple[TestTuple, TestList | None]:
if tests is None:
tests = []
if self.single_test_run:
self.next_single_filename = os.path.join(self.tmp_dir, 'pynexttest')
try:
with open(self.next_single_filename, 'r') as fp:
next_test = fp.read().strip()
tests = [next_test]
except OSError:
pass
if self.fromfile:
tests = []
# regex to match 'test_builtin' in line:
# '0:00:00 [ 4/400] test_builtin -- test_dict took 1 sec'
regex = re.compile(r'\btest_[a-zA-Z0-9_]+\b')
with open(os.path.join(os_helper.SAVEDCWD, self.fromfile)) as fp:
for line in fp:
line = line.split('#', 1)[0]
line = line.strip()
match = regex.search(line)
if match is not None:
tests.append(match.group())
strip_py_suffix(tests)
if self.pgo:
# add default PGO tests if no tests are specified
setup_pgo_tests(self.cmdline_args, self.pgo_extended)
if self.tsan:
setup_tsan_tests(self.cmdline_args)
exclude_tests = set()
if self.exclude:
for arg in self.cmdline_args:
exclude_tests.add(arg)
self.cmdline_args = []
alltests = findtests(testdir=self.test_dir,
exclude=exclude_tests)
if not self.fromfile:
selected = tests or self.cmdline_args
if selected:
selected = split_test_packages(selected)
else:
selected = alltests
else:
selected = tests
if self.single_test_run:
selected = selected[:1]
try:
pos = alltests.index(selected[0])
self.next_single_test = alltests[pos + 1]
except IndexError:
pass
# Remove all the selected tests that precede start if it's set.
if self.starting_test:
try:
del selected[:selected.index(self.starting_test)]
except ValueError:
print(f"Cannot find starting test: {self.starting_test}")
sys.exit(1)
random.seed(self.random_seed)
if self.randomize:
random.shuffle(selected)
return (tuple(selected), tests)
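# Sketch: on a Regrtest instance with no CLI test names, find_tests()
# discovers every test_* module (shuffled when -r is in effect):
#     selected, tests = regrtest.find_tests()   # 'regrtest' is hypothetical
#     # selected ~ ('test_abc', 'test_argparse', ...); tests == []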
@staticmethod
def list_tests(tests: TestTuple) -> None:
for name in tests:
print(name)
def _rerun_failed_tests(self, runtests: RunTests) -> RunTests:
# Configure the runner to re-run tests
if self.num_workers == 0 and not self.single_process:
# Always run tests in fresh processes to have a more deterministic
# initial state. Don't re-run tests in parallel; limit to a single
# worker process to avoid side effects (on the system load and
# timings) between tests.
self.num_workers = 1
tests, match_tests_dict = self.results.prepare_rerun()
# Re-run failed tests
runtests = runtests.copy(
tests=tests,
rerun=True,
verbose=True,
forever=False,
fail_fast=False,
match_tests_dict=match_tests_dict,
output_on_failure=False)
self.logger.set_tests(runtests)
msg = f"Re-running {len(tests)} failed tests in verbose mode"
if not self.single_process:
msg = f"{msg} in subprocesses"
self.log(msg)
self._run_tests_mp(runtests, self.num_workers)
else:
self.log(msg)
self.run_tests_sequentially(runtests)
return runtests
def rerun_failed_tests(self, runtests: RunTests) -> None:
if self.python_cmd:
# Temp patch for https://github.com/python/cpython/issues/94052
self.log(
"Re-running failed tests is not supported with --python "
"host runner option."
)
return
self.first_state = self.get_state()
print()
rerun_runtests = self._rerun_failed_tests(runtests)
if self.results.bad:
print(count(len(self.results.bad), 'test'), "failed again:")
printlist(self.results.bad)
self.display_result(rerun_runtests)
def _run_bisect(self, runtests: RunTests, test: str, progress: str) -> bool:
print()
title = f"Bisect {test}"
if progress:
title = f"{title} ({progress})"
print(title)
print("#" * len(title))
print()
cmd = runtests.create_python_cmd()
cmd.extend([
"-u", "-m", "test.bisect_cmd",
# Limit to 25 iterations (instead of 100) to not abuse CI resources
"--max-iter", "25",
"-v",
# runtests.match_tests is not used (yet) for bisect_cmd -i arg
])
cmd.extend(runtests.bisect_cmd_args())
cmd.append(test)
print("+", shlex.join(cmd), flush=True)
flush_std_streams()
import subprocess
proc = subprocess.run(cmd, timeout=runtests.timeout)
exitcode = proc.returncode
title = f"{title}: exit code {exitcode}"
print(title)
print("#" * len(title))
print(flush=True)
if exitcode:
print(f"Bisect failed with exit code {exitcode}")
return False
return True
def run_bisect(self, runtests: RunTests) -> None:
tests, _ = self.results.prepare_rerun(clear=False)
for index, name in enumerate(tests, 1):
if len(tests) > 1:
progress = f"{index}/{len(tests)}"
else:
progress = ""
if not self._run_bisect(runtests, name, progress):
return
def display_result(self, runtests: RunTests) -> None:
# If running the test suite for PGO then no one cares about results.
if runtests.pgo:
return
state = self.get_state()
print()
print(f"== Tests result: {state} ==")
self.results.display_result(runtests.tests,
self.quiet, self.print_slowest)
def run_test(
self, test_name: TestName, runtests: RunTests, tracer: trace.Trace | None
) -> TestResult:
if tracer is not None:
# If we're tracing code coverage, then we don't exit with status
# based on the return value from main.
cmd = ('result = run_single_test(test_name, runtests)')
namespace = dict(locals())
tracer.runctx(cmd, globals=globals(), locals=namespace)
result = namespace['result']
result.covered_lines = list(tracer.counts)
else:
result = run_single_test(test_name, runtests)
self.results.accumulate_result(result, runtests)
return result
def run_tests_sequentially(self, runtests: RunTests) -> None:
if self.coverage:
tracer = trace.Trace(trace=False, count=True)
else:
tracer = None
save_modules = set(sys.modules)
jobs = runtests.get_jobs()
if jobs is not None:
tests = count(jobs, 'test')
else:
tests = 'tests'
msg = f"Run {tests} sequentially in a single process"
if runtests.timeout:
msg += " (timeout: %s)" % format_duration(runtests.timeout)
self.log(msg)
tests_iter = runtests.iter_tests()
for test_index, test_name in enumerate(tests_iter, 1):
start_time = time.perf_counter()
self.logger.display_progress(test_index, test_name)
result = self.run_test(test_name, runtests, tracer)
# Unload the newly imported test modules (best effort finalization)
new_modules = [module for module in sys.modules
if module not in save_modules and
module.startswith(("test.", "test_"))]
for module in new_modules:
sys.modules.pop(module, None)
# Remove the attribute of the parent module.
parent, _, name = module.rpartition('.')
try:
delattr(sys.modules[parent], name)
except (KeyError, AttributeError):
pass
text = str(result)
test_time = time.perf_counter() - start_time
if test_time >= PROGRESS_MIN_TIME:
text = f"{text} in {format_duration(test_time)}"
self.logger.display_progress(test_index, text)
if result.must_stop(self.fail_fast, self.fail_env_changed):
break
def get_state(self) -> str:
state = self.results.get_state(self.fail_env_changed)
if self.first_state:
state = f'{self.first_state} then {state}'
return state
def _run_tests_mp(self, runtests: RunTests, num_workers: int) -> None:
from .run_workers import RunWorkers
RunWorkers(num_workers, runtests, self.logger, self.results).run()
def finalize_tests(self, coverage: trace.CoverageResults | None) -> None:
if self.next_single_filename:
if self.next_single_test:
with open(self.next_single_filename, 'w') as fp:
fp.write(self.next_single_test + '\n')
else:
os.unlink(self.next_single_filename)
if coverage is not None:
# uses a new-in-Python 3.13 keyword argument that mypy doesn't know about yet:
coverage.write_results(show_missing=True, summary=True, # type: ignore[call-arg]
coverdir=self.coverage_dir,
ignore_missing_files=True)
if self.want_run_leaks:
os.system("leaks %d" % os.getpid())
if self.junit_filename:
self.results.write_junit(self.junit_filename)
def display_summary(self) -> None:
if self.first_runtests is None:
raise ValueError(
"Should never call `display_summary()` before calling `_run_test()`"
)
duration = time.perf_counter() - self.logger.start_time
filtered = bool(self.match_tests)
# Total duration
print()
print("Total duration: %s" % format_duration(duration))
self.results.display_summary(self.first_runtests, filtered)
# Result
state = self.get_state()
print(f"Result: {state}")
def create_run_tests(self, tests: TestTuple) -> RunTests:
return RunTests(
tests,
fail_fast=self.fail_fast,
fail_env_changed=self.fail_env_changed,
match_tests=self.match_tests,
match_tests_dict=None,
rerun=False,
forever=self.forever,
pgo=self.pgo,
pgo_extended=self.pgo_extended,
output_on_failure=self.output_on_failure,
timeout=self.timeout,
verbose=self.verbose,
quiet=self.quiet,
hunt_refleak=self.hunt_refleak,
test_dir=self.test_dir,
use_junit=(self.junit_filename is not None),
coverage=self.coverage,
memory_limit=self.memory_limit,
gc_threshold=self.gc_threshold,
use_resources=self.use_resources,
python_cmd=self.python_cmd,
randomize=self.randomize,
random_seed=self.random_seed,
)
def _run_tests(self, selected: TestTuple, tests: TestList | None) -> int:
if self.hunt_refleak and self.hunt_refleak.warmups < 3:
msg = ("WARNING: Running tests with --huntrleaks/-R and "
"less than 3 warmup repetitions can give false positives!")
print(msg, file=sys.stdout, flush=True)
if self.num_workers < 0:
# Use all CPUs + 2 extra worker processes for tests
# that like to sleep
#
# os.process_cpu_count() is new in Python 3.13;
# mypy doesn't know about it yet
self.num_workers = (os.process_cpu_count() or 1) + 2 # type: ignore[attr-defined]
# For a partial run, we do not need to clutter the output.
if (self.want_header
or not (self.pgo or self.quiet or self.single_test_run
or tests or self.cmdline_args)):
display_header(self.use_resources, self.python_cmd)
print("Using random seed:", self.random_seed)
runtests = self.create_run_tests(selected)
self.first_runtests = runtests
self.logger.set_tests(runtests)
setup_process()
if (runtests.hunt_refleak is not None) and (not self.num_workers):
# gh-109739: WindowsLoadTracker thread interferes with refleak check
use_load_tracker = False
else:
# WindowsLoadTracker is only needed on Windows
use_load_tracker = MS_WINDOWS
if use_load_tracker:
self.logger.start_load_tracker()
try:
if self.num_workers:
self._run_tests_mp(runtests, self.num_workers)
else:
# gh-117783: don't immortalize deferred objects when tracking
# refleaks. Only relevant for the free-threaded build.
with suppress_immortalization(runtests.hunt_refleak):
self.run_tests_sequentially(runtests)
coverage = self.results.get_coverage_results()
self.display_result(runtests)
if self.want_rerun and self.results.need_rerun():
self.rerun_failed_tests(runtests)
if self.want_bisect and self.results.need_rerun():
self.run_bisect(runtests)
finally:
if use_load_tracker:
self.logger.stop_load_tracker()
self.display_summary()
self.finalize_tests(coverage)
return self.results.get_exitcode(self.fail_env_changed,
self.fail_rerun)
def run_tests(self, selected: TestTuple, tests: TestList | None) -> int:
os.makedirs(self.tmp_dir, exist_ok=True)
work_dir = get_work_dir(self.tmp_dir)
# Put a timeout on Python exit
with exit_timeout():
# Run the tests in a context manager that temporarily changes the
# CWD to a temporary and writable directory. If it's not possible
# to create or change the CWD, the original CWD will be used.
# The original CWD is available from os_helper.SAVEDCWD.
with os_helper.temp_cwd(work_dir, quiet=True):
# When using multiprocessing, worker processes will use
# work_dir as their parent temporary directory. So when the
# main process exits, it also removes the subdirectories of the
# worker processes.
return self._run_tests(selected, tests)
def _add_cross_compile_opts(self, regrtest_opts):
# WASM/WASI buildbot builders pass multiple PYTHON environment
# variables such as PYTHONPATH and _PYTHON_HOSTRUNNER.
keep_environ = bool(self.python_cmd)
environ = None
# Are we using cross-compilation?
cross_compile = is_cross_compiled()
# Get HOSTRUNNER
hostrunner = get_host_runner()
if cross_compile:
# emulate -E, but keep PYTHONPATH + cross compile env vars,
# so the test executable can load the correct sysconfigdata file.
keep = {
'_PYTHON_PROJECT_BASE',
'_PYTHON_HOST_PLATFORM',
'_PYTHON_SYSCONFIGDATA_NAME',
"_PYTHON_SYSCONFIGDATA_PATH",
'PYTHONPATH'
}
old_environ = os.environ
new_environ = {
name: value for name, value in os.environ.items()
if not name.startswith(('PYTHON', '_PYTHON')) or name in keep
}
# Only set environ if at least one variable was removed
if new_environ != old_environ:
environ = new_environ
keep_environ = True
if cross_compile and hostrunner:
if self.num_workers == 0 and not self.single_process:
# For now use only two cores for cross-compiled builds;
# hostrunner can be expensive.
regrtest_opts.extend(['-j', '2'])
# If HOSTRUNNER is set and -p/--python option is not given, then
# use hostrunner to execute python binary for tests.
if not self.python_cmd:
buildpython = sysconfig.get_config_var("BUILDPYTHON")
python_cmd = f"{hostrunner} {buildpython}"
regrtest_opts.extend(["--python", python_cmd])
keep_environ = True
return (environ, keep_environ)
def _add_ci_python_opts(self, python_opts, keep_environ):
# --fast-ci and --slow-ci add options to Python:
# "-u -W default -bb -E"
# Unbuffered stdout and stderr
if not sys.stdout.write_through:
python_opts.append('-u')
# Add warnings filter 'error'
if 'default' not in sys.warnoptions:
python_opts.extend(('-W', 'error'))
# Error on bytes/str comparison
if sys.flags.bytes_warning < 2:
python_opts.append('-bb')
if not keep_environ:
# Ignore PYTHON* environment variables
if not sys.flags.ignore_environment:
python_opts.append('-E')
def _execute_python(self, cmd, environ):
# Make sure that messages before execv() are logged
sys.stdout.flush()
sys.stderr.flush()
cmd_text = shlex.join(cmd)
try:
print(f"+ {cmd_text}", flush=True)
if hasattr(os, 'execv') and not MS_WINDOWS:
os.execv(cmd[0], cmd)
# On success, execv() does not return.
# On error, it raises an OSError.
else:
import subprocess
with subprocess.Popen(cmd, env=environ) as proc:
try:
proc.wait()
except KeyboardInterrupt:
# There is no need to call proc.terminate(): on CTRL+C,
# SIGTERM is also sent to the child process.
try:
proc.wait(timeout=EXIT_TIMEOUT)
except subprocess.TimeoutExpired:
proc.kill()
proc.wait()
sys.exit(EXITCODE_INTERRUPTED)
sys.exit(proc.returncode)
except Exception as exc:
print_warning(f"Failed to change Python options: {exc!r}\n"
f"Command: {cmd_text}")
# continue executing main()
def _add_python_opts(self) -> None:
python_opts: list[str] = []
regrtest_opts: list[str] = []
environ, keep_environ = self._add_cross_compile_opts(regrtest_opts)
if self.ci_mode:
self._add_ci_python_opts(python_opts, keep_environ)
if (not python_opts) and (not regrtest_opts) and (environ is None):
# Nothing changed: nothing to do
return
# Create new command line
cmd = list(sys.orig_argv)
if python_opts:
cmd[1:1] = python_opts
if regrtest_opts:
cmd.extend(regrtest_opts)
cmd.append("--dont-add-python-opts")
self._execute_python(cmd, environ)
def _init(self):
# Set sys.stdout encoder error handler to backslashreplace,
# similar to sys.stderr error handler, to avoid UnicodeEncodeError
# when printing a traceback or any other non-encodable character.
sys.stdout.reconfigure(errors="backslashreplace")
if self.junit_filename and not os.path.isabs(self.junit_filename):
self.junit_filename = os.path.abspath(self.junit_filename)
strip_py_suffix(self.cmdline_args)
self._tmp_dir = get_temp_dir(self._tmp_dir)
@property
def tmp_dir(self) -> StrPath:
if self._tmp_dir is None:
raise ValueError(
"Should never use `.tmp_dir` before calling `.main()`"
)
return self._tmp_dir
def main(self, tests: TestList | None = None) -> NoReturn:
if self.want_add_python_opts:
self._add_python_opts()
self._init()
if self.want_cleanup:
cleanup_temp_dir(self.tmp_dir)
sys.exit(0)
if self.want_wait:
input("Press any key to continue...")
setup_test_dir(self.test_dir)
selected, tests = self.find_tests(tests)
exitcode = 0
if self.want_list_tests:
self.list_tests(selected)
elif self.want_list_cases:
list_cases(selected,
match_tests=self.match_tests,
test_dir=self.test_dir)
else:
exitcode = self.run_tests(selected, tests)
sys.exit(exitcode)
def main(tests=None, _add_python_opts=False, **kwargs) -> NoReturn:
"""Run the Python suite."""
ns = _parse_args(sys.argv[1:], **kwargs)
Regrtest(ns, _add_python_opts=_add_python_opts).main(tests=tests)
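# Entry-point sketch: `python -m test` reaches this function roughly as
# follows (an assumption about Lib/test/__main__.py, shown for orientation):
#     from test.libregrtest.main import main
#     main(_add_python_opts=True)   # parses sys.argv[1:], then sys.exit()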


@@ -0,0 +1,26 @@
# Config file for running mypy on libregrtest.
# Run mypy by invoking `mypy --config-file Lib/test/libregrtest/mypy.ini`
# on the command-line from the repo root
[mypy]
files = Lib/test/libregrtest
explicit_package_bases = True
python_version = 3.12
platform = linux
pretty = True
# Enable most of the stricter settings
enable_error_code = ignore-without-code
strict = True
# Various stricter settings that we can't yet enable
# Try to enable these in the following order:
disallow_incomplete_defs = False
disallow_untyped_calls = False
disallow_untyped_defs = False
check_untyped_defs = False
warn_return_any = False
# Various internal modules that typeshed deliberately doesn't have stubs for:
[mypy-_abc.*,_opcode.*,_overlapped.*,_testcapi.*,_testinternalcapi.*,test.*]
ignore_missing_imports = True


@@ -0,0 +1,55 @@
# Set of tests run by default if --pgo is specified. The tests below were
# chosen based on the following criteria: either they exercise a commonly used
# C extension module or type, or they run some relatively typical Python code.
# Long running tests should be avoided because the PGO instrumented executable
# runs slowly.
PGO_TESTS = [
'test_array',
'test_base64',
'test_binascii',
'test_binop',
'test_bisect',
'test_bytes',
'test_bz2',
'test_cmath',
'test_codecs',
'test_collections',
'test_complex',
'test_dataclasses',
'test_datetime',
'test_decimal',
'test_difflib',
'test_float',
'test_fstring',
'test_functools',
'test_generators',
'test_hashlib',
'test_heapq',
'test_int',
'test_itertools',
'test_json',
'test_long',
'test_lzma',
'test_math',
'test_memoryview',
'test_operator',
'test_ordered_dict',
'test_patma',
'test_pickle',
'test_pprint',
'test_re',
'test_set',
'test_sqlite3',
'test_statistics',
'test_str',
'test_struct',
'test_tabnanny',
'test_time',
'test_xml_etree',
'test_xml_etree_c',
]
def setup_pgo_tests(cmdline_args, pgo_extended: bool) -> None:
if not cmdline_args and not pgo_extended:
# run default set of tests for PGO training
cmdline_args[:] = PGO_TESTS[:]
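# Sketch: with `--pgo` and no explicit test names, the default list above
# is injected in place:
#     args: list[str] = []
#     setup_pgo_tests(args, pgo_extended=False)
#     assert args == PGO_TESTS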


@@ -0,0 +1,285 @@
import os
import sys
import warnings
from inspect import isabstract
from typing import Any
import linecache
from test import support
from test.support import os_helper
from test.support import refleak_helper
from .runtests import HuntRefleak
from .utils import clear_caches
try:
from _abc import _get_dump
except ImportError:
import weakref
def _get_dump(cls):
# Reimplement _get_dump() for pure-Python implementation of
# the abc module (Lib/_py_abc.py)
registry_weakrefs = set(weakref.ref(obj) for obj in cls._abc_registry)
return (registry_weakrefs, cls._abc_cache,
cls._abc_negative_cache, cls._abc_negative_cache_version)
def save_support_xml(filename):
if support.junit_xml_list is None:
return
import pickle
with open(filename, 'xb') as fp:
pickle.dump(support.junit_xml_list, fp)
support.junit_xml_list = None
def restore_support_xml(filename):
try:
fp = open(filename, 'rb')
except FileNotFoundError:
return
import pickle
with fp:
xml_list = pickle.load(fp)
os.unlink(filename)
support.junit_xml_list = xml_list
def runtest_refleak(test_name, test_func,
hunt_refleak: HuntRefleak,
quiet: bool):
"""Run a test multiple times, looking for reference leaks.
Returns a (failed, result) tuple: 'failed' is True if the test appears to
leak references, memory blocks, or file descriptors; False otherwise.
"""
# This code is hackish and inelegant, but it seems to do the job.
import copyreg
import collections.abc
if not hasattr(sys, 'gettotalrefcount'):
raise Exception("Tracking reference leaks requires a debug build "
"of Python")
# Avoid false positives due to various caches
# filling slowly with random data:
warm_caches()
# Save current values for dash_R_cleanup() to restore.
fs = warnings.filters[:]
ps = copyreg.dispatch_table.copy()
pic = sys.path_importer_cache.copy()
zdc: dict[str, Any] | None
# Linecache holds a cache with the source of interactive code snippets
# (e.g. code typed in the REPL). This cache is not cleared by
# linecache.clearcache(). We need to save and restore it to avoid false
# positives.
linecache_data = linecache.cache.copy(), linecache._interactive_cache.copy() # type: ignore[attr-defined]
try:
import zipimport
except ImportError:
zdc = None # Run unmodified on platforms without zipimport support
else:
# private attribute that mypy doesn't know about:
zdc = zipimport._zip_directory_cache.copy() # type: ignore[attr-defined]
abcs = {}
for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
if not isabstract(abc):
continue
for obj in abc.__subclasses__() + [abc]:
abcs[obj] = _get_dump(obj)[0]
# bpo-31217: Integer pool to get a single integer object for the same
# value. The pool is used to prevent false alarms when checking for memory
# block leaks. Fill the pool with values in -1000..1000 which are the most
# common (reference, memory block, file descriptor) differences.
int_pool = {value: value for value in range(-1000, 1000)}
def get_pooled_int(value):
return int_pool.setdefault(value, value)
warmups = hunt_refleak.warmups
runs = hunt_refleak.runs
filename = hunt_refleak.filename
repcount = warmups + runs
# Pre-allocate to ensure that the loop doesn't allocate anything new
rep_range = list(range(repcount))
rc_deltas = [0] * repcount
alloc_deltas = [0] * repcount
fd_deltas = [0] * repcount
getallocatedblocks = sys.getallocatedblocks
gettotalrefcount = sys.gettotalrefcount
getunicodeinternedsize = sys.getunicodeinternedsize
fd_count = os_helper.fd_count
# initialize variables to make pyflakes quiet
rc_before = alloc_before = fd_before = interned_immortal_before = 0
if not quiet:
print("beginning", repcount, "repetitions. Showing number of leaks "
"(. for 0 or less, X for 10 or more)",
file=sys.stderr)
numbers = ("1234567890"*(repcount//10 + 1))[:repcount]
numbers = numbers[:warmups] + ':' + numbers[warmups:]
print(numbers, file=sys.stderr, flush=True)
xml_filename = 'refleak-xml.tmp'
result = None
dash_R_cleanup(fs, ps, pic, zdc, abcs, linecache_data)
support.gc_collect()
for i in rep_range:
current = refleak_helper._hunting_for_refleaks
refleak_helper._hunting_for_refleaks = True
try:
result = test_func()
finally:
refleak_helper._hunting_for_refleaks = current
save_support_xml(xml_filename)
dash_R_cleanup(fs, ps, pic, zdc, abcs, linecache_data)
support.gc_collect()
# Read memory statistics immediately after the garbage collection.
# Also, readjust the reference counts and alloc blocks by ignoring
# any strings that might have been interned during test_func. These
# strings will be deallocated at runtime shutdown
interned_immortal_after = getunicodeinternedsize(
# Use an internal-only keyword argument that mypy doesn't know yet
_only_immortal=True) # type: ignore[call-arg]
alloc_after = getallocatedblocks() - interned_immortal_after
rc_after = gettotalrefcount()
fd_after = fd_count()
rc_deltas[i] = get_pooled_int(rc_after - rc_before)
alloc_deltas[i] = get_pooled_int(alloc_after - alloc_before)
fd_deltas[i] = get_pooled_int(fd_after - fd_before)
if not quiet:
# use max, not sum, so total_leaks is one of the pooled ints
total_leaks = max(rc_deltas[i], alloc_deltas[i], fd_deltas[i])
if total_leaks <= 0:
symbol = '.'
elif total_leaks < 10:
symbol = (
'.', '1', '2', '3', '4', '5', '6', '7', '8', '9',
)[total_leaks]
else:
symbol = 'X'
if i == warmups:
print(' ', end='', file=sys.stderr, flush=True)
print(symbol, end='', file=sys.stderr, flush=True)
del total_leaks
del symbol
alloc_before = alloc_after
rc_before = rc_after
fd_before = fd_after
interned_immortal_before = interned_immortal_after
restore_support_xml(xml_filename)
if not quiet:
print(file=sys.stderr)
# These checkers return False on success, True on failure
def check_rc_deltas(deltas):
# Checker for reference counters and memory blocks.
#
# bpo-30776: Try to ignore false positives:
#
# [3, 0, 0]
# [0, 1, 0]
# [8, -8, 1]
#
# Expected leaks:
#
# [5, 5, 6]
# [10, 1, 1]
return all(delta >= 1 for delta in deltas)
def check_fd_deltas(deltas):
return any(deltas)
failed = False
for deltas, item_name, checker in [
(rc_deltas, 'references', check_rc_deltas),
(alloc_deltas, 'memory blocks', check_rc_deltas),
(fd_deltas, 'file descriptors', check_fd_deltas)
]:
# ignore warmup runs
deltas = deltas[warmups:]
failing = checker(deltas)
suspicious = any(deltas)
if failing or suspicious:
msg = '%s leaked %s %s, sum=%s' % (
test_name, deltas, item_name, sum(deltas))
print(msg, end='', file=sys.stderr)
if failing:
print(file=sys.stderr, flush=True)
with open(filename, "a", encoding="utf-8") as refrep:
print(msg, file=refrep)
refrep.flush()
failed = True
else:
print(' (this is fine)', file=sys.stderr, flush=True)
return (failed, result)
def dash_R_cleanup(fs, ps, pic, zdc, abcs, linecache_data):
import copyreg
import collections.abc
# Restore some original values.
warnings.filters[:] = fs
copyreg.dispatch_table.clear()
copyreg.dispatch_table.update(ps)
sys.path_importer_cache.clear()
sys.path_importer_cache.update(pic)
lcache, linteractive = linecache_data
linecache._interactive_cache.clear()
linecache._interactive_cache.update(linteractive)
linecache.cache.clear()
linecache.cache.update(lcache)
try:
import zipimport
except ImportError:
pass # Run unmodified on platforms without zipimport support
else:
zipimport._zip_directory_cache.clear()
zipimport._zip_directory_cache.update(zdc)
# Clear ABC registries, restoring previously saved ABC registries.
# ignore deprecation warning for collections.abc.ByteString
abs_classes = [getattr(collections.abc, a) for a in collections.abc.__all__]
abs_classes = filter(isabstract, abs_classes)
for abc in abs_classes:
for obj in abc.__subclasses__() + [abc]:
refs = abcs.get(obj, None)
if refs is not None:
obj._abc_registry_clear()
for ref in refs:
subclass = ref()
if subclass is not None:
obj.register(subclass)
obj._abc_caches_clear()
# Clear caches
clear_caches()
# Clear other caches last (previous function calls can re-populate them):
sys._clear_internal_caches()
def warm_caches() -> None:
# char cache
s = bytes(range(256))
for i in range(256):
s[i:i+1]
# unicode cache
[chr(i) for i in range(256)]
# int cache
list(range(-5, 257))


@ -0,0 +1,225 @@
import dataclasses
import json
from typing import Any
from .utils import (
StrJSON, TestName, FilterTuple,
format_duration, normalize_test_name, print_warning)
@dataclasses.dataclass(slots=True)
class TestStats:
tests_run: int = 0
failures: int = 0
skipped: int = 0
@staticmethod
def from_unittest(result):
return TestStats(result.testsRun,
len(result.failures),
len(result.skipped))
@staticmethod
def from_doctest(results):
return TestStats(results.attempted,
results.failed,
results.skipped)
def accumulate(self, stats):
self.tests_run += stats.tests_run
self.failures += stats.failures
self.skipped += stats.skipped
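# Illustrative example (not part of the module): accumulating per-test
# stats into a running total, using only the fields defined above:
#
#     total = TestStats()
#     total.accumulate(TestStats(tests_run=10, failures=1, skipped=2))
#     total.accumulate(TestStats(tests_run=5))
#     assert (total.tests_run, total.failures, total.skipped) == (15, 1, 2)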
# Avoid enum.Enum to reduce the number of imports when tests are run
class State:
PASSED = "PASSED"
FAILED = "FAILED"
SKIPPED = "SKIPPED"
UNCAUGHT_EXC = "UNCAUGHT_EXC"
REFLEAK = "REFLEAK"
ENV_CHANGED = "ENV_CHANGED"
RESOURCE_DENIED = "RESOURCE_DENIED"
INTERRUPTED = "INTERRUPTED"
WORKER_FAILED = "WORKER_FAILED" # non-zero worker process exit code
WORKER_BUG = "WORKER_BUG" # exception when running a worker
DID_NOT_RUN = "DID_NOT_RUN"
TIMEOUT = "TIMEOUT"
@staticmethod
def is_failed(state):
return state in {
State.FAILED,
State.UNCAUGHT_EXC,
State.REFLEAK,
State.WORKER_FAILED,
State.WORKER_BUG,
State.TIMEOUT}
@staticmethod
def has_meaningful_duration(state):
# Consider that the duration is meaningless for these cases.
# For example, if a whole test file is skipped, its duration
# is unlikely to be the duration of executing its tests,
# but just the duration to execute code which skips the test.
return state not in {
State.SKIPPED,
State.RESOURCE_DENIED,
State.INTERRUPTED,
State.WORKER_FAILED,
State.WORKER_BUG,
State.DID_NOT_RUN}
@staticmethod
def must_stop(state):
return state in {
State.INTERRUPTED,
State.WORKER_BUG,
}
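# Illustrative examples (not part of the module) of the State predicates:
#
#     State.is_failed(State.TIMEOUT)                     # -> True
#     State.is_failed(State.SKIPPED)                     # -> False
#     State.must_stop(State.INTERRUPTED)                 # -> True
#     State.has_meaningful_duration(State.DID_NOT_RUN)   # -> False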
FileName = str
LineNo = int
Location = tuple[FileName, LineNo]
@dataclasses.dataclass(slots=True)
class TestResult:
test_name: TestName
state: str | None = None
# Test duration in seconds
duration: float | None = None
xml_data: list[str] | None = None
stats: TestStats | None = None
# errors and failures copied from support.TestFailedWithDetails
errors: list[tuple[str, str]] | None = None
failures: list[tuple[str, str]] | None = None
# partial coverage in a worker run; not used by sequential in-process runs
covered_lines: list[Location] | None = None
def is_failed(self, fail_env_changed: bool) -> bool:
if self.state == State.ENV_CHANGED:
return fail_env_changed
return State.is_failed(self.state)
def _format_failed(self):
if self.errors and self.failures:
le = len(self.errors)
lf = len(self.failures)
error_s = "error" + ("s" if le > 1 else "")
failure_s = "failure" + ("s" if lf > 1 else "")
return f"{self.test_name} failed ({le} {error_s}, {lf} {failure_s})"
if self.errors:
le = len(self.errors)
error_s = "error" + ("s" if le > 1 else "")
return f"{self.test_name} failed ({le} {error_s})"
if self.failures:
lf = len(self.failures)
failure_s = "failure" + ("s" if lf > 1 else "")
return f"{self.test_name} failed ({lf} {failure_s})"
return f"{self.test_name} failed"
def __str__(self) -> str:
match self.state:
case State.PASSED:
return f"{self.test_name} passed"
case State.FAILED:
return self._format_failed()
case State.SKIPPED:
return f"{self.test_name} skipped"
case State.UNCAUGHT_EXC:
return f"{self.test_name} failed (uncaught exception)"
case State.REFLEAK:
return f"{self.test_name} failed (reference leak)"
case State.ENV_CHANGED:
return f"{self.test_name} failed (env changed)"
case State.RESOURCE_DENIED:
return f"{self.test_name} skipped (resource denied)"
case State.INTERRUPTED:
return f"{self.test_name} interrupted"
case State.WORKER_FAILED:
return f"{self.test_name} worker non-zero exit code"
case State.WORKER_BUG:
return f"{self.test_name} worker bug"
case State.DID_NOT_RUN:
return f"{self.test_name} ran no tests"
case State.TIMEOUT:
assert self.duration is not None, "self.duration is None"
return f"{self.test_name} timed out ({format_duration(self.duration)})"
case _:
raise ValueError("unknown result state: {state!r}")
def has_meaningful_duration(self):
return State.has_meaningful_duration(self.state)
def set_env_changed(self):
if self.state is None or self.state == State.PASSED:
self.state = State.ENV_CHANGED
def must_stop(self, fail_fast: bool, fail_env_changed: bool) -> bool:
if State.must_stop(self.state):
return True
if fail_fast and self.is_failed(fail_env_changed):
return True
return False
def get_rerun_match_tests(self) -> FilterTuple | None:
match_tests = []
errors = self.errors or []
failures = self.failures or []
for error_list, is_error in (
(errors, True),
(failures, False),
):
for full_name, *_ in error_list:
match_name = normalize_test_name(full_name, is_error=is_error)
if match_name is None:
# 'setUpModule (test.test_sys)': don't filter tests
return None
if not match_name:
error_type = "ERROR" if is_error else "FAIL"
print_warning(f"rerun failed to parse {error_type} test name: "
f"{full_name!r}: don't filter tests")
return None
match_tests.append(match_name)
if not match_tests:
return None
return tuple(match_tests)
def write_json_into(self, file) -> None:
json.dump(self, file, cls=_EncodeTestResult)
@staticmethod
def from_json(worker_json: StrJSON) -> 'TestResult':
return json.loads(worker_json, object_hook=_decode_test_result)
class _EncodeTestResult(json.JSONEncoder):
def default(self, o: Any) -> dict[str, Any]:
if isinstance(o, TestResult):
result = dataclasses.asdict(o)
result["__test_result__"] = o.__class__.__name__
return result
else:
return super().default(o)
def _decode_test_result(data: dict[str, Any]) -> TestResult | dict[str, Any]:
if "__test_result__" in data:
data.pop('__test_result__')
if data['stats'] is not None:
data['stats'] = TestStats(**data['stats'])
if data['covered_lines'] is not None:
data['covered_lines'] = [
tuple(loc) for loc in data['covered_lines']
]
return TestResult(**data)
else:
return data
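# Illustrative example (not part of the module): a TestResult survives a
# JSON round-trip through the encoder/decoder pair above:
#
#     result = TestResult("test_os", state=State.PASSED, duration=1.5,
#                         stats=TestStats(tests_run=10))
#     worker_json = json.dumps(result, cls=_EncodeTestResult)
#     copy = TestResult.from_json(worker_json)
#     assert copy.test_name == "test_os" and copy.stats.tests_run == 10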


@ -0,0 +1,276 @@
import sys
import trace
from typing import TYPE_CHECKING
from .runtests import RunTests
from .result import State, TestResult, TestStats, Location
from .utils import (
StrPath, TestName, TestTuple, TestList, FilterDict,
printlist, count, format_duration)
if TYPE_CHECKING:
from xml.etree.ElementTree import Element
# Python uses exit code 1 when an exception is not caught
# argparse.ArgumentParser.error() uses exit code 2
EXITCODE_BAD_TEST = 2
EXITCODE_ENV_CHANGED = 3
EXITCODE_NO_TESTS_RAN = 4
EXITCODE_RERUN_FAIL = 5
EXITCODE_INTERRUPTED = 130 # 128 + signal.SIGINT=2
class TestResults:
def __init__(self) -> None:
self.bad: TestList = []
self.good: TestList = []
self.rerun_bad: TestList = []
self.skipped: TestList = []
self.resource_denied: TestList = []
self.env_changed: TestList = []
self.run_no_tests: TestList = []
self.rerun: TestList = []
self.rerun_results: list[TestResult] = []
self.interrupted: bool = False
self.worker_bug: bool = False
self.test_times: list[tuple[float, TestName]] = []
self.stats = TestStats()
# used by --junit-xml
self.testsuite_xml: list['Element'] = []
# used by -T with -j
self.covered_lines: set[Location] = set()
def is_all_good(self) -> bool:
return (not self.bad
and not self.skipped
and not self.interrupted
and not self.worker_bug)
def get_executed(self) -> set[TestName]:
return (set(self.good) | set(self.bad) | set(self.skipped)
| set(self.resource_denied) | set(self.env_changed)
| set(self.run_no_tests))
def no_tests_run(self) -> bool:
return not any((self.good, self.bad, self.skipped, self.interrupted,
self.env_changed))
def get_state(self, fail_env_changed: bool) -> str:
state = []
if self.bad:
state.append("FAILURE")
elif fail_env_changed and self.env_changed:
state.append("ENV CHANGED")
elif self.no_tests_run():
state.append("NO TESTS RAN")
if self.interrupted:
state.append("INTERRUPTED")
if self.worker_bug:
state.append("WORKER BUG")
if not state:
state.append("SUCCESS")
return ', '.join(state)
def get_exitcode(self, fail_env_changed: bool, fail_rerun: bool) -> int:
exitcode = 0
if self.bad:
exitcode = EXITCODE_BAD_TEST
elif self.interrupted:
exitcode = EXITCODE_INTERRUPTED
elif fail_env_changed and self.env_changed:
exitcode = EXITCODE_ENV_CHANGED
elif self.no_tests_run():
exitcode = EXITCODE_NO_TESTS_RAN
elif fail_rerun and self.rerun:
exitcode = EXITCODE_RERUN_FAIL
elif self.worker_bug:
exitcode = EXITCODE_BAD_TEST
return exitcode
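# Illustrative example (not part of the module): exit code selection for a
# run with a single failed test, with fail_env_changed and fail_rerun off:
#
#     results = TestResults()
#     results.bad.append('test_os')
#     assert results.get_state(fail_env_changed=False) == 'FAILURE'
#     assert results.get_exitcode(False, False) == EXITCODE_BAD_TEST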
def accumulate_result(self, result: TestResult, runtests: RunTests) -> None:
test_name = result.test_name
rerun = runtests.rerun
fail_env_changed = runtests.fail_env_changed
match result.state:
case State.PASSED:
self.good.append(test_name)
case State.ENV_CHANGED:
self.env_changed.append(test_name)
self.rerun_results.append(result)
case State.SKIPPED:
self.skipped.append(test_name)
case State.RESOURCE_DENIED:
self.resource_denied.append(test_name)
case State.INTERRUPTED:
self.interrupted = True
case State.DID_NOT_RUN:
self.run_no_tests.append(test_name)
case _:
if result.is_failed(fail_env_changed):
self.bad.append(test_name)
self.rerun_results.append(result)
else:
raise ValueError(f"invalid test state: {result.state!r}")
if result.state == State.WORKER_BUG:
self.worker_bug = True
if result.has_meaningful_duration() and not rerun:
if result.duration is None:
raise ValueError("result.duration is None")
self.test_times.append((result.duration, test_name))
if result.stats is not None:
self.stats.accumulate(result.stats)
if rerun:
self.rerun.append(test_name)
if result.covered_lines:
# we don't care about trace counts so we don't have to sum them up
self.covered_lines.update(result.covered_lines)
xml_data = result.xml_data
if xml_data:
self.add_junit(xml_data)
def get_coverage_results(self) -> trace.CoverageResults:
counts = {loc: 1 for loc in self.covered_lines}
return trace.CoverageResults(counts=counts)
def need_rerun(self) -> bool:
return bool(self.rerun_results)
def prepare_rerun(self, *, clear: bool = True) -> tuple[TestTuple, FilterDict]:
tests: TestList = []
match_tests_dict = {}
for result in self.rerun_results:
tests.append(result.test_name)
match_tests = result.get_rerun_match_tests()
# ignore empty match list
if match_tests:
match_tests_dict[result.test_name] = match_tests
if clear:
# Clear previously failed tests
self.rerun_bad.extend(self.bad)
self.bad.clear()
self.env_changed.clear()
self.rerun_results.clear()
return (tuple(tests), match_tests_dict)
def add_junit(self, xml_data: list[str]) -> None:
import xml.etree.ElementTree as ET
for e in xml_data:
try:
self.testsuite_xml.append(ET.fromstring(e))
except ET.ParseError:
print(xml_data, file=sys.__stderr__)
raise
def write_junit(self, filename: StrPath) -> None:
if not self.testsuite_xml:
# Don't create empty XML file
return
import xml.etree.ElementTree as ET
root = ET.Element("testsuites")
# Manually count the totals for the overall summary
totals = {'tests': 0, 'errors': 0, 'failures': 0}
for suite in self.testsuite_xml:
root.append(suite)
for k in totals:
try:
totals[k] += int(suite.get(k, 0))
except ValueError:
pass
for k, v in totals.items():
root.set(k, str(v))
with open(filename, 'wb') as f:
for s in ET.tostringlist(root):
f.write(s)
def display_result(self, tests: TestTuple, quiet: bool, print_slowest: bool) -> None:
if print_slowest:
self.test_times.sort(reverse=True)
print()
print("10 slowest tests:")
for test_time, test in self.test_times[:10]:
print("- %s: %s" % (test, format_duration(test_time)))
all_tests = []
omitted = set(tests) - self.get_executed()
# less important
all_tests.append((sorted(omitted), "test", "{} omitted:"))
if not quiet:
all_tests.append((self.skipped, "test", "{} skipped:"))
all_tests.append((self.resource_denied, "test", "{} skipped (resource denied):"))
all_tests.append((self.run_no_tests, "test", "{} run no tests:"))
# more important
all_tests.append((self.env_changed, "test", "{} altered the execution environment (env changed):"))
all_tests.append((self.rerun, "re-run test", "{}:"))
all_tests.append((self.bad, "test", "{} failed:"))
for tests_list, count_text, title_format in all_tests:
if tests_list:
print()
count_text = count(len(tests_list), count_text)
print(title_format.format(count_text))
printlist(tests_list)
if self.good and not quiet:
print()
text = count(len(self.good), "test")
text = f"{text} OK."
if (self.is_all_good() and len(self.good) > 1):
text = f"All {text}"
print(text)
if self.interrupted:
print()
print("Test suite interrupted by signal SIGINT.")
def display_summary(self, first_runtests: RunTests, filtered: bool) -> None:
# Total tests
stats = self.stats
text = f'run={stats.tests_run:,}'
if filtered:
text = f"{text} (filtered)"
report = [text]
if stats.failures:
report.append(f'failures={stats.failures:,}')
if stats.skipped:
report.append(f'skipped={stats.skipped:,}')
print(f"Total tests: {' '.join(report)}")
# Total test files
all_tests = [self.good, self.bad, self.rerun,
self.skipped,
self.env_changed, self.run_no_tests]
run = sum(map(len, all_tests))
text = f'run={run}'
if not first_runtests.forever:
ntest = len(first_runtests.tests)
text = f"{text}/{ntest}"
if filtered:
text = f"{text} (filtered)"
report = [text]
for name, tests in (
('failed', self.bad),
('env_changed', self.env_changed),
('skipped', self.skipped),
('resource_denied', self.resource_denied),
('rerun', self.rerun),
('run_no_tests', self.run_no_tests),
):
if tests:
report.append(f'{name}={len(tests)}')
print(f"Total test files: {' '.join(report)}")


@ -0,0 +1,621 @@
import contextlib
import dataclasses
import faulthandler
import os.path
import queue
import signal
import subprocess
import sys
import tempfile
import threading
import time
import traceback
from typing import Any, Literal, TextIO
from test import support
from test.support import os_helper, MS_WINDOWS
from .logger import Logger
from .result import TestResult, State
from .results import TestResults
from .runtests import RunTests, WorkerRunTests, JsonFile, JsonFileType
from .single import PROGRESS_MIN_TIME
from .utils import (
StrPath, TestName,
format_duration, print_warning, count, plural, get_signal_name)
from .worker import create_worker_process, USE_PROCESS_GROUP
if MS_WINDOWS:
import locale
import msvcrt
# Display the running tests if nothing happened in the last N seconds
PROGRESS_UPDATE = 30.0 # seconds
assert PROGRESS_UPDATE >= PROGRESS_MIN_TIME
# Kill the main process after 5 minutes. It is supposed to write an update
# every PROGRESS_UPDATE seconds. Tolerate 5 minutes for Python's slowest
# buildbot workers.
MAIN_PROCESS_TIMEOUT = 5 * 60.0
assert MAIN_PROCESS_TIMEOUT >= PROGRESS_UPDATE
# Time to wait until a worker completes: should be immediate
WAIT_COMPLETED_TIMEOUT = 30.0 # seconds
# Time to wait for a killed process (in seconds)
WAIT_KILLED_TIMEOUT = 60.0
# We do not use a generator so multiple threads can call next().
class MultiprocessIterator:
"""A thread-safe iterator over tests for multiprocess mode."""
def __init__(self, tests_iter):
self.lock = threading.Lock()
self.tests_iter = tests_iter
def __iter__(self):
return self
def __next__(self):
with self.lock:
if self.tests_iter is None:
raise StopIteration
return next(self.tests_iter)
def stop(self):
with self.lock:
self.tests_iter = None
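# Illustrative example (not part of the module): the iterator is shared by
# all worker threads; stop() makes every later next() raise StopIteration:
#
#     tests = MultiprocessIterator(iter(['test_a', 'test_b']))
#     next(tests)   # -> 'test_a'
#     tests.stop()
#     next(tests)   # raises StopIteration in any thread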
@dataclasses.dataclass(slots=True, frozen=True)
class MultiprocessResult:
result: TestResult
# bpo-45410: stderr is written into stdout to preserve message order
worker_stdout: str | None = None
err_msg: str | None = None
class WorkerThreadExited:
"""Indicates that a worker thread has exited"""
ExcStr = str
QueueOutput = tuple[Literal[False], MultiprocessResult] | tuple[Literal[True], ExcStr]
QueueContent = QueueOutput | WorkerThreadExited
class ExitThread(Exception):
pass
class WorkerError(Exception):
def __init__(self,
test_name: TestName,
err_msg: str | None,
stdout: str | None,
state: str):
result = TestResult(test_name, state=state)
self.mp_result = MultiprocessResult(result, stdout, err_msg)
super().__init__()
_NOT_RUNNING = "<not running>"
class WorkerThread(threading.Thread):
def __init__(self, worker_id: int, runner: "RunWorkers") -> None:
super().__init__()
self.worker_id = worker_id
self.runtests = runner.runtests
self.pending = runner.pending
self.output = runner.output
self.timeout = runner.worker_timeout
self.log = runner.log
self.test_name = _NOT_RUNNING
self.start_time = time.monotonic()
self._popen: subprocess.Popen[str] | None = None
self._killed = False
self._stopped = False
def __repr__(self) -> str:
info = [f'WorkerThread #{self.worker_id}']
if self.is_alive():
info.append("running")
else:
info.append('stopped')
test = self.test_name
if test:
info.append(f'test={test}')
popen = self._popen
if popen is not None:
dt = time.monotonic() - self.start_time
info.extend((f'pid={popen.pid}',
f'time={format_duration(dt)}'))
return '<%s>' % ' '.join(info)
def _kill(self) -> None:
popen = self._popen
if popen is None:
return
if self._killed:
return
self._killed = True
if USE_PROCESS_GROUP:
what = f"{self} process group"
else:
what = f"{self} process"
print(f"Kill {what}", file=sys.stderr, flush=True)
try:
if USE_PROCESS_GROUP:
os.killpg(popen.pid, signal.SIGKILL)
else:
popen.kill()
except ProcessLookupError:
# popen.kill(): the process completed, the WorkerThread thread
# read its exit status, but Popen.send_signal() read the returncode
# just before Popen.wait() set returncode.
pass
except OSError as exc:
print_warning(f"Failed to kill {what}: {exc!r}")
def stop(self) -> None:
# Method called from a different thread to stop this thread
self._stopped = True
self._kill()
def _run_process(self, runtests: WorkerRunTests, output_fd: int,
tmp_dir: StrPath | None = None) -> int | None:
popen = create_worker_process(runtests, output_fd, tmp_dir)
self._popen = popen
self._killed = False
try:
if self._stopped:
# If kill() has been called before self._popen was set,
# the process may still be running. Call kill() again
# to ensure that the process is killed.
self._kill()
raise ExitThread
try:
# gh-94026: stdout+stderr are written to tempfile
retcode = popen.wait(timeout=self.timeout)
assert retcode is not None
return retcode
except subprocess.TimeoutExpired:
if self._stopped:
# kill() has been called: communicate() fails on reading
# closed stdout
raise ExitThread
# On timeout, kill the process
self._kill()
# None means TIMEOUT for the caller
retcode = None
# bpo-38207: Don't attempt to call communicate() again: it
# can hang until all child processes using stdout
# pipes complete.
except OSError:
if self._stopped:
# kill() has been called: communicate() fails
# on reading closed stdout
raise ExitThread
raise
return None
except:
self._kill()
raise
finally:
self._wait_completed()
self._popen = None
def create_stdout(self, stack: contextlib.ExitStack) -> TextIO:
"""Create stdout temporary file (file descriptor)."""
if MS_WINDOWS:
# gh-95027: When stdout is not a TTY, Python uses the ANSI code
# page for the sys.stdout encoding. If the main process runs in a
# terminal, sys.stdout uses WindowsConsoleIO with UTF-8 encoding.
encoding = locale.getencoding()
else:
encoding = sys.stdout.encoding
# gh-94026: Write stdout+stderr to a tempfile as workaround for
# non-blocking pipes on Emscripten with NodeJS.
# gh-109425: Use "backslashreplace" error handler: log corrupted
# stdout+stderr, instead of failing with a UnicodeDecodeError and not
# logging stdout+stderr at all.
stdout_file = tempfile.TemporaryFile('w+',
encoding=encoding,
errors='backslashreplace')
stack.enter_context(stdout_file)
return stdout_file
def create_json_file(self, stack: contextlib.ExitStack) -> tuple[JsonFile, TextIO | None]:
"""Create JSON file."""
json_file_use_stdout = self.runtests.json_file_use_stdout()
if json_file_use_stdout:
json_file = JsonFile(None, JsonFileType.STDOUT)
json_tmpfile = None
else:
json_tmpfile = tempfile.TemporaryFile('w+', encoding='utf8')
stack.enter_context(json_tmpfile)
json_fd = json_tmpfile.fileno()
if MS_WINDOWS:
# The msvcrt module is only available on Windows;
# we run mypy with `--platform=linux` in CI
json_handle: int = msvcrt.get_osfhandle(json_fd) # type: ignore[attr-defined]
json_file = JsonFile(json_handle,
JsonFileType.WINDOWS_HANDLE)
else:
json_file = JsonFile(json_fd, JsonFileType.UNIX_FD)
return (json_file, json_tmpfile)
def create_worker_runtests(self, test_name: TestName, json_file: JsonFile) -> WorkerRunTests:
tests = (test_name,)
if self.runtests.rerun:
match_tests = self.runtests.get_match_tests(test_name)
else:
match_tests = None
kwargs: dict[str, Any] = {}
if match_tests:
kwargs['match_tests'] = [(test, True) for test in match_tests]
if self.runtests.output_on_failure:
kwargs['verbose'] = True
kwargs['output_on_failure'] = False
return self.runtests.create_worker_runtests(
tests=tests,
json_file=json_file,
**kwargs)
def run_tmp_files(self, worker_runtests: WorkerRunTests,
stdout_fd: int) -> tuple[int | None, list[StrPath]]:
# gh-93353: Check for leaked temporary files in the parent process,
# since the deletion of temporary files can happen late during
# Python finalization: too late for libregrtest.
if not support.is_wasi:
# Don't check for leaked temporary files and directories if Python is
# run on WASI. WASI doesn't pass environment variables like TMPDIR to
# worker processes.
tmp_dir = tempfile.mkdtemp(prefix="test_python_")
tmp_dir = os.path.abspath(tmp_dir)
try:
retcode = self._run_process(worker_runtests,
stdout_fd, tmp_dir)
finally:
tmp_files = os.listdir(tmp_dir)
os_helper.rmtree(tmp_dir)
else:
retcode = self._run_process(worker_runtests, stdout_fd)
tmp_files = []
return (retcode, tmp_files)
def read_stdout(self, stdout_file: TextIO) -> str:
stdout_file.seek(0)
try:
return stdout_file.read().strip()
except Exception as exc:
# gh-101634: Catch UnicodeDecodeError if stdout cannot be
# decoded from encoding
raise WorkerError(self.test_name,
f"Cannot read process stdout: {exc}",
stdout=None,
state=State.WORKER_BUG)
def read_json(self, json_file: JsonFile, json_tmpfile: TextIO | None,
stdout: str) -> tuple[TestResult, str]:
try:
if json_tmpfile is not None:
json_tmpfile.seek(0)
worker_json = json_tmpfile.read()
elif json_file.file_type == JsonFileType.STDOUT:
stdout, _, worker_json = stdout.rpartition("\n")
stdout = stdout.rstrip()
else:
with json_file.open(encoding='utf8') as json_fp:
worker_json = json_fp.read()
except Exception as exc:
# gh-101634: Catch UnicodeDecodeError if stdout cannot be
# decoded from encoding
err_msg = f"Failed to read worker process JSON: {exc}"
raise WorkerError(self.test_name, err_msg, stdout,
state=State.WORKER_BUG)
if not worker_json:
raise WorkerError(self.test_name, "empty JSON", stdout,
state=State.WORKER_BUG)
try:
result = TestResult.from_json(worker_json)
except Exception as exc:
# gh-101634: Catch UnicodeDecodeError if stdout cannot be
# decoded from encoding
err_msg = f"Failed to parse worker process JSON: {exc}"
raise WorkerError(self.test_name, err_msg, stdout,
state=State.WORKER_BUG)
return (result, stdout)
def _runtest(self, test_name: TestName) -> MultiprocessResult:
with contextlib.ExitStack() as stack:
stdout_file = self.create_stdout(stack)
json_file, json_tmpfile = self.create_json_file(stack)
worker_runtests = self.create_worker_runtests(test_name, json_file)
retcode: str | int | None
retcode, tmp_files = self.run_tmp_files(worker_runtests,
stdout_file.fileno())
stdout = self.read_stdout(stdout_file)
if retcode is None:
raise WorkerError(self.test_name, stdout=stdout,
err_msg=None,
state=State.TIMEOUT)
if retcode != 0:
name = get_signal_name(retcode)
if name:
retcode = f"{retcode} ({name})"
raise WorkerError(self.test_name, f"Exit code {retcode}", stdout,
state=State.WORKER_FAILED)
result, stdout = self.read_json(json_file, json_tmpfile, stdout)
if tmp_files:
msg = (f'\n\n'
f'Warning -- {test_name} leaked temporary files '
f'({len(tmp_files)}): {", ".join(sorted(tmp_files))}')
stdout += msg
result.set_env_changed()
return MultiprocessResult(result, stdout)
def run(self) -> None:
fail_fast = self.runtests.fail_fast
fail_env_changed = self.runtests.fail_env_changed
try:
while not self._stopped:
try:
test_name = next(self.pending)
except StopIteration:
break
self.start_time = time.monotonic()
self.test_name = test_name
try:
mp_result = self._runtest(test_name)
except WorkerError as exc:
mp_result = exc.mp_result
finally:
self.test_name = _NOT_RUNNING
mp_result.result.duration = time.monotonic() - self.start_time
self.output.put((False, mp_result))
if mp_result.result.must_stop(fail_fast, fail_env_changed):
break
except ExitThread:
pass
except BaseException:
self.output.put((True, traceback.format_exc()))
finally:
self.output.put(WorkerThreadExited())
def _wait_completed(self) -> None:
popen = self._popen
# only needed for mypy:
if popen is None:
raise ValueError("Should never access `._popen` before calling `.run()`")
try:
popen.wait(WAIT_COMPLETED_TIMEOUT)
except (subprocess.TimeoutExpired, OSError) as exc:
print_warning(f"Failed to wait for {self} completion "
f"(timeout={format_duration(WAIT_COMPLETED_TIMEOUT)}): "
f"{exc!r}")
def wait_stopped(self, start_time: float) -> None:
# bpo-38207: RunWorkers.stop_workers() called self.stop()
# which killed the process. Sometimes, killing the process from the
# main thread does not interrupt popen.communicate() in
# WorkerThread thread. This loop with a timeout is a workaround
# for that.
#
# Moreover, if this method fails to join the thread, it is likely
# that Python will hang at exit while calling threading._shutdown()
# which tries again to join the blocked thread. Regrtest.main()
# uses EXIT_TIMEOUT to workaround this second bug.
while True:
# Write a message every second
self.join(1.0)
if not self.is_alive():
break
dt = time.monotonic() - start_time
self.log(f"Waiting for {self} thread for {format_duration(dt)}")
if dt > WAIT_KILLED_TIMEOUT:
print_warning(f"Failed to join {self} in {format_duration(dt)}")
break
def get_running(workers: list[WorkerThread]) -> str | None:
running: list[str] = []
for worker in workers:
test_name = worker.test_name
if test_name == _NOT_RUNNING:
continue
dt = time.monotonic() - worker.start_time
if dt >= PROGRESS_MIN_TIME:
text = f'{test_name} ({format_duration(dt)})'
running.append(text)
if not running:
return None
return f"running ({len(running)}): {', '.join(running)}"
class RunWorkers:
def __init__(self, num_workers: int, runtests: RunTests,
logger: Logger, results: TestResults) -> None:
self.num_workers = num_workers
self.runtests = runtests
self.log = logger.log
self.display_progress = logger.display_progress
self.results: TestResults = results
self.live_worker_count = 0
self.output: queue.Queue[QueueContent] = queue.Queue()
tests_iter = runtests.iter_tests()
self.pending = MultiprocessIterator(tests_iter)
self.timeout = runtests.timeout
if self.timeout is not None:
# Rely on faulthandler to kill a worker process. This timeout is
# for when faulthandler fails to kill a worker process. Give a maximum
# of 5 minutes to faulthandler to kill the worker.
self.worker_timeout: float | None = min(self.timeout * 1.5, self.timeout + 5 * 60)
else:
self.worker_timeout = None
self.workers: list[WorkerThread] = []
jobs = self.runtests.get_jobs()
if jobs is not None:
# Don't spawn more threads than the number of jobs:
# these worker threads would never get anything to do.
self.num_workers = min(self.num_workers, jobs)
def start_workers(self) -> None:
self.workers = [WorkerThread(index, self)
for index in range(1, self.num_workers + 1)]
jobs = self.runtests.get_jobs()
if jobs is not None:
tests = count(jobs, 'test')
else:
tests = 'tests'
nworkers = len(self.workers)
processes = plural(nworkers, "process", "processes")
msg = (f"Run {tests} in parallel using "
f"{nworkers} worker {processes}")
if self.timeout and self.worker_timeout is not None:
msg += (" (timeout: %s, worker timeout: %s)"
% (format_duration(self.timeout),
format_duration(self.worker_timeout)))
self.log(msg)
for worker in self.workers:
worker.start()
self.live_worker_count += 1
def stop_workers(self) -> None:
start_time = time.monotonic()
for worker in self.workers:
worker.stop()
for worker in self.workers:
worker.wait_stopped(start_time)
def _get_result(self) -> QueueOutput | None:
pgo = self.runtests.pgo
use_faulthandler = (self.timeout is not None)
# bpo-46205: check the status of workers every iteration to avoid
# waiting forever on an empty queue.
while self.live_worker_count > 0:
if use_faulthandler:
faulthandler.dump_traceback_later(MAIN_PROCESS_TIMEOUT,
exit=True)
# wait for a thread
try:
result = self.output.get(timeout=PROGRESS_UPDATE)
if isinstance(result, WorkerThreadExited):
self.live_worker_count -= 1
continue
return result
except queue.Empty:
pass
if not pgo:
# display progress
running = get_running(self.workers)
if running:
self.log(running)
return None
def display_result(self, mp_result: MultiprocessResult) -> None:
result = mp_result.result
pgo = self.runtests.pgo
text = str(result)
if mp_result.err_msg:
# WORKER_BUG
text += ' (%s)' % mp_result.err_msg
elif (result.duration and result.duration >= PROGRESS_MIN_TIME and not pgo):
text += ' (%s)' % format_duration(result.duration)
if not pgo:
running = get_running(self.workers)
if running:
text += f' -- {running}'
self.display_progress(self.test_index, text)
def _process_result(self, item: QueueOutput) -> TestResult:
"""Returns True if test runner must stop."""
if item[0]:
# Thread got an exception
format_exc = item[1]
print_warning(f"regrtest worker thread failed: {format_exc}")
result = TestResult("<regrtest worker>", state=State.WORKER_BUG)
self.results.accumulate_result(result, self.runtests)
return result
self.test_index += 1
mp_result = item[1]
result = mp_result.result
self.results.accumulate_result(result, self.runtests)
self.display_result(mp_result)
# Display worker stdout
if not self.runtests.output_on_failure:
show_stdout = True
else:
# --verbose3 ignores stdout on success
show_stdout = (result.state != State.PASSED)
if show_stdout:
stdout = mp_result.worker_stdout
if stdout:
print(stdout, flush=True)
return result
def run(self) -> None:
fail_fast = self.runtests.fail_fast
fail_env_changed = self.runtests.fail_env_changed
self.start_workers()
self.test_index = 0
try:
while True:
item = self._get_result()
if item is None:
break
result = self._process_result(item)
if result.must_stop(fail_fast, fail_env_changed):
break
except KeyboardInterrupt:
print()
self.results.interrupted = True
finally:
if self.timeout is not None:
faulthandler.cancel_dump_traceback_later()
# Always ensure that all worker processes have stopped
# running by the time we exit this function
self.pending.stop()
self.stop_workers()


@ -0,0 +1,222 @@
import contextlib
import dataclasses
import json
import os
import shlex
import subprocess
import sys
from typing import Any, Iterator
from test import support
from .utils import (
StrPath, StrJSON, TestTuple, TestName, TestFilter, FilterTuple, FilterDict)
class JsonFileType:
UNIX_FD = "UNIX_FD"
WINDOWS_HANDLE = "WINDOWS_HANDLE"
STDOUT = "STDOUT"
@dataclasses.dataclass(slots=True, frozen=True)
class JsonFile:
# file type depends on file_type:
# - UNIX_FD: file descriptor (int)
# - WINDOWS_HANDLE: handle (int)
# - STDOUT: use process stdout (None)
file: int | None
file_type: str
def configure_subprocess(self, popen_kwargs: dict[str, Any]) -> None:
match self.file_type:
case JsonFileType.UNIX_FD:
# Unix file descriptor
popen_kwargs['pass_fds'] = [self.file]
case JsonFileType.WINDOWS_HANDLE:
# Windows handle
# We run mypy with `--platform=linux` so it complains about this:
startupinfo = subprocess.STARTUPINFO() # type: ignore[attr-defined]
startupinfo.lpAttributeList = {"handle_list": [self.file]}
popen_kwargs['startupinfo'] = startupinfo
@contextlib.contextmanager
def inherit_subprocess(self) -> Iterator[None]:
if sys.platform == 'win32' and self.file_type == JsonFileType.WINDOWS_HANDLE:
os.set_handle_inheritable(self.file, True)
try:
yield
finally:
os.set_handle_inheritable(self.file, False)
else:
yield
def open(self, mode='r', *, encoding):
if self.file_type == JsonFileType.STDOUT:
raise ValueError("for STDOUT file type, just use sys.stdout")
file = self.file
if self.file_type == JsonFileType.WINDOWS_HANDLE:
import msvcrt
# Create a file descriptor from the handle
file = msvcrt.open_osfhandle(file, os.O_WRONLY)
return open(file, mode, encoding=encoding)
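# Illustrative example (not part of the module): passing a JSON file to a
# worker as a Unix file descriptor via Popen's pass_fds (json_fd stands in
# for a real descriptor):
#
#     json_file = JsonFile(json_fd, JsonFileType.UNIX_FD)
#     popen_kwargs = {}
#     json_file.configure_subprocess(popen_kwargs)
#     assert popen_kwargs['pass_fds'] == [json_fd]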
@dataclasses.dataclass(slots=True, frozen=True)
class HuntRefleak:
warmups: int
runs: int
filename: StrPath
def bisect_cmd_args(self) -> list[str]:
# Ignore filename since it can contain colon (":"),
# and usually it's not used. Use the default filename.
return ["-R", f"{self.warmups}:{self.runs}:"]
@dataclasses.dataclass(slots=True, frozen=True)
class RunTests:
tests: TestTuple
fail_fast: bool
fail_env_changed: bool
match_tests: TestFilter
match_tests_dict: FilterDict | None
rerun: bool
forever: bool
pgo: bool
pgo_extended: bool
output_on_failure: bool
timeout: float | None
verbose: int
quiet: bool
hunt_refleak: HuntRefleak | None
test_dir: StrPath | None
use_junit: bool
coverage: bool
memory_limit: str | None
gc_threshold: int | None
use_resources: tuple[str, ...]
python_cmd: tuple[str, ...] | None
randomize: bool
random_seed: int | str
def copy(self, **override) -> 'RunTests':
state = dataclasses.asdict(self)
state.update(override)
return RunTests(**state)
def create_worker_runtests(self, **override) -> 'WorkerRunTests':
state = dataclasses.asdict(self)
state.update(override)
return WorkerRunTests(**state)
def get_match_tests(self, test_name: TestName) -> FilterTuple | None:
if self.match_tests_dict is not None:
return self.match_tests_dict.get(test_name, None)
else:
return None
def get_jobs(self) -> int | None:
# Number of run_single_test() calls needed to run all tests.
# None means that there is no upper bound (--forever option).
if self.forever:
return None
return len(self.tests)
def iter_tests(self) -> Iterator[TestName]:
if self.forever:
while True:
yield from self.tests
else:
yield from self.tests
def json_file_use_stdout(self) -> bool:
# Use STDOUT in two cases:
#
# - If --python command line option is used;
# - On Emscripten and WASI.
#
# On other platforms, UNIX_FD or WINDOWS_HANDLE can be used.
return (
bool(self.python_cmd)
or support.is_emscripten
or support.is_wasi
)
def create_python_cmd(self) -> list[str]:
python_opts = support.args_from_interpreter_flags()
if self.python_cmd is not None:
executable = self.python_cmd
# Remove -E option, since --python=COMMAND can set PYTHON
# environment variables, such as PYTHONPATH, in the worker
# process.
python_opts = [opt for opt in python_opts if opt != "-E"]
else:
executable = (sys.executable,)
cmd = [*executable, *python_opts]
if '-u' not in python_opts:
cmd.append('-u') # Unbuffered stdout and stderr
if self.coverage:
cmd.append("-Xpresite=test.cov")
return cmd
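# Illustrative example (not part of the module): with python_cmd=None and
# coverage enabled, the worker command is roughly
# [sys.executable, *support.args_from_interpreter_flags(), '-u',
#  '-Xpresite=test.cov'], with '-u' added only if not already present.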
def bisect_cmd_args(self) -> list[str]:
args = []
if self.fail_fast:
args.append("--failfast")
if self.fail_env_changed:
args.append("--fail-env-changed")
if self.timeout:
args.append(f"--timeout={self.timeout}")
if self.hunt_refleak is not None:
args.extend(self.hunt_refleak.bisect_cmd_args())
if self.test_dir:
args.extend(("--testdir", self.test_dir))
if self.memory_limit:
args.extend(("--memlimit", self.memory_limit))
if self.gc_threshold:
args.append(f"--threshold={self.gc_threshold}")
if self.use_resources:
args.extend(("-u", ','.join(self.use_resources)))
if self.python_cmd:
cmd = shlex.join(self.python_cmd)
args.extend(("--python", cmd))
if self.randomize:
args.append(f"--randomize")
args.append(f"--randseed={self.random_seed}")
return args
@dataclasses.dataclass(slots=True, frozen=True)
class WorkerRunTests(RunTests):
json_file: JsonFile
def as_json(self) -> StrJSON:
return json.dumps(self, cls=_EncodeRunTests)
@staticmethod
def from_json(worker_json: StrJSON) -> 'WorkerRunTests':
return json.loads(worker_json, object_hook=_decode_runtests)
class _EncodeRunTests(json.JSONEncoder):
def default(self, o: Any) -> dict[str, Any]:
if isinstance(o, WorkerRunTests):
result = dataclasses.asdict(o)
result["__runtests__"] = True
return result
else:
return super().default(o)
def _decode_runtests(data: dict[str, Any]) -> RunTests | dict[str, Any]:
if "__runtests__" in data:
data.pop('__runtests__')
if data['hunt_refleak']:
data['hunt_refleak'] = HuntRefleak(**data['hunt_refleak'])
if data['json_file']:
data['json_file'] = JsonFile(**data['json_file'])
return WorkerRunTests(**data)
else:
return data


@ -0,0 +1,331 @@
import builtins
import locale
import os
import sys
import threading
from test import support
from test.support import os_helper
from .utils import print_warning
class SkipTestEnvironment(Exception):
pass
# Unit tests are supposed to leave the execution environment unchanged
# once they complete. But sometimes tests have bugs, especially when
# tests fail, and the changes to environment go on to mess up other
# tests. This can cause issues with buildbot stability, since tests
# are run in random order and so problems may appear to come and go.
# There are a few things we can save and restore to mitigate this, and
# the following context manager handles this task.
class saved_test_environment:
"""Save bits of the test environment and restore them at block exit.
with saved_test_environment(test_name, verbose, quiet):
#stuff
Unless quiet is True, a warning is printed to stderr if any of
the saved items was changed by the test. The support.environment_altered
attribute is set to True if a change is detected.
If verbose is more than 1, the before and after state of changed
items is also printed.
"""
def __init__(self, test_name, verbose, quiet, *, pgo):
self.test_name = test_name
self.verbose = verbose
self.quiet = quiet
self.pgo = pgo
# To add things to save and restore, add a name XXX to the resources list
# and add corresponding get_XXX/restore_XXX functions. get_XXX should
# return the value to be saved and compared against a second call to the
# get function when test execution completes. restore_XXX should accept
# the saved value and restore the resource using it. It will be called if
# and only if a change in the value is detected.
#
# Note: XXX will have any '.' replaced with '_' characters when determining
# the corresponding method names.
resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr',
'os.environ', 'sys.path', 'sys.path_hooks', '__import__',
'warnings.filters', 'asyncore.socket_map',
'logging._handlers', 'logging._handlerList', 'sys.gettrace',
'sys.warnoptions',
# multiprocessing.process._cleanup() may release ref
# to a thread, so check processes first.
'multiprocessing.process._dangling', 'threading._dangling',
'sysconfig._CONFIG_VARS', 'sysconfig._INSTALL_SCHEMES',
'files', 'locale', 'warnings.showwarning',
'shutil_archive_formats', 'shutil_unpack_formats',
'asyncio.events._event_loop_policy',
'urllib.requests._url_tempfiles', 'urllib.requests._opener',
)
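# Illustrative sketch (not part of the module): a hypothetical resource
# named 'sys.flags_dummy' would need methods derived from the dotted name
# with '.' replaced by '_':
#
#     def get_sys_flags_dummy(self):
#         return sys.flags
#     def restore_sys_flags_dummy(self, saved):
#         pass  # sys.flags is read-only; nothing to restore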
def get_module(self, name):
# function for restore() methods
return sys.modules[name]
def try_get_module(self, name):
# function for get() methods
try:
return self.get_module(name)
except KeyError:
raise SkipTestEnvironment
def get_urllib_requests__url_tempfiles(self):
urllib_request = self.try_get_module('urllib.request')
return list(urllib_request._url_tempfiles)
def restore_urllib_requests__url_tempfiles(self, tempfiles):
for filename in tempfiles:
os_helper.unlink(filename)
def get_urllib_requests__opener(self):
urllib_request = self.try_get_module('urllib.request')
return urllib_request._opener
def restore_urllib_requests__opener(self, opener):
urllib_request = self.get_module('urllib.request')
urllib_request._opener = opener
def get_asyncio_events__event_loop_policy(self):
self.try_get_module('asyncio')
return support.maybe_get_event_loop_policy()
def restore_asyncio_events__event_loop_policy(self, policy):
asyncio = self.get_module('asyncio')
asyncio.set_event_loop_policy(policy)
def get_sys_argv(self):
return id(sys.argv), sys.argv, sys.argv[:]
def restore_sys_argv(self, saved_argv):
sys.argv = saved_argv[1]
sys.argv[:] = saved_argv[2]
def get_cwd(self):
return os.getcwd()
def restore_cwd(self, saved_cwd):
os.chdir(saved_cwd)
def get_sys_stdout(self):
return sys.stdout
def restore_sys_stdout(self, saved_stdout):
sys.stdout = saved_stdout
def get_sys_stderr(self):
return sys.stderr
def restore_sys_stderr(self, saved_stderr):
sys.stderr = saved_stderr
def get_sys_stdin(self):
return sys.stdin
def restore_sys_stdin(self, saved_stdin):
sys.stdin = saved_stdin
def get_os_environ(self):
return id(os.environ), os.environ, dict(os.environ)
def restore_os_environ(self, saved_environ):
os.environ = saved_environ[1]
os.environ.clear()
os.environ.update(saved_environ[2])
def get_sys_path(self):
return id(sys.path), sys.path, sys.path[:]
def restore_sys_path(self, saved_path):
sys.path = saved_path[1]
sys.path[:] = saved_path[2]
def get_sys_path_hooks(self):
return id(sys.path_hooks), sys.path_hooks, sys.path_hooks[:]
def restore_sys_path_hooks(self, saved_hooks):
sys.path_hooks = saved_hooks[1]
sys.path_hooks[:] = saved_hooks[2]
def get_sys_gettrace(self):
return sys.gettrace()
def restore_sys_gettrace(self, trace_fxn):
sys.settrace(trace_fxn)
def get___import__(self):
return builtins.__import__
def restore___import__(self, import_):
builtins.__import__ = import_
def get_warnings_filters(self):
warnings = self.try_get_module('warnings')
return id(warnings.filters), warnings.filters, warnings.filters[:]
def restore_warnings_filters(self, saved_filters):
warnings = self.get_module('warnings')
warnings.filters = saved_filters[1]
warnings.filters[:] = saved_filters[2]
def get_asyncore_socket_map(self):
asyncore = sys.modules.get('test.support.asyncore')
# XXX Making a copy keeps objects alive until __exit__ gets called.
return asyncore and asyncore.socket_map.copy() or {}
def restore_asyncore_socket_map(self, saved_map):
asyncore = sys.modules.get('test.support.asyncore')
if asyncore is not None:
asyncore.close_all(ignore_all=True)
asyncore.socket_map.update(saved_map)
def get_shutil_archive_formats(self):
shutil = self.try_get_module('shutil')
# we could call get_archive_formats() but that only returns the
# registry keys; we want to check the values too (the functions that
# are registered)
return shutil._ARCHIVE_FORMATS, shutil._ARCHIVE_FORMATS.copy()
def restore_shutil_archive_formats(self, saved):
shutil = self.get_module('shutil')
shutil._ARCHIVE_FORMATS = saved[0]
shutil._ARCHIVE_FORMATS.clear()
shutil._ARCHIVE_FORMATS.update(saved[1])
def get_shutil_unpack_formats(self):
shutil = self.try_get_module('shutil')
return shutil._UNPACK_FORMATS, shutil._UNPACK_FORMATS.copy()
def restore_shutil_unpack_formats(self, saved):
shutil = self.get_module('shutil')
shutil._UNPACK_FORMATS = saved[0]
shutil._UNPACK_FORMATS.clear()
shutil._UNPACK_FORMATS.update(saved[1])
def get_logging__handlers(self):
logging = self.try_get_module('logging')
# _handlers is a WeakValueDictionary
return id(logging._handlers), logging._handlers, logging._handlers.copy()
def restore_logging__handlers(self, saved_handlers):
# Can't easily revert the logging state
pass
def get_logging__handlerList(self):
logging = self.try_get_module('logging')
# _handlerList is a list of weakrefs to handlers
return id(logging._handlerList), logging._handlerList, logging._handlerList[:]
def restore_logging__handlerList(self, saved_handlerList):
# Can't easily revert the logging state
pass
def get_sys_warnoptions(self):
return id(sys.warnoptions), sys.warnoptions, sys.warnoptions[:]
def restore_sys_warnoptions(self, saved_options):
sys.warnoptions = saved_options[1]
sys.warnoptions[:] = saved_options[2]
# Controlling dangling references to Thread objects can make it easier
# to track reference leaks.
def get_threading__dangling(self):
# This copies the weakrefs without making any strong reference
return threading._dangling.copy()
def restore_threading__dangling(self, saved):
threading._dangling.clear()
threading._dangling.update(saved)
# Same for Process objects
def get_multiprocessing_process__dangling(self):
multiprocessing_process = self.try_get_module('multiprocessing.process')
# Unjoined process objects can survive after process exits
multiprocessing_process._cleanup()
# This copies the weakrefs without making any strong reference
return multiprocessing_process._dangling.copy()
def restore_multiprocessing_process__dangling(self, saved):
multiprocessing_process = self.get_module('multiprocessing.process')
multiprocessing_process._dangling.clear()
multiprocessing_process._dangling.update(saved)
def get_sysconfig__CONFIG_VARS(self):
# make sure the dict is initialized
sysconfig = self.try_get_module('sysconfig')
sysconfig.get_config_var('prefix')
return (id(sysconfig._CONFIG_VARS), sysconfig._CONFIG_VARS,
dict(sysconfig._CONFIG_VARS))
def restore_sysconfig__CONFIG_VARS(self, saved):
sysconfig = self.get_module('sysconfig')
sysconfig._CONFIG_VARS = saved[1]
sysconfig._CONFIG_VARS.clear()
sysconfig._CONFIG_VARS.update(saved[2])
def get_sysconfig__INSTALL_SCHEMES(self):
sysconfig = self.try_get_module('sysconfig')
return (id(sysconfig._INSTALL_SCHEMES), sysconfig._INSTALL_SCHEMES,
sysconfig._INSTALL_SCHEMES.copy())
def restore_sysconfig__INSTALL_SCHEMES(self, saved):
sysconfig = self.get_module('sysconfig')
sysconfig._INSTALL_SCHEMES = saved[1]
sysconfig._INSTALL_SCHEMES.clear()
sysconfig._INSTALL_SCHEMES.update(saved[2])
def get_files(self):
# XXX: Maybe add an allow-list here?
return sorted(fn + ('/' if os.path.isdir(fn) else '')
for fn in os.listdir()
if not fn.startswith(".hypothesis"))
def restore_files(self, saved_value):
fn = os_helper.TESTFN
if fn not in saved_value and (fn + '/') not in saved_value:
if os.path.isfile(fn):
os_helper.unlink(fn)
elif os.path.isdir(fn):
os_helper.rmtree(fn)
_lc = [getattr(locale, lc) for lc in dir(locale)
if lc.startswith('LC_')]
def get_locale(self):
pairings = []
for lc in self._lc:
try:
pairings.append((lc, locale.setlocale(lc, None)))
except (TypeError, ValueError):
continue
return pairings
def restore_locale(self, saved):
for lc, setting in saved:
locale.setlocale(lc, setting)
def get_warnings_showwarning(self):
warnings = self.try_get_module('warnings')
return warnings.showwarning
def restore_warnings_showwarning(self, fxn):
warnings = self.get_module('warnings')
warnings.showwarning = fxn
def resource_info(self):
for name in self.resources:
method_suffix = name.replace('.', '_')
get_name = 'get_' + method_suffix
restore_name = 'restore_' + method_suffix
yield name, getattr(self, get_name), getattr(self, restore_name)
def __enter__(self):
self.saved_values = []
for name, get, restore in self.resource_info():
try:
original = get()
except SkipTestEnvironment:
continue
self.saved_values.append((name, get, restore, original))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
saved_values = self.saved_values
self.saved_values = None
# Some resources use weak references
support.gc_collect()
for name, get, restore, original in saved_values:
current = get()
# Check for changes to the resource's value
if current != original:
support.environment_altered = True
restore(original)
if not self.quiet and not self.pgo:
print_warning(
f"{name} was modified by {self.test_name}\n"
f" Before: {original}\n"
f" After: {current} ")
return False


@ -0,0 +1,134 @@
import faulthandler
import gc
import os
import random
import signal
import sys
import unittest
from test import support
from test.support.os_helper import TESTFN_UNDECODABLE, FS_NONASCII
from .filter import set_match_tests
from .runtests import RunTests
from .utils import (
setup_unraisable_hook, setup_threading_excepthook, fix_umask,
adjust_rlimit_nofile)
UNICODE_GUARD_ENV = "PYTHONREGRTEST_UNICODE_GUARD"
def setup_test_dir(testdir: str | None) -> None:
if testdir:
# Prepend test directory to sys.path, so runtest() will be able
# to locate tests
sys.path.insert(0, os.path.abspath(testdir))
def setup_process() -> None:
fix_umask()
assert sys.__stderr__ is not None, "sys.__stderr__ is None"
try:
stderr_fd = sys.__stderr__.fileno()
except (ValueError, AttributeError):
# Catch ValueError to catch io.UnsupportedOperation on TextIOBase
# and ValueError on a closed stream.
#
# Catch AttributeError for stderr being None.
pass
else:
# Display the Python traceback on fatal errors (e.g. segfault)
faulthandler.enable(all_threads=True, file=stderr_fd)
# Display the Python traceback on SIGALRM or SIGUSR1 signal
signals = []
if hasattr(signal, 'SIGALRM'):
signals.append(signal.SIGALRM)
if hasattr(signal, 'SIGUSR1'):
signals.append(signal.SIGUSR1)
for signum in signals:
faulthandler.register(signum, chain=True, file=stderr_fd)
adjust_rlimit_nofile()
support.record_original_stdout(sys.stdout)
# Sometimes __path__ and __file__ are not absolute (e.g. while running from
# Lib/) and, if we change the CWD to run the tests in a temporary dir, some
# imports might fail. This affects only the modules imported before os.chdir().
# These modules are searched first in sys.path[0] (so '' -- the CWD) and if
# they are found in the CWD their __file__ and __path__ will be relative (this
# happens before the chdir). All the modules imported after the chdir are
# not found in the CWD, and since the other paths in sys.path[1:] are absolute
# (site.py absolutizes them), the __file__ and __path__ will be absolute too.
# Therefore it is necessary to manually absolutize the __file__ and __path__ of
# the packages to prevent later imports from failing when the CWD is different.
for module in sys.modules.values():
if hasattr(module, '__path__'):
for index, path in enumerate(module.__path__):
module.__path__[index] = os.path.abspath(path)
if getattr(module, '__file__', None):
module.__file__ = os.path.abspath(module.__file__) # type: ignore[type-var]
if hasattr(sys, 'addaudithook'):
# Add an auditing hook for all tests to ensure PySys_Audit is tested
def _test_audit_hook(name, args):
pass
sys.addaudithook(_test_audit_hook)
setup_unraisable_hook()
setup_threading_excepthook()
# Ensure there's a non-ASCII character in env vars at all times to force
# tests to consider this case. See BPO-44647 for details.
if TESTFN_UNDECODABLE and os.supports_bytes_environ:
os.environb.setdefault(UNICODE_GUARD_ENV.encode(), TESTFN_UNDECODABLE)
elif FS_NONASCII:
os.environ.setdefault(UNICODE_GUARD_ENV, FS_NONASCII)
def setup_tests(runtests: RunTests) -> None:
support.verbose = runtests.verbose
support.failfast = runtests.fail_fast
support.PGO = runtests.pgo
support.PGO_EXTENDED = runtests.pgo_extended
set_match_tests(runtests.match_tests)
if runtests.use_junit:
support.junit_xml_list = []
from .testresult import RegressionTestResult
RegressionTestResult.USE_XML = True
else:
support.junit_xml_list = None
if runtests.memory_limit is not None:
support.set_memlimit(runtests.memory_limit)
support.suppress_msvcrt_asserts(runtests.verbose >= 2)
support.use_resources = runtests.use_resources
timeout = runtests.timeout
if timeout is not None:
# For a slow buildbot worker, increase SHORT_TIMEOUT and LONG_TIMEOUT
support.LOOPBACK_TIMEOUT = max(support.LOOPBACK_TIMEOUT, timeout / 120)
# don't increase INTERNET_TIMEOUT
support.SHORT_TIMEOUT = max(support.SHORT_TIMEOUT, timeout / 40)
support.LONG_TIMEOUT = max(support.LONG_TIMEOUT, timeout / 4)
# If --timeout is short: reduce timeouts
support.LOOPBACK_TIMEOUT = min(support.LOOPBACK_TIMEOUT, timeout)
support.INTERNET_TIMEOUT = min(support.INTERNET_TIMEOUT, timeout)
support.SHORT_TIMEOUT = min(support.SHORT_TIMEOUT, timeout)
support.LONG_TIMEOUT = min(support.LONG_TIMEOUT, timeout)
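# Worked example (illustrative): with --timeout=300, the floors above are
# 300/120=2.5s, 300/40=7.5s and 300/4=75s; every timeout is then capped
# at 300s, so a short --timeout only ever shrinks these values.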
if runtests.hunt_refleak:
# private attribute that mypy doesn't know about:
unittest.BaseTestSuite._cleanup = False # type: ignore[attr-defined]
if runtests.gc_threshold is not None:
gc.set_threshold(runtests.gc_threshold)
random.seed(runtests.random_seed)


@ -0,0 +1,322 @@
import faulthandler
import gc
import importlib
import io
import sys
import time
import traceback
import unittest
from test import support
from test.support import threading_helper
from .filter import match_test
from .result import State, TestResult, TestStats
from .runtests import RunTests
from .save_env import saved_test_environment
from .setup import setup_tests
from .testresult import get_test_runner
from .utils import (
TestName,
clear_caches, remove_testfn, abs_module_name, print_warning)
# Minimum duration of a test to display its duration or to mention that
# the test is running in background
PROGRESS_MIN_TIME = 30.0 # seconds
def run_unittest(test_mod):
loader = unittest.TestLoader()
tests = loader.loadTestsFromModule(test_mod)
for error in loader.errors:
print(error, file=sys.stderr)
if loader.errors:
raise Exception("errors while loading tests")
_filter_suite(tests, match_test)
return _run_suite(tests)
def _filter_suite(suite, pred):
"""Recursively filter test cases in a suite based on a predicate."""
newtests = []
for test in suite._tests:
if isinstance(test, unittest.TestSuite):
_filter_suite(test, pred)
newtests.append(test)
else:
if pred(test):
newtests.append(test)
suite._tests = newtests
def _run_suite(suite):
"""Run tests from a unittest.TestSuite-derived class."""
runner = get_test_runner(sys.stdout,
verbosity=support.verbose,
capture_output=(support.junit_xml_list is not None))
result = runner.run(suite)
if support.junit_xml_list is not None:
import xml.etree.ElementTree as ET
xml_elem = result.get_xml_element()
xml_str = ET.tostring(xml_elem).decode('ascii')
support.junit_xml_list.append(xml_str)
if not result.testsRun and not result.skipped and not result.errors:
raise support.TestDidNotRun
if not result.wasSuccessful():
stats = TestStats.from_unittest(result)
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
err = "multiple errors occurred"
if not support.verbose: err += "; run in verbose mode for details"
errors = [(str(tc), exc_str) for tc, exc_str in result.errors]
failures = [(str(tc), exc_str) for tc, exc_str in result.failures]
raise support.TestFailedWithDetails(err, errors, failures, stats=stats)
return result
def regrtest_runner(result: TestResult, test_func, runtests: RunTests) -> None:
# Run test_func(), collect statistics, and detect reference and memory
# leaks.
if runtests.hunt_refleak:
from .refleak import runtest_refleak
refleak, test_result = runtest_refleak(result.test_name, test_func,
runtests.hunt_refleak,
runtests.quiet)
else:
test_result = test_func()
refleak = False
if refleak:
result.state = State.REFLEAK
stats: TestStats | None
match test_result:
case TestStats():
stats = test_result
case unittest.TestResult():
stats = TestStats.from_unittest(test_result)
case None:
print_warning(f"{result.test_name} test runner returned None: {test_func}")
stats = None
case _:
# Don't import doctest at top level since only a few tests return
# a doctest.TestResults instance.
import doctest
if isinstance(test_result, doctest.TestResults):
stats = TestStats.from_doctest(test_result)
else:
print_warning(f"Unknown test result type: {type(test_result)}")
stats = None
result.stats = stats
# Storage of uncollectable GC objects (gc.garbage)
GC_GARBAGE = []
def _load_run_test(result: TestResult, runtests: RunTests) -> None:
# Load the test module and run the tests.
test_name = result.test_name
module_name = abs_module_name(test_name, runtests.test_dir)
test_mod = importlib.import_module(module_name)
if hasattr(test_mod, "test_main"):
# https://github.com/python/cpython/issues/89392
raise Exception(f"Module {test_name} defines test_main() which "
f"is no longer supported by regrtest")
def test_func():
return run_unittest(test_mod)
try:
regrtest_runner(result, test_func, runtests)
finally:
# First kill any dangling references to open files etc.
# This can also issue some ResourceWarnings which would otherwise get
# triggered during the following test run, and possibly produce
# failures.
support.gc_collect()
remove_testfn(test_name, runtests.verbose)
if gc.garbage:
support.environment_altered = True
print_warning(f"{test_name} created {len(gc.garbage)} "
f"uncollectable object(s)")
# move the uncollectable objects somewhere,
# so we don't see them again
GC_GARBAGE.extend(gc.garbage)
gc.garbage.clear()
support.reap_children()
def _runtest_env_changed_exc(result: TestResult, runtests: RunTests,
display_failure: bool = True) -> None:
# Handle exceptions, detect environment changes.
# Reset the environment_altered flag to detect if a test altered
# the environment
support.environment_altered = False
pgo = runtests.pgo
if pgo:
display_failure = False
quiet = runtests.quiet
test_name = result.test_name
try:
clear_caches()
support.gc_collect()
with saved_test_environment(test_name,
runtests.verbose, quiet, pgo=pgo):
_load_run_test(result, runtests)
except support.ResourceDenied as exc:
if not quiet and not pgo:
print(f"{test_name} skipped -- {exc}", flush=True)
result.state = State.RESOURCE_DENIED
return
except unittest.SkipTest as exc:
if not quiet and not pgo:
print(f"{test_name} skipped -- {exc}", flush=True)
result.state = State.SKIPPED
return
except support.TestFailedWithDetails as exc:
msg = f"test {test_name} failed"
if display_failure:
msg = f"{msg} -- {exc}"
print(msg, file=sys.stderr, flush=True)
result.state = State.FAILED
result.errors = exc.errors
result.failures = exc.failures
result.stats = exc.stats
return
except support.TestFailed as exc:
msg = f"test {test_name} failed"
if display_failure:
msg = f"{msg} -- {exc}"
print(msg, file=sys.stderr, flush=True)
result.state = State.FAILED
result.stats = exc.stats
return
except support.TestDidNotRun:
result.state = State.DID_NOT_RUN
return
except KeyboardInterrupt:
print()
result.state = State.INTERRUPTED
return
except:
if not pgo:
msg = traceback.format_exc()
print(f"test {test_name} crashed -- {msg}",
file=sys.stderr, flush=True)
result.state = State.UNCAUGHT_EXC
return
if support.environment_altered:
result.set_env_changed()
# Don't override the state if it was already set (REFLEAK or ENV_CHANGED)
if result.state is None:
result.state = State.PASSED
def _runtest(result: TestResult, runtests: RunTests) -> None:
# Capture stdout and stderr, set faulthandler timeout,
# and create JUnit XML report.
verbose = runtests.verbose
output_on_failure = runtests.output_on_failure
timeout = runtests.timeout
if timeout is not None and threading_helper.can_start_thread:
use_timeout = True
faulthandler.dump_traceback_later(timeout, exit=True)
else:
use_timeout = False
try:
setup_tests(runtests)
if output_on_failure:
support.verbose = True
stream = io.StringIO()
orig_stdout = sys.stdout
orig_stderr = sys.stderr
print_warning = support.print_warning
orig_print_warnings_stderr = print_warning.orig_stderr
output = None
try:
sys.stdout = stream
sys.stderr = stream
# print_warning() writes into the temporary stream to preserve
                # message order. If support.environment_altered becomes true,
# warnings will be written to sys.stderr below.
print_warning.orig_stderr = stream
_runtest_env_changed_exc(result, runtests, display_failure=False)
# Ignore output if the test passed successfully
if result.state != State.PASSED:
output = stream.getvalue()
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
print_warning.orig_stderr = orig_print_warnings_stderr
if output is not None:
sys.stderr.write(output)
sys.stderr.flush()
else:
# Tell tests to be moderately quiet
support.verbose = verbose
_runtest_env_changed_exc(result, runtests,
display_failure=not verbose)
xml_list = support.junit_xml_list
if xml_list:
result.xml_data = xml_list
finally:
if use_timeout:
faulthandler.cancel_dump_traceback_later()
support.junit_xml_list = None
def run_single_test(test_name: TestName, runtests: RunTests) -> TestResult:
"""Run a single test.
test_name -- the name of the test
Returns a TestResult.
If runtests.use_junit, xml_data is a list containing each generated
testsuite element.
"""
start_time = time.perf_counter()
result = TestResult(test_name)
pgo = runtests.pgo
try:
# gh-117783: don't immortalize deferred objects when tracking
        # refleaks. Only relevant for the free-threaded build.
with support.suppress_immortalization(runtests.hunt_refleak):
_runtest(result, runtests)
except:
if not pgo:
msg = traceback.format_exc()
print(f"test {test_name} crashed -- {msg}",
file=sys.stderr, flush=True)
result.state = State.UNCAUGHT_EXC
sys.stdout.flush()
sys.stderr.flush()
result.duration = time.perf_counter() - start_time
return result


@@ -0,0 +1,193 @@
'''Test runner and result class for the regression test suite.
'''
import functools
import io
import sys
import time
import traceback
import unittest
from test import support
from test.libregrtest.utils import sanitize_xml
class RegressionTestResult(unittest.TextTestResult):
USE_XML = False
def __init__(self, stream, descriptions, verbosity):
super().__init__(stream=stream, descriptions=descriptions,
verbosity=2 if verbosity else 0)
self.buffer = True
if self.USE_XML:
from xml.etree import ElementTree as ET
from datetime import datetime, UTC
self.__ET = ET
self.__suite = ET.Element('testsuite')
self.__suite.set('start',
datetime.now(UTC)
.replace(tzinfo=None)
.isoformat(' '))
self.__e = None
self.__start_time = None
@classmethod
def __getId(cls, test):
try:
test_id = test.id
except AttributeError:
return str(test)
        try:
            return test_id()
        except TypeError:
            return str(test_id)
def startTest(self, test):
super().startTest(test)
if self.USE_XML:
self.__e = e = self.__ET.SubElement(self.__suite, 'testcase')
self.__start_time = time.perf_counter()
def _add_result(self, test, capture=False, **args):
if not self.USE_XML:
return
e = self.__e
self.__e = None
if e is None:
return
ET = self.__ET
e.set('name', args.pop('name', self.__getId(test)))
e.set('status', args.pop('status', 'run'))
e.set('result', args.pop('result', 'completed'))
if self.__start_time:
e.set('time', f'{time.perf_counter() - self.__start_time:0.6f}')
if capture:
if self._stdout_buffer is not None:
stdout = self._stdout_buffer.getvalue().rstrip()
ET.SubElement(e, 'system-out').text = sanitize_xml(stdout)
if self._stderr_buffer is not None:
stderr = self._stderr_buffer.getvalue().rstrip()
ET.SubElement(e, 'system-err').text = sanitize_xml(stderr)
for k, v in args.items():
if not k or not v:
continue
e2 = ET.SubElement(e, k)
if hasattr(v, 'items'):
for k2, v2 in v.items():
if k2:
e2.set(k2, sanitize_xml(str(v2)))
else:
e2.text = sanitize_xml(str(v2))
else:
e2.text = sanitize_xml(str(v))
@classmethod
def __makeErrorDict(cls, err_type, err_value, err_tb):
if isinstance(err_type, type):
if err_type.__module__ == 'builtins':
typename = err_type.__name__
else:
typename = f'{err_type.__module__}.{err_type.__name__}'
else:
typename = repr(err_type)
msg = traceback.format_exception(err_type, err_value, None)
tb = traceback.format_exception(err_type, err_value, err_tb)
return {
'type': typename,
'message': ''.join(msg),
'': ''.join(tb),
}
def addError(self, test, err):
self._add_result(test, True, error=self.__makeErrorDict(*err))
super().addError(test, err)
def addExpectedFailure(self, test, err):
self._add_result(test, True, output=self.__makeErrorDict(*err))
super().addExpectedFailure(test, err)
def addFailure(self, test, err):
self._add_result(test, True, failure=self.__makeErrorDict(*err))
super().addFailure(test, err)
if support.failfast:
self.stop()
def addSkip(self, test, reason):
self._add_result(test, skipped=reason)
super().addSkip(test, reason)
def addSuccess(self, test):
self._add_result(test)
super().addSuccess(test)
def addUnexpectedSuccess(self, test):
self._add_result(test, outcome='UNEXPECTED_SUCCESS')
super().addUnexpectedSuccess(test)
def get_xml_element(self):
if not self.USE_XML:
raise ValueError("USE_XML is false")
e = self.__suite
e.set('tests', str(self.testsRun))
e.set('errors', str(len(self.errors)))
e.set('failures', str(len(self.failures)))
return e
class QuietRegressionTestRunner:
def __init__(self, stream, buffer=False):
self.result = RegressionTestResult(stream, None, 0)
self.result.buffer = buffer
def run(self, test):
test(self.result)
return self.result
def get_test_runner_class(verbosity, buffer=False):
if verbosity:
return functools.partial(unittest.TextTestRunner,
resultclass=RegressionTestResult,
buffer=buffer,
verbosity=verbosity)
return functools.partial(QuietRegressionTestRunner, buffer=buffer)
def get_test_runner(stream, verbosity, capture_output=False):
return get_test_runner_class(verbosity, capture_output)(stream)
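# Example (illustrative sketch): any non-zero verbosity selects the verbose
# unittest.TextTestRunner; verbosity 0 selects QuietRegressionTestRunner:
#
#   get_test_runner(sys.stdout, verbosity=2)  # TextTestRunner
#   get_test_runner(sys.stdout, verbosity=0)  # QuietRegressionTestRunner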
if __name__ == '__main__':
import xml.etree.ElementTree as ET
RegressionTestResult.USE_XML = True
class TestTests(unittest.TestCase):
def test_pass(self):
pass
def test_pass_slow(self):
time.sleep(1.0)
def test_fail(self):
print('stdout', file=sys.stdout)
print('stderr', file=sys.stderr)
self.fail('failure message')
def test_error(self):
print('stdout', file=sys.stdout)
print('stderr', file=sys.stderr)
raise RuntimeError('error message')
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestTests))
stream = io.StringIO()
runner_cls = get_test_runner_class(sum(a == '-v' for a in sys.argv))
runner = runner_cls(sys.stdout)
result = runner.run(suite)
print('Output:', stream.getvalue())
print('XML: ', end='')
for s in ET.tostringlist(result.get_xml_element()):
print(s.decode(), end='')
print()


@@ -0,0 +1,34 @@
# Set of tests run by default if --tsan is specified. The tests below were
# chosen because they use threads and run in a reasonable amount of time.
TSAN_TESTS = [
# TODO: enable more of test_capi once bugs are fixed (GH-116908, GH-116909).
'test_capi.test_mem',
'test_capi.test_pyatomic',
'test_code',
'test_enum',
'test_functools',
'test_httpservers',
'test_imaplib',
'test_importlib',
'test_io',
'test_logging',
'test_opcache',
'test_queue',
'test_signal',
'test_socket',
'test_sqlite3',
'test_ssl',
'test_syslog',
'test_thread',
'test_threadedtempfile',
'test_threading',
'test_threading_local',
'test_threadsignals',
'test_weakref',
]
def setup_tsan_tests(cmdline_args) -> None:
if not cmdline_args:
cmdline_args[:] = TSAN_TESTS[:]
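# Example (illustrative sketch): the argument list is filled in place, and
# only when the user passed no tests on the command line:
#
#   args = []
#   setup_tsan_tests(args)   # args now equals TSAN_TESTS
#   args = ['test_io']
#   setup_tsan_tests(args)   # args is left untouched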


@@ -0,0 +1,799 @@
import contextlib
import faulthandler
import locale
import math
import os.path
import platform
import random
import re
import shlex
import signal
import subprocess
import sys
import sysconfig
import tempfile
import textwrap
from collections.abc import Callable, Iterable
from test import support
from test.support import os_helper
from test.support import threading_helper
# All temporary files and temporary directories created by libregrtest should
# use TMP_PREFIX so cleanup_temp_dir() can remove them all.
TMP_PREFIX = 'test_python_'
WORK_DIR_PREFIX = TMP_PREFIX
WORKER_WORK_DIR_PREFIX = WORK_DIR_PREFIX + 'worker_'
# bpo-38203: Maximum delay in seconds to exit Python (call Py_Finalize()).
# Used to protect against threading._shutdown() hang.
# Must be smaller than buildbot "1200 seconds without output" limit.
EXIT_TIMEOUT = 120.0
ALL_RESOURCES = ('audio', 'curses', 'largefile', 'network',
'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui', 'walltime')
# Other resources excluded from --use=all:
#
# - extralargefile (ex: test_zipfile64): really too slow to be enabled
# "by default"
# - tzdata: while needed to validate fully test_datetime, it makes
# test_datetime too slow (15-20 min on some buildbots) and so is disabled by
# default (see bpo-30822).
RESOURCE_NAMES = ALL_RESOURCES + ('extralargefile', 'tzdata')
# Types for type hints
StrPath = str
TestName = str
StrJSON = str
TestTuple = tuple[TestName, ...]
TestList = list[TestName]
# --match and --ignore options: list of patterns
# ('*' joker character can be used)
TestFilter = list[tuple[TestName, bool]]
FilterTuple = tuple[TestName, ...]
FilterDict = dict[TestName, FilterTuple]
def format_duration(seconds: float) -> str:
ms = math.ceil(seconds * 1e3)
seconds, ms = divmod(ms, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
parts = []
if hours:
parts.append('%s hour' % hours)
if minutes:
parts.append('%s min' % minutes)
if seconds:
if parts:
# 2 min 1 sec
parts.append('%s sec' % seconds)
else:
# 1.0 sec
parts.append('%.1f sec' % (seconds + ms / 1000))
if not parts:
return '%s ms' % ms
parts = parts[:2]
return ' '.join(parts)
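# Example (illustrative sketch) of the strings format_duration() produces:
#
#   format_duration(0.0005)  -> '1 ms'
#   format_duration(1.0)     -> '1.0 sec'
#   format_duration(61.0)    -> '1 min 1 sec'
#   format_duration(3905.0)  -> '1 hour 5 min'  (at most two parts are kept)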
def strip_py_suffix(names: list[str] | None) -> None:
if not names:
return
for idx, name in enumerate(names):
basename, ext = os.path.splitext(name)
if ext == '.py':
names[idx] = basename
def plural(n: int, singular: str, plural: str | None = None) -> str:
if n == 1:
return singular
elif plural is not None:
return plural
else:
return singular + 's'
def count(n: int, word: str) -> str:
if n == 1:
return f"{n} {word}"
else:
return f"{n} {word}s"
def printlist(x, width=70, indent=4, file=None):
"""Print the elements of iterable x to stdout.
Optional arg width (default 70) is the maximum line length.
Optional arg indent (default 4) is the number of blanks with which to
begin each line.
"""
blanks = ' ' * indent
# Print the sorted list: 'x' may be a '--random' list or a set()
print(textwrap.fill(' '.join(str(elt) for elt in sorted(x)), width,
initial_indent=blanks, subsequent_indent=blanks),
file=file)
def print_warning(msg: str) -> None:
support.print_warning(msg)
orig_unraisablehook: Callable[..., None] | None = None
def regrtest_unraisable_hook(unraisable) -> None:
global orig_unraisablehook
support.environment_altered = True
support.print_warning("Unraisable exception")
old_stderr = sys.stderr
try:
support.flush_std_streams()
sys.stderr = support.print_warning.orig_stderr
assert orig_unraisablehook is not None, "orig_unraisablehook not set"
orig_unraisablehook(unraisable)
sys.stderr.flush()
finally:
sys.stderr = old_stderr
def setup_unraisable_hook() -> None:
global orig_unraisablehook
orig_unraisablehook = sys.unraisablehook
sys.unraisablehook = regrtest_unraisable_hook
orig_threading_excepthook: Callable[..., None] | None = None
def regrtest_threading_excepthook(args) -> None:
global orig_threading_excepthook
support.environment_altered = True
support.print_warning(f"Uncaught thread exception: {args.exc_type.__name__}")
old_stderr = sys.stderr
try:
support.flush_std_streams()
sys.stderr = support.print_warning.orig_stderr
assert orig_threading_excepthook is not None, "orig_threading_excepthook not set"
orig_threading_excepthook(args)
sys.stderr.flush()
finally:
sys.stderr = old_stderr
def setup_threading_excepthook() -> None:
global orig_threading_excepthook
import threading
orig_threading_excepthook = threading.excepthook
threading.excepthook = regrtest_threading_excepthook
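# Note on clear_caches() below: it deliberately looks modules up in
# sys.modules rather than importing them. A module that was never imported
# has no cache to purge, and importing it here would itself alter the test
# environment.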
def clear_caches():
# Clear the warnings registry, so they can be displayed again
for mod in sys.modules.values():
if hasattr(mod, '__warningregistry__'):
del mod.__warningregistry__
# Flush standard output, so that buffered data is sent to the OS and
# associated Python objects are reclaimed.
for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
if stream is not None:
stream.flush()
try:
re = sys.modules['re']
except KeyError:
pass
else:
re.purge()
try:
_strptime = sys.modules['_strptime']
except KeyError:
pass
else:
_strptime._regex_cache.clear()
try:
urllib_parse = sys.modules['urllib.parse']
except KeyError:
pass
else:
urllib_parse.clear_cache()
try:
urllib_request = sys.modules['urllib.request']
except KeyError:
pass
else:
urllib_request.urlcleanup()
try:
linecache = sys.modules['linecache']
except KeyError:
pass
else:
linecache.clearcache()
try:
mimetypes = sys.modules['mimetypes']
except KeyError:
pass
else:
mimetypes._default_mime_types()
try:
filecmp = sys.modules['filecmp']
except KeyError:
pass
else:
filecmp._cache.clear()
try:
struct = sys.modules['struct']
except KeyError:
pass
else:
struct._clearcache()
try:
doctest = sys.modules['doctest']
except KeyError:
pass
else:
doctest.master = None
try:
ctypes = sys.modules['ctypes']
except KeyError:
pass
else:
ctypes._reset_cache()
try:
typing = sys.modules['typing']
except KeyError:
pass
else:
for f in typing._cleanups:
f()
import inspect
abs_classes = filter(inspect.isabstract, typing.__dict__.values())
for abc in abs_classes:
for obj in abc.__subclasses__() + [abc]:
obj._abc_caches_clear()
try:
fractions = sys.modules['fractions']
except KeyError:
pass
else:
fractions._hash_algorithm.cache_clear()
try:
inspect = sys.modules['inspect']
except KeyError:
pass
else:
inspect._shadowed_dict_from_weakref_mro_tuple.cache_clear()
inspect._filesbymodname.clear()
inspect.modulesbyfile.clear()
try:
importlib_metadata = sys.modules['importlib.metadata']
except KeyError:
pass
else:
importlib_metadata.FastPath.__new__.cache_clear()
def get_build_info():
# Get most important configure and build options as a list of strings.
# Example: ['debug', 'ASAN+MSAN'] or ['release', 'LTO+PGO'].
config_args = sysconfig.get_config_var('CONFIG_ARGS') or ''
cflags = sysconfig.get_config_var('PY_CFLAGS') or ''
cflags += ' ' + (sysconfig.get_config_var('PY_CFLAGS_NODIST') or '')
ldflags_nodist = sysconfig.get_config_var('PY_LDFLAGS_NODIST') or ''
build = []
# --disable-gil
if sysconfig.get_config_var('Py_GIL_DISABLED'):
if not sys.flags.ignore_environment:
PYTHON_GIL = os.environ.get('PYTHON_GIL', None)
if PYTHON_GIL:
PYTHON_GIL = (PYTHON_GIL == '1')
else:
PYTHON_GIL = None
free_threading = "free_threading"
if PYTHON_GIL is not None:
free_threading = f"{free_threading} GIL={int(PYTHON_GIL)}"
build.append(free_threading)
if hasattr(sys, 'gettotalrefcount'):
# --with-pydebug
build.append('debug')
if '-DNDEBUG' in cflags:
build.append('without_assert')
else:
build.append('release')
if '--with-assertions' in config_args:
build.append('with_assert')
elif '-DNDEBUG' not in cflags:
build.append('with_assert')
# --enable-experimental-jit
tier2 = re.search('-D_Py_TIER2=([0-9]+)', cflags)
if tier2:
tier2 = int(tier2.group(1))
if not sys.flags.ignore_environment:
PYTHON_JIT = os.environ.get('PYTHON_JIT', None)
if PYTHON_JIT:
PYTHON_JIT = (PYTHON_JIT != '0')
else:
PYTHON_JIT = None
if tier2 == 1: # =yes
            if PYTHON_JIT is False:
jit = 'JIT=off'
else:
jit = 'JIT'
elif tier2 == 3: # =yes-off
if PYTHON_JIT:
jit = 'JIT'
else:
jit = 'JIT=off'
elif tier2 == 4: # =interpreter
            if PYTHON_JIT is False:
jit = 'JIT-interpreter=off'
else:
jit = 'JIT-interpreter'
elif tier2 == 6: # =interpreter-off (Secret option!)
if PYTHON_JIT:
jit = 'JIT-interpreter'
else:
jit = 'JIT-interpreter=off'
elif '-D_Py_JIT' in cflags:
jit = 'JIT'
else:
jit = None
if jit:
build.append(jit)
# --enable-framework=name
framework = sysconfig.get_config_var('PYTHONFRAMEWORK')
if framework:
build.append(f'framework={framework}')
# --enable-shared
shared = int(sysconfig.get_config_var('PY_ENABLE_SHARED') or '0')
if shared:
build.append('shared')
# --with-lto
optimizations = []
if '-flto=thin' in ldflags_nodist:
optimizations.append('ThinLTO')
elif '-flto' in ldflags_nodist:
optimizations.append('LTO')
if support.check_cflags_pgo():
# PGO (--enable-optimizations)
optimizations.append('PGO')
if support.check_bolt_optimized():
# BOLT (--enable-bolt)
optimizations.append('BOLT')
if optimizations:
build.append('+'.join(optimizations))
# --with-address-sanitizer
sanitizers = []
if support.check_sanitizer(address=True):
sanitizers.append("ASAN")
# --with-memory-sanitizer
if support.check_sanitizer(memory=True):
sanitizers.append("MSAN")
# --with-undefined-behavior-sanitizer
if support.check_sanitizer(ub=True):
sanitizers.append("UBSAN")
# --with-thread-sanitizer
if support.check_sanitizer(thread=True):
sanitizers.append("TSAN")
if sanitizers:
build.append('+'.join(sanitizers))
# --with-trace-refs
if hasattr(sys, 'getobjects'):
build.append("TraceRefs")
# --enable-pystats
if hasattr(sys, '_stats_on'):
build.append("pystats")
# --with-valgrind
if sysconfig.get_config_var('WITH_VALGRIND'):
build.append("valgrind")
# --with-dtrace
if sysconfig.get_config_var('WITH_DTRACE'):
build.append("dtrace")
return build
def get_temp_dir(tmp_dir: StrPath | None = None) -> StrPath:
if tmp_dir:
tmp_dir = os.path.expanduser(tmp_dir)
else:
# When tests are run from the Python build directory, it is best practice
# to keep the test files in a subfolder. This eases the cleanup of leftover
# files using the "make distclean" command.
if sysconfig.is_python_build():
if not support.is_wasi:
tmp_dir = sysconfig.get_config_var('abs_builddir')
if tmp_dir is None:
tmp_dir = sysconfig.get_config_var('abs_srcdir')
if not tmp_dir:
# gh-74470: On Windows, only srcdir is available. Using
# abs_builddir mostly matters on UNIX when building
# Python out of the source tree, especially when the
# source tree is read only.
tmp_dir = sysconfig.get_config_var('srcdir')
if not tmp_dir:
raise RuntimeError(
"Could not determine the correct value for tmp_dir"
)
tmp_dir = os.path.join(tmp_dir, 'build')
else:
# WASI platform
tmp_dir = sysconfig.get_config_var('projectbase')
if not tmp_dir:
raise RuntimeError(
"sysconfig.get_config_var('projectbase') "
f"unexpectedly returned {tmp_dir!r} on WASI"
)
tmp_dir = os.path.join(tmp_dir, 'build')
                # When get_temp_dir() is called in a worker process, the
                # get_temp_dir() path differs from the one in the parent
                # process, which is not a WASI process. So the parent does not
                # create the same "tmp_dir" as the test worker process.
os.makedirs(tmp_dir, exist_ok=True)
else:
tmp_dir = tempfile.gettempdir()
return os.path.abspath(tmp_dir)
def fix_umask() -> None:
if support.is_emscripten:
# Emscripten has default umask 0o777, which breaks some tests.
# see https://github.com/emscripten-core/emscripten/issues/17269
old_mask = os.umask(0)
if old_mask == 0o777:
os.umask(0o027)
else:
os.umask(old_mask)
def get_work_dir(parent_dir: StrPath, worker: bool = False) -> StrPath:
# Define a writable temp dir that will be used as cwd while running
# the tests. The name of the dir includes the pid to allow parallel
# testing (see the -j option).
# Emscripten and WASI have stubbed getpid(), Emscripten has only
# millisecond clock resolution. Use randint() instead.
    if support.is_emscripten or support.is_wasi:
        nonce = random.randint(0, 1_000_000)
    else:
        nonce = os.getpid()
    if worker:
        work_dir = WORKER_WORK_DIR_PREFIX + str(nonce)
    else:
        work_dir = WORK_DIR_PREFIX + str(nonce)
work_dir += os_helper.FS_NONASCII
work_dir = os.path.join(parent_dir, work_dir)
return work_dir
@contextlib.contextmanager
def exit_timeout():
try:
yield
except SystemExit as exc:
# bpo-38203: Python can hang at exit in Py_Finalize(), especially
# on threading._shutdown() call: put a timeout
if threading_helper.can_start_thread:
faulthandler.dump_traceback_later(EXIT_TIMEOUT, exit=True)
sys.exit(exc.code)
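# Usage sketch: wrap a whole run so that a SystemExit which then hangs in
# Py_Finalize() still aborts with a traceback after EXIT_TIMEOUT seconds:
#
#   with exit_timeout():
#       main()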
def remove_testfn(test_name: TestName, verbose: int) -> None:
# Try to clean up os_helper.TESTFN if left behind.
#
# While tests shouldn't leave any files or directories behind, when a test
# fails that can be tedious for it to arrange. The consequences can be
# especially nasty on Windows, since if a test leaves a file open, it
# cannot be deleted by name (while there's nothing we can do about that
# here either, we can display the name of the offending test, which is a
# real help).
name = os_helper.TESTFN
if not os.path.exists(name):
return
nuker: Callable[[str], None]
if os.path.isdir(name):
import shutil
kind, nuker = "directory", shutil.rmtree
elif os.path.isfile(name):
kind, nuker = "file", os.unlink
else:
raise RuntimeError(f"os.path says {name!r} exists but is neither "
f"directory nor file")
if verbose:
print_warning(f"{test_name} left behind {kind} {name!r}")
support.environment_altered = True
try:
import stat
# fix possible permissions problems that might prevent cleanup
os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
nuker(name)
except Exception as exc:
print_warning(f"{test_name} left behind {kind} {name!r} "
f"and it couldn't be removed: {exc}")
def abs_module_name(test_name: TestName, test_dir: StrPath | None) -> TestName:
if test_name.startswith('test.') or test_dir:
return test_name
else:
# Import it from the test package
return 'test.' + test_name
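# Example (illustrative sketch):
#
#   abs_module_name('test_os', None)        -> 'test.test_os'
#   abs_module_name('test.test_os', None)   -> 'test.test_os'
#   abs_module_name('test_os', '/some/dir') -> 'test_os'  (custom test_dir)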
# gh-90681: When rerunning tests, we might need to rerun the whole
# class or module suite if some of its life-cycle hooks fail.
# Test level hooks are not affected.
_TEST_LIFECYCLE_HOOKS = frozenset((
'setUpClass', 'tearDownClass',
'setUpModule', 'tearDownModule',
))
def normalize_test_name(test_full_name: str, *,
is_error: bool = False) -> str | None:
short_name = test_full_name.split(" ")[0]
if is_error and short_name in _TEST_LIFECYCLE_HOOKS:
if test_full_name.startswith(('setUpModule (', 'tearDownModule (')):
            # If setUpModule() or tearDownModule() failed, don't filter
            # tests by the test file name: rerun the whole module unfiltered.
return None
# This means that we have a failure in a life-cycle hook,
# we need to rerun the whole module or class suite.
# Basically the error looks like this:
# ERROR: setUpClass (test.test_reg_ex.RegTest)
# or
# ERROR: setUpModule (test.test_reg_ex)
# So, we need to parse the class / module name.
lpar = test_full_name.index('(')
rpar = test_full_name.index(')')
return test_full_name[lpar + 1: rpar].split('.')[-1]
return short_name
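# Example (illustrative sketch):
#
#   normalize_test_name('test_foo (test.test_mod.Case.test_foo)')
#       -> 'test_foo'
#   normalize_test_name('setUpClass (test.test_mod.Case)', is_error=True)
#       -> 'Case'   # rerun the whole class suite
#   normalize_test_name('setUpModule (test.test_mod)', is_error=True)
#       -> None     # rerun the whole module, unfiltered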
def adjust_rlimit_nofile() -> None:
"""
On macOS the default fd limit (RLIMIT_NOFILE) is sometimes too low (256)
for our test suite to succeed. Raise it to something more reasonable. 1024
is a common Linux default.
"""
try:
import resource
except ImportError:
return
fd_limit, max_fds = resource.getrlimit(resource.RLIMIT_NOFILE)
desired_fds = 1024
if fd_limit < desired_fds and fd_limit < max_fds:
new_fd_limit = min(desired_fds, max_fds)
try:
resource.setrlimit(resource.RLIMIT_NOFILE,
(new_fd_limit, max_fds))
print(f"Raised RLIMIT_NOFILE: {fd_limit} -> {new_fd_limit}")
except (ValueError, OSError) as err:
print_warning(f"Unable to raise RLIMIT_NOFILE from {fd_limit} to "
f"{new_fd_limit}: {err}.")
def get_host_runner() -> str:
if (hostrunner := os.environ.get("_PYTHON_HOSTRUNNER")) is None:
hostrunner = sysconfig.get_config_var("HOSTRUNNER")
return hostrunner
def is_cross_compiled() -> bool:
return ('_PYTHON_HOST_PLATFORM' in os.environ)
def format_resources(use_resources: Iterable[str]) -> str:
use_resources = set(use_resources)
all_resources = set(ALL_RESOURCES)
# Express resources relative to "all"
relative_all = ['all']
for name in sorted(all_resources - use_resources):
relative_all.append(f'-{name}')
for name in sorted(use_resources - all_resources):
relative_all.append(f'{name}')
all_text = ','.join(relative_all)
all_text = f"resources: {all_text}"
# List of enabled resources
text = ','.join(sorted(use_resources))
text = f"resources ({len(use_resources)}): {text}"
# Pick the shortest string (prefer relative to all if lengths are equal)
if len(all_text) <= len(text):
return all_text
else:
return text
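# Example (illustrative sketch): whichever spelling is shorter wins:
#
#   format_resources(ALL_RESOURCES)  -> 'resources: all'
#   format_resources(['cpu'])        -> 'resources (1): cpu'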
def display_header(use_resources: tuple[str, ...],
python_cmd: tuple[str, ...] | None) -> None:
# Print basic platform information
print("==", platform.python_implementation(), *sys.version.split())
print("==", platform.platform(aliased=True),
"%s-endian" % sys.byteorder)
print("== Python build:", ' '.join(get_build_info()))
print("== cwd:", os.getcwd())
cpu_count: object = os.cpu_count()
if cpu_count:
# The function is new in Python 3.13; mypy doesn't know about it yet:
process_cpu_count = os.process_cpu_count() # type: ignore[attr-defined]
if process_cpu_count and process_cpu_count != cpu_count:
cpu_count = f"{process_cpu_count} (process) / {cpu_count} (system)"
print("== CPU count:", cpu_count)
print("== encodings: locale=%s FS=%s"
% (locale.getencoding(), sys.getfilesystemencoding()))
if use_resources:
text = format_resources(use_resources)
print(f"== {text}")
else:
print("== resources: all test resources are disabled, "
"use -u option to unskip tests")
cross_compile = is_cross_compiled()
if cross_compile:
print("== cross compiled: Yes")
if python_cmd:
cmd = shlex.join(python_cmd)
print(f"== host python: {cmd}")
get_cmd = [*python_cmd, '-m', 'platform']
proc = subprocess.run(
get_cmd,
stdout=subprocess.PIPE,
text=True,
cwd=os_helper.SAVEDCWD)
stdout = proc.stdout.replace('\n', ' ').strip()
if stdout:
print(f"== host platform: {stdout}")
elif proc.returncode:
print(f"== host platform: <command failed with exit code {proc.returncode}>")
else:
hostrunner = get_host_runner()
if hostrunner:
print(f"== host runner: {hostrunner}")
# This makes it easier to remember what to set in your local
# environment when trying to reproduce a sanitizer failure.
asan = support.check_sanitizer(address=True)
msan = support.check_sanitizer(memory=True)
ubsan = support.check_sanitizer(ub=True)
tsan = support.check_sanitizer(thread=True)
sanitizers = []
if asan:
sanitizers.append("address")
if msan:
sanitizers.append("memory")
if ubsan:
sanitizers.append("undefined behavior")
if tsan:
sanitizers.append("thread")
if sanitizers:
print(f"== sanitizers: {', '.join(sanitizers)}")
for sanitizer, env_var in (
(asan, "ASAN_OPTIONS"),
(msan, "MSAN_OPTIONS"),
(ubsan, "UBSAN_OPTIONS"),
(tsan, "TSAN_OPTIONS"),
):
        options = os.environ.get(env_var)
if sanitizer and options is not None:
print(f"== {env_var}={options!r}")
print(flush=True)
def cleanup_temp_dir(tmp_dir: StrPath) -> None:
import glob
path = os.path.join(glob.escape(tmp_dir), TMP_PREFIX + '*')
print("Cleanup %s directory" % tmp_dir)
for name in glob.glob(path):
if os.path.isdir(name):
print("Remove directory: %s" % name)
os_helper.rmtree(name)
else:
print("Remove file: %s" % name)
os_helper.unlink(name)
WINDOWS_STATUS = {
0xC0000005: "STATUS_ACCESS_VIOLATION",
0xC00000FD: "STATUS_STACK_OVERFLOW",
0xC000013A: "STATUS_CONTROL_C_EXIT",
}
def get_signal_name(exitcode):
if exitcode < 0:
signum = -exitcode
try:
return signal.Signals(signum).name
except ValueError:
pass
# Shell exit code (ex: WASI build)
if 128 < exitcode < 256:
signum = exitcode - 128
try:
return signal.Signals(signum).name
except ValueError:
pass
try:
return WINDOWS_STATUS[exitcode]
except KeyError:
pass
return None
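# Example (illustrative sketch; signal names are platform dependent):
#
#   get_signal_name(-11)         -> 'SIGSEGV'  (process killed by a signal)
#   get_signal_name(139)         -> 'SIGSEGV'  (shell-style 128 + signum)
#   get_signal_name(0xC0000005)  -> 'STATUS_ACCESS_VIOLATION'
#   get_signal_name(1)           -> None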
ILLEGAL_XML_CHARS_RE = re.compile(
'['
# Control characters; newline (\x0A and \x0D) and TAB (\x09) are legal
'\x00-\x08\x0B\x0C\x0E-\x1F'
# Surrogate characters
'\uD800-\uDFFF'
# Special Unicode characters
'\uFFFE'
'\uFFFF'
# Match multiple sequential invalid characters for better efficiency
']+')
def _sanitize_xml_replace(regs):
text = regs[0]
return ''.join(f'\\x{ord(ch):02x}' if ch <= '\xff' else ascii(ch)[1:-1]
for ch in text)
def sanitize_xml(text: str) -> str:
return ILLEGAL_XML_CHARS_RE.sub(_sanitize_xml_replace, text)
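# Example (illustrative sketch): control characters and lone surrogates are
# replaced with printable escape sequences:
#
#   sanitize_xml('ok\x00\x1f')   -> 'ok\\x00\\x1f'
#   sanitize_xml('\ud800')       -> '\\ud800'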


@@ -0,0 +1,128 @@
import _overlapped
import _thread
import _winapi
import math
import struct
import winreg
# Seconds per measurement
SAMPLING_INTERVAL = 1
# Exponential damping factor to compute exponentially weighted moving average
# on 1 minute (60 seconds)
LOAD_FACTOR_1 = 1 / math.exp(SAMPLING_INTERVAL / 60)
# Initialize the load using the arithmetic mean of the first NVALUE values
# of the Processor Queue Length
NVALUE = 5
class WindowsLoadTracker():
"""
This class asynchronously reads the performance counters to calculate
the system load on Windows. A "raw" thread is used here to prevent
interference with the test suite's cases for the threading module.
"""
def __init__(self):
# make __del__ not fail if pre-flight test fails
self._running = None
self._stopped = None
# Pre-flight test for access to the performance data;
# `PermissionError` will be raised if not allowed
winreg.QueryInfoKey(winreg.HKEY_PERFORMANCE_DATA)
self._values = []
self._load = None
self._running = _overlapped.CreateEvent(None, True, False, None)
self._stopped = _overlapped.CreateEvent(None, True, False, None)
_thread.start_new_thread(self._update_load, (), {})
def _update_load(self,
# localize module access to prevent shutdown errors
_wait=_winapi.WaitForSingleObject,
_signal=_overlapped.SetEvent):
# run until signaled to stop
while _wait(self._running, 1000):
self._calculate_load()
# notify stopped
_signal(self._stopped)
def _calculate_load(self,
# localize module access to prevent shutdown errors
_query=winreg.QueryValueEx,
_hkey=winreg.HKEY_PERFORMANCE_DATA,
_unpack=struct.unpack_from):
# get the 'System' object
data, _ = _query(_hkey, '2')
# PERF_DATA_BLOCK {
# WCHAR Signature[4] 8 +
        # DWORD LittleEndian 4 +
# DWORD Version 4 +
# DWORD Revision 4 +
# DWORD TotalByteLength 4 +
# DWORD HeaderLength = 24 byte offset
# ...
# }
obj_start, = _unpack('L', data, 24)
# PERF_OBJECT_TYPE {
# DWORD TotalByteLength
# DWORD DefinitionLength
# DWORD HeaderLength
# ...
# }
data_start, defn_start = _unpack('4xLL', data, obj_start)
data_base = obj_start + data_start
defn_base = obj_start + defn_start
# find the 'Processor Queue Length' counter (index=44)
while defn_base < data_base:
# PERF_COUNTER_DEFINITION {
# DWORD ByteLength
# DWORD CounterNameTitleIndex
# ... [7 DWORDs/28 bytes]
# DWORD CounterOffset
# }
size, idx, offset = _unpack('LL28xL', data, defn_base)
defn_base += size
if idx == 44:
counter_offset = data_base + offset
# the counter is known to be PERF_COUNTER_RAWCOUNT (DWORD)
processor_queue_length, = _unpack('L', data, counter_offset)
break
else:
return
# We use an exponentially weighted moving average, imitating the
# load calculation on Unix systems.
# https://en.wikipedia.org/wiki/Load_(computing)#Unix-style_load_calculation
# https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
if self._load is not None:
self._load = (self._load * LOAD_FACTOR_1
+ processor_queue_length * (1.0 - LOAD_FACTOR_1))
elif len(self._values) < NVALUE:
self._values.append(processor_queue_length)
else:
self._load = sum(self._values) / len(self._values)
    def close(self, kill=True):
        self.__del__()
def __del__(self,
# localize module access to prevent shutdown errors
_wait=_winapi.WaitForSingleObject,
_close=_winapi.CloseHandle,
_signal=_overlapped.SetEvent):
if self._running is not None:
# tell the update thread to quit
_signal(self._running)
# wait for the update thread to signal done
_wait(self._stopped, -1)
# cleanup events
_close(self._running)
_close(self._stopped)
self._running = self._stopped = None
def getloadavg(self):
return self._load
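# Usage sketch (assumes a Windows build where the performance counters are
# readable; the pre-flight QueryInfoKey() raises PermissionError otherwise):
#
#   tracker = WindowsLoadTracker()
#   ...                          # getloadavg() returns None until NVALUE
#   load = tracker.getloadavg()  # samples have been averaged
#   tracker.close()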


@@ -0,0 +1,116 @@
import subprocess
import sys
import os
from typing import Any, NoReturn
from test.support import os_helper, Py_DEBUG
from .setup import setup_process, setup_test_dir
from .runtests import WorkerRunTests, JsonFile, JsonFileType
from .single import run_single_test
from .utils import (
StrPath, StrJSON, TestFilter,
get_temp_dir, get_work_dir, exit_timeout)
USE_PROCESS_GROUP = (hasattr(os, "setsid") and hasattr(os, "killpg"))
def create_worker_process(runtests: WorkerRunTests, output_fd: int,
tmp_dir: StrPath | None = None) -> subprocess.Popen[str]:
worker_json = runtests.as_json()
cmd = runtests.create_python_cmd()
cmd.extend(['-m', 'test.libregrtest.worker', worker_json])
env = dict(os.environ)
if tmp_dir is not None:
env['TMPDIR'] = tmp_dir
env['TEMP'] = tmp_dir
env['TMP'] = tmp_dir
# Running the child from the same working directory as regrtest's original
# invocation ensures that TEMPDIR for the child is the same when
# sysconfig.is_python_build() is true. See issue 15300.
#
# Emscripten and WASI Python must start in the Python source code directory
# to get 'python.js' or 'python.wasm' file. Then worker_process() changes
# to a temporary directory created to run tests.
work_dir = os_helper.SAVEDCWD
kwargs: dict[str, Any] = dict(
env=env,
stdout=output_fd,
        # bpo-45410: Write stderr into stdout to keep message order
stderr=output_fd,
text=True,
close_fds=True,
cwd=work_dir,
)
if USE_PROCESS_GROUP:
kwargs['start_new_session'] = True
# Pass json_file to the worker process
json_file = runtests.json_file
json_file.configure_subprocess(kwargs)
with json_file.inherit_subprocess():
return subprocess.Popen(cmd, **kwargs)
def worker_process(worker_json: StrJSON) -> NoReturn:
runtests = WorkerRunTests.from_json(worker_json)
test_name = runtests.tests[0]
match_tests: TestFilter = runtests.match_tests
json_file: JsonFile = runtests.json_file
setup_test_dir(runtests.test_dir)
setup_process()
if runtests.rerun:
if match_tests:
matching = "matching: " + ", ".join(pattern for pattern, result in match_tests if result)
print(f"Re-running {test_name} in verbose mode ({matching})", flush=True)
else:
print(f"Re-running {test_name} in verbose mode", flush=True)
result = run_single_test(test_name, runtests)
if runtests.coverage:
if "test.cov" in sys.modules: # imported by -Xpresite=
result.covered_lines = list(sys.modules["test.cov"].coverage)
elif not Py_DEBUG:
print(
"Gathering coverage in worker processes requires --with-pydebug",
flush=True,
)
else:
raise LookupError(
"`test.cov` not found in sys.modules but coverage wanted"
)
if json_file.file_type == JsonFileType.STDOUT:
print()
result.write_json_into(sys.stdout)
else:
with json_file.open('w', encoding='utf-8') as json_fp:
result.write_json_into(json_fp)
sys.exit(0)
def main() -> NoReturn:
if len(sys.argv) != 2:
print("usage: python -m test.libregrtest.worker JSON")
sys.exit(1)
worker_json = sys.argv[1]
tmp_dir = get_temp_dir()
work_dir = get_work_dir(tmp_dir, worker=True)
with exit_timeout():
with os_helper.temp_cwd(work_dir, quiet=True):
worker_process(worker_json)
if __name__ == "__main__":
main()