Removed the Requirement to Install Python and NodeJS (Now Bundled with Borealis)
Dependencies/Python/Lib/test/support/__init__.py | 2787 (vendored, new file)
File diff suppressed because it is too large
Dependencies/Python/Lib/test/support/_hypothesis_stubs/__init__.py | 111 (vendored, new file)
@@ -0,0 +1,111 @@
from enum import Enum
import functools
import unittest

__all__ = [
    "given",
    "example",
    "assume",
    "reject",
    "register_random",
    "strategies",
    "HealthCheck",
    "settings",
    "Verbosity",
]

from . import strategies


def given(*_args, **_kwargs):
    def decorator(f):
        if examples := getattr(f, "_examples", []):

            @functools.wraps(f)
            def test_function(self):
                for example_args, example_kwargs in examples:
                    with self.subTest(*example_args, **example_kwargs):
                        f(self, *example_args, **example_kwargs)

        else:
            # If we have found no examples, we must skip the test. If @example
            # is applied after @given, it will re-wrap the test to remove the
            # skip decorator.
            test_function = unittest.skip(
                "Hypothesis required for property test with no " +
                "specified examples"
            )(f)

        test_function._given = True
        return test_function

    return decorator


def example(*args, **kwargs):
    if bool(args) == bool(kwargs):
        raise ValueError("Must specify exactly one of *args or **kwargs")

    def decorator(f):
        base_func = getattr(f, "__wrapped__", f)
        if not hasattr(base_func, "_examples"):
            base_func._examples = []

        base_func._examples.append((args, kwargs))

        if getattr(f, "_given", False):
            # If the given decorator is below all the example decorators,
            # it would be erroneously skipped, so we need to re-wrap the new
            # base function.
            f = given()(base_func)

        return f

    return decorator


def assume(condition):
    if not condition:
        raise unittest.SkipTest("Unsatisfied assumption")
    return True


def reject():
    assume(False)


def register_random(*args, **kwargs):
    pass  # pragma: no cover


def settings(*args, **kwargs):
    return lambda f: f  # pragma: nocover


class HealthCheck(Enum):
    data_too_large = 1
    filter_too_much = 2
    too_slow = 3
    return_value = 5
    large_base_example = 7
    not_a_test_method = 8

    @classmethod
    def all(cls):
        return list(cls)


class Verbosity(Enum):
    quiet = 0
    normal = 1
    verbose = 2
    debug = 3


class Phase(Enum):
    explicit = 0
    reuse = 1
    generate = 2
    target = 3
    shrink = 4
    explain = 5
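
For orientation, a minimal sketch of how these stubs behave when the real Hypothesis is absent (the test class below is illustrative, not part of the vendored file):

import unittest
from test.support.hypothesis_helper import hypothesis

class StubBehaviorExample(unittest.TestCase):
    # Under the stubs, @given runs only the explicit @example cases,
    # each one inside self.subTest(...).
    @hypothesis.example(4)
    @hypothesis.example(0)
    @hypothesis.given(hypothesis.strategies.integers())
    def test_runs_explicit_examples(self, n):
        self.assertEqual((n * 2) // 2, n)

    # With no @example cases at all, the stubbed @given skips the test.
    @hypothesis.given(hypothesis.strategies.integers())
    def test_is_skipped(self, n):
        self.fail("never reached under the stubs")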
Dependencies/Python/Lib/test/support/_hypothesis_stubs/_helpers.py | 43 (vendored, new file)
@@ -0,0 +1,43 @@
# Stub out only the subset of the interface that we actually use in our tests.
class StubClass:
    def __init__(self, *args, **kwargs):
        self.__stub_args = args
        self.__stub_kwargs = kwargs
        self.__repr = None

    def _with_repr(self, new_repr):
        new_obj = self.__class__(*self.__stub_args, **self.__stub_kwargs)
        new_obj.__repr = new_repr
        return new_obj

    def __repr__(self):
        if self.__repr is not None:
            return self.__repr

        argstr = ", ".join(self.__stub_args)
        kwargstr = ", ".join(f"{kw}={val}" for kw, val in self.__stub_kwargs.items())

        in_parens = argstr
        if kwargstr:
            in_parens += ", " + kwargstr

        return f"{self.__class__.__qualname__}({in_parens})"


def stub_factory(klass, name, *, with_repr=None, _seen={}):
    if (klass, name) not in _seen:

        class Stub(klass):
            def __init__(self, *args, **kwargs):
                super().__init__()
                self.__stub_args = args
                self.__stub_kwargs = kwargs

        Stub.__name__ = name
        Stub.__qualname__ = name
        if with_repr is not None:
            Stub._repr = None

        _seen.setdefault((klass, name, with_repr), Stub)

    return _seen[(klass, name, with_repr)]
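
A brief, hypothetical illustration of the repr machinery these helpers implement (not part of the vendored file):

from test.support._hypothesis_stubs._helpers import StubClass, stub_factory

# StubClass renders its constructor arguments back into its repr.
print(StubClass("0", max_value="10"))      # StubClass(0, max_value=10)

# stub_factory() builds (and caches) a named subclass of the given class.
Integers = stub_factory(StubClass, "hypothesis.strategies.integers")
print(Integers())                          # hypothesis.strategies.integers()
print(Integers()._with_repr("<custom>"))   # <custom>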
Dependencies/Python/Lib/test/support/_hypothesis_stubs/strategies.py | 91 (vendored, new file)
@@ -0,0 +1,91 @@
import functools

from ._helpers import StubClass, stub_factory


class StubStrategy(StubClass):
    def __make_trailing_repr(self, transformation_name, func):
        func_name = func.__name__ or repr(func)
        return f"{self!r}.{transformation_name}({func_name})"

    def map(self, pack):
        return self._with_repr(self.__make_trailing_repr("map", pack))

    def flatmap(self, expand):
        return self._with_repr(self.__make_trailing_repr("flatmap", expand))

    def filter(self, condition):
        return self._with_repr(self.__make_trailing_repr("filter", condition))

    def __or__(self, other):
        new_repr = f"one_of({self!r}, {other!r})"
        return self._with_repr(new_repr)


_STRATEGIES = {
    "binary",
    "booleans",
    "builds",
    "characters",
    "complex_numbers",
    "composite",
    "data",
    "dates",
    "datetimes",
    "decimals",
    "deferred",
    "dictionaries",
    "emails",
    "fixed_dictionaries",
    "floats",
    "fractions",
    "from_regex",
    "from_type",
    "frozensets",
    "functions",
    "integers",
    "iterables",
    "just",
    "lists",
    "none",
    "nothing",
    "one_of",
    "permutations",
    "random_module",
    "randoms",
    "recursive",
    "register_type_strategy",
    "runner",
    "sampled_from",
    "sets",
    "shared",
    "slices",
    "timedeltas",
    "times",
    "text",
    "tuples",
    "uuids",
}

__all__ = sorted(_STRATEGIES)


def composite(f):
    strategy = stub_factory(StubStrategy, f.__name__)

    @functools.wraps(f)
    def inner(*args, **kwargs):
        return strategy(*args, **kwargs)

    return inner


def __getattr__(name):
    if name not in _STRATEGIES:
        raise AttributeError(f"Unknown attribute {name}")

    return stub_factory(StubStrategy, f"hypothesis.strategies.{name}")


def __dir__():
    return __all__
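
These strategy stubs track nothing but a human-readable repr; a small hypothetical demonstration (not part of the vendored file):

from test.support._hypothesis_stubs import strategies as st

s = st.integers()           # instance of a cached StubStrategy subclass
print(s)                    # hypothesis.strategies.integers()
print(s.map(abs))           # hypothesis.strategies.integers().map(abs)
print(s | st.booleans())    # one_of(hypothesis.strategies.integers(), hypothesis.strategies.booleans())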
Dependencies/Python/Lib/test/support/ast_helper.py | 43 (vendored, new file)
@@ -0,0 +1,43 @@
import ast

class ASTTestMixin:
    """Test mixin providing basic assertions for AST nodes."""

    def assertASTEqual(self, ast1, ast2):
        # Ensure the comparisons start at an AST node
        self.assertIsInstance(ast1, ast.AST)
        self.assertIsInstance(ast2, ast.AST)

        # An AST comparison routine modeled after ast.dump(), but
        # instead of string building, it traverses the two trees
        # in lock-step.
        def traverse_compare(a, b, missing=object()):
            if type(a) is not type(b):
                self.fail(f"{type(a)!r} is not {type(b)!r}")
            if isinstance(a, ast.AST):
                for field in a._fields:
                    value1 = getattr(a, field, missing)
                    value2 = getattr(b, field, missing)
                    # Singletons are equal by definition, so further
                    # testing can be skipped.
                    if value1 is not value2:
                        traverse_compare(value1, value2)
            elif isinstance(a, list):
                try:
                    for node1, node2 in zip(a, b, strict=True):
                        traverse_compare(node1, node2)
                except ValueError:
                    # Attempt a "pretty" error a la assertSequenceEqual()
                    len1 = len(a)
                    len2 = len(b)
                    if len1 > len2:
                        what = "First"
                        diff = len1 - len2
                    else:
                        what = "Second"
                        diff = len2 - len1
                    msg = f"{what} list contains {diff} additional elements."
                    raise self.failureException(msg) from None
            elif a != b:
                self.fail(f"{a!r} != {b!r}")
        traverse_compare(ast1, ast2)
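
Typical usage is as a mixin alongside unittest.TestCase; a minimal, hypothetical sketch:

import ast
import unittest
from test.support.ast_helper import ASTTestMixin

class ParseEquivalenceTest(ASTTestMixin, unittest.TestCase):
    def test_same_source_gives_equal_trees(self):
        self.assertASTEqual(ast.parse("x = 1"), ast.parse("x = 1"))

    def test_different_trees_fail(self):
        with self.assertRaises(self.failureException):
            self.assertASTEqual(ast.parse("x = 1"), ast.parse("x = 2"))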
Dependencies/Python/Lib/test/support/asynchat.py | 314 (vendored, new file)
@@ -0,0 +1,314 @@
# TODO: This module was deprecated and removed from CPython 3.12
# Now it is a test-only helper. Any attempts to rewrite existing tests that
# are using this module and remove it completely are appreciated!
# See: https://github.com/python/cpython/issues/72719

# -*- Mode: Python; tab-width: 4 -*-
#       Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
#       Author: Sam Rushing <rushing@nightmare.com>

# ======================================================================
# Copyright 1996 by Sam Rushing
#
#                         All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================

r"""A class supporting chat-style (command/response) protocols.

This class adds support for 'chat' style protocols - where one side
sends a 'command', and the other sends a response (examples would be
the common internet protocols - smtp, nntp, ftp, etc..).

The handle_read() method looks at the input stream for the current
'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
for multi-line output), calling self.found_terminator() on its
receipt.

for example:
Say you build an async nntp client using this class.  At the start
of the connection, you'll have self.terminator set to '\r\n', in
order to process the single-line greeting.  Just before issuing a
'LIST' command you'll set it to '\r\n.\r\n'.  The output of the LIST
command will be accumulated (using your own 'collect_incoming_data'
method) up to the terminator, and then control will be returned to
you - by calling your self.found_terminator() method.
"""

from collections import deque

from test.support import asyncore


class async_chat(asyncore.dispatcher):
    """This is an abstract class.  You must derive from this class, and add
    the two methods collect_incoming_data() and found_terminator()"""

    # these are overridable defaults

    ac_in_buffer_size = 65536
    ac_out_buffer_size = 65536

    # we don't want to enable the use of encoding by default, because that is a
    # sign of an application bug that we don't want to pass silently

    use_encoding = 0
    encoding = 'latin-1'

    def __init__(self, sock=None, map=None):
        # for string terminator matching
        self.ac_in_buffer = b''

        # we use a list here rather than io.BytesIO for a few reasons...
        # del lst[:] is faster than bio.truncate(0)
        # lst = [] is faster than bio.truncate(0)
        self.incoming = []

        # we toss the use of the "simple producer" and replace it with
        # a pure deque, which the original fifo was a wrapping of
        self.producer_fifo = deque()
        asyncore.dispatcher.__init__(self, sock, map)

    def collect_incoming_data(self, data):
        raise NotImplementedError("must be implemented in subclass")

    def _collect_incoming_data(self, data):
        self.incoming.append(data)

    def _get_data(self):
        d = b''.join(self.incoming)
        del self.incoming[:]
        return d

    def found_terminator(self):
        raise NotImplementedError("must be implemented in subclass")

    def set_terminator(self, term):
        """Set the input delimiter.

        Can be a fixed string of any length, an integer, or None.
        """
        if isinstance(term, str) and self.use_encoding:
            term = bytes(term, self.encoding)
        elif isinstance(term, int) and term < 0:
            raise ValueError('the number of received bytes must be positive')
        self.terminator = term

    def get_terminator(self):
        return self.terminator

    # grab some more data from the socket,
    # throw it to the collector method,
    # check for the terminator,
    # if found, transition to the next state.

    def handle_read(self):

        try:
            data = self.recv(self.ac_in_buffer_size)
        except BlockingIOError:
            return
        except OSError:
            self.handle_error()
            return

        if isinstance(data, str) and self.use_encoding:
            data = bytes(data, self.encoding)
        self.ac_in_buffer = self.ac_in_buffer + data

        # Continue to search for self.terminator in self.ac_in_buffer,
        # while calling self.collect_incoming_data.  The while loop
        # is necessary because we might read several data+terminator
        # combos with a single recv(4096).

        while self.ac_in_buffer:
            lb = len(self.ac_in_buffer)
            terminator = self.get_terminator()
            if not terminator:
                # no terminator, collect it all
                self.collect_incoming_data(self.ac_in_buffer)
                self.ac_in_buffer = b''
            elif isinstance(terminator, int):
                # numeric terminator
                n = terminator
                if lb < n:
                    self.collect_incoming_data(self.ac_in_buffer)
                    self.ac_in_buffer = b''
                    self.terminator = self.terminator - lb
                else:
                    self.collect_incoming_data(self.ac_in_buffer[:n])
                    self.ac_in_buffer = self.ac_in_buffer[n:]
                    self.terminator = 0
                    self.found_terminator()
            else:
                # 3 cases:
                # 1) end of buffer matches terminator exactly:
                #    collect data, transition
                # 2) end of buffer matches some prefix:
                #    collect data to the prefix
                # 3) end of buffer does not match any prefix:
                #    collect data
                terminator_len = len(terminator)
                index = self.ac_in_buffer.find(terminator)
                if index != -1:
                    # we found the terminator
                    if index > 0:
                        # don't bother reporting the empty string
                        # (source of subtle bugs)
                        self.collect_incoming_data(self.ac_in_buffer[:index])
                    self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
                    # This does the Right Thing if the terminator
                    # is changed here.
                    self.found_terminator()
                else:
                    # check for a prefix of the terminator
                    index = find_prefix_at_end(self.ac_in_buffer, terminator)
                    if index:
                        if index != lb:
                            # we found a prefix, collect up to the prefix
                            self.collect_incoming_data(self.ac_in_buffer[:-index])
                            self.ac_in_buffer = self.ac_in_buffer[-index:]
                        break
                    else:
                        # no prefix, collect it all
                        self.collect_incoming_data(self.ac_in_buffer)
                        self.ac_in_buffer = b''

    def handle_write(self):
        self.initiate_send()

    def handle_close(self):
        self.close()

    def push(self, data):
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError('data argument must be byte-ish (%r)' %
                            type(data))
        sabs = self.ac_out_buffer_size
        if len(data) > sabs:
            for i in range(0, len(data), sabs):
                self.producer_fifo.append(data[i:i+sabs])
        else:
            self.producer_fifo.append(data)
        self.initiate_send()

    def push_with_producer(self, producer):
        self.producer_fifo.append(producer)
        self.initiate_send()

    def readable(self):
        "predicate for inclusion in the readable for select()"
        # cannot use the old predicate, it violates the claim of the
        # set_terminator method.

        # return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
        return 1

    def writable(self):
        "predicate for inclusion in the writable for select()"
        return self.producer_fifo or (not self.connected)

    def close_when_done(self):
        "automatically close this channel once the outgoing queue is empty"
        self.producer_fifo.append(None)

    def initiate_send(self):
        while self.producer_fifo and self.connected:
            first = self.producer_fifo[0]
            # handle empty string/buffer or None entry
            if not first:
                del self.producer_fifo[0]
                if first is None:
                    self.handle_close()
                    return

            # handle classic producer behavior
            obs = self.ac_out_buffer_size
            try:
                data = first[:obs]
            except TypeError:
                data = first.more()
                if data:
                    self.producer_fifo.appendleft(data)
                else:
                    del self.producer_fifo[0]
                continue

            if isinstance(data, str) and self.use_encoding:
                data = bytes(data, self.encoding)

            # send the data
            try:
                num_sent = self.send(data)
            except OSError:
                self.handle_error()
                return

            if num_sent:
                if num_sent < len(data) or obs < len(first):
                    self.producer_fifo[0] = first[num_sent:]
                else:
                    del self.producer_fifo[0]
            # we tried to send some actual data
            return

    def discard_buffers(self):
        # Emergencies only!
        self.ac_in_buffer = b''
        del self.incoming[:]
        self.producer_fifo.clear()


class simple_producer:

    def __init__(self, data, buffer_size=512):
        self.data = data
        self.buffer_size = buffer_size

    def more(self):
        if len(self.data) > self.buffer_size:
            result = self.data[:self.buffer_size]
            self.data = self.data[self.buffer_size:]
            return result
        else:
            result = self.data
            self.data = b''
            return result


# Given 'haystack', see if any prefix of 'needle' is at its end.  This
# assumes an exact match has already been checked.  Return the number of
# characters matched.
# for example:
# f_p_a_e("qwerty\r", "\r\n") => 1
# f_p_a_e("qwertydkjf", "\r\n") => 0
# f_p_a_e("qwerty\r\n", "\r\n") => <undefined>

# this could maybe be made faster with a computed regex?
# [answer: no; circa Python-2.0, Jan 2001]
# new python:   28961/s
# old python:   18307/s
#        re:    12820/s
#     regex:    14035/s

def find_prefix_at_end(haystack, needle):
    l = len(needle) - 1
    while l and not haystack.endswith(needle[:l]):
        l -= 1
    return l
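
To make the protocol flow concrete, here is a minimal, hypothetical subclass (not part of the vendored file) that collects CRLF-terminated lines in the style the module docstring describes:

from test.support import asynchat

class LineCollector(asynchat.async_chat):
    """Collect complete CRLF-terminated lines from an already-connected socket."""

    def __init__(self, sock):
        super().__init__(sock)
        self.lines = []
        self.set_terminator(b"\r\n")

    def collect_incoming_data(self, data):
        # buffer incoming chunks until the terminator arrives
        self._collect_incoming_data(data)

    def found_terminator(self):
        # one full line (terminator stripped) is now buffered
        self.lines.append(self._get_data())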
Dependencies/Python/Lib/test/support/asyncore.py | 649 (vendored, new file)
@@ -0,0 +1,649 @@
# TODO: This module was deprecated and removed from CPython 3.12
# Now it is a test-only helper. Any attempts to rewrite existing tests that
# are using this module and remove it completely are appreciated!
# See: https://github.com/python/cpython/issues/72719

# -*- Mode: Python -*-
#   Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
#   Author: Sam Rushing <rushing@nightmare.com>

# ======================================================================
# Copyright 1996 by Sam Rushing
#
#                         All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================

"""Basic infrastructure for asynchronous socket service clients and servers.

There are only two ways to have a program on a single processor do "more
than one thing at a time".  Multi-threaded programming is the simplest and
most popular way to do it, but there is another very different technique,
that lets you have nearly all the advantages of multi-threading, without
actually using multiple threads.  It's really only practical if your program
is largely I/O bound.  If your program is CPU bound, then pre-emptive
scheduled threads are probably what you really need.  Network servers are
rarely CPU-bound, however.

If your operating system supports the select() system call in its I/O
library (and nearly all do), then you can use it to juggle multiple
communication channels at once; doing other work while your I/O is taking
place in the "background."  Although this strategy can seem strange and
complex, especially at first, it is in many ways easier to understand and
control than multi-threaded programming.  The module documented here solves
many of the difficult problems for you, making the task of building
sophisticated high-performance network servers and clients a snap.
"""

import select
import socket
import sys
import time
import warnings

import os
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL, \
     ENOTCONN, ESHUTDOWN, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \
     errorcode


_DISCONNECTED = frozenset({ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
                           EBADF})

try:
    socket_map
except NameError:
    socket_map = {}

def _strerror(err):
    try:
        return os.strerror(err)
    except (ValueError, OverflowError, NameError):
        if err in errorcode:
            return errorcode[err]
        return "Unknown error %s" % err

class ExitNow(Exception):
    pass

_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit)

def read(obj):
    try:
        obj.handle_read_event()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()

def write(obj):
    try:
        obj.handle_write_event()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()

def _exception(obj):
    try:
        obj.handle_expt_event()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()

def readwrite(obj, flags):
    try:
        if flags & select.POLLIN:
            obj.handle_read_event()
        if flags & select.POLLOUT:
            obj.handle_write_event()
        if flags & select.POLLPRI:
            obj.handle_expt_event()
        if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
            obj.handle_close()
    except OSError as e:
        if e.errno not in _DISCONNECTED:
            obj.handle_error()
        else:
            obj.handle_close()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()

def poll(timeout=0.0, map=None):
    if map is None:
        map = socket_map
    if map:
        r = []; w = []; e = []
        for fd, obj in list(map.items()):
            is_r = obj.readable()
            is_w = obj.writable()
            if is_r:
                r.append(fd)
            # accepting sockets should not be writable
            if is_w and not obj.accepting:
                w.append(fd)
            if is_r or is_w:
                e.append(fd)
        if [] == r == w == e:
            time.sleep(timeout)
            return

        r, w, e = select.select(r, w, e, timeout)

        for fd in r:
            obj = map.get(fd)
            if obj is None:
                continue
            read(obj)

        for fd in w:
            obj = map.get(fd)
            if obj is None:
                continue
            write(obj)

        for fd in e:
            obj = map.get(fd)
            if obj is None:
                continue
            _exception(obj)

def poll2(timeout=0.0, map=None):
    # Use the poll() support added to the select module in Python 2.0
    if map is None:
        map = socket_map
    if timeout is not None:
        # timeout is in milliseconds
        timeout = int(timeout*1000)
    pollster = select.poll()
    if map:
        for fd, obj in list(map.items()):
            flags = 0
            if obj.readable():
                flags |= select.POLLIN | select.POLLPRI
            # accepting sockets should not be writable
            if obj.writable() and not obj.accepting:
                flags |= select.POLLOUT
            if flags:
                pollster.register(fd, flags)

        r = pollster.poll(timeout)
        for fd, flags in r:
            obj = map.get(fd)
            if obj is None:
                continue
            readwrite(obj, flags)

poll3 = poll2   # Alias for backward compatibility

def loop(timeout=30.0, use_poll=False, map=None, count=None):
    if map is None:
        map = socket_map

    if use_poll and hasattr(select, 'poll'):
        poll_fun = poll2
    else:
        poll_fun = poll

    if count is None:
        while map:
            poll_fun(timeout, map)

    else:
        while map and count > 0:
            poll_fun(timeout, map)
            count = count - 1

class dispatcher:

    debug = False
    connected = False
    accepting = False
    connecting = False
    closing = False
    addr = None
    ignore_log_types = frozenset({'warning'})

    def __init__(self, sock=None, map=None):
        if map is None:
            self._map = socket_map
        else:
            self._map = map

        self._fileno = None

        if sock:
            # Set to nonblocking just to make sure for cases where we
            # get a socket from a blocking source.
            sock.setblocking(False)
            self.set_socket(sock, map)
            self.connected = True
            # The constructor no longer requires that the socket
            # passed be connected.
            try:
                self.addr = sock.getpeername()
            except OSError as err:
                if err.errno in (ENOTCONN, EINVAL):
                    # To handle the case where we got an unconnected
                    # socket.
                    self.connected = False
                else:
                    # The socket is broken in some unknown way, alert
                    # the user and remove it from the map (to prevent
                    # polling of broken sockets).
                    self.del_channel(map)
                    raise
        else:
            self.socket = None

    def __repr__(self):
        status = [self.__class__.__module__+"."+self.__class__.__qualname__]
        if self.accepting and self.addr:
            status.append('listening')
        elif self.connected:
            status.append('connected')
        if self.addr is not None:
            try:
                status.append('%s:%d' % self.addr)
            except TypeError:
                status.append(repr(self.addr))
        return '<%s at %#x>' % (' '.join(status), id(self))

    def add_channel(self, map=None):
        #self.log_info('adding channel %s' % self)
        if map is None:
            map = self._map
        map[self._fileno] = self

    def del_channel(self, map=None):
        fd = self._fileno
        if map is None:
            map = self._map
        if fd in map:
            #self.log_info('closing channel %d:%s' % (fd, self))
            del map[fd]
        self._fileno = None

    def create_socket(self, family=socket.AF_INET, type=socket.SOCK_STREAM):
        self.family_and_type = family, type
        sock = socket.socket(family, type)
        sock.setblocking(False)
        self.set_socket(sock)

    def set_socket(self, sock, map=None):
        self.socket = sock
        self._fileno = sock.fileno()
        self.add_channel(map)

    def set_reuse_addr(self):
        # try to re-use a server port if possible
        try:
            self.socket.setsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR,
                self.socket.getsockopt(socket.SOL_SOCKET,
                                       socket.SO_REUSEADDR) | 1
                )
        except OSError:
            pass

    # ==================================================
    # predicates for select()
    # these are used as filters for the lists of sockets
    # to pass to select().
    # ==================================================

    def readable(self):
        return True

    def writable(self):
        return True

    # ==================================================
    # socket object methods.
    # ==================================================

    def listen(self, num):
        self.accepting = True
        if os.name == 'nt' and num > 5:
            num = 5
        return self.socket.listen(num)

    def bind(self, addr):
        self.addr = addr
        return self.socket.bind(addr)

    def connect(self, address):
        self.connected = False
        self.connecting = True
        err = self.socket.connect_ex(address)
        if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \
        or err == EINVAL and os.name == 'nt':
            self.addr = address
            return
        if err in (0, EISCONN):
            self.addr = address
            self.handle_connect_event()
        else:
            raise OSError(err, errorcode[err])

    def accept(self):
        # XXX can return either an address pair or None
        try:
            conn, addr = self.socket.accept()
        except TypeError:
            return None
        except OSError as why:
            if why.errno in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
                return None
            else:
                raise
        else:
            return conn, addr

    def send(self, data):
        try:
            result = self.socket.send(data)
            return result
        except OSError as why:
            if why.errno == EWOULDBLOCK:
                return 0
            elif why.errno in _DISCONNECTED:
                self.handle_close()
                return 0
            else:
                raise

    def recv(self, buffer_size):
        try:
            data = self.socket.recv(buffer_size)
            if not data:
                # a closed connection is indicated by signaling
                # a read condition, and having recv() return 0.
                self.handle_close()
                return b''
            else:
                return data
        except OSError as why:
            # winsock sometimes raises ENOTCONN
            if why.errno in _DISCONNECTED:
                self.handle_close()
                return b''
            else:
                raise

    def close(self):
        self.connected = False
        self.accepting = False
        self.connecting = False
        self.del_channel()
        if self.socket is not None:
            try:
                self.socket.close()
            except OSError as why:
                if why.errno not in (ENOTCONN, EBADF):
                    raise

    # log and log_info may be overridden to provide more sophisticated
    # logging and warning methods. In general, log is for 'hit' logging
    # and 'log_info' is for informational, warning and error logging.

    def log(self, message):
        sys.stderr.write('log: %s\n' % str(message))

    def log_info(self, message, type='info'):
        if type not in self.ignore_log_types:
            print('%s: %s' % (type, message))

    def handle_read_event(self):
        if self.accepting:
            # accepting sockets are never connected, they "spawn" new
            # sockets that are connected
            self.handle_accept()
        elif not self.connected:
            if self.connecting:
                self.handle_connect_event()
            self.handle_read()
        else:
            self.handle_read()

    def handle_connect_event(self):
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            raise OSError(err, _strerror(err))
        self.handle_connect()
        self.connected = True
        self.connecting = False

    def handle_write_event(self):
        if self.accepting:
            # Accepting sockets shouldn't get a write event.
            # We will pretend it didn't happen.
            return

        if not self.connected:
            if self.connecting:
                self.handle_connect_event()
        self.handle_write()

    def handle_expt_event(self):
        # handle_expt_event() is called if there might be an error on the
        # socket, or if there is OOB data
        # check for the error condition first
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            # we can get here when select.select() says that there is an
            # exceptional condition on the socket
            # since there is an error, we'll go ahead and close the socket
            # like we would in a subclassed handle_read() that received no
            # data
            self.handle_close()
        else:
            self.handle_expt()

    def handle_error(self):
        nil, t, v, tbinfo = compact_traceback()

        # sometimes a user repr method will crash.
        try:
            self_repr = repr(self)
        except:
            self_repr = '<__repr__(self) failed for object at %0x>' % id(self)

        self.log_info(
            'uncaptured python exception, closing channel %s (%s:%s %s)' % (
                self_repr,
                t,
                v,
                tbinfo
                ),
            'error'
            )
        self.handle_close()

    def handle_expt(self):
        self.log_info('unhandled incoming priority event', 'warning')

    def handle_read(self):
        self.log_info('unhandled read event', 'warning')

    def handle_write(self):
        self.log_info('unhandled write event', 'warning')

    def handle_connect(self):
        self.log_info('unhandled connect event', 'warning')

    def handle_accept(self):
        pair = self.accept()
        if pair is not None:
            self.handle_accepted(*pair)

    def handle_accepted(self, sock, addr):
        sock.close()
        self.log_info('unhandled accepted event', 'warning')

    def handle_close(self):
        self.log_info('unhandled close event', 'warning')
        self.close()

# ---------------------------------------------------------------------------
# adds simple buffered output capability, useful for simple clients.
# [for more sophisticated usage use asynchat.async_chat]
# ---------------------------------------------------------------------------

class dispatcher_with_send(dispatcher):

    def __init__(self, sock=None, map=None):
        dispatcher.__init__(self, sock, map)
        self.out_buffer = b''

    def initiate_send(self):
        num_sent = 0
        num_sent = dispatcher.send(self, self.out_buffer[:65536])
        self.out_buffer = self.out_buffer[num_sent:]

    def handle_write(self):
        self.initiate_send()

    def writable(self):
        return (not self.connected) or len(self.out_buffer)

    def send(self, data):
        if self.debug:
            self.log_info('sending %s' % repr(data))
        self.out_buffer = self.out_buffer + data
        self.initiate_send()

# ---------------------------------------------------------------------------
# used for debugging.
# ---------------------------------------------------------------------------

def compact_traceback():
    exc = sys.exception()
    tb = exc.__traceback__
    if not tb: # Must have a traceback
        raise AssertionError("traceback does not exist")
    tbinfo = []
    while tb:
        tbinfo.append((
            tb.tb_frame.f_code.co_filename,
            tb.tb_frame.f_code.co_name,
            str(tb.tb_lineno)
            ))
        tb = tb.tb_next

    # just to be safe
    del tb

    file, function, line = tbinfo[-1]
    info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
    return (file, function, line), type(exc), exc, info

def close_all(map=None, ignore_all=False):
    if map is None:
        map = socket_map
    for x in list(map.values()):
        try:
            x.close()
        except OSError as x:
            if x.errno == EBADF:
                pass
            elif not ignore_all:
                raise
        except _reraised_exceptions:
            raise
        except:
            if not ignore_all:
                raise
    map.clear()

# Asynchronous File I/O:
#
# After a little research (reading man pages on various unixen, and
# digging through the linux kernel), I've determined that select()
# isn't meant for doing asynchronous file i/o.
# Heartening, though - reading linux/mm/filemap.c shows that linux
# supports asynchronous read-ahead. So _MOST_ of the time, the data
# will be sitting in memory for us already when we go to read it.
#
# What other OS's (besides NT) support async file i/o?  [VMS?]
#
# Regardless, this is useful for pipes, and stdin/stdout...

if os.name == 'posix':
    class file_wrapper:
        # Here we override just enough to make a file
        # look like a socket for the purposes of asyncore.
        # The passed fd is automatically os.dup()'d

        def __init__(self, fd):
            self.fd = os.dup(fd)

        def __del__(self):
            if self.fd >= 0:
                warnings.warn("unclosed file %r" % self, ResourceWarning,
                              source=self)
            self.close()

        def recv(self, *args):
            return os.read(self.fd, *args)

        def send(self, *args):
            return os.write(self.fd, *args)

        def getsockopt(self, level, optname, buflen=None):
            if (level == socket.SOL_SOCKET and
                optname == socket.SO_ERROR and
                not buflen):
                return 0
            raise NotImplementedError("Only asyncore specific behaviour "
                                      "implemented.")

        read = recv
        write = send

        def close(self):
            if self.fd < 0:
                return
            fd = self.fd
            self.fd = -1
            os.close(fd)

        def fileno(self):
            return self.fd

    class file_dispatcher(dispatcher):

        def __init__(self, fd, map=None):
            dispatcher.__init__(self, None, map)
            self.connected = True
            try:
                fd = fd.fileno()
            except AttributeError:
                pass
            self.set_file(fd)
            # set it to non-blocking mode
            os.set_blocking(fd, False)

        def set_file(self, fd):
            self.socket = file_wrapper(fd)
            self._fileno = self.socket.fileno()
            self.add_channel()
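
A minimal echo server in the classic asyncore style (hypothetical, mirroring the old module documentation; dispatcher_with_send buffers the reply for us):

import socket
from test.support import asyncore

class EchoHandler(asyncore.dispatcher_with_send):
    def handle_read(self):
        data = self.recv(8192)
        if data:
            self.send(data)   # buffered by dispatcher_with_send

class EchoServer(asyncore.dispatcher):
    def __init__(self, host, port):
        super().__init__()
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((host, port))
        self.listen(5)

    def handle_accepted(self, sock, addr):
        EchoHandler(sock)

# EchoServer('127.0.0.1', 8080)
# asyncore.loop()   # runs until every channel in the socket map is closed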
Dependencies/Python/Lib/test/support/bytecode_helper.py | 158 (vendored, new file)
@@ -0,0 +1,158 @@
"""bytecode_helper - support tools for testing correct bytecode generation"""

import unittest
import dis
import io
import opcode
try:
    import _testinternalcapi
except ImportError:
    _testinternalcapi = None

_UNSPECIFIED = object()

def instructions_with_positions(instrs, co_positions):
    # Return (instr, positions) pairs from the instrs list and co_positions
    # iterator. The latter contains items for cache lines and the former
    # doesn't, so those need to be skipped.

    co_positions = co_positions or iter(())
    for instr in instrs:
        yield instr, next(co_positions, ())
        for _, size, _ in (instr.cache_info or ()):
            for i in range(size):
                next(co_positions, ())

class BytecodeTestCase(unittest.TestCase):
    """Custom assertion methods for inspecting bytecode."""

    def get_disassembly_as_string(self, co):
        s = io.StringIO()
        dis.dis(co, file=s)
        return s.getvalue()

    def assertInBytecode(self, x, opname, argval=_UNSPECIFIED):
        """Returns instr if opname is found, otherwise throws AssertionError"""
        self.assertIn(opname, dis.opmap)
        for instr in dis.get_instructions(x):
            if instr.opname == opname:
                if argval is _UNSPECIFIED or instr.argval == argval:
                    return instr
        disassembly = self.get_disassembly_as_string(x)
        if argval is _UNSPECIFIED:
            msg = '%s not found in bytecode:\n%s' % (opname, disassembly)
        else:
            msg = '(%s,%r) not found in bytecode:\n%s'
            msg = msg % (opname, argval, disassembly)
        self.fail(msg)

    def assertNotInBytecode(self, x, opname, argval=_UNSPECIFIED):
        """Throws AssertionError if opname is found"""
        self.assertIn(opname, dis.opmap)
        for instr in dis.get_instructions(x):
            if instr.opname == opname:
                disassembly = self.get_disassembly_as_string(x)
                if argval is _UNSPECIFIED:
                    msg = '%s occurs in bytecode:\n%s' % (opname, disassembly)
                    self.fail(msg)
                elif instr.argval == argval:
                    msg = '(%s,%r) occurs in bytecode:\n%s'
                    msg = msg % (opname, argval, disassembly)
                    self.fail(msg)

class CompilationStepTestCase(unittest.TestCase):

    HAS_ARG = set(dis.hasarg)
    HAS_TARGET = set(dis.hasjrel + dis.hasjabs + dis.hasexc)
    HAS_ARG_OR_TARGET = HAS_ARG.union(HAS_TARGET)

    class Label:
        pass

    def assertInstructionsMatch(self, actual_seq, expected):
        # get an InstructionSequence and an expected list, where each
        # entry is a label or an instruction tuple. Construct an expected
        # instruction sequence and compare with the one given.

        self.assertIsInstance(expected, list)
        actual = actual_seq.get_instructions()
        expected = self.seq_from_insts(expected).get_instructions()
        self.assertEqual(len(actual), len(expected))

        # compare instructions
        for act, exp in zip(actual, expected):
            if isinstance(act, int):
                self.assertEqual(exp, act)
                continue
            self.assertIsInstance(exp, tuple)
            self.assertIsInstance(act, tuple)
            idx = max([p[0] for p in enumerate(exp) if p[1] != -1])
            self.assertEqual(exp[:idx], act[:idx])

    def resolveAndRemoveLabels(self, insts):
        idx = 0
        res = []
        for item in insts:
            assert isinstance(item, (self.Label, tuple))
            if isinstance(item, self.Label):
                item.value = idx
            else:
                idx += 1
                res.append(item)

        return res

    def seq_from_insts(self, insts):
        labels = {item for item in insts if isinstance(item, self.Label)}
        for i, lbl in enumerate(labels):
            lbl.value = i

        seq = _testinternalcapi.new_instruction_sequence()
        for item in insts:
            if isinstance(item, self.Label):
                seq.use_label(item.value)
            else:
                op = item[0]
                if isinstance(op, str):
                    op = opcode.opmap[op]
                arg, *loc = item[1:]
                if isinstance(arg, self.Label):
                    arg = arg.value
                loc = loc + [-1] * (4 - len(loc))
                seq.addop(op, arg or 0, *loc)
        return seq

    def check_instructions(self, insts):
        for inst in insts:
            if isinstance(inst, self.Label):
                continue
            op, arg, *loc = inst
            if isinstance(op, str):
                op = opcode.opmap[op]
            self.assertEqual(op in opcode.hasarg,
                             arg is not None,
                             f"{opcode.opname[op]=} {arg=}")
            self.assertTrue(all(isinstance(l, int) for l in loc))


@unittest.skipIf(_testinternalcapi is None, "requires _testinternalcapi")
class CodegenTestCase(CompilationStepTestCase):

    def generate_code(self, ast):
        insts, _ = _testinternalcapi.compiler_codegen(ast, "my_file.py", 0)
        return insts


@unittest.skipIf(_testinternalcapi is None, "requires _testinternalcapi")
class CfgOptimizationTestCase(CompilationStepTestCase):

    def get_optimized(self, seq, consts, nlocals=0):
        insts = _testinternalcapi.optimize_cfg(seq, consts, nlocals)
        return insts, consts

@unittest.skipIf(_testinternalcapi is None, "requires _testinternalcapi")
class AssemblerTestCase(CompilationStepTestCase):

    def get_code_object(self, filename, insts, metadata):
        co = _testinternalcapi.assemble_code_object(filename, insts, metadata)
        return co
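
A sketch of the assertion style these helpers enable (opcode names and folding behavior vary across CPython versions, so treat the specifics as illustrative):

from test.support.bytecode_helper import BytecodeTestCase

class ConstantFoldingExample(BytecodeTestCase):
    def test_product_is_folded(self):
        code = compile("x = 2 * 3", "<example>", "exec")
        # Recent CPython compilers fold 2 * 3 to the constant 6 ...
        self.assertInBytecode(code, "LOAD_CONST", 6)
        # ... so no runtime multiply instruction remains.
        self.assertNotInBytecode(code, "BINARY_OP")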
Dependencies/Python/Lib/test/support/hashlib_helper.py | 51 (vendored, new file)
@@ -0,0 +1,51 @@
import functools
import hashlib
import unittest

try:
    import _hashlib
except ImportError:
    _hashlib = None


def requires_hashdigest(digestname, openssl=None, usedforsecurity=True):
    """Decorator raising SkipTest if a hashing algorithm is not available

    The hashing algorithm could be missing or blocked by a strict crypto
    policy.

    If 'openssl' is True, then the decorator checks that OpenSSL provides
    the algorithm. Otherwise the check falls back to built-in
    implementations. The usedforsecurity flag is passed to the constructor.

    ValueError: [digital envelope routines: EVP_DigestInit_ex] disabled for FIPS
    ValueError: unsupported hash type md4
    """
    def decorator(func_or_class):
        if isinstance(func_or_class, type):
            setUpClass = func_or_class.__dict__.get('setUpClass')
            if setUpClass is None:
                def setUpClass(cls):
                    super(func_or_class, cls).setUpClass()
                setUpClass.__qualname__ = func_or_class.__qualname__ + '.setUpClass'
                setUpClass.__module__ = func_or_class.__module__
            else:
                setUpClass = setUpClass.__func__
            setUpClass = classmethod(decorator(setUpClass))
            func_or_class.setUpClass = setUpClass
            return func_or_class

        @functools.wraps(func_or_class)
        def wrapper(*args, **kwargs):
            try:
                if openssl and _hashlib is not None:
                    _hashlib.new(digestname, usedforsecurity=usedforsecurity)
                else:
                    hashlib.new(digestname, usedforsecurity=usedforsecurity)
            except ValueError:
                raise unittest.SkipTest(
                    f"hash digest '{digestname}' is not available."
                )
            return func_or_class(*args, **kwargs)
        return wrapper
    return decorator
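
The decorator works on both test methods and whole test classes; a short hypothetical example:

import hashlib
import unittest
from test.support.hashlib_helper import requires_hashdigest

class DigestTests(unittest.TestCase):
    @requires_hashdigest("sha256")
    def test_sha256_digest_size(self):
        self.assertEqual(hashlib.sha256(b"").digest_size, 32)

# Applied to a class, it wraps setUpClass, so every test is skipped when,
# for example, a FIPS-enabled OpenSSL refuses the algorithm.
@requires_hashdigest("md5", openssl=True)
class MD5Tests(unittest.TestCase):
    def test_hexdigest_length(self):
        self.assertEqual(len(hashlib.md5(b"").hexdigest()), 32)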
Dependencies/Python/Lib/test/support/hypothesis_helper.py | 38 (vendored, new file)
@@ -0,0 +1,38 @@
import os

try:
    import hypothesis
except ImportError:
    from . import _hypothesis_stubs as hypothesis
else:
    # When using the real Hypothesis, we'll configure it to ignore occasional
    # slow tests (avoiding flakiness from random VM slowness in CI).
    hypothesis.settings.register_profile(
        "slow-is-ok",
        deadline=None,
        suppress_health_check=[
            hypothesis.HealthCheck.too_slow,
            hypothesis.HealthCheck.differing_executors,
        ],
    )
    hypothesis.settings.load_profile("slow-is-ok")

    # For local development, we'll write to the default on-local-disk database
    # of failing examples, and also use a pull-through cache to automatically
    # replay any failing examples discovered in CI. For details on how this
    # works, see https://hypothesis.readthedocs.io/en/latest/database.html
    if "CI" not in os.environ:
        from hypothesis.database import (
            GitHubArtifactDatabase,
            MultiplexedDatabase,
            ReadOnlyDatabase,
        )

        hypothesis.settings.register_profile(
            "cpython-local-dev",
            database=MultiplexedDatabase(
                hypothesis.settings.default.database,
                ReadOnlyDatabase(GitHubArtifactDatabase("python", "cpython")),
            ),
        )
        hypothesis.settings.load_profile("cpython-local-dev")
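
Tests import Hypothesis through this shim so they run with or without the real library installed; a hypothetical example of the pattern:

import unittest
from test.support.hypothesis_helper import hypothesis

st = hypothesis.strategies

class HexRoundTripTest(unittest.TestCase):
    # With real Hypothesis installed, st.binary() generates inputs; under
    # the stubs, only the explicit @example payload runs.
    @hypothesis.example(b"\x00\xff")
    @hypothesis.given(st.binary())
    def test_hex_round_trip(self, payload):
        self.assertEqual(bytes.fromhex(payload.hex()), payload)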
Dependencies/Python/Lib/test/support/i18n_helper.py | 63 (vendored, new file)
@@ -0,0 +1,63 @@
import re
import subprocess
import sys
import unittest
from pathlib import Path
from test.support import REPO_ROOT, TEST_HOME_DIR, requires_subprocess
from test.test_tools import skip_if_missing


pygettext = Path(REPO_ROOT) / 'Tools' / 'i18n' / 'pygettext.py'

msgid_pattern = re.compile(r'msgid(.*?)(?:msgid_plural|msgctxt|msgstr)',
                           re.DOTALL)
msgid_string_pattern = re.compile(r'"((?:\\"|[^"])*)"')


def _generate_po_file(path, *, stdout_only=True):
    res = subprocess.run([sys.executable, pygettext,
                          '--no-location', '-o', '-', path],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         text=True)
    if stdout_only:
        return res.stdout
    return res


def _extract_msgids(po):
    msgids = []
    for msgid in msgid_pattern.findall(po):
        msgid_string = ''.join(msgid_string_pattern.findall(msgid))
        msgid_string = msgid_string.replace(r'\"', '"')
        if msgid_string:
            msgids.append(msgid_string)
    return sorted(msgids)


def _get_snapshot_path(module_name):
    return Path(TEST_HOME_DIR) / 'translationdata' / module_name / 'msgids.txt'


@requires_subprocess()
class TestTranslationsBase(unittest.TestCase):

    def assertMsgidsEqual(self, module):
        '''Assert that msgids extracted from a given module match a
        snapshot.

        '''
        skip_if_missing('i18n')
        res = _generate_po_file(module.__file__, stdout_only=False)
        self.assertEqual(res.returncode, 0)
        self.assertEqual(res.stderr, '')
        msgids = _extract_msgids(res.stdout)
        snapshot_path = _get_snapshot_path(module.__name__)
        snapshot = snapshot_path.read_text().splitlines()
        self.assertListEqual(msgids, snapshot)


def update_translation_snapshots(module):
    contents = _generate_po_file(module.__file__)
    msgids = _extract_msgids(contents)
    snapshot_path = _get_snapshot_path(module.__name__)
    snapshot_path.write_text('\n'.join(msgids))
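
Usage follows a snapshot-test pattern; a hypothetical example (getopt stands in for any module with translatable messages, and assumes a snapshot file already exists under Lib/test/translationdata/):

import getopt
from test.support.i18n_helper import TestTranslationsBase, update_translation_snapshots

class TestGetoptTranslations(TestTranslationsBase):
    def test_msgids_match_snapshot(self):
        self.assertMsgidsEqual(getopt)

# After changing translatable strings, regenerate the snapshot with:
# update_translation_snapshots(getopt)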
Dependencies/Python/Lib/test/support/import_helper.py | 311 (vendored, new file)
@ -0,0 +1,311 @@
|
||||
import contextlib
|
||||
import _imp
|
||||
import importlib
|
||||
import importlib.util
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import unittest
|
||||
import warnings
|
||||
|
||||
from .os_helper import unlink, temp_dir
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _ignore_deprecated_imports(ignore=True):
|
||||
"""Context manager to suppress package and module deprecation
|
||||
warnings when importing them.
|
||||
|
||||
If ignore is False, this context manager has no effect.
|
||||
"""
|
||||
if ignore:
|
||||
with warnings.catch_warnings():
|
||||
warnings.filterwarnings("ignore", ".+ (module|package)",
|
||||
DeprecationWarning)
|
||||
yield
|
||||
else:
|
||||
yield
|
||||
|
||||
|
||||
def unload(name):
|
||||
try:
|
||||
del sys.modules[name]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
|
||||
def forget(modname):
|
||||
"""'Forget' a module was ever imported.
|
||||
|
||||
This removes the module from sys.modules and deletes any PEP 3147/488 or
|
||||
legacy .pyc files.
|
||||
"""
|
||||
unload(modname)
|
||||
for dirname in sys.path:
|
||||
source = os.path.join(dirname, modname + '.py')
|
||||
# It doesn't matter if they exist or not, unlink all possible
|
||||
# combinations of PEP 3147/488 and legacy pyc files.
|
||||
unlink(source + 'c')
|
||||
for opt in ('', 1, 2):
|
||||
unlink(importlib.util.cache_from_source(source, optimization=opt))
|
||||
|
||||
|
||||
def make_legacy_pyc(source):
|
||||
"""Move a PEP 3147/488 pyc file to its legacy pyc location.
|
||||
|
||||
:param source: The file system path to the source file. The source file
|
||||
does not need to exist, however the PEP 3147/488 pyc file must exist.
|
||||
:return: The file system path to the legacy pyc file.
|
||||
"""
|
||||
pyc_file = importlib.util.cache_from_source(source)
|
||||
assert source.endswith('.py')
|
||||
legacy_pyc = source + 'c'
|
||||
shutil.move(pyc_file, legacy_pyc)
|
||||
return legacy_pyc
|
||||
|
||||
|
||||
def import_module(name, deprecated=False, *, required_on=()):
|
||||
"""Import and return the module to be tested, raising SkipTest if
|
||||
it is not available.
|
||||
|
||||
If deprecated is True, any module or package deprecation messages
|
||||
will be suppressed. If a module is required on a platform but optional for
|
||||
others, set required_on to an iterable of platform prefixes which will be
|
||||
compared against sys.platform.
|
||||
"""
|
||||
with _ignore_deprecated_imports(deprecated):
|
||||
try:
|
||||
return importlib.import_module(name)
|
||||
except ImportError as msg:
|
||||
if sys.platform.startswith(tuple(required_on)):
|
||||
raise
|
||||
raise unittest.SkipTest(str(msg))


def _save_and_remove_modules(names):
    orig_modules = {}
    prefixes = tuple(name + '.' for name in names)
    for modname in list(sys.modules):
        if modname in names or modname.startswith(prefixes):
            orig_modules[modname] = sys.modules.pop(modname)
    return orig_modules


@contextlib.contextmanager
def frozen_modules(enabled=True):
    """Force frozen modules to be used (or not).

    This only applies to modules that haven't been imported yet.
    Also, some essential modules will always be imported frozen.
    """
    _imp._override_frozen_modules_for_tests(1 if enabled else -1)
    try:
        yield
    finally:
        _imp._override_frozen_modules_for_tests(0)


@contextlib.contextmanager
def multi_interp_extensions_check(enabled=True):
    """Force legacy modules to be allowed in subinterpreters (or not).

    ("legacy" == single-phase init)

    This only applies to modules that haven't been imported yet.
    It overrides the PyInterpreterConfig.check_multi_interp_extensions
    setting (see support.run_in_subinterp_with_config() and
    _interpreters.create()).

    Also see importlib.utils.allowing_all_extensions().
    """
    old = _imp._override_multi_interp_extensions_check(1 if enabled else -1)
    try:
        yield
    finally:
        _imp._override_multi_interp_extensions_check(old)


def import_fresh_module(name, fresh=(), blocked=(), *,
                        deprecated=False,
                        usefrozen=False,
                        ):
    """Import and return a module, deliberately bypassing sys.modules.

    This function imports and returns a fresh copy of the named Python module
    by removing the named module from sys.modules before doing the import.
    Note that unlike reload, the original module is not affected by
    this operation.

    *fresh* is an iterable of additional module names that are also removed
    from the sys.modules cache before doing the import.  If one of these
    modules can't be imported, None is returned.

    *blocked* is an iterable of module names that are replaced with None
    in the module cache during the import to ensure that attempts to import
    them raise ImportError.

    The named module and any modules named in the *fresh* and *blocked*
    parameters are saved before starting the import and then reinserted into
    sys.modules when the fresh import is complete.

    Module and package deprecation messages are suppressed during this import
    if *deprecated* is True.

    This function will raise ImportError if the named module cannot be
    imported.

    If "usefrozen" is False (the default) then the frozen importer is
    disabled (except for essential modules like importlib._bootstrap).
    """
    # NOTE: test_heapq, test_json and test_warnings include extra sanity checks
    # to make sure that this utility function is working as expected
    with _ignore_deprecated_imports(deprecated):
        # Keep track of modules saved for later restoration as well
        # as those which just need a blocking entry removed
        fresh = list(fresh)
        blocked = list(blocked)
        names = {name, *fresh, *blocked}
        orig_modules = _save_and_remove_modules(names)
        for modname in blocked:
            sys.modules[modname] = None

        try:
            with frozen_modules(usefrozen):
                # Return None when one of the "fresh" modules can not be imported.
                try:
                    for modname in fresh:
                        __import__(modname)
                except ImportError:
                    return None
                return importlib.import_module(name)
        finally:
            _save_and_remove_modules(names)
            sys.modules.update(orig_modules)
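

# Editor's illustrative sketch, not part of the vendored file: the common
# stdlib-test pattern of importing the pure-Python implementation of a
# module while its C accelerator is blocked (compare test_heapq).
def _example_import_fresh_module_usage():
    # py_heapq sees ImportError for _heapq, so it falls back to pure Python;
    # the globally cached heapq module is restored once the import finishes.
    py_heapq = import_fresh_module('heapq', blocked=['_heapq'])
    c_heapq = import_fresh_module('heapq', fresh=['_heapq'])
    return py_heapq, c_heapq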


class CleanImport(object):
    """Context manager to force import to return a new module reference.

    This is useful for testing module-level behaviours, such as
    the emission of a DeprecationWarning on import.

    Use like this:

        with CleanImport("foo"):
            importlib.import_module("foo") # new reference

    If "usefrozen" is False (the default) then the frozen importer is
    disabled (except for essential modules like importlib._bootstrap).
    """

    def __init__(self, *module_names, usefrozen=False):
        self.original_modules = sys.modules.copy()
        for module_name in module_names:
            if module_name in sys.modules:
                module = sys.modules[module_name]
                # It is possible that module_name is just an alias for
                # another module (e.g. stub for modules renamed in 3.x).
                # In that case, we also need to delete the real module to
                # clear the import cache.
                if module.__name__ != module_name:
                    del sys.modules[module.__name__]
                del sys.modules[module_name]
        self._frozen_modules = frozen_modules(usefrozen)

    def __enter__(self):
        self._frozen_modules.__enter__()
        return self

    def __exit__(self, *ignore_exc):
        sys.modules.update(self.original_modules)
        self._frozen_modules.__exit__(*ignore_exc)


class DirsOnSysPath(object):
    """Context manager to temporarily add directories to sys.path.

    This makes a copy of sys.path, appends any directories given
    as positional arguments, then reverts sys.path to the copied
    settings when the context ends.

    Note that *all* sys.path modifications in the body of the
    context manager, including replacement of the object,
    will be reverted at the end of the block.
    """

    def __init__(self, *paths):
        self.original_value = sys.path[:]
        self.original_object = sys.path
        sys.path.extend(paths)

    def __enter__(self):
        return self

    def __exit__(self, *ignore_exc):
        sys.path = self.original_object
        sys.path[:] = self.original_value


def modules_setup():
    return sys.modules.copy(),


def modules_cleanup(oldmodules):
    # Encoders/decoders are registered permanently within the internal
    # codec cache.  If we destroy the corresponding modules their
    # globals will be set to None which will trip up the cached functions.
    encodings = [(k, v) for k, v in sys.modules.items()
                 if k.startswith('encodings.')]
    sys.modules.clear()
    sys.modules.update(encodings)
    # XXX: This kind of problem can affect more than just encodings.
    # In particular extension modules (such as _ssl) don't cope
    # with reloading properly.  Really, test modules should be cleaning
    # out the test specific modules they know they added (ala test_runpy)
    # rather than relying on this function (as test_importhooks and test_pkg
    # do currently).  Implicitly imported *real* modules should be left alone
    # (see issue 10556).
    sys.modules.update(oldmodules)


@contextlib.contextmanager
def isolated_modules():
    """
    Save modules on entry and cleanup on exit.
    """
    (saved,) = modules_setup()
    try:
        yield
    finally:
        modules_cleanup(saved)


def mock_register_at_fork(func):
    # bpo-30599: Mock os.register_at_fork() when importing the random module,
    # since this function doesn't allow unregistering callbacks and would
    # leak memory.
    from unittest import mock
    return mock.patch('os.register_at_fork', create=True)(func)


@contextlib.contextmanager
def ready_to_import(name=None, source=""):
    from test.support import script_helper

    # 1. Sets up a temporary directory and removes it afterwards
    # 2. Creates the module file
    # 3. Temporarily clears the module from sys.modules (if any)
    # 4. Reverts or removes the module when cleaning up
    name = name or "spam"
    with temp_dir() as tempdir:
        path = script_helper.make_script(tempdir, name, source)
        old_module = sys.modules.pop(name, None)
        try:
            sys.path.insert(0, tempdir)
            yield name, path
            sys.path.remove(tempdir)
        finally:
            if old_module is not None:
                sys.modules[name] = old_module
            else:
                sys.modules.pop(name, None)
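

# Editor's illustrative sketch, not part of the vendored file:
# ready_to_import() creates an importable throwaway module on sys.path.
# The module name 'spam_example' and its one-line source are arbitrary.
def _example_ready_to_import_usage():
    with ready_to_import('spam_example', source='VALUE = 42') as (name, path):
        mod = importlib.import_module(name)   # found via the temp dir
        assert mod.VALUE == 42
    # sys.modules and sys.path are restored once the block exits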
258
Dependencies/Python/Lib/test/support/interpreters/__init__.py
vendored
Normal file
@ -0,0 +1,258 @@
"""Subinterpreters High Level Module."""

import threading
import weakref
import _interpreters

# aliases:
from _interpreters import (
    InterpreterError, InterpreterNotFoundError, NotShareableError,
    is_shareable,
)


__all__ = [
    'get_current', 'get_main', 'create', 'list_all', 'is_shareable',
    'Interpreter',
    'InterpreterError', 'InterpreterNotFoundError', 'ExecutionFailed',
    'NotShareableError',
    'create_queue', 'Queue', 'QueueEmpty', 'QueueFull',
]


_queuemod = None

def __getattr__(name):
    if name in ('Queue', 'QueueEmpty', 'QueueFull', 'create_queue'):
        global create_queue, Queue, QueueEmpty, QueueFull
        ns = globals()
        from .queues import (
            create as create_queue,
            Queue, QueueEmpty, QueueFull,
        )
        return ns[name]
    else:
        raise AttributeError(name)


_EXEC_FAILURE_STR = """
{superstr}

Uncaught in the interpreter:

{formatted}
""".strip()

class ExecutionFailed(InterpreterError):
    """An unhandled exception happened during execution.

    This is raised from Interpreter.exec() and Interpreter.call().
    """

    def __init__(self, excinfo):
        msg = excinfo.formatted
        if not msg:
            if excinfo.type and excinfo.msg:
                msg = f'{excinfo.type.__name__}: {excinfo.msg}'
            else:
                msg = excinfo.type.__name__ or excinfo.msg
        super().__init__(msg)
        self.excinfo = excinfo

    def __str__(self):
        try:
            formatted = self.excinfo.errdisplay
        except Exception:
            return super().__str__()
        else:
            return _EXEC_FAILURE_STR.format(
                superstr=super().__str__(),
                formatted=formatted,
            )


def create():
    """Return a new (idle) Python interpreter."""
    id = _interpreters.create(reqrefs=True)
    return Interpreter(id, _ownsref=True)


def list_all():
    """Return all existing interpreters."""
    return [Interpreter(id, _whence=whence)
            for id, whence in _interpreters.list_all(require_ready=True)]


def get_current():
    """Return the currently running interpreter."""
    id, whence = _interpreters.get_current()
    return Interpreter(id, _whence=whence)


def get_main():
    """Return the main interpreter."""
    id, whence = _interpreters.get_main()
    assert whence == _interpreters.WHENCE_RUNTIME, repr(whence)
    return Interpreter(id, _whence=whence)


_known = weakref.WeakValueDictionary()

class Interpreter:
    """A single Python interpreter.

    Attributes:

    "id" - the unique process-global ID number for the interpreter
    "whence" - indicates where the interpreter was created

    If the interpreter wasn't created by this module
    then any method that modifies the interpreter will fail,
    i.e. .close(), .prepare_main(), .exec(), and .call()
    """

    _WHENCE_TO_STR = {
        _interpreters.WHENCE_UNKNOWN: 'unknown',
        _interpreters.WHENCE_RUNTIME: 'runtime init',
        _interpreters.WHENCE_LEGACY_CAPI: 'legacy C-API',
        _interpreters.WHENCE_CAPI: 'C-API',
        _interpreters.WHENCE_XI: 'cross-interpreter C-API',
        _interpreters.WHENCE_STDLIB: '_interpreters module',
    }

    def __new__(cls, id, /, _whence=None, _ownsref=None):
        # There is only one instance for any given ID.
        if not isinstance(id, int):
            raise TypeError(f'id must be an int, got {id!r}')
        id = int(id)
        if _whence is None:
            if _ownsref:
                _whence = _interpreters.WHENCE_STDLIB
            else:
                _whence = _interpreters.whence(id)
        assert _whence in cls._WHENCE_TO_STR, repr(_whence)
        if _ownsref is None:
            _ownsref = (_whence == _interpreters.WHENCE_STDLIB)
        try:
            self = _known[id]
            assert hasattr(self, '_ownsref')
        except KeyError:
            self = super().__new__(cls)
            _known[id] = self
            self._id = id
            self._whence = _whence
            self._ownsref = _ownsref
            if _ownsref:
                # This may raise InterpreterNotFoundError:
                _interpreters.incref(id)
        return self

    def __repr__(self):
        return f'{type(self).__name__}({self.id})'

    def __hash__(self):
        return hash(self._id)

    def __del__(self):
        self._decref()

    # for pickling:
    def __getnewargs__(self):
        return (self._id,)

    # for pickling:
    def __getstate__(self):
        return None

    def _decref(self):
        if not self._ownsref:
            return
        self._ownsref = False
        try:
            _interpreters.decref(self._id)
        except InterpreterNotFoundError:
            pass

    @property
    def id(self):
        return self._id

    @property
    def whence(self):
        return self._WHENCE_TO_STR[self._whence]

    def is_running(self):
        """Return whether or not the identified interpreter is running."""
        return _interpreters.is_running(self._id)

    # Everything past here is available only to interpreters created by
    # interpreters.create().

    def close(self):
        """Finalize and destroy the interpreter.

        Attempting to destroy the current interpreter results
        in an InterpreterError.
        """
        return _interpreters.destroy(self._id, restrict=True)

    def prepare_main(self, ns=None, /, **kwargs):
        """Bind the given values into the interpreter's __main__.

        The values must be shareable.
        """
        ns = dict(ns, **kwargs) if ns is not None else kwargs
        _interpreters.set___main___attrs(self._id, ns, restrict=True)

    def exec(self, code, /):
        """Run the given source code in the interpreter.

        This is essentially the same as calling the builtin "exec"
        with this interpreter, using the __dict__ of its __main__
        module as both globals and locals.

        There is no return value.

        If the code raises an unhandled exception then an ExecutionFailed
        exception is raised, which summarizes the unhandled exception.
        The actual exception is discarded because objects cannot be
        shared between interpreters.

        This blocks the current Python thread until done.  During
        that time, the previous interpreter is allowed to run
        in other threads.
        """
        excinfo = _interpreters.exec(self._id, code, restrict=True)
        if excinfo is not None:
            raise ExecutionFailed(excinfo)

    def call(self, callable, /):
        """Call the object in the interpreter with given args/kwargs.

        Only functions that take no arguments and have no closure
        are supported.

        The return value is discarded.

        If the callable raises an exception then the error display
        (including full traceback) is sent back between the interpreters
        and an ExecutionFailed exception is raised, much like what
        happens with Interpreter.exec().
        """
        # XXX Support args and kwargs.
        # XXX Support arbitrary callables.
        # XXX Support returning the return value (e.g. via pickle).
        excinfo = _interpreters.call(self._id, callable, restrict=True)
        if excinfo is not None:
            raise ExecutionFailed(excinfo)

    def call_in_thread(self, callable, /):
        """Return a new thread that calls the object in the interpreter.

        The return value and any raised exception are discarded.
        """
        def task():
            self.call(callable)
        t = threading.Thread(target=task)
        t.start()
        return t
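

# Editor's illustrative sketch, not part of the vendored file: a minimal
# round trip with the high-level API -- create an interpreter, bind a
# shareable value into its __main__, run code, then tear it down.
def _example_interpreter_usage():
    interp = create()
    try:
        interp.prepare_main(who='world')          # str is shareable
        interp.exec('greeting = f"hello, {who}"')
    except ExecutionFailed as exc:
        print('code raised in the subinterpreter:', exc)
    finally:
        interp.close()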
102
Dependencies/Python/Lib/test/support/interpreters/_crossinterp.py
vendored
Normal file
@ -0,0 +1,102 @@
"""Common code between queues and channels."""


class ItemInterpreterDestroyed(Exception):
    """Raised when trying to get an item whose interpreter was destroyed."""


class classonly:
    """A non-data descriptor that makes a value only visible on the class.

    This is like the "classmethod" builtin, but does not show up on
    instances of the class.  It may be used as a decorator.
    """

    def __init__(self, value):
        self.value = value
        self.getter = classmethod(value).__get__
        self.name = None

    def __set_name__(self, cls, name):
        if self.name is not None:
            raise TypeError('already used')
        self.name = name

    def __get__(self, obj, cls):
        if obj is not None:
            raise AttributeError(self.name)
        # called on the class
        return self.getter(None, cls)


class UnboundItem:
    """Represents a cross-interpreter item no longer bound to an interpreter.

    An item is unbound when the interpreter that added it to the
    cross-interpreter container is destroyed.
    """

    __slots__ = ()

    @classonly
    def singleton(cls, kind, module, name='UNBOUND'):
        doc = cls.__doc__.replace('cross-interpreter container', kind)
        doc = doc.replace('cross-interpreter', kind)
        subclass = type(
            f'Unbound{kind.capitalize()}Item',
            (cls,),
            dict(
                _MODULE=module,
                _NAME=name,
                __doc__=doc,
            ),
        )
        return object.__new__(subclass)

    _MODULE = __name__
    _NAME = 'UNBOUND'

    def __new__(cls):
        raise Exception(f'use {cls._MODULE}.{cls._NAME}')

    def __repr__(self):
        return f'{self._MODULE}.{self._NAME}'
#        return f'interpreters.queues.UNBOUND'


UNBOUND = object.__new__(UnboundItem)
UNBOUND_ERROR = object()
UNBOUND_REMOVE = object()

_UNBOUND_CONSTANT_TO_FLAG = {
    UNBOUND_REMOVE: 1,
    UNBOUND_ERROR: 2,
    UNBOUND: 3,
}
_UNBOUND_FLAG_TO_CONSTANT = {v: k
                             for k, v in _UNBOUND_CONSTANT_TO_FLAG.items()}


def serialize_unbound(unbound):
    op = unbound
    try:
        flag = _UNBOUND_CONSTANT_TO_FLAG[op]
    except KeyError:
        raise NotImplementedError(f'unsupported unbound replacement op {op!r}')
    return flag,


def resolve_unbound(flag, exctype_destroyed):
    try:
        op = _UNBOUND_FLAG_TO_CONSTANT[flag]
    except KeyError:
        raise NotImplementedError(f'unsupported unbound replacement op {flag!r}')
    if op is UNBOUND_REMOVE:
        # "remove" not possible here
        raise NotImplementedError
    elif op is UNBOUND_ERROR:
        raise exctype_destroyed("item's original interpreter destroyed")
    elif op is UNBOUND:
        return UNBOUND
    else:
        raise NotImplementedError(repr(op))
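

# Editor's illustrative sketch, not part of the vendored file: the UNBOUND
# constants round-trip through a one-element flag tuple so they can cross
# the interpreter boundary without sharing the sentinel objects themselves.
def _example_unbound_round_trip():
    flag, = serialize_unbound(UNBOUND)           # -> (3,)
    assert resolve_unbound(flag, Exception) is UNBOUND
    flag, = serialize_unbound(UNBOUND_ERROR)     # -> (2,)
    try:
        resolve_unbound(flag, ItemInterpreterDestroyed)
    except ItemInterpreterDestroyed:
        pass                                     # raised as documented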
257
Dependencies/Python/Lib/test/support/interpreters/channels.py
vendored
Normal file
@ -0,0 +1,257 @@
"""Cross-interpreter Channels High Level Module."""

import time
import _interpchannels as _channels
from . import _crossinterp

# aliases:
from _interpchannels import (
    ChannelError, ChannelNotFoundError, ChannelClosedError,
    ChannelEmptyError, ChannelNotEmptyError,
)
from ._crossinterp import (
    UNBOUND_ERROR, UNBOUND_REMOVE,
)


__all__ = [
    'UNBOUND', 'UNBOUND_ERROR', 'UNBOUND_REMOVE',
    'create', 'list_all',
    'SendChannel', 'RecvChannel',
    'ChannelError', 'ChannelNotFoundError', 'ChannelEmptyError',
    'ItemInterpreterDestroyed',
]


class ItemInterpreterDestroyed(ChannelError,
                               _crossinterp.ItemInterpreterDestroyed):
    """Raised from get() and get_nowait()."""


UNBOUND = _crossinterp.UnboundItem.singleton('queue', __name__)


def _serialize_unbound(unbound):
    if unbound is UNBOUND:
        unbound = _crossinterp.UNBOUND
    return _crossinterp.serialize_unbound(unbound)


def _resolve_unbound(flag):
    resolved = _crossinterp.resolve_unbound(flag, ItemInterpreterDestroyed)
    if resolved is _crossinterp.UNBOUND:
        resolved = UNBOUND
    return resolved


def create(*, unbounditems=UNBOUND):
    """Return (recv, send) for a new cross-interpreter channel.

    The channel may be used to pass data safely between interpreters.

    "unbounditems" sets the default for the send end of the channel.
    See SendChannel.send() for supported values.  The default value
    is UNBOUND, which replaces the unbound item when received.
    """
    unbound = _serialize_unbound(unbounditems)
    unboundop, = unbound
    cid = _channels.create(unboundop)
    recv, send = RecvChannel(cid), SendChannel(cid, _unbound=unbound)
    return recv, send


def list_all():
    """Return a list of (recv, send) for all open channels."""
    return [(RecvChannel(cid), SendChannel(cid, _unbound=unbound))
            for cid, unbound in _channels.list_all()]


class _ChannelEnd:
    """The base class for RecvChannel and SendChannel."""

    _end = None

    def __new__(cls, cid):
        self = super().__new__(cls)
        if self._end == 'send':
            cid = _channels._channel_id(cid, send=True, force=True)
        elif self._end == 'recv':
            cid = _channels._channel_id(cid, recv=True, force=True)
        else:
            raise NotImplementedError(self._end)
        self._id = cid
        return self

    def __repr__(self):
        return f'{type(self).__name__}(id={int(self._id)})'

    def __hash__(self):
        return hash(self._id)

    def __eq__(self, other):
        if isinstance(self, RecvChannel):
            if not isinstance(other, RecvChannel):
                return NotImplemented
        elif not isinstance(other, SendChannel):
            return NotImplemented
        return other._id == self._id

    # for pickling:
    def __getnewargs__(self):
        return (int(self._id),)

    # for pickling:
    def __getstate__(self):
        return None

    @property
    def id(self):
        return self._id

    @property
    def _info(self):
        return _channels.get_info(self._id)

    @property
    def is_closed(self):
        return self._info.closed


_NOT_SET = object()


class RecvChannel(_ChannelEnd):
    """The receiving end of a cross-interpreter channel."""

    _end = 'recv'

    def recv(self, timeout=None, *,
             _sentinel=object(),
             _delay=10 / 1000,  # 10 milliseconds
             ):
        """Return the next object from the channel.

        This blocks until an object has been sent, if none have been
        sent already.
        """
        if timeout is not None:
            timeout = int(timeout)
            if timeout < 0:
                raise ValueError('timeout value must be non-negative')
            end = time.time() + timeout
        obj, unboundop = _channels.recv(self._id, _sentinel)
        while obj is _sentinel:
            time.sleep(_delay)
            if timeout is not None and time.time() >= end:
                raise TimeoutError
            obj, unboundop = _channels.recv(self._id, _sentinel)
        if unboundop is not None:
            assert obj is None, repr(obj)
            return _resolve_unbound(unboundop)
        return obj

    def recv_nowait(self, default=_NOT_SET):
        """Return the next object from the channel.

        If none have been sent then return the default if one
        is provided or fail with ChannelEmptyError.  Otherwise this
        is the same as recv().
        """
        if default is _NOT_SET:
            obj, unboundop = _channels.recv(self._id)
        else:
            obj, unboundop = _channels.recv(self._id, default)
        if unboundop is not None:
            assert obj is None, repr(obj)
            return _resolve_unbound(unboundop)
        return obj

    def close(self):
        _channels.close(self._id, recv=True)


class SendChannel(_ChannelEnd):
    """The sending end of a cross-interpreter channel."""

    _end = 'send'

    def __new__(cls, cid, *, _unbound=None):
        if _unbound is None:
            try:
                op = _channels.get_channel_defaults(cid)
                _unbound = (op,)
            except ChannelNotFoundError:
                _unbound = _serialize_unbound(UNBOUND)
        self = super().__new__(cls, cid)
        self._unbound = _unbound
        return self

    @property
    def is_closed(self):
        info = self._info
        return info.closed or info.closing

    def send(self, obj, timeout=None, *,
             unbound=None,
             ):
        """Send the object (i.e. its data) to the channel's receiving end.

        This blocks until the object is received.
        """
        if unbound is None:
            unboundop, = self._unbound
        else:
            unboundop, = _serialize_unbound(unbound)
        _channels.send(self._id, obj, unboundop, timeout=timeout, blocking=True)

    def send_nowait(self, obj, *,
                    unbound=None,
                    ):
        """Send the object to the channel's receiving end.

        If the object is immediately received then return True
        (else False).  Otherwise this is the same as send().
        """
        if unbound is None:
            unboundop, = self._unbound
        else:
            unboundop, = _serialize_unbound(unbound)
        # XXX Note that at the moment channel_send() only ever returns
        # None.  This should be fixed when channel_send_wait() is added.
        # See bpo-32604 and gh-19829.
        return _channels.send(self._id, obj, unboundop, blocking=False)

    def send_buffer(self, obj, timeout=None, *,
                    unbound=None,
                    ):
        """Send the object's buffer to the channel's receiving end.

        This blocks until the object is received.
        """
        if unbound is None:
            unboundop, = self._unbound
        else:
            unboundop, = _serialize_unbound(unbound)
        _channels.send_buffer(self._id, obj, unboundop,
                              timeout=timeout, blocking=True)

    def send_buffer_nowait(self, obj, *,
                           unbound=None,
                           ):
        """Send the object's buffer to the channel's receiving end.

        If the object is immediately received then return True
        (else False).  Otherwise this is the same as send().
        """
        if unbound is None:
            unboundop, = self._unbound
        else:
            unboundop, = _serialize_unbound(unbound)
        return _channels.send_buffer(self._id, obj, unboundop, blocking=False)

    def close(self):
        _channels.close(self._id, send=True)


# XXX This is causing leaks (gh-110318):
_channels._register_end_types(SendChannel, RecvChannel)
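

# Editor's illustrative sketch, not part of the vendored file: a channel's
# two ends work within a single interpreter as well, which keeps this demo
# self-contained; in real tests the ends are handed to other interpreters.
def _example_channel_usage():
    recv, send = create()
    send.send_nowait('ping')          # buffered until someone receives it
    assert recv.recv() == 'ping'
    recv.close()
    send.close()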
313
Dependencies/Python/Lib/test/support/interpreters/queues.py
vendored
Normal file
@ -0,0 +1,313 @@
"""Cross-interpreter Queues High Level Module."""

import pickle
import queue
import time
import weakref
import _interpqueues as _queues
from . import _crossinterp

# aliases:
from _interpqueues import (
    QueueError, QueueNotFoundError,
)
from ._crossinterp import (
    UNBOUND_ERROR, UNBOUND_REMOVE,
)

__all__ = [
    'UNBOUND', 'UNBOUND_ERROR', 'UNBOUND_REMOVE',
    'create', 'list_all',
    'Queue',
    'QueueError', 'QueueNotFoundError', 'QueueEmpty', 'QueueFull',
    'ItemInterpreterDestroyed',
]


class QueueEmpty(QueueError, queue.Empty):
    """Raised from get_nowait() when the queue is empty.

    It is also raised from get() if it times out.
    """


class QueueFull(QueueError, queue.Full):
    """Raised from put_nowait() when the queue is full.

    It is also raised from put() if it times out.
    """


class ItemInterpreterDestroyed(QueueError,
                               _crossinterp.ItemInterpreterDestroyed):
    """Raised from get() and get_nowait()."""


_SHARED_ONLY = 0
_PICKLED = 1


UNBOUND = _crossinterp.UnboundItem.singleton('queue', __name__)


def _serialize_unbound(unbound):
    if unbound is UNBOUND:
        unbound = _crossinterp.UNBOUND
    return _crossinterp.serialize_unbound(unbound)


def _resolve_unbound(flag):
    resolved = _crossinterp.resolve_unbound(flag, ItemInterpreterDestroyed)
    if resolved is _crossinterp.UNBOUND:
        resolved = UNBOUND
    return resolved


def create(maxsize=0, *, syncobj=False, unbounditems=UNBOUND):
    """Return a new cross-interpreter queue.

    The queue may be used to pass data safely between interpreters.

    "syncobj" sets the default for Queue.put()
    and Queue.put_nowait().

    "unbounditems" likewise sets the default.  See Queue.put() for
    supported values.  The default value is UNBOUND, which replaces
    the unbound item.
    """
    fmt = _SHARED_ONLY if syncobj else _PICKLED
    unbound = _serialize_unbound(unbounditems)
    unboundop, = unbound
    qid = _queues.create(maxsize, fmt, unboundop)
    return Queue(qid, _fmt=fmt, _unbound=unbound)


def list_all():
    """Return a list of all open queues."""
    return [Queue(qid, _fmt=fmt, _unbound=(unboundop,))
            for qid, fmt, unboundop in _queues.list_all()]


_known_queues = weakref.WeakValueDictionary()

class Queue:
    """A cross-interpreter queue."""

    def __new__(cls, id, /, *, _fmt=None, _unbound=None):
        # There is only one instance for any given ID.
        if isinstance(id, int):
            id = int(id)
        else:
            raise TypeError(f'id must be an int, got {id!r}')
        if _fmt is None:
            if _unbound is None:
                _fmt, op = _queues.get_queue_defaults(id)
                _unbound = (op,)
            else:
                _fmt, _ = _queues.get_queue_defaults(id)
        elif _unbound is None:
            _, op = _queues.get_queue_defaults(id)
            _unbound = (op,)
        try:
            self = _known_queues[id]
        except KeyError:
            self = super().__new__(cls)
            self._id = id
            self._fmt = _fmt
            self._unbound = _unbound
            _known_queues[id] = self
            _queues.bind(id)
        return self

    def __del__(self):
        try:
            _queues.release(self._id)
        except QueueNotFoundError:
            pass
        try:
            del _known_queues[self._id]
        except KeyError:
            pass

    def __repr__(self):
        return f'{type(self).__name__}({self.id})'

    def __hash__(self):
        return hash(self._id)

    # for pickling:
    def __getnewargs__(self):
        return (self._id,)

    # for pickling:
    def __getstate__(self):
        return None

    @property
    def id(self):
        return self._id

    @property
    def maxsize(self):
        try:
            return self._maxsize
        except AttributeError:
            self._maxsize = _queues.get_maxsize(self._id)
            return self._maxsize

    def empty(self):
        return self.qsize() == 0

    def full(self):
        return _queues.is_full(self._id)

    def qsize(self):
        return _queues.get_count(self._id)

    def put(self, obj, timeout=None, *,
            syncobj=None,
            unbound=None,
            _delay=10 / 1000,  # 10 milliseconds
            ):
        """Add the object to the queue.

        This blocks while the queue is full.

        If "syncobj" is None (the default) then it uses the
        queue's default, set with create_queue().

        If "syncobj" is false then all objects are supported,
        at the expense of worse performance.

        If "syncobj" is true then the object must be "shareable".
        Examples of "shareable" objects include the builtin singletons,
        str, and memoryview.  One benefit is that such objects are
        passed through the queue efficiently.

        The key difference, though, is conceptual: the corresponding
        object returned from Queue.get() will be strictly equivalent
        to the given obj.  In other words, the two objects will be
        effectively indistinguishable from each other, even if the
        object is mutable.  The received object may actually be the
        same object, or a copy (immutable values only), or a proxy.
        Regardless, the received object should be treated as though
        the original has been shared directly, whether or not it
        actually is.  That's a slightly different and stronger promise
        than just (initial) equality, which is all "syncobj=False"
        can promise.

        "unbound" controls the behavior of Queue.get() for the given
        object if the current interpreter (calling put()) is later
        destroyed.

        If "unbound" is None (the default) then it uses the
        queue's default, set with create_queue(),
        which is usually UNBOUND.

        If "unbound" is UNBOUND_ERROR then get() will raise an
        ItemInterpreterDestroyed exception if the original interpreter
        has been destroyed.  This does not otherwise affect the queue;
        the next call to get() will work like normal, returning the next
        item in the queue.

        If "unbound" is UNBOUND_REMOVE then the item will be removed
        from the queue as soon as the original interpreter is destroyed.
        Be aware that this will introduce an imbalance between put()
        and get() calls.

        If "unbound" is UNBOUND then it is returned by get() in place
        of the unbound item.
        """
        if syncobj is None:
            fmt = self._fmt
        else:
            fmt = _SHARED_ONLY if syncobj else _PICKLED
        if unbound is None:
            unboundop, = self._unbound
        else:
            unboundop, = _serialize_unbound(unbound)
        if timeout is not None:
            timeout = int(timeout)
            if timeout < 0:
                raise ValueError('timeout value must be non-negative')
            end = time.time() + timeout
        if fmt is _PICKLED:
            obj = pickle.dumps(obj)
        while True:
            try:
                _queues.put(self._id, obj, fmt, unboundop)
            except QueueFull as exc:
                if timeout is not None and time.time() >= end:
                    raise  # re-raise
                time.sleep(_delay)
            else:
                break

    def put_nowait(self, obj, *, syncobj=None, unbound=None):
        if syncobj is None:
            fmt = self._fmt
        else:
            fmt = _SHARED_ONLY if syncobj else _PICKLED
        if unbound is None:
            unboundop, = self._unbound
        else:
            unboundop, = _serialize_unbound(unbound)
        if fmt is _PICKLED:
            obj = pickle.dumps(obj)
        _queues.put(self._id, obj, fmt, unboundop)

    def get(self, timeout=None, *,
            _delay=10 / 1000,  # 10 milliseconds
            ):
        """Return the next object from the queue.

        This blocks while the queue is empty.

        If the next item's original interpreter has been destroyed
        then the "next object" is determined by the value of the
        "unbound" argument to put().
        """
        if timeout is not None:
            timeout = int(timeout)
            if timeout < 0:
                raise ValueError('timeout value must be non-negative')
            end = time.time() + timeout
        while True:
            try:
                obj, fmt, unboundop = _queues.get(self._id)
            except QueueEmpty as exc:
                if timeout is not None and time.time() >= end:
                    raise  # re-raise
                time.sleep(_delay)
            else:
                break
        if unboundop is not None:
            assert obj is None, repr(obj)
            return _resolve_unbound(unboundop)
        if fmt == _PICKLED:
            obj = pickle.loads(obj)
        else:
            assert fmt == _SHARED_ONLY
        return obj

    def get_nowait(self):
        """Return the next object from the queue.

        If the queue is empty then raise QueueEmpty.  Otherwise this
        is the same as get().
        """
        try:
            obj, fmt, unboundop = _queues.get(self._id)
        except QueueEmpty as exc:
            raise  # re-raise
        if unboundop is not None:
            assert obj is None, repr(obj)
            return _resolve_unbound(unboundop)
        if fmt == _PICKLED:
            obj = pickle.loads(obj)
        else:
            assert fmt == _SHARED_ONLY
        return obj


_queues._register_heap_types(Queue, QueueEmpty, QueueFull)
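

# Editor's illustrative sketch, not part of the vendored file: the default
# syncobj=False means items are pickled, so any picklable object works.
# A single-interpreter demo keeps it self-contained; in real tests the
# Queue is handed to other interpreters (it is itself shareable).
def _example_queue_usage():
    q = create(maxsize=2)
    q.put({'answer': 42})              # pickled on the way in
    assert q.get(timeout=5) == {'answer': 42}
    assert q.empty()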
29
Dependencies/Python/Lib/test/support/logging_helper.py
vendored
Normal file
@ -0,0 +1,29 @@
import logging.handlers

class TestHandler(logging.handlers.BufferingHandler):
    def __init__(self, matcher):
        # BufferingHandler takes a "capacity" argument
        # so as to know when to flush.  As we're overriding
        # shouldFlush anyway, we can set a capacity of zero.
        # You can call flush() manually to clear out the
        # buffer.
        logging.handlers.BufferingHandler.__init__(self, 0)
        self.matcher = matcher

    def shouldFlush(self):
        return False

    def emit(self, record):
        self.format(record)
        self.buffer.append(record.__dict__)

    def matches(self, **kwargs):
        """
        Look for a saved dict whose keys/values match the supplied arguments.
        """
        result = False
        for d in self.buffer:
            if self.matcher.matches(d, **kwargs):
                result = True
                break
        return result
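

# Editor's illustrative sketch, not part of the vendored file: TestHandler
# only needs a matcher object exposing matches(record_dict, **kwargs); the
# minimal SimpleMatcher below is hypothetical, purely for demonstration.
def _example_test_handler_usage():
    import logging

    class SimpleMatcher:
        def matches(self, d, **kwargs):
            return all(d.get(k) == v for k, v in kwargs.items())

    handler = TestHandler(SimpleMatcher())
    logger = logging.getLogger('example')
    logger.addHandler(handler)
    try:
        logger.warning('something happened')
        assert handler.matches(levelname='WARNING')
    finally:
        logger.removeHandler(handler)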
80
Dependencies/Python/Lib/test/support/numbers.py
vendored
Normal file
@ -0,0 +1,80 @@
# These are shared with test_tokenize and other test modules.
#
# Note: since several test cases filter out floats by looking for "e" and ".",
# don't add hexadecimal literals that contain "e" or "E".
VALID_UNDERSCORE_LITERALS = [
    '0_0_0',
    '4_2',
    '1_0000_0000',
    '0b1001_0100',
    '0xffff_ffff',
    '0o5_7_7',
    '1_00_00.5',
    '1_00_00.5e5',
    '1_00_00e5_1',
    '1e1_0',
    '.1_4',
    '.1_4e1',
    '0b_0',
    '0x_f',
    '0o_5',
    '1_00_00j',
    '1_00_00.5j',
    '1_00_00e5_1j',
    '.1_4j',
    '(1_2.5+3_3j)',
    '(.5_6j)',
]
INVALID_UNDERSCORE_LITERALS = [
    # Trailing underscores:
    '0_',
    '42_',
    '1.4j_',
    '0x_',
    '0b1_',
    '0xf_',
    '0o5_',
    '0 if 1_Else 1',
    # Underscores in the base selector:
    '0_b0',
    '0_xf',
    '0_o5',
    # Old-style octal, still disallowed:
    '0_7',
    '09_99',
    # Multiple consecutive underscores:
    '4_______2',
    '0.1__4',
    '0.1__4j',
    '0b1001__0100',
    '0xffff__ffff',
    '0x___',
    '0o5__77',
    '1e1__0',
    '1e1__0j',
    # Underscore right before a dot:
    '1_.4',
    '1_.4j',
    # Underscore right after a dot:
    '1._4',
    '1._4j',
    '._5',
    '._5j',
    # Underscore right after a sign:
    '1.0e+_1',
    '1.0e+_1j',
    # Underscore right before j:
    '1.4_j',
    '1.4e5_j',
    # Underscore right before e:
    '1_e1',
    '1.4_e1',
    '1.4_e1j',
    # Underscore right after e:
    '1e_1',
    '1.4e_1',
    '1.4e_1j',
    # Complex cases with parens:
    '(1+1.5_j_)',
    '(1+1.5_j)',
]
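

# Editor's illustrative sketch, not part of the vendored file: the two lists
# are typically consumed like this -- every valid literal must compile and
# every invalid one must raise SyntaxError.
def _example_check_underscore_literals():
    for literal in VALID_UNDERSCORE_LITERALS:
        compile(literal, '<test>', 'eval')       # must not raise
    for literal in INVALID_UNDERSCORE_LITERALS:
        try:
            compile(literal, '<test>', 'eval')
        except SyntaxError:
            pass
        else:
            raise AssertionError(f'{literal!r} unexpectedly compiled')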
805
Dependencies/Python/Lib/test/support/os_helper.py
vendored
Normal file
@ -0,0 +1,805 @@
import collections.abc
import contextlib
import errno
import logging
import os
import re
import stat
import string
import sys
import time
import unittest
import warnings

from test import support


# Filename used for testing
TESTFN_ASCII = '@test'

# Disambiguate TESTFN for parallel testing, while letting it remain a valid
# module name.
TESTFN_ASCII = "{}_{}_tmp".format(TESTFN_ASCII, os.getpid())

# TESTFN_UNICODE is a non-ascii filename
TESTFN_UNICODE = TESTFN_ASCII + "-\xe0\xf2\u0258\u0141\u011f"
if support.is_apple:
    # On Apple's VFS API file names are, by definition, canonically
    # decomposed Unicode, encoded using UTF-8.  See QA1173:
    # http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
    import unicodedata
    TESTFN_UNICODE = unicodedata.normalize('NFD', TESTFN_UNICODE)

# TESTFN_UNENCODABLE is a filename (str type) that should *not* be able to be
# encoded by the filesystem encoding (in strict mode).  It can be None if we
# cannot generate such a filename.
TESTFN_UNENCODABLE = None
if os.name == 'nt':
    # skip win32s (0) or Windows 9x/ME (1)
    if sys.getwindowsversion().platform >= 2:
        # Different kinds of characters from various languages to minimize the
        # probability that the whole name is encodable to MBCS (issue #9819)
        TESTFN_UNENCODABLE = TESTFN_ASCII + "-\u5171\u0141\u2661\u0363\uDC80"
        try:
            TESTFN_UNENCODABLE.encode(sys.getfilesystemencoding())
        except UnicodeEncodeError:
            pass
        else:
            print('WARNING: The filename %r CAN be encoded by the filesystem '
                  'encoding (%s). Unicode filename tests may not be effective'
                  % (TESTFN_UNENCODABLE, sys.getfilesystemencoding()))
            TESTFN_UNENCODABLE = None
# Apple and Emscripten deny unencodable filenames (invalid utf-8)
elif not support.is_apple and sys.platform not in {"emscripten", "wasi"}:
    try:
        # ascii and utf-8 cannot encode the byte 0xff
        b'\xff'.decode(sys.getfilesystemencoding())
    except UnicodeDecodeError:
        # 0xff will be encoded using the surrogate character u+DCFF
        TESTFN_UNENCODABLE = TESTFN_ASCII \
            + b'-\xff'.decode(sys.getfilesystemencoding(), 'surrogateescape')
    else:
        # File system encoding (eg. ISO-8859-* encodings) can encode
        # the byte 0xff.  Skip some unicode filename tests.
        pass

# FS_NONASCII: non-ASCII character encodable by os.fsencode(),
# or an empty string if there is no such character.
FS_NONASCII = ''
for character in (
    # First try printable and common characters to have a readable filename.
    # For each character, the encodings listed are just examples of encodings
    # able to encode the character (the list is not exhaustive).

    # U+00E6 (Latin Small Letter Ae): cp1252, iso-8859-1
    '\u00E6',
    # U+0130 (Latin Capital Letter I With Dot Above): cp1254, iso8859_3
    '\u0130',
    # U+0141 (Latin Capital Letter L With Stroke): cp1250, cp1257
    '\u0141',
    # U+03C6 (Greek Small Letter Phi): cp1253
    '\u03C6',
    # U+041A (Cyrillic Capital Letter Ka): cp1251
    '\u041A',
    # U+05D0 (Hebrew Letter Alef): Encodable to cp424
    '\u05D0',
    # U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic
    '\u060C',
    # U+062A (Arabic Letter Teh): cp720
    '\u062A',
    # U+0E01 (Thai Character Ko Kai): cp874
    '\u0E01',

    # Then try more "special" characters. "special" because they may be
    # interpreted or displayed differently depending on the exact locale
    # encoding and the font.

    # U+00A0 (No-Break Space)
    '\u00A0',
    # U+20AC (Euro Sign)
    '\u20AC',
):
    try:
        # If Python is set up to use the legacy 'mbcs' in Windows,
        # 'replace' error mode is used, and encode() returns b'?'
        # for characters missing in the ANSI codepage
        if os.fsdecode(os.fsencode(character)) != character:
            raise UnicodeError
    except UnicodeError:
        pass
    else:
        FS_NONASCII = character
        break

# Save the initial cwd
SAVEDCWD = os.getcwd()

# TESTFN_UNDECODABLE is a filename (bytes type) that should *not* be able to be
# decoded from the filesystem encoding (in strict mode).  It can be None if we
# cannot generate such a filename (ex: the latin1 encoding can decode any byte
# sequence).  On UNIX, TESTFN_UNDECODABLE can be decoded by os.fsdecode()
# thanks to the surrogateescape error handler (PEP 383), but not from the
# filesystem encoding in strict mode.
TESTFN_UNDECODABLE = None
for name in (
    # b'\xff' is not decodable by os.fsdecode() with code page 932.  Windows
    # accepts it to create a file or a directory, or don't accept to enter to
    # such directory (when the bytes name is used).  So test b'\xe7' first:
    # it is not decodable from cp932.
    b'\xe7w\xf0',
    # undecodable from ASCII, UTF-8
    b'\xff',
    # undecodable from iso8859-3, iso8859-6, iso8859-7, cp424, iso8859-8, cp856
    # and cp857
    b'\xae\xd5'
    # undecodable from UTF-8 (UNIX and Mac OS X)
    b'\xed\xb2\x80', b'\xed\xb4\x80',
    # undecodable from shift_jis, cp869, cp874, cp932, cp1250, cp1251, cp1252,
    # cp1253, cp1254, cp1255, cp1257, cp1258
    b'\x81\x98',
):
    try:
        name.decode(sys.getfilesystemencoding())
    except UnicodeDecodeError:
        try:
            name.decode(sys.getfilesystemencoding(),
                        sys.getfilesystemencodeerrors())
        except UnicodeDecodeError:
            continue
        TESTFN_UNDECODABLE = os.fsencode(TESTFN_ASCII) + name
        break

if FS_NONASCII:
    TESTFN_NONASCII = TESTFN_ASCII + FS_NONASCII
else:
    TESTFN_NONASCII = None
TESTFN = TESTFN_NONASCII or TESTFN_ASCII


def make_bad_fd():
    """
    Create an invalid file descriptor by opening and closing a file and return
    its fd.
    """
    file = open(TESTFN, "wb")
    try:
        return file.fileno()
    finally:
        file.close()
        unlink(TESTFN)


_can_symlink = None


def can_symlink():
    global _can_symlink
    if _can_symlink is not None:
        return _can_symlink
    # WASI / wasmtime prevents symlinks with absolute paths, see man
    # openat2(2) RESOLVE_BENEATH.  Almost all symlink tests use absolute
    # paths.  Skip symlink tests on WASI for now.
    src = os.path.abspath(TESTFN)
    symlink_path = src + "can_symlink"
    try:
        os.symlink(src, symlink_path)
        can = True
    except (OSError, NotImplementedError, AttributeError):
        can = False
    else:
        os.remove(symlink_path)
    _can_symlink = can
    return can


def skip_unless_symlink(test):
    """Skip decorator for tests that require functional symlink"""
    ok = can_symlink()
    msg = "Requires functional symlink implementation"
    return test if ok else unittest.skip(msg)(test)
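

# Editor's illustrative sketch, not part of the vendored file: the
# skip_unless_* helpers work as plain decorators on test classes or methods.
def _example_skip_unless_symlink_usage():
    class SymlinkTests(unittest.TestCase):
        @skip_unless_symlink
        def test_roundtrip(self):
            link = TESTFN + '_link'
            os.symlink(TESTFN, link)   # islink() works on dangling links too
            try:
                self.assertTrue(os.path.islink(link))
            finally:
                unlink(link)

    return SymlinkTests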
|
||||
|
||||
|
||||
_can_hardlink = None
|
||||
|
||||
def can_hardlink():
|
||||
global _can_hardlink
|
||||
if _can_hardlink is None:
|
||||
# Android blocks hard links using SELinux
|
||||
# (https://stackoverflow.com/q/32365690).
|
||||
_can_hardlink = hasattr(os, "link") and not support.is_android
|
||||
return _can_hardlink
|
||||
|
||||
|
||||
def skip_unless_hardlink(test):
|
||||
ok = can_hardlink()
|
||||
msg = "requires hardlink support"
|
||||
return test if ok else unittest.skip(msg)(test)
|
||||
|
||||
|
||||
_can_xattr = None
|
||||
|
||||
|
||||
def can_xattr():
|
||||
import tempfile
|
||||
global _can_xattr
|
||||
if _can_xattr is not None:
|
||||
return _can_xattr
|
||||
if not hasattr(os, "setxattr"):
|
||||
can = False
|
||||
else:
|
||||
import platform
|
||||
tmp_dir = tempfile.mkdtemp()
|
||||
tmp_fp, tmp_name = tempfile.mkstemp(dir=tmp_dir)
|
||||
try:
|
||||
with open(TESTFN, "wb") as fp:
|
||||
try:
|
||||
# TESTFN & tempfile may use different file systems with
|
||||
# different capabilities
|
||||
os.setxattr(tmp_fp, b"user.test", b"")
|
||||
os.setxattr(tmp_name, b"trusted.foo", b"42")
|
||||
os.setxattr(fp.fileno(), b"user.test", b"")
|
||||
# Kernels < 2.6.39 don't respect setxattr flags.
|
||||
kernel_version = platform.release()
|
||||
m = re.match(r"2.6.(\d{1,2})", kernel_version)
|
||||
can = m is None or int(m.group(1)) >= 39
|
||||
except OSError:
|
||||
can = False
|
||||
finally:
|
||||
unlink(TESTFN)
|
||||
unlink(tmp_name)
|
||||
rmdir(tmp_dir)
|
||||
_can_xattr = can
|
||||
return can
|
||||
|
||||
|
||||
def skip_unless_xattr(test):
|
||||
"""Skip decorator for tests that require functional extended attributes"""
|
||||
ok = can_xattr()
|
||||
msg = "no non-broken extended attribute support"
|
||||
return test if ok else unittest.skip(msg)(test)
|
||||
|
||||
|
||||
_can_chmod = None
|
||||
|
||||
def can_chmod():
|
||||
global _can_chmod
|
||||
if _can_chmod is not None:
|
||||
return _can_chmod
|
||||
if not hasattr(os, "chmod"):
|
||||
_can_chmod = False
|
||||
return _can_chmod
|
||||
try:
|
||||
with open(TESTFN, "wb") as f:
|
||||
try:
|
||||
os.chmod(TESTFN, 0o555)
|
||||
mode1 = os.stat(TESTFN).st_mode
|
||||
os.chmod(TESTFN, 0o777)
|
||||
mode2 = os.stat(TESTFN).st_mode
|
||||
except OSError as e:
|
||||
can = False
|
||||
else:
|
||||
can = stat.S_IMODE(mode1) != stat.S_IMODE(mode2)
|
||||
finally:
|
||||
unlink(TESTFN)
|
||||
_can_chmod = can
|
||||
return can
|
||||
|
||||
|
||||
def skip_unless_working_chmod(test):
|
||||
"""Skip tests that require working os.chmod()
|
||||
|
||||
WASI SDK 15.0 cannot change file mode bits.
|
||||
"""
|
||||
ok = can_chmod()
|
||||
msg = "requires working os.chmod()"
|
||||
return test if ok else unittest.skip(msg)(test)
|
||||
|
||||
|
||||
# Check whether the current effective user has the capability to override
|
||||
# DAC (discretionary access control). Typically user root is able to
|
||||
# bypass file read, write, and execute permission checks. The capability
|
||||
# is independent of the effective user. See capabilities(7).
|
||||
_can_dac_override = None
|
||||
|
||||
def can_dac_override():
|
||||
global _can_dac_override
|
||||
|
||||
if not can_chmod():
|
||||
_can_dac_override = False
|
||||
if _can_dac_override is not None:
|
||||
return _can_dac_override
|
||||
|
||||
try:
|
||||
with open(TESTFN, "wb") as f:
|
||||
os.chmod(TESTFN, 0o400)
|
||||
try:
|
||||
with open(TESTFN, "wb"):
|
||||
pass
|
||||
except OSError:
|
||||
_can_dac_override = False
|
||||
else:
|
||||
_can_dac_override = True
|
||||
finally:
|
||||
try:
|
||||
os.chmod(TESTFN, 0o700)
|
||||
except OSError:
|
||||
pass
|
||||
unlink(TESTFN)
|
||||
|
||||
return _can_dac_override
|
||||
|
||||
|
||||
def skip_if_dac_override(test):
|
||||
ok = not can_dac_override()
|
||||
msg = "incompatible with CAP_DAC_OVERRIDE"
|
||||
return test if ok else unittest.skip(msg)(test)
|
||||
|
||||
|
||||
def skip_unless_dac_override(test):
|
||||
ok = can_dac_override()
|
||||
msg = "requires CAP_DAC_OVERRIDE"
|
||||
return test if ok else unittest.skip(msg)(test)
|
||||
|
||||
|
||||
def unlink(filename):
|
||||
try:
|
||||
_unlink(filename)
|
||||
except (FileNotFoundError, NotADirectoryError):
|
||||
pass
|
||||
|
||||
|
||||
if sys.platform.startswith("win"):
|
||||
def _waitfor(func, pathname, waitall=False):
|
||||
# Perform the operation
|
||||
func(pathname)
|
||||
# Now setup the wait loop
|
||||
if waitall:
|
||||
dirname = pathname
|
||||
else:
|
||||
dirname, name = os.path.split(pathname)
|
||||
dirname = dirname or '.'
|
||||
# Check for `pathname` to be removed from the filesystem.
|
||||
# The exponential backoff of the timeout amounts to a total
|
||||
# of ~1 second after which the deletion is probably an error
|
||||
# anyway.
|
||||
# Testing on an i7@4.3GHz shows that usually only 1 iteration is
|
||||
# required when contention occurs.
|
||||
timeout = 0.001
|
||||
while timeout < 1.0:
|
||||
# Note we are only testing for the existence of the file(s) in
|
||||
# the contents of the directory regardless of any security or
|
||||
# access rights. If we have made it this far, we have sufficient
|
||||
# permissions to do that much using Python's equivalent of the
|
||||
# Windows API FindFirstFile.
|
||||
# Other Windows APIs can fail or give incorrect results when
|
||||
# dealing with files that are pending deletion.
|
||||
L = os.listdir(dirname)
|
||||
if not (L if waitall else name in L):
|
||||
return
|
||||
# Increase the timeout and try again
|
||||
time.sleep(timeout)
|
||||
timeout *= 2
|
||||
logging.getLogger(__name__).warning(
|
||||
'tests may fail, delete still pending for %s',
|
||||
pathname,
|
||||
stack_info=True,
|
||||
stacklevel=4,
|
||||
)
|
||||
|
||||
def _unlink(filename):
|
||||
_waitfor(os.unlink, filename)
|
||||
|
||||
def _rmdir(dirname):
|
||||
_waitfor(os.rmdir, dirname)
|
||||
|
||||
def _rmtree(path):
|
||||
from test.support import _force_run
|
||||
|
||||
def _rmtree_inner(path):
|
||||
for name in _force_run(path, os.listdir, path):
|
||||
fullname = os.path.join(path, name)
|
||||
try:
|
||||
mode = os.lstat(fullname).st_mode
|
||||
except OSError as exc:
|
||||
print("support.rmtree(): os.lstat(%r) failed with %s"
|
||||
% (fullname, exc),
|
||||
file=sys.__stderr__)
|
||||
mode = 0
|
||||
if stat.S_ISDIR(mode):
|
||||
_waitfor(_rmtree_inner, fullname, waitall=True)
|
||||
_force_run(fullname, os.rmdir, fullname)
|
||||
else:
|
||||
_force_run(fullname, os.unlink, fullname)
|
||||
_waitfor(_rmtree_inner, path, waitall=True)
|
||||
_waitfor(lambda p: _force_run(p, os.rmdir, p), path)
|
||||
|
||||
def _longpath(path):
|
||||
try:
|
||||
import ctypes
|
||||
except ImportError:
|
||||
# No ctypes means we can't expands paths.
|
||||
pass
|
||||
else:
|
||||
buffer = ctypes.create_unicode_buffer(len(path) * 2)
|
||||
length = ctypes.windll.kernel32.GetLongPathNameW(path, buffer,
|
||||
len(buffer))
|
||||
if length:
|
||||
return buffer[:length]
|
||||
return path
|
||||
else:
|
||||
_unlink = os.unlink
|
||||
_rmdir = os.rmdir
|
||||
|
||||
def _rmtree(path):
|
||||
import shutil
|
||||
try:
|
||||
shutil.rmtree(path)
|
||||
return
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
def _rmtree_inner(path):
|
||||
from test.support import _force_run
|
||||
for name in _force_run(path, os.listdir, path):
|
||||
fullname = os.path.join(path, name)
|
||||
try:
|
||||
mode = os.lstat(fullname).st_mode
|
||||
except OSError:
|
||||
mode = 0
|
||||
if stat.S_ISDIR(mode):
|
||||
_rmtree_inner(fullname)
|
||||
_force_run(path, os.rmdir, fullname)
|
||||
else:
|
||||
_force_run(path, os.unlink, fullname)
|
||||
_rmtree_inner(path)
|
||||
os.rmdir(path)
|
||||
|
||||
def _longpath(path):
|
||||
return path
|
||||
|
||||
|
||||
def rmdir(dirname):
|
||||
try:
|
||||
_rmdir(dirname)
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
|
||||
def rmtree(path):
|
||||
try:
|
||||
_rmtree(path)
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def temp_dir(path=None, quiet=False):
|
||||
"""Return a context manager that creates a temporary directory.
|
||||
|
||||
Arguments:
|
||||
|
||||
path: the directory to create temporarily. If omitted or None,
|
||||
defaults to creating a temporary directory using tempfile.mkdtemp.
|
||||
|
||||
quiet: if False (the default), the context manager raises an exception
|
||||
on error. Otherwise, if the path is specified and cannot be
|
||||
created, only a warning is issued.
|
||||
|
||||
"""
|
||||
import tempfile
|
||||
dir_created = False
|
||||
if path is None:
|
||||
path = tempfile.mkdtemp()
|
||||
dir_created = True
|
||||
path = os.path.realpath(path)
|
||||
else:
|
||||
try:
|
||||
os.mkdir(path)
|
||||
dir_created = True
|
||||
except OSError as exc:
|
||||
if not quiet:
|
||||
raise
|
||||
logging.getLogger(__name__).warning(
|
||||
"tests may fail, unable to create temporary directory %r: %s",
|
||||
path,
|
||||
exc,
|
||||
exc_info=exc,
|
||||
stack_info=True,
|
||||
stacklevel=3,
|
||||
)
|
||||
if dir_created:
|
||||
pid = os.getpid()
|
||||
try:
|
||||
yield path
|
||||
finally:
|
||||
# In case the process forks, let only the parent remove the
|
||||
# directory. The child has a different process id. (bpo-30028)
|
||||
if dir_created and pid == os.getpid():
|
||||
rmtree(path)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def change_cwd(path, quiet=False):
|
||||
"""Return a context manager that changes the current working directory.
|
||||
|
||||
Arguments:
|
||||
|
||||
path: the directory to use as the temporary current working directory.
|
||||
|
||||
quiet: if False (the default), the context manager raises an exception
|
||||
on error. Otherwise, it issues only a warning and keeps the current
|
||||
working directory the same.
|
||||
|
||||
"""
|
||||
saved_dir = os.getcwd()
|
||||
try:
|
||||
os.chdir(os.path.realpath(path))
|
||||
except OSError as exc:
|
||||
if not quiet:
|
||||
raise
|
||||
logging.getLogger(__name__).warning(
|
||||
'tests may fail, unable to change the current working directory '
|
||||
'to %r: %s',
|
||||
path,
|
||||
exc,
|
||||
exc_info=exc,
|
||||
stack_info=True,
|
||||
stacklevel=3,
|
||||
)
|
||||
try:
|
||||
yield os.getcwd()
|
||||
finally:
|
||||
os.chdir(saved_dir)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def temp_cwd(name='tempcwd', quiet=False):
|
||||
"""
|
||||
Context manager that temporarily creates and changes the CWD.
|
||||
|
||||
The function temporarily changes the current working directory
|
||||
after creating a temporary directory in the current directory with
|
||||
name *name*. If *name* is None, the temporary directory is
|
||||
created using tempfile.mkdtemp.
|
||||
|
||||
If *quiet* is False (default) and it is not possible to
|
||||
create or change the CWD, an error is raised. If *quiet* is True,
|
||||
only a warning is raised and the original CWD is used.
|
||||
|
||||
"""
|
||||
with temp_dir(path=name, quiet=quiet) as temp_path:
|
||||
with change_cwd(temp_path, quiet=quiet) as cwd_dir:
|
||||
yield cwd_dir
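A minimal sketch of temp_cwd(), under the same import-path assumption:

import os
from test.support.os_helper import temp_cwd

with temp_cwd() as cwd:
    # 'tempcwd' is created in the old CWD, entered, and removed on exit.
    assert os.getcwd() == cwd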
|
||||
|
||||
|
||||
def create_empty_file(filename):
|
||||
"""Create an empty file. If the file already exists, truncate it."""
|
||||
fd = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
|
||||
os.close(fd)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def open_dir_fd(path):
|
||||
"""Open a file descriptor to a directory."""
|
||||
assert os.path.isdir(path)
|
||||
flags = os.O_RDONLY
|
||||
if hasattr(os, "O_DIRECTORY"):
|
||||
flags |= os.O_DIRECTORY
|
||||
dir_fd = os.open(path, flags)
|
||||
try:
|
||||
yield dir_fd
|
||||
finally:
|
||||
os.close(dir_fd)
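A usage sketch for open_dir_fd(); passing the descriptor to os.listdir() assumes a platform with directory file descriptor support (most POSIX systems):

import os
from test.support.os_helper import open_dir_fd

with open_dir_fd(os.curdir) as dir_fd:
    # The descriptor can be handed to fd-aware os functions.
    names = os.listdir(dir_fd)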
|
||||
|
||||
|
||||
def fs_is_case_insensitive(directory):
|
||||
"""Detects if the file system for the specified directory
|
||||
is case-insensitive."""
|
||||
import tempfile
|
||||
with tempfile.NamedTemporaryFile(dir=directory) as base:
|
||||
base_path = base.name
|
||||
case_path = base_path.upper()
|
||||
if case_path == base_path:
|
||||
case_path = base_path.lower()
|
||||
try:
|
||||
return os.path.samefile(base_path, case_path)
|
||||
except FileNotFoundError:
|
||||
return False
|
||||
|
||||
|
||||
class FakePath:
|
||||
"""Simple implementation of the path protocol.
|
||||
"""
|
||||
def __init__(self, path):
|
||||
self.path = path
|
||||
|
||||
def __repr__(self):
|
||||
return f'<FakePath {self.path!r}>'
|
||||
|
||||
def __fspath__(self):
|
||||
if (isinstance(self.path, BaseException) or
|
||||
isinstance(self.path, type) and
|
||||
issubclass(self.path, BaseException)):
|
||||
raise self.path
|
||||
else:
|
||||
return self.path
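A sketch of both FakePath behaviours, a plain value versus a raised exception:

import os
from test.support.os_helper import FakePath

assert os.fspath(FakePath("/tmp/example")) == "/tmp/example"
try:
    os.fspath(FakePath(ZeroDivisionError))   # an exception class is raised as-is
except ZeroDivisionError:
    pass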
|
||||
|
||||
|
||||
def fd_count():
|
||||
"""Count the number of open file descriptors.
|
||||
"""
|
||||
if sys.platform.startswith(('linux', 'android', 'freebsd', 'emscripten')):
|
||||
fd_path = "/proc/self/fd"
|
||||
elif sys.platform == "darwin":
|
||||
fd_path = "/dev/fd"
|
||||
else:
|
||||
fd_path = None
|
||||
|
||||
if fd_path is not None:
|
||||
try:
|
||||
names = os.listdir(fd_path)
|
||||
# Subtract one because listdir() internally opens a file
|
||||
# descriptor to list the content of the directory.
|
||||
return len(names) - 1
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
MAXFD = 256
|
||||
if hasattr(os, 'sysconf'):
|
||||
try:
|
||||
MAXFD = os.sysconf("SC_OPEN_MAX")
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
old_modes = None
|
||||
if sys.platform == 'win32':
|
||||
# bpo-25306, bpo-31009: Call CrtSetReportMode() to not kill the process
|
||||
# on invalid file descriptor if Python is compiled in debug mode
|
||||
try:
|
||||
import msvcrt
|
||||
msvcrt.CrtSetReportMode
|
||||
except (AttributeError, ImportError):
|
||||
# no msvcrt or a release build
|
||||
pass
|
||||
else:
|
||||
old_modes = {}
|
||||
for report_type in (msvcrt.CRT_WARN,
|
||||
msvcrt.CRT_ERROR,
|
||||
msvcrt.CRT_ASSERT):
|
||||
old_modes[report_type] = msvcrt.CrtSetReportMode(report_type,
|
||||
0)
|
||||
|
||||
try:
|
||||
count = 0
|
||||
for fd in range(MAXFD):
|
||||
try:
|
||||
# Prefer dup() over fstat(). fstat() can require input/output
|
||||
# whereas dup() doesn't.
|
||||
fd2 = os.dup(fd)
|
||||
except OSError as e:
|
||||
if e.errno != errno.EBADF:
|
||||
raise
|
||||
else:
|
||||
os.close(fd2)
|
||||
count += 1
|
||||
finally:
|
||||
if old_modes is not None:
|
||||
for report_type in (msvcrt.CRT_WARN,
|
||||
msvcrt.CRT_ERROR,
|
||||
msvcrt.CRT_ASSERT):
|
||||
msvcrt.CrtSetReportMode(report_type, old_modes[report_type])
|
||||
|
||||
return count
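A sketch of using fd_count() to check for descriptor leaks; the arithmetic assumes no other thread opens descriptors concurrently:

import os
from test.support.os_helper import fd_count

before = fd_count()
with open(os.devnull) as f:
    assert fd_count() == before + 1
assert fd_count() == before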
|
||||
|
||||
|
||||
if hasattr(os, "umask"):
|
||||
@contextlib.contextmanager
|
||||
def temp_umask(umask):
|
||||
"""Context manager that temporarily sets the process umask."""
|
||||
oldmask = os.umask(umask)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
os.umask(oldmask)
|
||||
else:
|
||||
@contextlib.contextmanager
|
||||
def temp_umask(umask):
|
||||
"""no-op on platforms without umask()"""
|
||||
yield
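A temp_umask() sketch; the file name is illustrative and the resulting mode applies on POSIX:

import os
from test.support.os_helper import temp_umask

with temp_umask(0o077):
    # Created with mode 0o600 (0o666 masked by the temporary umask).
    fd = os.open("private-scratch.txt", os.O_CREAT | os.O_WRONLY, 0o666)
    os.close(fd)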
|
||||
|
||||
|
||||
class EnvironmentVarGuard(collections.abc.MutableMapping):
|
||||
"""Class to help protect the environment variable properly.
|
||||
|
||||
Can be used as a context manager.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self._environ = os.environ
|
||||
self._changed = {}
|
||||
|
||||
def __getitem__(self, envvar):
|
||||
return self._environ[envvar]
|
||||
|
||||
def __setitem__(self, envvar, value):
|
||||
# Remember the initial value on the first access
|
||||
if envvar not in self._changed:
|
||||
self._changed[envvar] = self._environ.get(envvar)
|
||||
self._environ[envvar] = value
|
||||
|
||||
def __delitem__(self, envvar):
|
||||
# Remember the initial value on the first access
|
||||
if envvar not in self._changed:
|
||||
self._changed[envvar] = self._environ.get(envvar)
|
||||
if envvar in self._environ:
|
||||
del self._environ[envvar]
|
||||
|
||||
def keys(self):
|
||||
return self._environ.keys()
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self._environ)
|
||||
|
||||
def __len__(self):
|
||||
return len(self._environ)
|
||||
|
||||
def set(self, envvar, value):
|
||||
self[envvar] = value
|
||||
|
||||
def unset(self, envvar, /, *envvars):
|
||||
"""Unset one or more environment variables."""
|
||||
for ev in (envvar, *envvars):
|
||||
del self[ev]
|
||||
|
||||
def copy(self):
|
||||
# We do what os.environ.copy() does.
|
||||
return dict(self)
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, *ignore_exc):
|
||||
for (k, v) in self._changed.items():
|
||||
if v is None:
|
||||
if k in self._environ:
|
||||
del self._environ[k]
|
||||
else:
|
||||
self._environ[k] = v
|
||||
os.environ = self._environ
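A minimal EnvironmentVarGuard sketch; the variable name is hypothetical:

from test.support.os_helper import EnvironmentVarGuard

with EnvironmentVarGuard() as env:
    env.set("DEMO_FLAG", "1")    # hypothetical variable
    env.unset("TMPDIR")          # no error even if TMPDIR was never set
# All changes are rolled back when the block exits.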
|
||||
|
||||
|
||||
try:
|
||||
if support.MS_WINDOWS:
|
||||
import ctypes
|
||||
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
|
||||
|
||||
ERROR_FILE_NOT_FOUND = 2
|
||||
DDD_REMOVE_DEFINITION = 2
|
||||
DDD_EXACT_MATCH_ON_REMOVE = 4
|
||||
DDD_NO_BROADCAST_SYSTEM = 8
|
||||
else:
|
||||
raise AttributeError
|
||||
except (ImportError, AttributeError):
|
||||
def subst_drive(path):
|
||||
raise unittest.SkipTest('ctypes or kernel32 is not available')
|
||||
else:
|
||||
@contextlib.contextmanager
|
||||
def subst_drive(path):
|
||||
"""Temporarily yield a substitute drive for a given path."""
|
||||
for c in reversed(string.ascii_uppercase):
|
||||
drive = f'{c}:'
|
||||
if (not kernel32.QueryDosDeviceW(drive, None, 0) and
|
||||
ctypes.get_last_error() == ERROR_FILE_NOT_FOUND):
|
||||
break
|
||||
else:
|
||||
raise unittest.SkipTest('no available logical drive')
|
||||
if not kernel32.DefineDosDeviceW(
|
||||
DDD_NO_BROADCAST_SYSTEM, drive, path):
|
||||
raise ctypes.WinError(ctypes.get_last_error())
|
||||
try:
|
||||
yield drive
|
||||
finally:
|
||||
if not kernel32.DefineDosDeviceW(
|
||||
DDD_REMOVE_DEFINITION | DDD_EXACT_MATCH_ON_REMOVE,
|
||||
drive, path):
|
||||
raise ctypes.WinError(ctypes.get_last_error())
|
80
Dependencies/Python/Lib/test/support/pty_helper.py
vendored
Normal file
@ -0,0 +1,80 @@
|
||||
"""
|
||||
Helper to run a script in a pseudo-terminal.
|
||||
"""
|
||||
import os
|
||||
import selectors
|
||||
import subprocess
|
||||
import sys
|
||||
from contextlib import ExitStack
|
||||
from errno import EIO
|
||||
|
||||
from test.support.import_helper import import_module
|
||||
|
||||
def run_pty(script, input=b"dummy input\r", env=None):
|
||||
pty = import_module('pty')
|
||||
output = bytearray()
|
||||
[master, slave] = pty.openpty()
|
||||
args = (sys.executable, '-c', script)
|
||||
proc = subprocess.Popen(args, stdin=slave, stdout=slave, stderr=slave, env=env)
|
||||
os.close(slave)
|
||||
with ExitStack() as cleanup:
|
||||
cleanup.enter_context(proc)
|
||||
def terminate(proc):
|
||||
try:
|
||||
proc.terminate()
|
||||
except ProcessLookupError:
|
||||
# Workaround for Open/Net BSD bug (Issue 16762)
|
||||
pass
|
||||
cleanup.callback(terminate, proc)
|
||||
cleanup.callback(os.close, master)
|
||||
# Avoid using DefaultSelector and PollSelector. Kqueue() does not
|
||||
# work with pseudo-terminals on OS X < 10.9 (Issue 20365) and Open
|
||||
# BSD (Issue 20667). Poll() does not work with OS X 10.6 or 10.4
|
||||
# either (Issue 20472). Hopefully the file descriptor is low enough
|
||||
# to use with select().
|
||||
sel = cleanup.enter_context(selectors.SelectSelector())
|
||||
sel.register(master, selectors.EVENT_READ | selectors.EVENT_WRITE)
|
||||
os.set_blocking(master, False)
|
||||
while True:
|
||||
for [_, events] in sel.select():
|
||||
if events & selectors.EVENT_READ:
|
||||
try:
|
||||
chunk = os.read(master, 0x10000)
|
||||
except OSError as err:
|
||||
# Linux raises EIO when slave is closed (Issue 5380)
|
||||
if err.errno != EIO:
|
||||
raise
|
||||
chunk = b""
|
||||
if not chunk:
|
||||
return output
|
||||
output.extend(chunk)
|
||||
if events & selectors.EVENT_WRITE:
|
||||
try:
|
||||
input = input[os.write(master, input):]
|
||||
except OSError as err:
|
||||
# Apparently EIO means the slave was closed
|
||||
if err.errno != EIO:
|
||||
raise
|
||||
input = b"" # Stop writing
|
||||
if not input:
|
||||
sel.modify(master, selectors.EVENT_READ)
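A minimal run_pty() sketch; it requires the pty module, so it only runs on POSIX platforms:

from test.support.pty_helper import run_pty

# The terminal echoes the input, so it appears in the captured output.
output = run_pty("print(input())", input=b"hello\r")
assert b"hello" in output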
|
||||
|
||||
|
||||
######################################################################
|
||||
## Fake stdin (for testing interactive debugging)
|
||||
######################################################################
|
||||
|
||||
class FakeInput:
|
||||
"""
|
||||
A fake input stream for pdb's interactive debugger. Whenever a
|
||||
line is read, print it (to simulate the user typing it), and then
|
||||
return it. The set of lines to return is specified in the
|
||||
constructor; they should not have trailing newlines.
|
||||
"""
|
||||
def __init__(self, lines):
|
||||
self.lines = lines
|
||||
|
||||
def readline(self):
|
||||
line = self.lines.pop(0)
|
||||
print(line)
|
||||
return line + '\n'
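A sketch of wiring FakeInput into a pdb test by swapping sys.stdin; the pdb invocation itself is omitted:

import sys
from test.support.pty_helper import FakeInput

real_stdin = sys.stdin
try:
    sys.stdin = FakeInput(["p 1 + 1", "continue"])
    # pdb.run('...') would now consume these lines as if typed by a user.
finally:
    sys.stdin = real_stdin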
|
8
Dependencies/Python/Lib/test/support/refleak_helper.py
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
"""
|
||||
Utilities for changing test behaviour while hunting
|
||||
for refleaks
|
||||
"""
|
||||
|
||||
_hunting_for_refleaks = False
|
||||
def hunting_for_refleaks():
|
||||
return _hunting_for_refleaks
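A sketch of how a test might consult this flag; the surrounding logic is hypothetical:

from test.support import refleak_helper

if refleak_helper.hunting_for_refleaks():
    # Avoid per-run caches that regrtest -R would report as leaks.
    cache_results = False
else:
    cache_results = True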
|
322
Dependencies/Python/Lib/test/support/script_helper.py
vendored
Normal file
@ -0,0 +1,322 @@
|
||||
# Common utility functions used by various script execution tests
|
||||
# e.g. test_cmd_line, test_cmd_line_script and test_runpy
|
||||
|
||||
import collections
|
||||
import importlib
|
||||
import sys
|
||||
import os
|
||||
import os.path
|
||||
import subprocess
|
||||
import py_compile
|
||||
|
||||
from importlib.util import source_from_cache
|
||||
from test import support
|
||||
from test.support.import_helper import make_legacy_pyc
|
||||
|
||||
|
||||
# Cached result of the expensive test performed in the function below.
|
||||
__cached_interp_requires_environment = None
|
||||
|
||||
|
||||
def interpreter_requires_environment():
|
||||
"""
|
||||
Returns True if our sys.executable interpreter requires environment
|
||||
variables in order to be able to run at all.
|
||||
|
||||
This is designed to be used with @unittest.skipIf() to annotate tests
|
||||
that need to use an assert_python*() function to launch an isolated
|
||||
mode (-I) or no environment mode (-E) sub-interpreter process.
|
||||
|
||||
A normal build & test does not run into this situation but it can happen
|
||||
when trying to run the standard library test suite from an interpreter that
|
||||
doesn't have an obvious home with Python's current home finding logic.
|
||||
|
||||
Setting PYTHONHOME is one way to get most of the testsuite to run in that
|
||||
situation. PYTHONPATH or PYTHONNOUSERSITE are other common environment
|
||||
variables that might impact whether or not the interpreter can start.
|
||||
"""
|
||||
global __cached_interp_requires_environment
|
||||
if __cached_interp_requires_environment is None:
|
||||
# If PYTHONHOME is set, assume that we need it
|
||||
if 'PYTHONHOME' in os.environ:
|
||||
__cached_interp_requires_environment = True
|
||||
return True
|
||||
# cannot run subprocess, assume we don't need it
|
||||
if not support.has_subprocess_support:
|
||||
__cached_interp_requires_environment = False
|
||||
return False
|
||||
|
||||
# Try running an interpreter with -E to see if it works or not.
|
||||
try:
|
||||
subprocess.check_call([sys.executable, '-E',
|
||||
'-c', 'import sys; sys.exit(0)'])
|
||||
except subprocess.CalledProcessError:
|
||||
__cached_interp_requires_environment = True
|
||||
else:
|
||||
__cached_interp_requires_environment = False
|
||||
|
||||
return __cached_interp_requires_environment
|
||||
|
||||
|
||||
class _PythonRunResult(collections.namedtuple("_PythonRunResult",
|
||||
("rc", "out", "err"))):
|
||||
"""Helper for reporting Python subprocess run results"""
|
||||
def fail(self, cmd_line):
|
||||
"""Provide helpful details about failed subcommand runs"""
|
||||
# Limit to roughly 300 lines of 100 ASCII characters each
|
||||
maxlen = 300 * 100
|
||||
out, err = self.out, self.err
|
||||
if len(out) > maxlen:
|
||||
out = b'(... truncated stdout ...)' + out[-maxlen:]
|
||||
if len(err) > maxlen:
|
||||
err = b'(... truncated stderr ...)' + err[-maxlen:]
|
||||
out = out.decode('ascii', 'replace').rstrip()
|
||||
err = err.decode('ascii', 'replace').rstrip()
|
||||
raise AssertionError("Process return code is %d\n"
|
||||
"command line: %r\n"
|
||||
"\n"
|
||||
"stdout:\n"
|
||||
"---\n"
|
||||
"%s\n"
|
||||
"---\n"
|
||||
"\n"
|
||||
"stderr:\n"
|
||||
"---\n"
|
||||
"%s\n"
|
||||
"---"
|
||||
% (self.rc, cmd_line,
|
||||
out,
|
||||
err))
|
||||
|
||||
|
||||
# Executing the interpreter in a subprocess
|
||||
@support.requires_subprocess()
|
||||
def run_python_until_end(*args, **env_vars):
|
||||
"""Used to implement assert_python_*.
|
||||
|
||||
*args are the command line flags to pass to the python interpreter.
|
||||
**env_vars keyword arguments are environment variables to set on the process.
|
||||
|
||||
If __run_using_command= is supplied, it must be a list of
|
||||
command line arguments to prepend to the command line used.
|
||||
Useful when you want to run another command that should launch the
|
||||
python interpreter via its own arguments. ["/bin/echo", "--"] for
|
||||
example could print the unquoted python command line instead of
|
||||
running it.
|
||||
"""
|
||||
env_required = interpreter_requires_environment()
|
||||
run_using_command = env_vars.pop('__run_using_command', None)
|
||||
cwd = env_vars.pop('__cwd', None)
|
||||
if '__isolated' in env_vars:
|
||||
isolated = env_vars.pop('__isolated')
|
||||
else:
|
||||
isolated = not env_vars and not env_required
|
||||
cmd_line = [sys.executable, '-X', 'faulthandler']
|
||||
if run_using_command:
|
||||
cmd_line = run_using_command + cmd_line
|
||||
if isolated:
|
||||
# isolated mode: ignore Python environment variables, ignore user
|
||||
# site-packages, and don't add the current directory to sys.path
|
||||
cmd_line.append('-I')
|
||||
elif not env_vars and not env_required:
|
||||
# ignore Python environment variables
|
||||
cmd_line.append('-E')
|
||||
|
||||
# But a special flag that can be set to override -- in this case, the
|
||||
# caller is responsible to pass the full environment.
|
||||
if env_vars.pop('__cleanenv', None):
|
||||
env = {}
|
||||
if sys.platform == 'win32':
|
||||
# Windows requires at least the SYSTEMROOT environment variable to
|
||||
# start Python.
|
||||
env['SYSTEMROOT'] = os.environ['SYSTEMROOT']
|
||||
|
||||
# Other interesting environment variables, not copied currently:
|
||||
# COMSPEC, HOME, PATH, TEMP, TMPDIR, TMP.
|
||||
else:
|
||||
# Need to preserve the original environment, for in-place testing of
|
||||
# shared library builds.
|
||||
env = os.environ.copy()
|
||||
|
||||
# set TERM='' unless the TERM environment variable is passed explicitly
|
||||
# see issues #11390 and #18300
|
||||
if 'TERM' not in env_vars:
|
||||
env['TERM'] = ''
|
||||
|
||||
env.update(env_vars)
|
||||
cmd_line.extend(args)
|
||||
proc = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
|
||||
env=env, cwd=cwd)
|
||||
with proc:
|
||||
try:
|
||||
out, err = proc.communicate()
|
||||
finally:
|
||||
proc.kill()
|
||||
subprocess._cleanup()
|
||||
rc = proc.returncode
|
||||
return _PythonRunResult(rc, out, err), cmd_line
|
||||
|
||||
|
||||
@support.requires_subprocess()
|
||||
def _assert_python(expected_success, /, *args, **env_vars):
|
||||
res, cmd_line = run_python_until_end(*args, **env_vars)
|
||||
if (res.rc and expected_success) or (not res.rc and not expected_success):
|
||||
res.fail(cmd_line)
|
||||
return res
|
||||
|
||||
|
||||
def assert_python_ok(*args, **env_vars):
|
||||
"""
|
||||
Assert that running the interpreter with `args` and optional environment
|
||||
variables `env_vars` succeeds (rc == 0) and return a (return code, stdout,
|
||||
stderr) tuple.
|
||||
|
||||
If the __cleanenv keyword is set, env_vars is used as a fresh environment.
|
||||
|
||||
Python is started in isolated mode (command line option -I),
|
||||
except if the __isolated keyword is set to False.
|
||||
"""
|
||||
return _assert_python(True, *args, **env_vars)
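A minimal assert_python_ok() sketch (out and err are bytes):

from test.support.script_helper import assert_python_ok

rc, out, err = assert_python_ok("-c", "print('ok')")
assert rc == 0 and out.strip() == b"ok"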
|
||||
|
||||
|
||||
def assert_python_failure(*args, **env_vars):
|
||||
"""
|
||||
Assert that running the interpreter with `args` and optional environment
|
||||
variables `env_vars` fails (rc != 0) and return a (return code, stdout,
|
||||
stderr) tuple.
|
||||
|
||||
See assert_python_ok() for more options.
|
||||
"""
|
||||
return _assert_python(False, *args, **env_vars)
|
||||
|
||||
|
||||
@support.requires_subprocess()
|
||||
def spawn_python(*args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kw):
|
||||
"""Run a Python subprocess with the given arguments.
|
||||
|
||||
kw is extra keyword args to pass to subprocess.Popen. Returns a Popen
|
||||
object.
|
||||
"""
|
||||
cmd_line = [sys.executable]
|
||||
if not interpreter_requires_environment():
|
||||
cmd_line.append('-E')
|
||||
cmd_line.extend(args)
|
||||
# Under Fedora (?), GNU readline can output junk on stderr when initialized,
|
||||
# depending on the TERM setting. Setting TERM=vt100 is supposed to disable
|
||||
# that. References:
|
||||
# - http://reinout.vanrees.org/weblog/2009/08/14/readline-invisible-character-hack.html
|
||||
# - http://stackoverflow.com/questions/15760712/python-readline-module-prints-escape-character-during-import
|
||||
# - http://lists.gnu.org/archive/html/bug-readline/2007-08/msg00004.html
|
||||
env = kw.setdefault('env', dict(os.environ))
|
||||
env['TERM'] = 'vt100'
|
||||
return subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
|
||||
stdout=stdout, stderr=stderr,
|
||||
**kw)
|
||||
|
||||
|
||||
def kill_python(p):
|
||||
"""Run the given Popen process until completion and return stdout."""
|
||||
p.stdin.close()
|
||||
data = p.stdout.read()
|
||||
p.stdout.close()
|
||||
# try to cleanup the child so we don't appear to leak when running
|
||||
# with regrtest -R.
|
||||
p.wait()
|
||||
subprocess._cleanup()
|
||||
return data
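A sketch pairing spawn_python() with kill_python():

from test.support.script_helper import spawn_python, kill_python

proc = spawn_python("-c", "print('spawned')")
data = kill_python(proc)     # closes stdin, drains stdout, waits
assert b"spawned" in data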
|
||||
|
||||
|
||||
def make_script(script_dir, script_basename, source, omit_suffix=False):
|
||||
script_filename = script_basename
|
||||
if not omit_suffix:
|
||||
script_filename += os.extsep + 'py'
|
||||
script_name = os.path.join(script_dir, script_filename)
|
||||
if isinstance(source, str):
|
||||
# The script should be encoded to UTF-8, the default string encoding
|
||||
with open(script_name, 'w', encoding='utf-8') as script_file:
|
||||
script_file.write(source)
|
||||
else:
|
||||
with open(script_name, 'wb') as script_file:
|
||||
script_file.write(source)
|
||||
importlib.invalidate_caches()
|
||||
return script_name
|
||||
|
||||
|
||||
def make_zip_script(zip_dir, zip_basename, script_name, name_in_zip=None):
|
||||
import zipfile
|
||||
zip_filename = zip_basename+os.extsep+'zip'
|
||||
zip_name = os.path.join(zip_dir, zip_filename)
|
||||
with zipfile.ZipFile(zip_name, 'w') as zip_file:
|
||||
if name_in_zip is None:
|
||||
parts = script_name.split(os.sep)
|
||||
if len(parts) >= 2 and parts[-2] == '__pycache__':
|
||||
legacy_pyc = make_legacy_pyc(source_from_cache(script_name))
|
||||
name_in_zip = os.path.basename(legacy_pyc)
|
||||
script_name = legacy_pyc
|
||||
else:
|
||||
name_in_zip = os.path.basename(script_name)
|
||||
zip_file.write(script_name, name_in_zip)
|
||||
#if test.support.verbose:
|
||||
# with zipfile.ZipFile(zip_name, 'r') as zip_file:
|
||||
# print 'Contents of %r:' % zip_name
|
||||
# zip_file.printdir()
|
||||
return zip_name, os.path.join(zip_name, name_in_zip)
|
||||
|
||||
|
||||
def make_pkg(pkg_dir, init_source=''):
|
||||
os.mkdir(pkg_dir)
|
||||
make_script(pkg_dir, '__init__', init_source)
|
||||
|
||||
|
||||
def make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
|
||||
source, depth=1, compiled=False):
|
||||
import zipfile
|
||||
unlink = []
|
||||
init_name = make_script(zip_dir, '__init__', '')
|
||||
unlink.append(init_name)
|
||||
init_basename = os.path.basename(init_name)
|
||||
script_name = make_script(zip_dir, script_basename, source)
|
||||
unlink.append(script_name)
|
||||
if compiled:
|
||||
init_name = py_compile.compile(init_name, doraise=True)
|
||||
script_name = py_compile.compile(script_name, doraise=True)
|
||||
unlink.extend((init_name, script_name))
|
||||
pkg_names = [os.sep.join([pkg_name]*i) for i in range(1, depth+1)]
|
||||
script_name_in_zip = os.path.join(pkg_names[-1], os.path.basename(script_name))
|
||||
zip_filename = zip_basename+os.extsep+'zip'
|
||||
zip_name = os.path.join(zip_dir, zip_filename)
|
||||
with zipfile.ZipFile(zip_name, 'w') as zip_file:
|
||||
for name in pkg_names:
|
||||
init_name_in_zip = os.path.join(name, init_basename)
|
||||
zip_file.write(init_name, init_name_in_zip)
|
||||
zip_file.write(script_name, script_name_in_zip)
|
||||
for name in unlink:
|
||||
os.unlink(name)
|
||||
#if test.support.verbose:
|
||||
# with zipfile.ZipFile(zip_name, 'r') as zip_file:
|
||||
# print 'Contents of %r:' % zip_name
|
||||
# zip_file.printdir()
|
||||
return zip_name, os.path.join(zip_name, script_name_in_zip)
|
||||
|
||||
|
||||
@support.requires_subprocess()
|
||||
def run_test_script(script):
|
||||
# use -u to try to get the full output if the test hangs or crashes
|
||||
if support.verbose:
|
||||
def title(text):
|
||||
return f"===== {text} ======"
|
||||
|
||||
name = f"script {os.path.basename(script)}"
|
||||
print()
|
||||
print(title(name), flush=True)
|
||||
# In verbose mode, the child process inherits stdout and stderr,
|
||||
# to see output in real time and reduce the risk of losing output.
|
||||
args = [sys.executable, "-E", "-X", "faulthandler", "-u", script, "-v"]
|
||||
proc = subprocess.run(args)
|
||||
print(title(f"{name} completed: exit code {proc.returncode}"),
|
||||
flush=True)
|
||||
if proc.returncode:
|
||||
raise AssertionError(f"{name} failed")
|
||||
else:
|
||||
assert_python_ok("-u", script, "-v")
|
873
Dependencies/Python/Lib/test/support/smtpd.py
vendored
Normal file
@ -0,0 +1,873 @@
|
||||
#! /usr/bin/env python3
|
||||
"""An RFC 5321 smtp proxy with optional RFC 1870 and RFC 6531 extensions.
|
||||
|
||||
Usage: %(program)s [options] [localhost:localport [remotehost:remoteport]]
|
||||
|
||||
Options:
|
||||
|
||||
--nosetuid
|
||||
-n
|
||||
This program generally tries to setuid `nobody', unless this flag is
|
||||
set. The setuid call will fail if this program is not run as root (in
|
||||
which case, use this flag).
|
||||
|
||||
--version
|
||||
-V
|
||||
Print the version number and exit.
|
||||
|
||||
--class classname
|
||||
-c classname
|
||||
Use `classname' as the concrete SMTP proxy class. Uses `PureProxy' by
|
||||
default.
|
||||
|
||||
--size limit
|
||||
-s limit
|
||||
Restrict the total size of the incoming message to "limit" number of
|
||||
bytes via the RFC 1870 SIZE extension. Defaults to 33554432 bytes.
|
||||
|
||||
--smtputf8
|
||||
-u
|
||||
Enable the SMTPUTF8 extension and behave as an RFC 6531 smtp proxy.
|
||||
|
||||
--debug
|
||||
-d
|
||||
Turn on debugging prints.
|
||||
|
||||
--help
|
||||
-h
|
||||
Print this message and exit.
|
||||
|
||||
Version: %(__version__)s
|
||||
|
||||
If localhost is not given then `localhost' is used, and if localport is not
|
||||
given then 8025 is used. If remotehost is not given then `localhost' is used,
|
||||
and if remoteport is not given, then 25 is used.
|
||||
"""
|
||||
|
||||
# Overview:
|
||||
#
|
||||
# This file implements the minimal SMTP protocol as defined in RFC 5321. It
|
||||
# has a hierarchy of classes which implement the backend functionality for the
|
||||
# smtpd. A number of classes are provided:
|
||||
#
|
||||
# SMTPServer - the base class for the backend. Raises NotImplementedError
|
||||
# if you try to use it.
|
||||
#
|
||||
# DebuggingServer - simply prints each message it receives on stdout.
|
||||
#
|
||||
# PureProxy - Proxies all messages to a real smtpd which does final
|
||||
# delivery. One known problem with this class is that it doesn't handle
|
||||
# SMTP errors from the backend server at all. This should be fixed
|
||||
# (contributions are welcome!).
|
||||
#
|
||||
#
|
||||
# Author: Barry Warsaw <barry@python.org>
|
||||
#
|
||||
# TODO:
|
||||
#
|
||||
# - support mailbox delivery
|
||||
# - alias files
|
||||
# - Handle more ESMTP extensions
|
||||
# - handle error codes from the backend smtpd
|
||||
|
||||
import sys
|
||||
import os
|
||||
import errno
|
||||
import getopt
|
||||
import time
|
||||
import socket
|
||||
import collections
|
||||
from test.support import asyncore, asynchat
|
||||
from warnings import warn
|
||||
from email._header_value_parser import get_addr_spec, get_angle_addr
|
||||
|
||||
__all__ = [
|
||||
"SMTPChannel", "SMTPServer", "DebuggingServer", "PureProxy",
|
||||
]
|
||||
|
||||
program = sys.argv[0]
|
||||
__version__ = 'Python SMTP proxy version 0.3'
|
||||
|
||||
|
||||
class Devnull:
|
||||
def write(self, msg): pass
|
||||
def flush(self): pass
|
||||
|
||||
|
||||
DEBUGSTREAM = Devnull()
|
||||
NEWLINE = '\n'
|
||||
COMMASPACE = ', '
|
||||
DATA_SIZE_DEFAULT = 33554432
|
||||
|
||||
|
||||
def usage(code, msg=''):
|
||||
print(__doc__ % globals(), file=sys.stderr)
|
||||
if msg:
|
||||
print(msg, file=sys.stderr)
|
||||
sys.exit(code)
|
||||
|
||||
|
||||
class SMTPChannel(asynchat.async_chat):
|
||||
COMMAND = 0
|
||||
DATA = 1
|
||||
|
||||
command_size_limit = 512
|
||||
command_size_limits = collections.defaultdict(lambda x=command_size_limit: x)
|
||||
|
||||
@property
|
||||
def max_command_size_limit(self):
|
||||
try:
|
||||
return max(self.command_size_limits.values())
|
||||
except ValueError:
|
||||
return self.command_size_limit
|
||||
|
||||
def __init__(self, server, conn, addr, data_size_limit=DATA_SIZE_DEFAULT,
|
||||
map=None, enable_SMTPUTF8=False, decode_data=False):
|
||||
asynchat.async_chat.__init__(self, conn, map=map)
|
||||
self.smtp_server = server
|
||||
self.conn = conn
|
||||
self.addr = addr
|
||||
self.data_size_limit = data_size_limit
|
||||
self.enable_SMTPUTF8 = enable_SMTPUTF8
|
||||
self._decode_data = decode_data
|
||||
if enable_SMTPUTF8 and decode_data:
|
||||
raise ValueError("decode_data and enable_SMTPUTF8 cannot"
|
||||
" be set to True at the same time")
|
||||
if decode_data:
|
||||
self._emptystring = ''
|
||||
self._linesep = '\r\n'
|
||||
self._dotsep = '.'
|
||||
self._newline = NEWLINE
|
||||
else:
|
||||
self._emptystring = b''
|
||||
self._linesep = b'\r\n'
|
||||
self._dotsep = ord(b'.')
|
||||
self._newline = b'\n'
|
||||
self._set_rset_state()
|
||||
self.seen_greeting = ''
|
||||
self.extended_smtp = False
|
||||
self.command_size_limits.clear()
|
||||
self.fqdn = socket.getfqdn()
|
||||
try:
|
||||
self.peer = conn.getpeername()
|
||||
except OSError as err:
|
||||
# a race condition may occur if the other end is closing
|
||||
# before we can get the peername
|
||||
self.close()
|
||||
if err.errno != errno.ENOTCONN:
|
||||
raise
|
||||
return
|
||||
print('Peer:', repr(self.peer), file=DEBUGSTREAM)
|
||||
self.push('220 %s %s' % (self.fqdn, __version__))
|
||||
|
||||
def _set_post_data_state(self):
|
||||
"""Reset state variables to their post-DATA state."""
|
||||
self.smtp_state = self.COMMAND
|
||||
self.mailfrom = None
|
||||
self.rcpttos = []
|
||||
self.require_SMTPUTF8 = False
|
||||
self.num_bytes = 0
|
||||
self.set_terminator(b'\r\n')
|
||||
|
||||
def _set_rset_state(self):
|
||||
"""Reset all state variables except the greeting."""
|
||||
self._set_post_data_state()
|
||||
self.received_data = ''
|
||||
self.received_lines = []
|
||||
|
||||
|
||||
# properties for backwards-compatibility
|
||||
@property
|
||||
def __server(self):
|
||||
warn("Access to __server attribute on SMTPChannel is deprecated, "
|
||||
"use 'smtp_server' instead", DeprecationWarning, 2)
|
||||
return self.smtp_server
|
||||
@__server.setter
|
||||
def __server(self, value):
|
||||
warn("Setting __server attribute on SMTPChannel is deprecated, "
|
||||
"set 'smtp_server' instead", DeprecationWarning, 2)
|
||||
self.smtp_server = value
|
||||
|
||||
@property
|
||||
def __line(self):
|
||||
warn("Access to __line attribute on SMTPChannel is deprecated, "
|
||||
"use 'received_lines' instead", DeprecationWarning, 2)
|
||||
return self.received_lines
|
||||
@__line.setter
|
||||
def __line(self, value):
|
||||
warn("Setting __line attribute on SMTPChannel is deprecated, "
|
||||
"set 'received_lines' instead", DeprecationWarning, 2)
|
||||
self.received_lines = value
|
||||
|
||||
@property
|
||||
def __state(self):
|
||||
warn("Access to __state attribute on SMTPChannel is deprecated, "
|
||||
"use 'smtp_state' instead", DeprecationWarning, 2)
|
||||
return self.smtp_state
|
||||
@__state.setter
|
||||
def __state(self, value):
|
||||
warn("Setting __state attribute on SMTPChannel is deprecated, "
|
||||
"set 'smtp_state' instead", DeprecationWarning, 2)
|
||||
self.smtp_state = value
|
||||
|
||||
@property
|
||||
def __greeting(self):
|
||||
warn("Access to __greeting attribute on SMTPChannel is deprecated, "
|
||||
"use 'seen_greeting' instead", DeprecationWarning, 2)
|
||||
return self.seen_greeting
|
||||
@__greeting.setter
|
||||
def __greeting(self, value):
|
||||
warn("Setting __greeting attribute on SMTPChannel is deprecated, "
|
||||
"set 'seen_greeting' instead", DeprecationWarning, 2)
|
||||
self.seen_greeting = value
|
||||
|
||||
@property
|
||||
def __mailfrom(self):
|
||||
warn("Access to __mailfrom attribute on SMTPChannel is deprecated, "
|
||||
"use 'mailfrom' instead", DeprecationWarning, 2)
|
||||
return self.mailfrom
|
||||
@__mailfrom.setter
|
||||
def __mailfrom(self, value):
|
||||
warn("Setting __mailfrom attribute on SMTPChannel is deprecated, "
|
||||
"set 'mailfrom' instead", DeprecationWarning, 2)
|
||||
self.mailfrom = value
|
||||
|
||||
@property
|
||||
def __rcpttos(self):
|
||||
warn("Access to __rcpttos attribute on SMTPChannel is deprecated, "
|
||||
"use 'rcpttos' instead", DeprecationWarning, 2)
|
||||
return self.rcpttos
|
||||
@__rcpttos.setter
|
||||
def __rcpttos(self, value):
|
||||
warn("Setting __rcpttos attribute on SMTPChannel is deprecated, "
|
||||
"set 'rcpttos' instead", DeprecationWarning, 2)
|
||||
self.rcpttos = value
|
||||
|
||||
@property
|
||||
def __data(self):
|
||||
warn("Access to __data attribute on SMTPChannel is deprecated, "
|
||||
"use 'received_data' instead", DeprecationWarning, 2)
|
||||
return self.received_data
|
||||
@__data.setter
|
||||
def __data(self, value):
|
||||
warn("Setting __data attribute on SMTPChannel is deprecated, "
|
||||
"set 'received_data' instead", DeprecationWarning, 2)
|
||||
self.received_data = value
|
||||
|
||||
@property
|
||||
def __fqdn(self):
|
||||
warn("Access to __fqdn attribute on SMTPChannel is deprecated, "
|
||||
"use 'fqdn' instead", DeprecationWarning, 2)
|
||||
return self.fqdn
|
||||
@__fqdn.setter
|
||||
def __fqdn(self, value):
|
||||
warn("Setting __fqdn attribute on SMTPChannel is deprecated, "
|
||||
"set 'fqdn' instead", DeprecationWarning, 2)
|
||||
self.fqdn = value
|
||||
|
||||
@property
|
||||
def __peer(self):
|
||||
warn("Access to __peer attribute on SMTPChannel is deprecated, "
|
||||
"use 'peer' instead", DeprecationWarning, 2)
|
||||
return self.peer
|
||||
@__peer.setter
|
||||
def __peer(self, value):
|
||||
warn("Setting __peer attribute on SMTPChannel is deprecated, "
|
||||
"set 'peer' instead", DeprecationWarning, 2)
|
||||
self.peer = value
|
||||
|
||||
@property
|
||||
def __conn(self):
|
||||
warn("Access to __conn attribute on SMTPChannel is deprecated, "
|
||||
"use 'conn' instead", DeprecationWarning, 2)
|
||||
return self.conn
|
||||
@__conn.setter
|
||||
def __conn(self, value):
|
||||
warn("Setting __conn attribute on SMTPChannel is deprecated, "
|
||||
"set 'conn' instead", DeprecationWarning, 2)
|
||||
self.conn = value
|
||||
|
||||
@property
|
||||
def __addr(self):
|
||||
warn("Access to __addr attribute on SMTPChannel is deprecated, "
|
||||
"use 'addr' instead", DeprecationWarning, 2)
|
||||
return self.addr
|
||||
@__addr.setter
|
||||
def __addr(self, value):
|
||||
warn("Setting __addr attribute on SMTPChannel is deprecated, "
|
||||
"set 'addr' instead", DeprecationWarning, 2)
|
||||
self.addr = value
|
||||
|
||||
# Overrides base class for convenience.
|
||||
def push(self, msg):
|
||||
asynchat.async_chat.push(self, bytes(
|
||||
msg + '\r\n', 'utf-8' if self.require_SMTPUTF8 else 'ascii'))
|
||||
|
||||
# Implementation of base class abstract method
|
||||
def collect_incoming_data(self, data):
|
||||
limit = None
|
||||
if self.smtp_state == self.COMMAND:
|
||||
limit = self.max_command_size_limit
|
||||
elif self.smtp_state == self.DATA:
|
||||
limit = self.data_size_limit
|
||||
if limit and self.num_bytes > limit:
|
||||
return
|
||||
elif limit:
|
||||
self.num_bytes += len(data)
|
||||
if self._decode_data:
|
||||
self.received_lines.append(str(data, 'utf-8'))
|
||||
else:
|
||||
self.received_lines.append(data)
|
||||
|
||||
# Implementation of base class abstract method
|
||||
def found_terminator(self):
|
||||
line = self._emptystring.join(self.received_lines)
|
||||
print('Data:', repr(line), file=DEBUGSTREAM)
|
||||
self.received_lines = []
|
||||
if self.smtp_state == self.COMMAND:
|
||||
sz, self.num_bytes = self.num_bytes, 0
|
||||
if not line:
|
||||
self.push('500 Error: bad syntax')
|
||||
return
|
||||
if not self._decode_data:
|
||||
line = str(line, 'utf-8')
|
||||
i = line.find(' ')
|
||||
if i < 0:
|
||||
command = line.upper()
|
||||
arg = None
|
||||
else:
|
||||
command = line[:i].upper()
|
||||
arg = line[i+1:].strip()
|
||||
max_sz = (self.command_size_limits[command]
|
||||
if self.extended_smtp else self.command_size_limit)
|
||||
if sz > max_sz:
|
||||
self.push('500 Error: line too long')
|
||||
return
|
||||
method = getattr(self, 'smtp_' + command, None)
|
||||
if not method:
|
||||
self.push('500 Error: command "%s" not recognized' % command)
|
||||
return
|
||||
method(arg)
|
||||
return
|
||||
else:
|
||||
if self.smtp_state != self.DATA:
|
||||
self.push('451 Internal confusion')
|
||||
self.num_bytes = 0
|
||||
return
|
||||
if self.data_size_limit and self.num_bytes > self.data_size_limit:
|
||||
self.push('552 Error: Too much mail data')
|
||||
self.num_bytes = 0
|
||||
return
|
||||
# Remove extraneous carriage returns and de-transparency according
|
||||
# to RFC 5321, Section 4.5.2.
|
||||
data = []
|
||||
for text in line.split(self._linesep):
|
||||
if text and text[0] == self._dotsep:
|
||||
data.append(text[1:])
|
||||
else:
|
||||
data.append(text)
|
||||
self.received_data = self._newline.join(data)
|
||||
args = (self.peer, self.mailfrom, self.rcpttos, self.received_data)
|
||||
kwargs = {}
|
||||
if not self._decode_data:
|
||||
kwargs = {
|
||||
'mail_options': self.mail_options,
|
||||
'rcpt_options': self.rcpt_options,
|
||||
}
|
||||
status = self.smtp_server.process_message(*args, **kwargs)
|
||||
self._set_post_data_state()
|
||||
if not status:
|
||||
self.push('250 OK')
|
||||
else:
|
||||
self.push(status)
|
||||
|
||||
# SMTP and ESMTP commands
|
||||
def smtp_HELO(self, arg):
|
||||
if not arg:
|
||||
self.push('501 Syntax: HELO hostname')
|
||||
return
|
||||
# See issue #21783 for a discussion of this behavior.
|
||||
if self.seen_greeting:
|
||||
self.push('503 Duplicate HELO/EHLO')
|
||||
return
|
||||
self._set_rset_state()
|
||||
self.seen_greeting = arg
|
||||
self.push('250 %s' % self.fqdn)
|
||||
|
||||
def smtp_EHLO(self, arg):
|
||||
if not arg:
|
||||
self.push('501 Syntax: EHLO hostname')
|
||||
return
|
||||
# See issue #21783 for a discussion of this behavior.
|
||||
if self.seen_greeting:
|
||||
self.push('503 Duplicate HELO/EHLO')
|
||||
return
|
||||
self._set_rset_state()
|
||||
self.seen_greeting = arg
|
||||
self.extended_smtp = True
|
||||
self.push('250-%s' % self.fqdn)
|
||||
if self.data_size_limit:
|
||||
self.push('250-SIZE %s' % self.data_size_limit)
|
||||
self.command_size_limits['MAIL'] += 26
|
||||
if not self._decode_data:
|
||||
self.push('250-8BITMIME')
|
||||
if self.enable_SMTPUTF8:
|
||||
self.push('250-SMTPUTF8')
|
||||
self.command_size_limits['MAIL'] += 10
|
||||
self.push('250 HELP')
|
||||
|
||||
def smtp_NOOP(self, arg):
|
||||
if arg:
|
||||
self.push('501 Syntax: NOOP')
|
||||
else:
|
||||
self.push('250 OK')
|
||||
|
||||
def smtp_QUIT(self, arg):
|
||||
# arg is ignored
|
||||
self.push('221 Bye')
|
||||
self.close_when_done()
|
||||
|
||||
def _strip_command_keyword(self, keyword, arg):
|
||||
keylen = len(keyword)
|
||||
if arg[:keylen].upper() == keyword:
|
||||
return arg[keylen:].strip()
|
||||
return ''
|
||||
|
||||
def _getaddr(self, arg):
|
||||
if not arg:
|
||||
return '', ''
|
||||
if arg.lstrip().startswith('<'):
|
||||
address, rest = get_angle_addr(arg)
|
||||
else:
|
||||
address, rest = get_addr_spec(arg)
|
||||
if not address:
|
||||
return address, rest
|
||||
return address.addr_spec, rest
|
||||
|
||||
def _getparams(self, params):
|
||||
# Return params as dictionary. Return None if not all parameters
|
||||
# appear to be syntactically valid according to RFC 1869.
|
||||
result = {}
|
||||
for param in params:
|
||||
param, eq, value = param.partition('=')
|
||||
if not param.isalnum() or eq and not value:
|
||||
return None
|
||||
result[param] = value if eq else True
|
||||
return result
|
||||
|
||||
def smtp_HELP(self, arg):
|
||||
if arg:
|
||||
extended = ' [SP <mail-parameters>]'
|
||||
lc_arg = arg.upper()
|
||||
if lc_arg == 'EHLO':
|
||||
self.push('250 Syntax: EHLO hostname')
|
||||
elif lc_arg == 'HELO':
|
||||
self.push('250 Syntax: HELO hostname')
|
||||
elif lc_arg == 'MAIL':
|
||||
msg = '250 Syntax: MAIL FROM: <address>'
|
||||
if self.extended_smtp:
|
||||
msg += extended
|
||||
self.push(msg)
|
||||
elif lc_arg == 'RCPT':
|
||||
msg = '250 Syntax: RCPT TO: <address>'
|
||||
if self.extended_smtp:
|
||||
msg += extended
|
||||
self.push(msg)
|
||||
elif lc_arg == 'DATA':
|
||||
self.push('250 Syntax: DATA')
|
||||
elif lc_arg == 'RSET':
|
||||
self.push('250 Syntax: RSET')
|
||||
elif lc_arg == 'NOOP':
|
||||
self.push('250 Syntax: NOOP')
|
||||
elif lc_arg == 'QUIT':
|
||||
self.push('250 Syntax: QUIT')
|
||||
elif lc_arg == 'VRFY':
|
||||
self.push('250 Syntax: VRFY <address>')
|
||||
else:
|
||||
self.push('501 Supported commands: EHLO HELO MAIL RCPT '
|
||||
'DATA RSET NOOP QUIT VRFY')
|
||||
else:
|
||||
self.push('250 Supported commands: EHLO HELO MAIL RCPT DATA '
|
||||
'RSET NOOP QUIT VRFY')
|
||||
|
||||
def smtp_VRFY(self, arg):
|
||||
if arg:
|
||||
address, params = self._getaddr(arg)
|
||||
if address:
|
||||
self.push('252 Cannot VRFY user, but will accept message '
|
||||
'and attempt delivery')
|
||||
else:
|
||||
self.push('502 Could not VRFY %s' % arg)
|
||||
else:
|
||||
self.push('501 Syntax: VRFY <address>')
|
||||
|
||||
def smtp_MAIL(self, arg):
|
||||
if not self.seen_greeting:
|
||||
self.push('503 Error: send HELO first')
|
||||
return
|
||||
print('===> MAIL', arg, file=DEBUGSTREAM)
|
||||
syntaxerr = '501 Syntax: MAIL FROM: <address>'
|
||||
if self.extended_smtp:
|
||||
syntaxerr += ' [SP <mail-parameters>]'
|
||||
if arg is None:
|
||||
self.push(syntaxerr)
|
||||
return
|
||||
arg = self._strip_command_keyword('FROM:', arg)
|
||||
address, params = self._getaddr(arg)
|
||||
if not address:
|
||||
self.push(syntaxerr)
|
||||
return
|
||||
if not self.extended_smtp and params:
|
||||
self.push(syntaxerr)
|
||||
return
|
||||
if self.mailfrom:
|
||||
self.push('503 Error: nested MAIL command')
|
||||
return
|
||||
self.mail_options = params.upper().split()
|
||||
params = self._getparams(self.mail_options)
|
||||
if params is None:
|
||||
self.push(syntaxerr)
|
||||
return
|
||||
if not self._decode_data:
|
||||
body = params.pop('BODY', '7BIT')
|
||||
if body not in ['7BIT', '8BITMIME']:
|
||||
self.push('501 Error: BODY can only be one of 7BIT, 8BITMIME')
|
||||
return
|
||||
if self.enable_SMTPUTF8:
|
||||
smtputf8 = params.pop('SMTPUTF8', False)
|
||||
if smtputf8 is True:
|
||||
self.require_SMTPUTF8 = True
|
||||
elif smtputf8 is not False:
|
||||
self.push('501 Error: SMTPUTF8 takes no arguments')
|
||||
return
|
||||
size = params.pop('SIZE', None)
|
||||
if size:
|
||||
if not size.isdigit():
|
||||
self.push(syntaxerr)
|
||||
return
|
||||
elif self.data_size_limit and int(size) > self.data_size_limit:
|
||||
self.push('552 Error: message size exceeds fixed maximum message size')
|
||||
return
|
||||
if len(params.keys()) > 0:
|
||||
self.push('555 MAIL FROM parameters not recognized or not implemented')
|
||||
return
|
||||
self.mailfrom = address
|
||||
print('sender:', self.mailfrom, file=DEBUGSTREAM)
|
||||
self.push('250 OK')
|
||||
|
||||
def smtp_RCPT(self, arg):
|
||||
if not self.seen_greeting:
|
||||
self.push('503 Error: send HELO first')
|
||||
return
|
||||
print('===> RCPT', arg, file=DEBUGSTREAM)
|
||||
if not self.mailfrom:
|
||||
self.push('503 Error: need MAIL command')
|
||||
return
|
||||
syntaxerr = '501 Syntax: RCPT TO: <address>'
|
||||
if self.extended_smtp:
|
||||
syntaxerr += ' [SP <mail-parameters>]'
|
||||
if arg is None:
|
||||
self.push(syntaxerr)
|
||||
return
|
||||
arg = self._strip_command_keyword('TO:', arg)
|
||||
address, params = self._getaddr(arg)
|
||||
if not address:
|
||||
self.push(syntaxerr)
|
||||
return
|
||||
if not self.extended_smtp and params:
|
||||
self.push(syntaxerr)
|
||||
return
|
||||
self.rcpt_options = params.upper().split()
|
||||
params = self._getparams(self.rcpt_options)
|
||||
if params is None:
|
||||
self.push(syntaxerr)
|
||||
return
|
||||
# XXX currently there are no options we recognize.
|
||||
if len(params.keys()) > 0:
|
||||
self.push('555 RCPT TO parameters not recognized or not implemented')
|
||||
return
|
||||
self.rcpttos.append(address)
|
||||
print('recips:', self.rcpttos, file=DEBUGSTREAM)
|
||||
self.push('250 OK')
|
||||
|
||||
def smtp_RSET(self, arg):
|
||||
if arg:
|
||||
self.push('501 Syntax: RSET')
|
||||
return
|
||||
self._set_rset_state()
|
||||
self.push('250 OK')
|
||||
|
||||
def smtp_DATA(self, arg):
|
||||
if not self.seen_greeting:
|
||||
self.push('503 Error: send HELO first')
|
||||
return
|
||||
if not self.rcpttos:
|
||||
self.push('503 Error: need RCPT command')
|
||||
return
|
||||
if arg:
|
||||
self.push('501 Syntax: DATA')
|
||||
return
|
||||
self.smtp_state = self.DATA
|
||||
self.set_terminator(b'\r\n.\r\n')
|
||||
self.push('354 End data with <CR><LF>.<CR><LF>')
|
||||
|
||||
# Commands that have not been implemented
|
||||
def smtp_EXPN(self, arg):
|
||||
self.push('502 EXPN not implemented')
|
||||
|
||||
|
||||
class SMTPServer(asyncore.dispatcher):
|
||||
# SMTPChannel class to use for managing client connections
|
||||
channel_class = SMTPChannel
|
||||
|
||||
def __init__(self, localaddr, remoteaddr,
|
||||
data_size_limit=DATA_SIZE_DEFAULT, map=None,
|
||||
enable_SMTPUTF8=False, decode_data=False):
|
||||
self._localaddr = localaddr
|
||||
self._remoteaddr = remoteaddr
|
||||
self.data_size_limit = data_size_limit
|
||||
self.enable_SMTPUTF8 = enable_SMTPUTF8
|
||||
self._decode_data = decode_data
|
||||
if enable_SMTPUTF8 and decode_data:
|
||||
raise ValueError("decode_data and enable_SMTPUTF8 cannot"
|
||||
" be set to True at the same time")
|
||||
asyncore.dispatcher.__init__(self, map=map)
|
||||
try:
|
||||
gai_results = socket.getaddrinfo(*localaddr,
|
||||
type=socket.SOCK_STREAM)
|
||||
self.create_socket(gai_results[0][0], gai_results[0][1])
|
||||
# try to re-use a server port if possible
|
||||
self.set_reuse_addr()
|
||||
self.bind(localaddr)
|
||||
self.listen(5)
|
||||
except:
|
||||
self.close()
|
||||
raise
|
||||
else:
|
||||
print('%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (
|
||||
self.__class__.__name__, time.ctime(time.time()),
|
||||
localaddr, remoteaddr), file=DEBUGSTREAM)
|
||||
|
||||
def handle_accepted(self, conn, addr):
|
||||
print('Incoming connection from %s' % repr(addr), file=DEBUGSTREAM)
|
||||
channel = self.channel_class(self,
|
||||
conn,
|
||||
addr,
|
||||
self.data_size_limit,
|
||||
self._map,
|
||||
self.enable_SMTPUTF8,
|
||||
self._decode_data)
|
||||
|
||||
# API for "doing something useful with the message"
|
||||
def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
|
||||
"""Override this abstract method to handle messages from the client.
|
||||
|
||||
peer is a tuple containing (ipaddr, port) of the client that made the
|
||||
socket connection to our smtp port.
|
||||
|
||||
mailfrom is the raw address the client claims the message is coming
|
||||
from.
|
||||
|
||||
rcpttos is a list of raw addresses the client wishes to deliver the
|
||||
message to.
|
||||
|
||||
data is a string containing the entire full text of the message,
|
||||
headers (if supplied) and all. It has been `de-transparencied'
|
||||
according to RFC 5321, Section 4.5.2. In other words, a line
|
||||
containing a `.' followed by other text has had the leading dot
|
||||
removed.
|
||||
|
||||
kwargs is a dictionary containing additional information. It is
|
||||
empty if decode_data=True was given as init parameter, otherwise
|
||||
it will contain the following keys:
|
||||
'mail_options': list of parameters to the mail command. All
|
||||
elements are uppercase strings. Example:
|
||||
['BODY=8BITMIME', 'SMTPUTF8'].
|
||||
'rcpt_options': same, for the rcpt command.
|
||||
|
||||
This function should return None for a normal `250 Ok' response;
|
||||
otherwise, it should return the desired response string in RFC 821
|
||||
format.
|
||||
|
||||
"""
|
||||
raise NotImplementedError
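A minimal sketch of a concrete subclass; the class name and behaviour are illustrative:

class EnvelopeLogger(SMTPServer):
    """Accept every message and print its envelope."""
    def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
        print('from=%s to=%s peer=%s' % (mailfrom, rcpttos, peer))
        return None     # None selects the default '250 OK' reply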
|
||||
|
||||
|
||||
class DebuggingServer(SMTPServer):
|
||||
|
||||
def _print_message_content(self, peer, data):
|
||||
inheaders = 1
|
||||
lines = data.splitlines()
|
||||
for line in lines:
|
||||
# headers first
|
||||
if inheaders and not line:
|
||||
peerheader = 'X-Peer: ' + peer[0]
|
||||
if not isinstance(data, str):
|
||||
# decode_data=False; make header match other binary output
|
||||
peerheader = repr(peerheader.encode('utf-8'))
|
||||
print(peerheader)
|
||||
inheaders = 0
|
||||
if not isinstance(data, str):
|
||||
# Avoid spurious 'str on bytes instance' warning.
|
||||
line = repr(line)
|
||||
print(line)
|
||||
|
||||
def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
|
||||
print('---------- MESSAGE FOLLOWS ----------')
|
||||
if kwargs:
|
||||
if kwargs.get('mail_options'):
|
||||
print('mail options: %s' % kwargs['mail_options'])
|
||||
if kwargs.get('rcpt_options'):
|
||||
print('rcpt options: %s\n' % kwargs['rcpt_options'])
|
||||
self._print_message_content(peer, data)
|
||||
print('------------ END MESSAGE ------------')
|
||||
|
||||
|
||||
class PureProxy(SMTPServer):
|
||||
def __init__(self, *args, **kwargs):
|
||||
if 'enable_SMTPUTF8' in kwargs and kwargs['enable_SMTPUTF8']:
|
||||
raise ValueError("PureProxy does not support SMTPUTF8.")
|
||||
super(PureProxy, self).__init__(*args, **kwargs)
|
||||
|
||||
def process_message(self, peer, mailfrom, rcpttos, data):
|
||||
lines = data.split('\n')
|
||||
# Look for the last header
|
||||
i = 0
|
||||
for line in lines:
|
||||
if not line:
|
||||
break
|
||||
i += 1
|
||||
lines.insert(i, 'X-Peer: %s' % peer[0])
|
||||
data = NEWLINE.join(lines)
|
||||
refused = self._deliver(mailfrom, rcpttos, data)
|
||||
# TBD: what to do with refused addresses?
|
||||
print('we got some refusals:', refused, file=DEBUGSTREAM)
|
||||
|
||||
def _deliver(self, mailfrom, rcpttos, data):
|
||||
import smtplib
|
||||
refused = {}
|
||||
try:
|
||||
s = smtplib.SMTP()
|
||||
s.connect(self._remoteaddr[0], self._remoteaddr[1])
|
||||
try:
|
||||
refused = s.sendmail(mailfrom, rcpttos, data)
|
||||
finally:
|
||||
s.quit()
|
||||
except smtplib.SMTPRecipientsRefused as e:
|
||||
print('got SMTPRecipientsRefused', file=DEBUGSTREAM)
|
||||
refused = e.recipients
|
||||
except (OSError, smtplib.SMTPException) as e:
|
||||
print('got', e.__class__, file=DEBUGSTREAM)
|
||||
# All recipients were refused. If the exception had an associated
|
||||
# error code, use it. Otherwise, fake it with a non-triggering
|
||||
# exception code.
|
||||
errcode = getattr(e, 'smtp_code', -1)
|
||||
errmsg = getattr(e, 'smtp_error', 'ignore')
|
||||
for r in rcpttos:
|
||||
refused[r] = (errcode, errmsg)
|
||||
return refused
|
||||
|
||||
|
||||
class Options:
|
||||
setuid = True
|
||||
classname = 'PureProxy'
|
||||
size_limit = None
|
||||
enable_SMTPUTF8 = False
|
||||
|
||||
|
||||
def parseargs():
|
||||
global DEBUGSTREAM
|
||||
try:
|
||||
opts, args = getopt.getopt(
|
||||
sys.argv[1:], 'nVhc:s:du',
|
||||
['class=', 'nosetuid', 'version', 'help', 'size=', 'debug',
|
||||
'smtputf8'])
|
||||
except getopt.error as e:
|
||||
usage(1, e)
|
||||
|
||||
options = Options()
|
||||
for opt, arg in opts:
|
||||
if opt in ('-h', '--help'):
|
||||
usage(0)
|
||||
elif opt in ('-V', '--version'):
|
||||
print(__version__)
|
||||
sys.exit(0)
|
||||
elif opt in ('-n', '--nosetuid'):
|
||||
options.setuid = False
|
||||
elif opt in ('-c', '--class'):
|
||||
options.classname = arg
|
||||
elif opt in ('-d', '--debug'):
|
||||
DEBUGSTREAM = sys.stderr
|
||||
elif opt in ('-u', '--smtputf8'):
|
||||
options.enable_SMTPUTF8 = True
|
||||
elif opt in ('-s', '--size'):
|
||||
try:
|
||||
int_size = int(arg)
|
||||
options.size_limit = int_size
|
||||
except:
|
||||
print('Invalid size: ' + arg, file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
# parse the rest of the arguments
|
||||
if len(args) < 1:
|
||||
localspec = 'localhost:8025'
|
||||
remotespec = 'localhost:25'
|
||||
elif len(args) < 2:
|
||||
localspec = args[0]
|
||||
remotespec = 'localhost:25'
|
||||
elif len(args) < 3:
|
||||
localspec = args[0]
|
||||
remotespec = args[1]
|
||||
else:
|
||||
usage(1, 'Invalid arguments: %s' % COMMASPACE.join(args))
|
||||
|
||||
# split into host/port pairs
|
||||
i = localspec.find(':')
|
||||
if i < 0:
|
||||
usage(1, 'Bad local spec: %s' % localspec)
|
||||
options.localhost = localspec[:i]
|
||||
try:
|
||||
options.localport = int(localspec[i+1:])
|
||||
except ValueError:
|
||||
usage(1, 'Bad local port: %s' % localspec)
|
||||
i = remotespec.find(':')
|
||||
if i < 0:
|
||||
usage(1, 'Bad remote spec: %s' % remotespec)
|
||||
options.remotehost = remotespec[:i]
|
||||
try:
|
||||
options.remoteport = int(remotespec[i+1:])
|
||||
except ValueError:
|
||||
usage(1, 'Bad remote port: %s' % remotespec)
|
||||
return options
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
options = parseargs()
|
||||
# Become nobody
|
||||
classname = options.classname
|
||||
if "." in classname:
|
||||
lastdot = classname.rfind(".")
|
||||
mod = __import__(classname[:lastdot], globals(), locals(), [""])
|
||||
classname = classname[lastdot+1:]
|
||||
else:
|
||||
import __main__ as mod
|
||||
class_ = getattr(mod, classname)
|
||||
proxy = class_((options.localhost, options.localport),
|
||||
(options.remotehost, options.remoteport),
|
||||
options.size_limit, enable_SMTPUTF8=options.enable_SMTPUTF8)
|
||||
if options.setuid:
|
||||
try:
|
||||
import pwd
|
||||
except ImportError:
|
||||
print('Cannot import module "pwd"; try running with -n option.', file=sys.stderr)
|
||||
sys.exit(1)
|
||||
nobody = pwd.getpwnam('nobody')[2]
|
||||
try:
|
||||
os.setuid(nobody)
|
||||
except PermissionError:
|
||||
print('Cannot setuid "nobody"; try running with -n option.', file=sys.stderr)
|
||||
sys.exit(1)
|
||||
try:
|
||||
asyncore.loop()
|
||||
except KeyboardInterrupt:
|
||||
pass
|
339
Dependencies/Python/Lib/test/support/socket_helper.py
vendored
Normal file
@ -0,0 +1,339 @@
|
||||
import contextlib
|
||||
import errno
|
||||
import os.path
|
||||
import socket
|
||||
import sys
|
||||
import subprocess
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
from .. import support
|
||||
|
||||
HOST = "localhost"
|
||||
HOSTv4 = "127.0.0.1"
|
||||
HOSTv6 = "::1"
|
||||
|
||||
# WASI SDK 15.0 does not provide gethostname, stub raises OSError ENOTSUP.
|
||||
has_gethostname = not support.is_wasi
|
||||
|
||||
|
||||
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
    """Returns an unused port that should be suitable for binding. This is
    achieved by creating a temporary socket with the same family and type as
    the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
    the specified host address (defaults to 0.0.0.0) with the port set to 0,
    eliciting an unused ephemeral port from the OS. The temporary socket is
    then closed and deleted, and the ephemeral port is returned.

    Either this method or bind_port() should be used for any tests where a
    server socket needs to be bound to a particular port for the duration of
    the test. Which one to use depends on whether the calling code is creating
    a python socket, or if an unused port needs to be provided in a constructor
    or passed to an external program (i.e. the -accept argument to openssl's
    s_server mode). Always prefer bind_port() over find_unused_port() where
    possible. Hard coded ports should *NEVER* be used. As soon as a server
    socket is bound to a hard coded port, the ability to run multiple instances
    of the test simultaneously on the same host is compromised, which makes the
    test a ticking time bomb in a buildbot environment. On Unix buildbots, this
    may simply manifest as a failed test, which can be recovered from without
    intervention in most cases, but on Windows, the entire python process can
    completely and utterly wedge, requiring someone to log in to the buildbot
    and manually kill the affected process.

    (This is easy to reproduce on Windows, unfortunately, and can be traced to
    the SO_REUSEADDR socket option having different semantics on Windows versus
    Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
    listen and then accept connections on identical host/ports. An EADDRINUSE
    OSError will be raised at some point (depending on the platform and
    the order bind and listen were called on each socket).

    However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
    will ever be raised when attempting to bind two identical host/ports. When
    accept() is called on each socket, the second caller's process will steal
    the port from the first caller, leaving them both in an awkwardly wedged
    state where they'll no longer respond to any signals or graceful kills, and
    must be forcibly killed via OpenProcess()/TerminateProcess().

    The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
    instead of SO_REUSEADDR, which effectively affords the same semantics as
    SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
    Source world compared to Windows ones, this is a common mistake. A quick
    look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
    openssl.exe is called with the 's_server' option, for example. See
    http://bugs.python.org/issue2550 for more info. The following site also
    has a very thorough description about the implications of both REUSEADDR
    and EXCLUSIVEADDRUSE on Windows:
    https://learn.microsoft.com/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse

    XXX: although this approach is a vast improvement on previous attempts to
    elicit unused ports, it rests heavily on the assumption that the ephemeral
    port returned to us by the OS won't immediately be dished back out to some
    other process when we close and delete our temporary socket but before our
    calling code has a chance to bind the returned port. We can deal with this
    issue if/when we come across it.
    """

    with socket.socket(family, socktype) as tempsock:
        port = bind_port(tempsock)
    del tempsock
    return port

def bind_port(sock, host=HOST):
    """Bind the socket to a free port and return the port number. Relies on
    ephemeral ports in order to ensure we are using an unbound port. This is
    important as many tests may be running simultaneously, especially in a
    buildbot environment. This method raises an exception if the sock.family
    is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
    or SO_REUSEPORT set on it. Tests should *never* set these socket options
    for TCP/IP sockets. The only case for setting these options is testing
    multicasting via multiple UDP sockets.

    Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
    on Windows), it will be set on the socket. This will prevent anyone else
    from bind()'ing to our host/port for the duration of the test.
    """

    if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
        if hasattr(socket, 'SO_REUSEADDR'):
            if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
                raise support.TestFailed("tests should never set the "
                                         "SO_REUSEADDR socket option on "
                                         "TCP/IP sockets!")
        if hasattr(socket, 'SO_REUSEPORT'):
            try:
                if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
                    raise support.TestFailed("tests should never set the "
                                             "SO_REUSEPORT socket option on "
                                             "TCP/IP sockets!")
            except OSError:
                # Python's socket module was compiled using modern headers
                # thus defining SO_REUSEPORT but this process is running
                # under an older kernel that does not support SO_REUSEPORT.
                pass
        if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)

    sock.bind((host, 0))
    port = sock.getsockname()[1]
    return port
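
# Illustrative sketch, not part of the vendored module: typical use of the
# two helpers above. bind_port() is preferred when the test creates the
# socket itself; find_unused_port() only when the port number must be handed
# to an external program.
#
#     with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
#         port = bind_port(srv)      # bind to an OS-chosen ephemeral port
#         srv.listen()
#         ...                        # connect clients to ("localhost", port)
#     ext_port = find_unused_port()  # e.g. for openssl s_server -accept
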

def bind_unix_socket(sock, addr):
    """Bind a unix socket, raising SkipTest if PermissionError is raised."""
    assert sock.family == socket.AF_UNIX
    try:
        sock.bind(addr)
    except PermissionError:
        sock.close()
        raise unittest.SkipTest('cannot bind AF_UNIX sockets')

def _is_ipv6_enabled():
    """Check whether IPv6 is enabled on this host."""
    if socket.has_ipv6:
        sock = None
        try:
            sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            sock.bind((HOSTv6, 0))
            return True
        except OSError:
            pass
        finally:
            if sock:
                sock.close()
    return False

IPV6_ENABLED = _is_ipv6_enabled()


_bind_nix_socket_error = None
def skip_unless_bind_unix_socket(test):
    """Decorator for tests requiring a functional bind() for unix sockets."""
    if not hasattr(socket, 'AF_UNIX'):
        return unittest.skip('No UNIX Sockets')(test)
    global _bind_nix_socket_error
    if _bind_nix_socket_error is None:
        from .os_helper import TESTFN, unlink
        path = TESTFN + "can_bind_unix_socket"
        with socket.socket(socket.AF_UNIX) as sock:
            try:
                sock.bind(path)
                _bind_nix_socket_error = False
            except OSError as e:
                _bind_nix_socket_error = e
            finally:
                unlink(path)
    if _bind_nix_socket_error:
        msg = 'Requires a functional unix bind(): %s' % _bind_nix_socket_error
        return unittest.skip(msg)(test)
    else:
        return test
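
# Illustrative sketch, not part of the vendored module: applying the
# decorator above to a hypothetical test method.
#
#     @skip_unless_bind_unix_socket
#     def test_unix_server(self):
#         with socket.socket(socket.AF_UNIX) as sock:
#             bind_unix_socket(sock, create_unix_domain_name())
#             ...
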


def get_socket_conn_refused_errs():
    """
    Get the different socket error numbers ('errno') which can be received
    when a connection is refused.
    """
    errors = [errno.ECONNREFUSED]
    if hasattr(errno, 'ENETUNREACH'):
        # On Solaris, ENETUNREACH is returned sometimes instead of ECONNREFUSED
        errors.append(errno.ENETUNREACH)
    if hasattr(errno, 'EADDRNOTAVAIL'):
        # bpo-31910: socket.create_connection() fails randomly
        # with EADDRNOTAVAIL on Travis CI
        errors.append(errno.EADDRNOTAVAIL)
    if hasattr(errno, 'EHOSTUNREACH'):
        # bpo-37583: The destination host cannot be reached
        errors.append(errno.EHOSTUNREACH)
    if not IPV6_ENABLED:
        errors.append(errno.EAFNOSUPPORT)
    return errors
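
# Illustrative sketch, not part of the vendored module: because a refused
# connection can surface under several errno values, tests assert membership
# in this list rather than comparing against a single code (unused_port is a
# hypothetical free port number).
#
#     try:
#         socket.create_connection((HOST, unused_port), timeout=5)
#     except OSError as exc:
#         assert exc.errno in get_socket_conn_refused_errs()
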


_NOT_SET = object()

@contextlib.contextmanager
def transient_internet(resource_name, *, timeout=_NOT_SET, errnos=()):
    """Return a context manager that raises ResourceDenied when various issues
    with the internet connection manifest themselves as exceptions."""
    import urllib.error
    if timeout is _NOT_SET:
        timeout = support.INTERNET_TIMEOUT

    default_errnos = [
        ('ECONNREFUSED', 111),
        ('ECONNRESET', 104),
        ('EHOSTUNREACH', 113),
        ('ENETUNREACH', 101),
        ('ETIMEDOUT', 110),
        # socket.create_connection() fails randomly with
        # EADDRNOTAVAIL on Travis CI.
        ('EADDRNOTAVAIL', 99),
    ]
    default_gai_errnos = [
        ('EAI_AGAIN', -3),
        ('EAI_FAIL', -4),
        ('EAI_NONAME', -2),
        ('EAI_NODATA', -5),
        # Encountered when trying to resolve IPv6-only hostnames
        ('WSANO_DATA', 11004),
    ]

    denied = support.ResourceDenied("Resource %r is not available" % resource_name)
    captured_errnos = errnos
    gai_errnos = []
    if not captured_errnos:
        captured_errnos = [getattr(errno, name, num)
                           for (name, num) in default_errnos]
        gai_errnos = [getattr(socket, name, num)
                      for (name, num) in default_gai_errnos]

    def filter_error(err):
        n = getattr(err, 'errno', None)
        if (isinstance(err, TimeoutError) or
            (isinstance(err, socket.gaierror) and n in gai_errnos) or
            (isinstance(err, urllib.error.HTTPError) and
             500 <= err.code <= 599) or
            (isinstance(err, urllib.error.URLError) and
             (("ConnectionRefusedError" in err.reason) or
              ("TimeoutError" in err.reason) or
              ("EOFError" in err.reason))) or
            n in captured_errnos):
            if not support.verbose:
                sys.stderr.write(denied.args[0] + "\n")
            raise denied from err

    old_timeout = socket.getdefaulttimeout()
    try:
        if timeout is not None:
            socket.setdefaulttimeout(timeout)
        yield
    except OSError as err:
        # urllib can wrap original socket errors multiple times (!), we must
        # unwrap to get at the original error.
        while True:
            a = err.args
            if len(a) >= 1 and isinstance(a[0], OSError):
                err = a[0]
            # The error can also be wrapped as args[1]:
            #    except socket.error as msg:
            #        raise OSError('socket error', msg) from msg
            elif len(a) >= 2 and isinstance(a[1], OSError):
                err = a[1]
            else:
                break
        filter_error(err)
        raise
    # XXX should we catch generic exceptions and look for their
    # __cause__ or __context__?
    finally:
        socket.setdefaulttimeout(old_timeout)
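
# Illustrative sketch, not part of the vendored module: wrapping network
# access so transient connectivity failures are reported as ResourceDenied
# (a skip) instead of a test failure. The URL is hypothetical.
#
#     import urllib.request
#     with transient_internet('example.com'):
#         urllib.request.urlopen('http://example.com/', timeout=30)
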


def create_unix_domain_name():
    """
    Create a UNIX domain name: socket.bind() argument of a AF_UNIX socket.

    Return a path relative to the current directory to get a short path
    (around 27 ASCII characters).
    """
    return tempfile.mktemp(prefix="test_python_", suffix='.sock',
                           dir=os.path.curdir)


# consider that sysctl values should not change while tests are running
_sysctl_cache = {}

def _get_sysctl(name):
    """Get a sysctl value as an integer."""
    try:
        return _sysctl_cache[name]
    except KeyError:
        pass

    # At least Linux and FreeBSD support the "-n" option
    cmd = ['sysctl', '-n', name]
    proc = subprocess.run(cmd,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT,
                          text=True)
    if proc.returncode:
        support.print_warning(f'{" ".join(cmd)!r} command failed with '
                              f'exit code {proc.returncode}')
        # cache the error to only log the warning once
        _sysctl_cache[name] = None
        return None
    output = proc.stdout

    # Parse '0\n' to get '0'
    try:
        value = int(output.strip())
    except Exception as exc:
        support.print_warning(f'Failed to parse {" ".join(cmd)!r} '
                              f'command output {output!r}: {exc!r}')
        # cache the error to only log the warning once
        _sysctl_cache[name] = None
        return None

    _sysctl_cache[name] = value
    return value


def tcp_blackhole():
    if not sys.platform.startswith('freebsd'):
        return False

    # gh-109015: test if FreeBSD TCP blackhole is enabled
    value = _get_sysctl('net.inet.tcp.blackhole')
    if value is None:
        # don't skip if we fail to get the sysctl value
        return False
    return (value != 0)


def skip_if_tcp_blackhole(test):
    """Decorator skipping test if TCP blackhole is enabled."""
    skip_if = unittest.skipIf(
        tcp_blackhole(),
        "TCP blackhole is enabled (sysctl net.inet.tcp.blackhole)"
    )
    return skip_if(test)
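
A minimal sketch of how a test might consume the helpers in this file; illustrative only, with a hypothetical test class, and assuming the bundled interpreter exposes the module as test.support.socket_helper the way stock CPython does:

import socket
import unittest

from test.support import socket_helper


@socket_helper.skip_if_tcp_blackhole
class RefusedConnectionTests(unittest.TestCase):
    def test_connect_refused(self):
        # Reserve an ephemeral port, then close the socket so nothing listens.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            port = socket_helper.bind_port(s)
        with self.assertRaises(OSError) as cm:
            socket.create_connection((socket_helper.HOSTv4, port), timeout=5)
        self.assertIn(cm.exception.errno,
                      socket_helper.get_socket_conn_refused_errs())
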
122
Dependencies/Python/Lib/test/support/testcase.py
vendored
Normal file
@ -0,0 +1,122 @@
import types
from math import copysign, isnan


class ExtraAssertions:

    def assertIsSubclass(self, cls, superclass, msg=None):
        if issubclass(cls, superclass):
            return
        standardMsg = f'{cls!r} is not a subclass of {superclass!r}'
        self.fail(self._formatMessage(msg, standardMsg))

    def assertNotIsSubclass(self, cls, superclass, msg=None):
        if not issubclass(cls, superclass):
            return
        standardMsg = f'{cls!r} is a subclass of {superclass!r}'
        self.fail(self._formatMessage(msg, standardMsg))

    def assertHasAttr(self, obj, name, msg=None):
        if not hasattr(obj, name):
            if isinstance(obj, types.ModuleType):
                standardMsg = f'module {obj.__name__!r} has no attribute {name!r}'
            elif isinstance(obj, type):
                standardMsg = f'type object {obj.__name__!r} has no attribute {name!r}'
            else:
                standardMsg = f'{type(obj).__name__!r} object has no attribute {name!r}'
            self.fail(self._formatMessage(msg, standardMsg))

    def assertNotHasAttr(self, obj, name, msg=None):
        if hasattr(obj, name):
            if isinstance(obj, types.ModuleType):
                standardMsg = f'module {obj.__name__!r} has unexpected attribute {name!r}'
            elif isinstance(obj, type):
                standardMsg = f'type object {obj.__name__!r} has unexpected attribute {name!r}'
            else:
                standardMsg = f'{type(obj).__name__!r} object has unexpected attribute {name!r}'
            self.fail(self._formatMessage(msg, standardMsg))

    def assertStartsWith(self, s, prefix, msg=None):
        if s.startswith(prefix):
            return
        standardMsg = f"{s!r} doesn't start with {prefix!r}"
        self.fail(self._formatMessage(msg, standardMsg))

    def assertNotStartsWith(self, s, prefix, msg=None):
        if not s.startswith(prefix):
            return
        self.fail(self._formatMessage(msg, f"{s!r} starts with {prefix!r}"))

    def assertEndsWith(self, s, suffix, msg=None):
        if s.endswith(suffix):
            return
        standardMsg = f"{s!r} doesn't end with {suffix!r}"
        self.fail(self._formatMessage(msg, standardMsg))

    def assertNotEndsWith(self, s, suffix, msg=None):
        if not s.endswith(suffix):
            return
        self.fail(self._formatMessage(msg, f"{s!r} ends with {suffix!r}"))
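
# Illustrative sketch, not part of the vendored module: the mixin is combined
# with unittest.TestCase in a hypothetical test class.
#
#     class ReprTests(unittest.TestCase, ExtraAssertions):
#         def test_list(self):
#             self.assertStartsWith(repr([]), '[')
#             self.assertHasAttr([], 'append')
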
class ExceptionIsLikeMixin:
    def assertExceptionIsLike(self, exc, template):
        """
        Passes when the provided `exc` matches the structure of `template`.
        Individual exceptions don't have to be the same objects or even pass
        an equality test: they only need to be the same type and contain equal
        `exc_obj.args`.
        """
        if exc is None and template is None:
            return

        if template is None:
            self.fail(f"unexpected exception: {exc}")

        if exc is None:
            self.fail(f"expected an exception like {template!r}, got None")

        if not isinstance(exc, ExceptionGroup):
            self.assertEqual(exc.__class__, template.__class__)
            self.assertEqual(exc.args[0], template.args[0])
        else:
            self.assertEqual(exc.message, template.message)
            self.assertEqual(len(exc.exceptions), len(template.exceptions))
            for e, t in zip(exc.exceptions, template.exceptions):
                self.assertExceptionIsLike(e, t)
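
# Illustrative sketch, not part of the vendored module: two ExceptionGroups
# compare as "like" when their messages, types and args line up, even though
# the exception objects themselves are distinct.
#
#     eg = ExceptionGroup('msg', [ValueError(1), TypeError('t')])
#     self.assertExceptionIsLike(
#         eg, ExceptionGroup('msg', [ValueError(1), TypeError('t')]))
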
class FloatsAreIdenticalMixin:
    def assertFloatsAreIdentical(self, x, y):
        """Fail unless floats x and y are identical, in the sense that:
        (1) both x and y are nans, or
        (2) both x and y are infinities, with the same sign, or
        (3) both x and y are zeros, with the same sign, or
        (4) x and y are both finite and nonzero, and x == y

        """
        msg = 'floats {!r} and {!r} are not identical'

        if isnan(x) or isnan(y):
            if isnan(x) and isnan(y):
                return
        elif x == y:
            if x != 0.0:
                return
            # both zero; check that signs match
            elif copysign(1.0, x) == copysign(1.0, y):
                return
            else:
                msg += ': zeros have different signs'
        self.fail(msg.format(x, y))


class ComplexesAreIdenticalMixin(FloatsAreIdenticalMixin):
    def assertComplexesAreIdentical(self, x, y):
        """Fail unless complex numbers x and y have equal values and signs.

        In particular, if x and y both have real (or imaginary) part
        zero, but the zeros have different signs, this test will fail.

        """
        self.assertFloatsAreIdentical(x.real, y.real)
        self.assertFloatsAreIdentical(x.imag, y.imag)
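
A short sketch of the float/complex mixins in use; illustrative only, assuming the bundled interpreter exposes this module as test.support.testcase:

import unittest
from math import copysign

from test.support.testcase import ComplexesAreIdenticalMixin


class ZeroSignTests(unittest.TestCase, ComplexesAreIdenticalMixin):
    def test_negative_zero(self):
        z = complex(copysign(0.0, -1.0), 0.0)
        self.assertComplexesAreIdentical(z, complex(-0.0, 0.0))

    def test_zero_signs_differ(self):
        # 0.0 == -0.0, but the mixin distinguishes the signs.
        with self.assertRaises(AssertionError):
            self.assertFloatsAreIdentical(0.0, -0.0)
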
250
Dependencies/Python/Lib/test/support/threading_helper.py
vendored
Normal file
@ -0,0 +1,250 @@
import _thread
import contextlib
import functools
import sys
import threading
import time
import unittest

from test import support


#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R

# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
# unregistered from the threading module.
# thread._count(), on the other hand, only gets decremented *after* the
# __bootstrap() method has returned, which gives us reliable reference counts
# at the end of a test run.


def threading_setup():
    return _thread._count(), len(threading._dangling)


def threading_cleanup(*original_values):
    orig_count, orig_ndangling = original_values

    timeout = 1.0
    for _ in support.sleeping_retry(timeout, error=False):
        # Copy the thread list to get a consistent output. threading._dangling
        # is a WeakSet, its value changes when it's read.
        dangling_threads = list(threading._dangling)
        count = _thread._count()

        if count <= orig_count:
            return

    # Timeout!
    support.environment_altered = True
    support.print_warning(
        f"threading_cleanup() failed to clean up threads "
        f"in {timeout:.1f} seconds\n"
        f"  before: thread count={orig_count}, dangling={orig_ndangling}\n"
        f"  after: thread count={count}, dangling={len(dangling_threads)}")
    for thread in dangling_threads:
        support.print_warning(f"Dangling thread: {thread!r}")

    # The warning happens when a test spawns threads and some of these threads
    # are still running after the test completes. To fix this warning, join
    # threads explicitly to wait until they complete.
    #
    # To make the warning more likely, reduce the timeout.


def reap_threads(func):
    """Use this function when threads are being used. This will
    ensure that the threads are cleaned up even when the test fails.
    """
    @functools.wraps(func)
    def decorator(*args):
        key = threading_setup()
        try:
            return func(*args)
        finally:
            threading_cleanup(*key)
    return decorator
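
# Illustrative sketch, not part of the vendored module: decorating a test so
# leaked threads are reported even when the body raises. The worker is
# hypothetical.
#
#     @reap_threads
#     def test_spawns_worker(self):
#         t = threading.Thread(target=worker)
#         t.start()
#         t.join()
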


@contextlib.contextmanager
def wait_threads_exit(timeout=None):
    """
    bpo-31234: Context manager to wait until all threads created in the with
    statement exit.

    Use _thread.count() to check if threads exited. Indirectly, wait until
    threads exit the internal t_bootstrap() C function of the _thread module.

    threading_setup() and threading_cleanup() are designed to emit a warning
    if a test leaves running threads in the background. This context manager
    is designed to cleanup threads started by the _thread.start_new_thread()
    which doesn't allow to wait for thread exit, whereas thread.Thread has a
    join() method.
    """
    if timeout is None:
        timeout = support.SHORT_TIMEOUT
    old_count = _thread._count()
    try:
        yield
    finally:
        start_time = time.monotonic()
        for _ in support.sleeping_retry(timeout, error=False):
            support.gc_collect()
            count = _thread._count()
            if count <= old_count:
                break
        else:
            dt = time.monotonic() - start_time
            msg = (f"wait_threads() failed to cleanup {count - old_count} "
                   f"threads after {dt:.1f} seconds "
                   f"(count: {count}, old count: {old_count})")
            raise AssertionError(msg)
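
# Illustrative sketch, not part of the vendored module:
# _thread.start_new_thread() offers no join(), so a context manager like the
# one above is how a test can wait for such a thread.
#
#     with wait_threads_exit():
#         _thread.start_new_thread(lambda: None, ())
#     # here the thread has exited, or AssertionError was raised
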


def join_thread(thread, timeout=None):
    """Join a thread. Raise an AssertionError if the thread is still alive
    after timeout seconds.
    """
    if timeout is None:
        timeout = support.SHORT_TIMEOUT
    thread.join(timeout)
    if thread.is_alive():
        msg = f"failed to join the thread in {timeout:.1f} seconds"
        raise AssertionError(msg)


@contextlib.contextmanager
def start_threads(threads, unlock=None):
    try:
        import faulthandler
    except ImportError:
        # It isn't supported on subinterpreters yet.
        faulthandler = None
    threads = list(threads)
    started = []
    try:
        try:
            for t in threads:
                t.start()
                started.append(t)
        except:
            if support.verbose:
                print("Can't start %d threads, only %d threads started" %
                      (len(threads), len(started)))
            raise
        yield
    finally:
        try:
            if unlock:
                unlock()
            endtime = time.monotonic()
            for timeout in range(1, 16):
                endtime += 60
                for t in started:
                    t.join(max(endtime - time.monotonic(), 0.01))
                started = [t for t in started if t.is_alive()]
                if not started:
                    break
                if support.verbose:
                    print('Unable to join %d threads during a period of '
                          '%d minutes' % (len(started), timeout))
        finally:
            started = [t for t in started if t.is_alive()]
            if started:
                if faulthandler is not None:
                    faulthandler.dump_traceback(sys.stdout)
                raise AssertionError('Unable to join %d threads' % len(started))


class catch_threading_exception:
    """
    Context manager catching threading.Thread exception using
    threading.excepthook.

    Attributes set when an exception is caught:

    * exc_type
    * exc_value
    * exc_traceback
    * thread

    See threading.excepthook() documentation for these attributes.

    These attributes are deleted at the context manager exit.

    Usage:

        with threading_helper.catch_threading_exception() as cm:
            # code spawning a thread which raises an exception
            ...

            # check the thread exception, use cm attributes:
            # exc_type, exc_value, exc_traceback, thread
            ...

        # exc_type, exc_value, exc_traceback, thread attributes of cm no longer
        # exists at this point
        # (to avoid reference cycles)
    """

    def __init__(self):
        self.exc_type = None
        self.exc_value = None
        self.exc_traceback = None
        self.thread = None
        self._old_hook = None

    def _hook(self, args):
        self.exc_type = args.exc_type
        self.exc_value = args.exc_value
        self.exc_traceback = args.exc_traceback
        self.thread = args.thread

    def __enter__(self):
        self._old_hook = threading.excepthook
        threading.excepthook = self._hook
        return self

    def __exit__(self, *exc_info):
        threading.excepthook = self._old_hook
        del self.exc_type
        del self.exc_value
        del self.exc_traceback
        del self.thread
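
# Illustrative sketch, not part of the vendored module: asserting on an
# exception raised inside a thread, which would otherwise only be printed
# by threading.excepthook.
#
#     with catch_threading_exception() as cm:
#         t = threading.Thread(target=lambda: 1 / 0)
#         t.start()
#         t.join()
#         assert cm.exc_type is ZeroDivisionError
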


def _can_start_thread() -> bool:
    """Detect whether Python can start new threads.

    Some WebAssembly platforms do not provide a working pthread
    implementation. Thread support is stubbed and any attempt
    to create a new thread fails.

    - wasm32-wasi does not have threading.
    - wasm32-emscripten can be compiled with or without pthread
      support (-s USE_PTHREADS / __EMSCRIPTEN_PTHREADS__).
    """
    if sys.platform == "emscripten":
        return sys._emscripten_info.pthreads
    elif sys.platform == "wasi":
        return False
    else:
        # assume all other platforms have working thread support.
        return True

can_start_thread = _can_start_thread()

def requires_working_threading(*, module=False):
    """Skip tests or modules that require working threading.

    Can be used as a function/class decorator or to skip an entire module.
    """
    msg = "requires threading support"
    if module:
        if not can_start_thread:
            raise unittest.SkipTest(msg)
    else:
        return unittest.skipUnless(can_start_thread, msg)
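
A minimal sketch combining two of the helpers above; illustrative only, assuming the module is importable as test.support.threading_helper as in stock CPython:

import threading
import unittest

from test.support import threading_helper


@threading_helper.requires_working_threading()
class StartThreadsTests(unittest.TestCase):
    def test_all_workers_ran(self):
        hits = []
        threads = [threading.Thread(target=hits.append, args=(i,))
                   for i in range(4)]
        # start_threads() joins every thread on exit from the with block.
        with threading_helper.start_threads(threads):
            pass
        self.assertEqual(sorted(hits), [0, 1, 2, 3])
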
70
Dependencies/Python/Lib/test/support/venv.py
vendored
Normal file
@ -0,0 +1,70 @@
import contextlib
import logging
import os
import subprocess
import shlex
import sys
import sysconfig
import tempfile
import venv


class VirtualEnvironment:
    def __init__(self, prefix, **venv_create_args):
        self._logger = logging.getLogger(self.__class__.__name__)
        venv.create(prefix, **venv_create_args)
        self._prefix = prefix
        self._paths = sysconfig.get_paths(
            scheme='venv',
            vars={'base': self.prefix},
            expand=True,
        )

    @classmethod
    @contextlib.contextmanager
    def from_tmpdir(cls, *, prefix=None, dir=None, **venv_create_args):
        delete = not bool(os.environ.get('PYTHON_TESTS_KEEP_VENV'))
        with tempfile.TemporaryDirectory(prefix=prefix, dir=dir, delete=delete) as tmpdir:
            yield cls(tmpdir, **venv_create_args)

    @property
    def prefix(self):
        return self._prefix

    @property
    def paths(self):
        return self._paths

    @property
    def interpreter(self):
        return os.path.join(self.paths['scripts'], os.path.basename(sys.executable))

    def _format_output(self, name, data, indent='\t'):
        if not data:
            return indent + f'{name}: (none)'
        if len(data.splitlines()) == 1:
            return indent + f'{name}: {data}'
        else:
            prefixed_lines = '\n'.join(indent + '> ' + line for line in data.splitlines())
            return indent + f'{name}:\n' + prefixed_lines

    def run(self, *args, **subprocess_args):
        if subprocess_args.get('shell'):
            raise ValueError('Running the subprocess in shell mode is not supported.')
        default_args = {
            'capture_output': True,
            'check': True,
        }
        try:
            result = subprocess.run([self.interpreter, *args],
                                    **default_args | subprocess_args)
        except subprocess.CalledProcessError as e:
            if e.returncode != 0:
                self._logger.error(
                    f'Interpreter returned non-zero exit status {e.returncode}.\n'
                    + self._format_output('COMMAND', shlex.join(e.cmd)) + '\n'
                    + self._format_output('STDOUT', e.stdout.decode()) + '\n'
                    + self._format_output('STDERR', e.stderr.decode()) + '\n'
                )
            raise
        else:
            return result
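
A short sketch of driving a throwaway virtual environment with this class; illustrative only (the prefix string is arbitrary), assuming the module is importable as test.support.venv:

from test.support.venv import VirtualEnvironment

# Creates the venv in a temporary directory and removes it on exit,
# unless PYTHON_TESTS_KEEP_VENV is set.
with VirtualEnvironment.from_tmpdir(prefix='example-venv-') as env:
    result = env.run('-c', 'import sys; print(sys.prefix)')
    print(result.stdout.decode().strip())  # path of the temporary venv
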
207
Dependencies/Python/Lib/test/support/warnings_helper.py
vendored
Normal file
@ -0,0 +1,207 @@
import contextlib
import functools
import importlib
import re
import sys
import warnings


def import_deprecated(name):
    """Import *name* while suppressing DeprecationWarning."""
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', category=DeprecationWarning)
        return importlib.import_module(name)


def check_syntax_warning(testcase, statement, errtext='',
                         *, lineno=1, offset=None):
    # Test also that a warning is emitted only once.
    from test.support import check_syntax_error
    with warnings.catch_warnings(record=True) as warns:
        warnings.simplefilter('always', SyntaxWarning)
        compile(statement, '<testcase>', 'exec')
    testcase.assertEqual(len(warns), 1, warns)

    warn, = warns
    testcase.assertTrue(issubclass(warn.category, SyntaxWarning),
                        warn.category)
    if errtext:
        testcase.assertRegex(str(warn.message), errtext)
    testcase.assertEqual(warn.filename, '<testcase>')
    testcase.assertIsNotNone(warn.lineno)
    if lineno is not None:
        testcase.assertEqual(warn.lineno, lineno)

    # SyntaxWarning should be converted to SyntaxError when raised,
    # since the latter contains more information and provides better
    # error report.
    with warnings.catch_warnings(record=True) as warns:
        warnings.simplefilter('error', SyntaxWarning)
        check_syntax_error(testcase, statement, errtext,
                           lineno=lineno, offset=offset)
    # No warnings are leaked when a SyntaxError is raised.
    testcase.assertEqual(warns, [])


def ignore_warnings(*, category):
    """Decorator to suppress warnings.

    Use of context managers to hide warnings makes diffs
    more noisy and tools like 'git blame' less useful.
    """
    def decorator(test):
        @functools.wraps(test)
        def wrapper(self, *args, **kwargs):
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', category=category)
                return test(self, *args, **kwargs)
        return wrapper
    return decorator
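
# Illustrative sketch, not part of the vendored module: silencing one warning
# category for a single test without a context manager in the body. The
# decorated test and legacy_function are hypothetical.
#
#     @ignore_warnings(category=DeprecationWarning)
#     def test_legacy_api(self):
#         legacy_function()
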


class WarningsRecorder(object):
    """Convenience wrapper for the warnings list returned on
    entry to the warnings.catch_warnings() context manager.
    """
    def __init__(self, warnings_list):
        self._warnings = warnings_list
        self._last = 0

    def __getattr__(self, attr):
        if len(self._warnings) > self._last:
            return getattr(self._warnings[-1], attr)
        elif attr in warnings.WarningMessage._WARNING_DETAILS:
            return None
        raise AttributeError("%r has no attribute %r" % (self, attr))

    @property
    def warnings(self):
        return self._warnings[self._last:]

    def reset(self):
        self._last = len(self._warnings)


def check_warnings(*filters, **kwargs):
    """Context manager to silence warnings.

    Accept 2-tuples as positional arguments:
        ("message regexp", WarningCategory)

    Optional argument:
     - if 'quiet' is True, it does not fail if a filter catches nothing
        (default True without argument,
         default False if some filters are defined)

    Without argument, it defaults to:
        check_warnings(("", Warning), quiet=True)
    """
    quiet = kwargs.get('quiet')
    if not filters:
        filters = (("", Warning),)
        # Preserve backward compatibility
        if quiet is None:
            quiet = True
    return _filterwarnings(filters, quiet)
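
# Illustrative sketch, not part of the vendored module: once a filter is
# given, quiet defaults to False, so the block fails if the expected warning
# never fires.
#
#     with check_warnings(('.*deprecated.*', DeprecationWarning)):
#         warnings.warn('this API is deprecated', DeprecationWarning)
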


@contextlib.contextmanager
def check_no_warnings(testcase, message='', category=Warning, force_gc=False):
    """Context manager to check that no warnings are emitted.

    This context manager enables a given warning within its scope
    and checks that no warnings are emitted even with that warning
    enabled.

    If force_gc is True, a garbage collection is attempted before checking
    for warnings. This may help to catch warnings emitted when objects
    are deleted, such as ResourceWarning.

    Other keyword arguments are passed to warnings.filterwarnings().
    """
    from test.support import gc_collect
    with warnings.catch_warnings(record=True) as warns:
        warnings.filterwarnings('always',
                                message=message,
                                category=category)
        yield
        if force_gc:
            gc_collect()
    testcase.assertEqual(warns, [])


@contextlib.contextmanager
def check_no_resource_warning(testcase):
    """Context manager to check that no ResourceWarning is emitted.

    Usage:

        with check_no_resource_warning(self):
            f = open(...)
            ...
            del f

    You must remove the object which may emit ResourceWarning before
    the end of the context manager.
    """
    with check_no_warnings(testcase, category=ResourceWarning, force_gc=True):
        yield
@contextlib.contextmanager
def _filterwarnings(filters, quiet=False):
    """Catch the warnings, then check if all the expected
    warnings have been raised and re-raise unexpected warnings.
    If 'quiet' is True, only re-raise the unexpected warnings.
    """
    # Clear the warning registry of the calling module
    # in order to re-raise the warnings.
    frame = sys._getframe(2)
    registry = frame.f_globals.get('__warningregistry__')
    if registry:
        registry.clear()
    with warnings.catch_warnings(record=True) as w:
        # Set filter "always" to record all warnings. Because
        # test_warnings swap the module, we need to look up in
        # the sys.modules dictionary.
        sys.modules['warnings'].simplefilter("always")
        yield WarningsRecorder(w)
    # Filter the recorded warnings
    reraise = list(w)
    missing = []
    for msg, cat in filters:
        seen = False
        for w in reraise[:]:
            warning = w.message
            # Filter out the matching messages
            if (re.match(msg, str(warning), re.I) and
                issubclass(warning.__class__, cat)):
                seen = True
                reraise.remove(w)
        if not seen and not quiet:
            # This filter caught nothing
            missing.append((msg, cat.__name__))
    if reraise:
        raise AssertionError("unhandled warning %s" % reraise[0])
    if missing:
        raise AssertionError("filter (%r, %s) did not catch any warning" %
                             missing[0])


@contextlib.contextmanager
def save_restore_warnings_filters():
    old_filters = warnings.filters[:]
    try:
        yield
    finally:
        warnings.filters[:] = old_filters


def _warn_about_deprecation():
    warnings.warn(
        "This is used in test_support test to ensure"
        " support.ignore_deprecations_from() works as expected."
        " You should not be seeing this.",
        DeprecationWarning,
        stacklevel=0,
    )
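
A final sketch showing save_restore_warnings_filters(), which scopes mutations of the process-wide warnings filter list; illustrative only, assuming the module is importable as test.support.warnings_helper:

import warnings

from test.support.warnings_helper import save_restore_warnings_filters

with save_restore_warnings_filters():
    # This change is visible only inside the block.
    warnings.simplefilter('error', DeprecationWarning)
# On exit the filter list is restored to exactly its previous contents.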