Removed the Requirement to Install Python and NodeJS (Now Bundled with Borealis)

2025-04-24 00:42:19 -06:00
parent 785265d3e7
commit 9c68cdea84
7786 changed files with 2386458 additions and 217 deletions
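The commit vendors the Python and Node.js runtimes into the Borealis tree so users no longer have to install them system-wide. As a rough illustration only (not the launcher code from this commit; the directory names below are hypothetical), a startup helper can prefer the bundled interpreter and fall back to whatever is on PATH:

# Minimal sketch, assuming bundled runtimes live under a "Dependencies" folder
# next to this script. Paths and names are illustrative, not from this commit.
import os
import shutil
import sys

ROOT = os.path.dirname(os.path.abspath(__file__))

def find_runtime(bundled_relpath, fallback_name):
    """Return the bundled executable if present, otherwise the system one (or None)."""
    bundled = os.path.join(ROOT, bundled_relpath)
    if os.path.isfile(bundled):
        return bundled
    return shutil.which(fallback_name)

python_exe = find_runtime(os.path.join("Dependencies", "Python", "python.exe"), "python")
node_exe = find_runtime(os.path.join("Dependencies", "NodeJS", "node.exe"), "node")
print("python:", python_exe, file=sys.stderr)
print("node:", node_exe, file=sys.stderr)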

View File

@@ -0,0 +1,9 @@
import os
from test.support import load_package_tests, Py_GIL_DISABLED
import unittest
if Py_GIL_DISABLED:
raise unittest.SkipTest("GIL disabled")
def load_tests(*args):
return load_package_tests(os.path.dirname(__file__), *args)
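For context, this __init__.py makes the directory a discoverable test package: the import-time SkipTest means the whole package is skipped on free-threaded (GIL-disabled) builds, and load_tests delegates discovery to the stdlib helper. A minimal sketch of what such a hook does (not the actual test.support.load_package_tests implementation):

# Sketch of a package-level load_tests helper: discover every test module
# inside the package directory and add it to the standard suite.
import os
import unittest

def load_package_tests(pkg_dir, loader, standard_tests, pattern):
    # Assumption: the package sits directly under the stdlib test tree,
    # so two levels up is a workable top_level_dir for discovery.
    top_dir = os.path.dirname(os.path.dirname(pkg_dir))
    package_tests = loader.discover(
        start_dir=pkg_dir,
        top_level_dir=top_dir,
        pattern=pattern or 'test*.py',
    )
    standard_tests.addTests(package_tests)
    return standard_tests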

View File

@@ -0,0 +1,4 @@
from . import load_tests
import unittest
unittest.main()
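With both files in place the directory runs as a package, e.g. python -m test.test_interpreters (assuming the package lands at test/test_interpreters as in upstream CPython). This is also why the larger test files below end with the note that the test "needs to be a package, so we can do relative imports."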

File diff suppressed because it is too large

View File

@@ -0,0 +1,600 @@
import importlib
import pickle
import threading
from textwrap import dedent
import unittest
import time
from test.support import import_helper
# Raise SkipTest if subinterpreters not supported.
_channels = import_helper.import_module('_interpchannels')
from test.support import interpreters
from test.support.interpreters import channels
from .utils import _run_output, TestBase
class LowLevelTests(TestBase):
# The behaviors in the low-level module are important in as much
# as they are exercised by the high-level module. Therefore the
# most important testing happens in the high-level tests.
# These low-level tests cover corner cases that are not
# encountered by the high-level module, thus they
# mostly shouldn't matter as much.
# Additional tests are found in Lib/test/test__interpchannels.py.
# XXX Those should be either moved to LowLevelTests or eliminated
# in favor of high-level tests in this file.
def test_highlevel_reloaded(self):
# See gh-115490 (https://github.com/python/cpython/issues/115490).
importlib.reload(channels)
class TestChannels(TestBase):
def test_create(self):
r, s = channels.create()
self.assertIsInstance(r, channels.RecvChannel)
self.assertIsInstance(s, channels.SendChannel)
def test_list_all(self):
self.assertEqual(channels.list_all(), [])
created = set()
for _ in range(3):
ch = channels.create()
created.add(ch)
after = set(channels.list_all())
self.assertEqual(after, created)
def test_shareable(self):
interp = interpreters.create()
rch, sch = channels.create()
self.assertTrue(
interpreters.is_shareable(rch))
self.assertTrue(
interpreters.is_shareable(sch))
sch.send_nowait(rch)
sch.send_nowait(sch)
rch2 = rch.recv()
sch2 = rch.recv()
interp.prepare_main(rch=rch, sch=sch)
sch.send_nowait(rch)
sch.send_nowait(sch)
interp.exec(dedent("""
rch2 = rch.recv()
sch2 = rch.recv()
assert rch2 == rch
assert sch2 == sch
sch.send_nowait(rch2)
sch.send_nowait(sch2)
"""))
rch3 = rch.recv()
sch3 = rch.recv()
self.assertEqual(rch2, rch)
self.assertEqual(sch2, sch)
self.assertEqual(rch3, rch)
self.assertEqual(sch3, sch)
def test_is_closed(self):
rch, sch = channels.create()
rbefore = rch.is_closed
sbefore = sch.is_closed
rch.close()
rafter = rch.is_closed
safter = sch.is_closed
self.assertFalse(rbefore)
self.assertFalse(sbefore)
self.assertTrue(rafter)
self.assertTrue(safter)
class TestRecvChannelAttrs(TestBase):
def test_id_type(self):
rch, _ = channels.create()
self.assertIsInstance(rch.id, _channels.ChannelID)
def test_custom_id(self):
rch = channels.RecvChannel(1)
self.assertEqual(rch.id, 1)
with self.assertRaises(TypeError):
channels.RecvChannel('1')
def test_id_readonly(self):
rch = channels.RecvChannel(1)
with self.assertRaises(AttributeError):
rch.id = 2
def test_equality(self):
ch1, _ = channels.create()
ch2, _ = channels.create()
self.assertEqual(ch1, ch1)
self.assertNotEqual(ch1, ch2)
def test_pickle(self):
ch, _ = channels.create()
data = pickle.dumps(ch)
unpickled = pickle.loads(data)
self.assertEqual(unpickled, ch)
class TestSendChannelAttrs(TestBase):
def test_id_type(self):
_, sch = channels.create()
self.assertIsInstance(sch.id, _channels.ChannelID)
def test_custom_id(self):
sch = channels.SendChannel(1)
self.assertEqual(sch.id, 1)
with self.assertRaises(TypeError):
channels.SendChannel('1')
def test_id_readonly(self):
sch = channels.SendChannel(1)
with self.assertRaises(AttributeError):
sch.id = 2
def test_equality(self):
_, ch1 = channels.create()
_, ch2 = channels.create()
self.assertEqual(ch1, ch1)
self.assertNotEqual(ch1, ch2)
def test_pickle(self):
_, ch = channels.create()
data = pickle.dumps(ch)
unpickled = pickle.loads(data)
self.assertEqual(unpickled, ch)
class TestSendRecv(TestBase):
def test_send_recv_main(self):
r, s = channels.create()
orig = b'spam'
s.send_nowait(orig)
obj = r.recv()
self.assertEqual(obj, orig)
self.assertIsNot(obj, orig)
def test_send_recv_same_interpreter(self):
interp = interpreters.create()
interp.exec(dedent("""
from test.support.interpreters import channels
r, s = channels.create()
orig = b'spam'
s.send_nowait(orig)
obj = r.recv()
assert obj == orig, 'expected: obj == orig'
assert obj is not orig, 'expected: obj is not orig'
"""))
@unittest.skip('broken (see BPO-...)')
def test_send_recv_different_interpreters(self):
r1, s1 = channels.create()
r2, s2 = channels.create()
orig1 = b'spam'
s1.send_nowait(orig1)
out = _run_output(
interpreters.create(),
dedent(f"""
obj1 = r.recv()
assert obj1 == b'spam', 'expected: obj1 == orig1'
# When going to another interpreter we get a copy.
assert id(obj1) != {id(orig1)}, 'expected: obj1 is not orig1'
orig2 = b'eggs'
print(id(orig2))
s.send_nowait(orig2)
"""),
channels=dict(r=r1, s=s2),
)
obj2 = r2.recv()
self.assertEqual(obj2, b'eggs')
self.assertNotEqual(id(obj2), int(out))
def test_send_recv_different_threads(self):
r, s = channels.create()
def f():
while True:
try:
obj = r.recv()
break
except channels.ChannelEmptyError:
time.sleep(0.1)
s.send(obj)
t = threading.Thread(target=f)
t.start()
orig = b'spam'
s.send(orig)
obj = r.recv()
t.join()
self.assertEqual(obj, orig)
self.assertIsNot(obj, orig)
def test_send_recv_nowait_main(self):
r, s = channels.create()
orig = b'spam'
s.send_nowait(orig)
obj = r.recv_nowait()
self.assertEqual(obj, orig)
self.assertIsNot(obj, orig)
def test_send_recv_nowait_main_with_default(self):
r, _ = channels.create()
obj = r.recv_nowait(None)
self.assertIsNone(obj)
def test_send_recv_nowait_same_interpreter(self):
interp = interpreters.create()
interp.exec(dedent("""
from test.support.interpreters import channels
r, s = channels.create()
orig = b'spam'
s.send_nowait(orig)
obj = r.recv_nowait()
assert obj == orig, 'expected: obj == orig'
# Even back in the same interpreter we get a copy.
assert obj is not orig, 'expected: obj is not orig'
"""))
@unittest.skip('broken (see BPO-...)')
def test_send_recv_nowait_different_interpreters(self):
r1, s1 = channels.create()
r2, s2 = channels.create()
orig1 = b'spam'
s1.send_nowait(orig1)
out = _run_output(
interpreters.create(),
dedent(f"""
obj1 = r.recv_nowait()
assert obj1 == b'spam', 'expected: obj1 == orig1'
# When going to another interpreter we get a copy.
assert id(obj1) != {id(orig1)}, 'expected: obj1 is not orig1'
orig2 = b'eggs'
print(id(orig2))
s.send_nowait(orig2)
"""),
channels=dict(r=r1, s=s2),
)
obj2 = r2.recv_nowait()
self.assertEqual(obj2, b'eggs')
self.assertNotEqual(id(obj2), int(out))
def test_recv_timeout(self):
r, _ = channels.create()
with self.assertRaises(TimeoutError):
r.recv(timeout=1)
def test_recv_channel_does_not_exist(self):
ch = channels.RecvChannel(1_000_000)
with self.assertRaises(channels.ChannelNotFoundError):
ch.recv()
def test_send_channel_does_not_exist(self):
ch = channels.SendChannel(1_000_000)
with self.assertRaises(channels.ChannelNotFoundError):
ch.send(b'spam')
def test_recv_nowait_channel_does_not_exist(self):
ch = channels.RecvChannel(1_000_000)
with self.assertRaises(channels.ChannelNotFoundError):
ch.recv_nowait()
def test_send_nowait_channel_does_not_exist(self):
ch = channels.SendChannel(1_000_000)
with self.assertRaises(channels.ChannelNotFoundError):
ch.send_nowait(b'spam')
def test_recv_nowait_empty(self):
ch, _ = channels.create()
with self.assertRaises(channels.ChannelEmptyError):
ch.recv_nowait()
def test_recv_nowait_default(self):
default = object()
rch, sch = channels.create()
obj1 = rch.recv_nowait(default)
sch.send_nowait(None)
sch.send_nowait(1)
sch.send_nowait(b'spam')
sch.send_nowait(b'eggs')
obj2 = rch.recv_nowait(default)
obj3 = rch.recv_nowait(default)
obj4 = rch.recv_nowait()
obj5 = rch.recv_nowait(default)
obj6 = rch.recv_nowait(default)
self.assertIs(obj1, default)
self.assertIs(obj2, None)
self.assertEqual(obj3, 1)
self.assertEqual(obj4, b'spam')
self.assertEqual(obj5, b'eggs')
self.assertIs(obj6, default)
def test_send_buffer(self):
buf = bytearray(b'spamspamspam')
obj = None
rch, sch = channels.create()
def f():
nonlocal obj
while True:
try:
obj = rch.recv()
break
except channels.ChannelEmptyError:
time.sleep(0.1)
t = threading.Thread(target=f)
t.start()
sch.send_buffer(buf)
t.join()
self.assertIsNot(obj, buf)
self.assertIsInstance(obj, memoryview)
self.assertEqual(obj, buf)
buf[4:8] = b'eggs'
self.assertEqual(obj, buf)
obj[4:8] = b'ham.'
self.assertEqual(obj, buf)
def test_send_buffer_nowait(self):
buf = bytearray(b'spamspamspam')
rch, sch = channels.create()
sch.send_buffer_nowait(buf)
obj = rch.recv()
self.assertIsNot(obj, buf)
self.assertIsInstance(obj, memoryview)
self.assertEqual(obj, buf)
buf[4:8] = b'eggs'
self.assertEqual(obj, buf)
obj[4:8] = b'ham.'
self.assertEqual(obj, buf)
def test_send_cleared_with_subinterpreter(self):
def common(rch, sch, unbound=None, presize=0):
if not unbound:
extraargs = ''
elif unbound is channels.UNBOUND:
extraargs = ', unbound=channels.UNBOUND'
elif unbound is channels.UNBOUND_ERROR:
extraargs = ', unbound=channels.UNBOUND_ERROR'
elif unbound is channels.UNBOUND_REMOVE:
extraargs = ', unbound=channels.UNBOUND_REMOVE'
else:
raise NotImplementedError(repr(unbound))
interp = interpreters.create()
_run_output(interp, dedent(f"""
from test.support.interpreters import channels
sch = channels.SendChannel({sch.id})
obj1 = b'spam'
obj2 = b'eggs'
sch.send_nowait(obj1{extraargs})
sch.send_nowait(obj2{extraargs})
"""))
self.assertEqual(
_channels.get_count(rch.id),
presize + 2,
)
if presize == 0:
obj1 = rch.recv()
self.assertEqual(obj1, b'spam')
self.assertEqual(
_channels.get_count(rch.id),
presize + 1,
)
return interp
with self.subTest('default'): # UNBOUND
rch, sch = channels.create()
interp = common(rch, sch)
del interp
self.assertEqual(_channels.get_count(rch.id), 1)
obj1 = rch.recv()
self.assertEqual(_channels.get_count(rch.id), 0)
self.assertIs(obj1, channels.UNBOUND)
self.assertEqual(_channels.get_count(rch.id), 0)
with self.assertRaises(channels.ChannelEmptyError):
rch.recv_nowait()
with self.subTest('UNBOUND'):
rch, sch = channels.create()
interp = common(rch, sch, channels.UNBOUND)
del interp
self.assertEqual(_channels.get_count(rch.id), 1)
obj1 = rch.recv()
self.assertIs(obj1, channels.UNBOUND)
self.assertEqual(_channels.get_count(rch.id), 0)
with self.assertRaises(channels.ChannelEmptyError):
rch.recv_nowait()
with self.subTest('UNBOUND_ERROR'):
rch, sch = channels.create()
interp = common(rch, sch, channels.UNBOUND_ERROR)
del interp
self.assertEqual(_channels.get_count(rch.id), 1)
with self.assertRaises(channels.ItemInterpreterDestroyed):
rch.recv()
self.assertEqual(_channels.get_count(rch.id), 0)
with self.assertRaises(channels.ChannelEmptyError):
rch.recv_nowait()
with self.subTest('UNBOUND_REMOVE'):
rch, sch = channels.create()
interp = common(rch, sch, channels.UNBOUND_REMOVE)
del interp
self.assertEqual(_channels.get_count(rch.id), 0)
with self.assertRaises(channels.ChannelEmptyError):
rch.recv_nowait()
sch.send_nowait(b'ham', unbound=channels.UNBOUND_REMOVE)
self.assertEqual(_channels.get_count(rch.id), 1)
interp = common(rch, sch, channels.UNBOUND_REMOVE, 1)
self.assertEqual(_channels.get_count(rch.id), 3)
sch.send_nowait(42, unbound=channels.UNBOUND_REMOVE)
self.assertEqual(_channels.get_count(rch.id), 4)
del interp
self.assertEqual(_channels.get_count(rch.id), 2)
obj1 = rch.recv()
obj2 = rch.recv()
self.assertEqual(obj1, b'ham')
self.assertEqual(obj2, 42)
self.assertEqual(_channels.get_count(rch.id), 0)
with self.assertRaises(channels.ChannelEmptyError):
rch.recv_nowait()
def test_send_cleared_with_subinterpreter_mixed(self):
rch, sch = channels.create()
interp = interpreters.create()
# If we don't associate the main interpreter with the channel
# then the channel will be automatically closed when interp
# is destroyed.
sch.send_nowait(None)
rch.recv()
self.assertEqual(_channels.get_count(rch.id), 0)
_run_output(interp, dedent(f"""
from test.support.interpreters import channels
sch = channels.SendChannel({sch.id})
sch.send_nowait(1, unbound=channels.UNBOUND)
sch.send_nowait(2, unbound=channels.UNBOUND_ERROR)
sch.send_nowait(3)
sch.send_nowait(4, unbound=channels.UNBOUND_REMOVE)
sch.send_nowait(5, unbound=channels.UNBOUND)
"""))
self.assertEqual(_channels.get_count(rch.id), 5)
del interp
self.assertEqual(_channels.get_count(rch.id), 4)
obj1 = rch.recv()
self.assertIs(obj1, channels.UNBOUND)
self.assertEqual(_channels.get_count(rch.id), 3)
with self.assertRaises(channels.ItemInterpreterDestroyed):
rch.recv()
self.assertEqual(_channels.get_count(rch.id), 2)
obj2 = rch.recv()
self.assertIs(obj2, channels.UNBOUND)
self.assertEqual(_channels.get_count(rch.id), 1)
obj3 = rch.recv()
self.assertIs(obj3, channels.UNBOUND)
self.assertEqual(_channels.get_count(rch.id), 0)
def test_send_cleared_with_subinterpreter_multiple(self):
rch, sch = channels.create()
interp1 = interpreters.create()
interp2 = interpreters.create()
sch.send_nowait(1)
_run_output(interp1, dedent(f"""
from test.support.interpreters import channels
rch = channels.RecvChannel({rch.id})
sch = channels.SendChannel({sch.id})
obj1 = rch.recv()
sch.send_nowait(2, unbound=channels.UNBOUND)
sch.send_nowait(obj1, unbound=channels.UNBOUND_REMOVE)
"""))
_run_output(interp2, dedent(f"""
from test.support.interpreters import channels
rch = channels.RecvChannel({rch.id})
sch = channels.SendChannel({sch.id})
obj2 = rch.recv()
obj1 = rch.recv()
"""))
self.assertEqual(_channels.get_count(rch.id), 0)
sch.send_nowait(3)
_run_output(interp1, dedent("""
sch.send_nowait(4, unbound=channels.UNBOUND)
# interp closed here
sch.send_nowait(5, unbound=channels.UNBOUND_REMOVE)
sch.send_nowait(6, unbound=channels.UNBOUND)
"""))
_run_output(interp2, dedent("""
sch.send_nowait(7, unbound=channels.UNBOUND_ERROR)
# interp closed here
sch.send_nowait(obj1, unbound=channels.UNBOUND_ERROR)
sch.send_nowait(obj2, unbound=channels.UNBOUND_REMOVE)
sch.send_nowait(8, unbound=channels.UNBOUND)
"""))
_run_output(interp1, dedent("""
sch.send_nowait(9, unbound=channels.UNBOUND_REMOVE)
sch.send_nowait(10, unbound=channels.UNBOUND)
"""))
self.assertEqual(_channels.get_count(rch.id), 10)
obj3 = rch.recv()
self.assertEqual(obj3, 3)
self.assertEqual(_channels.get_count(rch.id), 9)
obj4 = rch.recv()
self.assertEqual(obj4, 4)
self.assertEqual(_channels.get_count(rch.id), 8)
del interp1
self.assertEqual(_channels.get_count(rch.id), 6)
# obj5 was removed
obj6 = rch.recv()
self.assertIs(obj6, channels.UNBOUND)
self.assertEqual(_channels.get_count(rch.id), 5)
obj7 = rch.recv()
self.assertEqual(obj7, 7)
self.assertEqual(_channels.get_count(rch.id), 4)
del interp2
self.assertEqual(_channels.get_count(rch.id), 3)
# obj1
with self.assertRaises(channels.ItemInterpreterDestroyed):
rch.recv()
self.assertEqual(_channels.get_count(rch.id), 2)
# obj2 was removed
obj8 = rch.recv()
self.assertIs(obj8, channels.UNBOUND)
self.assertEqual(_channels.get_count(rch.id), 1)
# obj9 was removed
obj10 = rch.recv()
self.assertIs(obj10, channels.UNBOUND)
self.assertEqual(_channels.get_count(rch.id), 0)
if __name__ == '__main__':
# Test needs to be a package, so we can do relative imports.
unittest.main()
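The file above exercises the cross-interpreter channel API from test.support.interpreters.channels, which is CPython-internal test support (not a public API), so details may change. A minimal sketch of the pattern the tests drive, assuming that module is importable:

# Share the send end of a channel with a subinterpreter, send from there,
# and receive in the main interpreter. Objects arrive as copies.
from textwrap import dedent
from test.support import interpreters
from test.support.interpreters import channels

rch, sch = channels.create()
interp = interpreters.create()
interp.prepare_main(sch=sch)          # bind the send end in the subinterpreter's __main__
interp.exec(dedent("""
    sch.send_nowait(b'hello from the subinterpreter')
    """))
print(rch.recv())

# As the test_send_cleared_* tests show, the unbound=... argument controls
# what the receiver sees if the sending interpreter is destroyed before the
# item is received:
#   channels.UNBOUND        -> recv() returns the channels.UNBOUND sentinel (default)
#   channels.UNBOUND_ERROR  -> recv() raises channels.ItemInterpreterDestroyed
#   channels.UNBOUND_REMOVE -> the item is silently dropped from the channel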

View File

@@ -0,0 +1,190 @@
import contextlib
import json
import os
import os.path
import sys
from textwrap import dedent
import unittest
from test import support
from test.support import import_helper
from test.support import os_helper
# Raise SkipTest if subinterpreters not supported.
import_helper.import_module('_interpreters')
from .utils import TestBase
class StartupTests(TestBase):
# We want to ensure the initial state of subinterpreters
# matches expectations.
_subtest_count = 0
@contextlib.contextmanager
def subTest(self, *args):
with super().subTest(*args) as ctx:
self._subtest_count += 1
try:
yield ctx
finally:
if self._debugged_in_subtest:
if self._subtest_count == 1:
# The first subtest adds a leading newline, so we
# compensate here by not printing a trailing newline.
print('### end subtest debug ###', end='')
else:
print('### end subtest debug ###')
self._debugged_in_subtest = False
def debug(self, msg, *, header=None):
if header:
self._debug(f'--- {header} ---')
if msg:
if msg.endswith(os.linesep):
self._debug(msg[:-len(os.linesep)])
else:
self._debug(msg)
self._debug('<no newline>')
self._debug('------')
else:
self._debug(msg)
_debugged = False
_debugged_in_subtest = False
def _debug(self, msg):
if not self._debugged:
print()
self._debugged = True
if self._subtest is not None:
if True:
if not self._debugged_in_subtest:
self._debugged_in_subtest = True
print('### start subtest debug ###')
print(msg)
else:
print(msg)
def create_temp_dir(self):
import tempfile
tmp = tempfile.mkdtemp(prefix='test_interpreters_')
tmp = os.path.realpath(tmp)
self.addCleanup(os_helper.rmtree, tmp)
return tmp
def write_script(self, *path, text):
filename = os.path.join(*path)
dirname = os.path.dirname(filename)
if dirname:
os.makedirs(dirname, exist_ok=True)
with open(filename, 'w', encoding='utf-8') as outfile:
outfile.write(dedent(text))
return filename
@support.requires_subprocess()
def run_python(self, argv, *, cwd=None):
# This method is inspired by
# EmbeddingTestsMixin.run_embedded_interpreter() in test_embed.py.
import shlex
import subprocess
if isinstance(argv, str):
argv = shlex.split(argv)
argv = [sys.executable, *argv]
try:
proc = subprocess.run(
argv,
cwd=cwd,
capture_output=True,
text=True,
)
except Exception as exc:
self.debug(f'# cmd: {shlex.join(argv)}')
if isinstance(exc, FileNotFoundError) and not exc.filename:
if os.path.exists(argv[0]):
exists = 'exists'
else:
exists = 'does not exist'
self.debug(f'{argv[0]} {exists}')
raise # re-raise
assert proc.stderr == '' or proc.returncode != 0, proc.stderr
if proc.returncode != 0 and support.verbose:
self.debug(f'# python3 {shlex.join(argv[1:])} failed:')
self.debug(proc.stdout, header='stdout')
self.debug(proc.stderr, header='stderr')
self.assertEqual(proc.returncode, 0)
self.assertEqual(proc.stderr, '')
return proc.stdout
def test_sys_path_0(self):
# The main interpreter's sys.path[0] should be used by subinterpreters.
script = '''
import sys
from test.support import interpreters
orig = sys.path[0]
interp = interpreters.create()
interp.exec(f"""if True:
import json
import sys
print(json.dumps({{
'main': {orig!r},
'sub': sys.path[0],
}}, indent=4), flush=True)
""")
'''
# <tmp>/
# pkg/
# __init__.py
# __main__.py
# script.py
# script.py
cwd = self.create_temp_dir()
self.write_script(cwd, 'pkg', '__init__.py', text='')
self.write_script(cwd, 'pkg', '__main__.py', text=script)
self.write_script(cwd, 'pkg', 'script.py', text=script)
self.write_script(cwd, 'script.py', text=script)
cases = [
('script.py', cwd),
('-m script', cwd),
('-m pkg', cwd),
('-m pkg.script', cwd),
('-c "import script"', ''),
]
for argv, expected in cases:
with self.subTest(f'python3 {argv}'):
out = self.run_python(argv, cwd=cwd)
data = json.loads(out)
sp0_main, sp0_sub = data['main'], data['sub']
self.assertEqual(sp0_sub, sp0_main)
self.assertEqual(sp0_sub, expected)
# XXX Also check them all with the -P cmdline flag?
class FinalizationTests(TestBase):
@support.requires_subprocess()
def test_gh_109793(self):
# Make sure finalization finishes and the correct error code
# is reported, even when subinterpreters get cleaned up at the end.
import subprocess
argv = [sys.executable, '-c', '''if True:
from test.support import interpreters
interp = interpreters.create()
raise Exception
''']
proc = subprocess.run(argv, capture_output=True, text=True)
self.assertIn('Traceback', proc.stderr)
if proc.returncode == 0 and support.verbose:
print()
print("--- cmd unexpected succeeded ---")
print(f"stdout:\n{proc.stdout}")
print(f"stderr:\n{proc.stderr}")
print("------")
self.assertEqual(proc.returncode, 1)
if __name__ == '__main__':
# Test needs to be a package, so we can do relative imports.
unittest.main()
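The file above checks two lifecycle behaviors: a subinterpreter inherits the main interpreter's sys.path[0] however Python was invoked, and finalization still reports the right exit code when subinterpreters are cleaned up at shutdown (gh-109793). A minimal sketch of the first behavior, assuming test.support.interpreters is importable:

# The subinterpreter should see the same sys.path[0] as its creator.
import sys
from textwrap import dedent
from test.support import interpreters

main_path0 = sys.path[0]
interp = interpreters.create()
interp.exec(dedent(f"""
    import sys
    assert sys.path[0] == {main_path0!r}, sys.path[0]
    """))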

View File

@@ -0,0 +1,661 @@
import importlib
import pickle
import threading
from textwrap import dedent
import unittest
import time
from test.support import import_helper, Py_DEBUG
# Raise SkipTest if subinterpreters not supported.
_queues = import_helper.import_module('_interpqueues')
from test.support import interpreters
from test.support.interpreters import queues, _crossinterp
from .utils import _run_output, TestBase as _TestBase
REPLACE = _crossinterp._UNBOUND_CONSTANT_TO_FLAG[_crossinterp.UNBOUND]
def get_num_queues():
return len(_queues.list_all())
class TestBase(_TestBase):
def tearDown(self):
for qid, _, _ in _queues.list_all():
try:
_queues.destroy(qid)
except Exception:
pass
class LowLevelTests(TestBase):
# The behaviors in the low-level module are important in as much
# as they are exercised by the high-level module. Therefore the
# most important testing happens in the high-level tests.
# These low-level tests cover corner cases that are not
# encountered by the high-level module, thus they
# mostly shouldn't matter as much.
def test_highlevel_reloaded(self):
# See gh-115490 (https://github.com/python/cpython/issues/115490).
importlib.reload(queues)
def test_create_destroy(self):
qid = _queues.create(2, 0, REPLACE)
_queues.destroy(qid)
self.assertEqual(get_num_queues(), 0)
with self.assertRaises(queues.QueueNotFoundError):
_queues.get(qid)
with self.assertRaises(queues.QueueNotFoundError):
_queues.destroy(qid)
def test_not_destroyed(self):
# It should have cleaned up any remaining queues.
stdout, stderr = self.assert_python_ok(
'-c',
dedent(f"""
import {_queues.__name__} as _queues
_queues.create(2, 0, {REPLACE})
"""),
)
self.assertEqual(stdout, '')
if Py_DEBUG:
self.assertNotEqual(stderr, '')
else:
self.assertEqual(stderr, '')
def test_bind_release(self):
with self.subTest('typical'):
qid = _queues.create(2, 0, REPLACE)
_queues.bind(qid)
_queues.release(qid)
self.assertEqual(get_num_queues(), 0)
with self.subTest('bind too much'):
qid = _queues.create(2, 0, REPLACE)
_queues.bind(qid)
_queues.bind(qid)
_queues.release(qid)
_queues.destroy(qid)
self.assertEqual(get_num_queues(), 0)
with self.subTest('nested'):
qid = _queues.create(2, 0, REPLACE)
_queues.bind(qid)
_queues.bind(qid)
_queues.release(qid)
_queues.release(qid)
self.assertEqual(get_num_queues(), 0)
with self.subTest('release without binding'):
qid = _queues.create(2, 0, REPLACE)
with self.assertRaises(queues.QueueError):
_queues.release(qid)
class QueueTests(TestBase):
def test_create(self):
with self.subTest('vanilla'):
queue = queues.create()
self.assertEqual(queue.maxsize, 0)
with self.subTest('small maxsize'):
queue = queues.create(3)
self.assertEqual(queue.maxsize, 3)
with self.subTest('big maxsize'):
queue = queues.create(100)
self.assertEqual(queue.maxsize, 100)
with self.subTest('no maxsize'):
queue = queues.create(0)
self.assertEqual(queue.maxsize, 0)
with self.subTest('negative maxsize'):
queue = queues.create(-10)
self.assertEqual(queue.maxsize, -10)
with self.subTest('bad maxsize'):
with self.assertRaises(TypeError):
queues.create('1')
def test_shareable(self):
queue1 = queues.create()
interp = interpreters.create()
interp.exec(dedent(f"""
from test.support.interpreters import queues
queue1 = queues.Queue({queue1.id})
"""));
with self.subTest('same interpreter'):
queue2 = queues.create()
queue1.put(queue2, syncobj=True)
queue3 = queue1.get()
self.assertIs(queue3, queue2)
with self.subTest('from current interpreter'):
queue4 = queues.create()
queue1.put(queue4, syncobj=True)
out = _run_output(interp, dedent("""
queue4 = queue1.get()
print(queue4.id)
"""))
qid = int(out)
self.assertEqual(qid, queue4.id)
with self.subTest('from subinterpreter'):
out = _run_output(interp, dedent("""
queue5 = queues.create()
queue1.put(queue5, syncobj=True)
print(queue5.id)
"""))
qid = int(out)
queue5 = queue1.get()
self.assertEqual(queue5.id, qid)
def test_id_type(self):
queue = queues.create()
self.assertIsInstance(queue.id, int)
def test_custom_id(self):
with self.assertRaises(queues.QueueNotFoundError):
queues.Queue(1_000_000)
def test_id_readonly(self):
queue = queues.create()
with self.assertRaises(AttributeError):
queue.id = 1_000_000
def test_maxsize_readonly(self):
queue = queues.create(10)
with self.assertRaises(AttributeError):
queue.maxsize = 1_000_000
def test_hashable(self):
queue = queues.create()
expected = hash(queue.id)
actual = hash(queue)
self.assertEqual(actual, expected)
def test_equality(self):
queue1 = queues.create()
queue2 = queues.create()
self.assertEqual(queue1, queue1)
self.assertNotEqual(queue1, queue2)
def test_pickle(self):
queue = queues.create()
data = pickle.dumps(queue)
unpickled = pickle.loads(data)
self.assertEqual(unpickled, queue)
class TestQueueOps(TestBase):
def test_empty(self):
queue = queues.create()
before = queue.empty()
queue.put(None, syncobj=True)
during = queue.empty()
queue.get()
after = queue.empty()
self.assertIs(before, True)
self.assertIs(during, False)
self.assertIs(after, True)
def test_full(self):
expected = [False, False, False, True, False, False, False]
actual = []
queue = queues.create(3)
for _ in range(3):
actual.append(queue.full())
queue.put(None, syncobj=True)
actual.append(queue.full())
for _ in range(3):
queue.get()
actual.append(queue.full())
self.assertEqual(actual, expected)
def test_qsize(self):
expected = [0, 1, 2, 3, 2, 3, 2, 1, 0, 1, 0]
actual = []
queue = queues.create()
for _ in range(3):
actual.append(queue.qsize())
queue.put(None, syncobj=True)
actual.append(queue.qsize())
queue.get()
actual.append(queue.qsize())
queue.put(None, syncobj=True)
actual.append(queue.qsize())
for _ in range(3):
queue.get()
actual.append(queue.qsize())
queue.put(None, syncobj=True)
actual.append(queue.qsize())
queue.get()
actual.append(queue.qsize())
self.assertEqual(actual, expected)
def test_put_get_main(self):
expected = list(range(20))
for syncobj in (True, False):
kwds = dict(syncobj=syncobj)
with self.subTest(f'syncobj={syncobj}'):
queue = queues.create()
for i in range(20):
queue.put(i, **kwds)
actual = [queue.get() for _ in range(20)]
self.assertEqual(actual, expected)
def test_put_timeout(self):
for syncobj in (True, False):
kwds = dict(syncobj=syncobj)
with self.subTest(f'syncobj={syncobj}'):
queue = queues.create(2)
queue.put(None, **kwds)
queue.put(None, **kwds)
with self.assertRaises(queues.QueueFull):
queue.put(None, timeout=0.1, **kwds)
queue.get()
queue.put(None, **kwds)
def test_put_nowait(self):
for syncobj in (True, False):
kwds = dict(syncobj=syncobj)
with self.subTest(f'syncobj={syncobj}'):
queue = queues.create(2)
queue.put_nowait(None, **kwds)
queue.put_nowait(None, **kwds)
with self.assertRaises(queues.QueueFull):
queue.put_nowait(None, **kwds)
queue.get()
queue.put_nowait(None, **kwds)
def test_put_syncobj(self):
for obj in [
None,
True,
10,
'spam',
b'spam',
(0, 'a'),
]:
with self.subTest(repr(obj)):
queue = queues.create()
queue.put(obj, syncobj=True)
obj2 = queue.get()
self.assertEqual(obj2, obj)
queue.put(obj, syncobj=True)
obj2 = queue.get_nowait()
self.assertEqual(obj2, obj)
for obj in [
[1, 2, 3],
{'a': 13, 'b': 17},
]:
with self.subTest(repr(obj)):
queue = queues.create()
with self.assertRaises(interpreters.NotShareableError):
queue.put(obj, syncobj=True)
def test_put_not_syncobj(self):
for obj in [
None,
True,
10,
'spam',
b'spam',
(0, 'a'),
# not shareable
[1, 2, 3],
{'a': 13, 'b': 17},
]:
with self.subTest(repr(obj)):
queue = queues.create()
queue.put(obj, syncobj=False)
obj2 = queue.get()
self.assertEqual(obj2, obj)
queue.put(obj, syncobj=False)
obj2 = queue.get_nowait()
self.assertEqual(obj2, obj)
def test_get_timeout(self):
queue = queues.create()
with self.assertRaises(queues.QueueEmpty):
queue.get(timeout=0.1)
def test_get_nowait(self):
queue = queues.create()
with self.assertRaises(queues.QueueEmpty):
queue.get_nowait()
def test_put_get_default_syncobj(self):
expected = list(range(20))
queue = queues.create(syncobj=True)
for methname in ('get', 'get_nowait'):
with self.subTest(f'{methname}()'):
get = getattr(queue, methname)
for i in range(20):
queue.put(i)
actual = [get() for _ in range(20)]
self.assertEqual(actual, expected)
obj = [1, 2, 3] # lists are not shareable
with self.assertRaises(interpreters.NotShareableError):
queue.put(obj)
def test_put_get_default_not_syncobj(self):
expected = list(range(20))
queue = queues.create(syncobj=False)
for methname in ('get', 'get_nowait'):
with self.subTest(f'{methname}()'):
get = getattr(queue, methname)
for i in range(20):
queue.put(i)
actual = [get() for _ in range(20)]
self.assertEqual(actual, expected)
obj = [1, 2, 3] # lists are not shareable
queue.put(obj)
obj2 = get()
self.assertEqual(obj, obj2)
self.assertIsNot(obj, obj2)
def test_put_get_same_interpreter(self):
interp = interpreters.create()
interp.exec(dedent("""
from test.support.interpreters import queues
queue = queues.create()
"""))
for methname in ('get', 'get_nowait'):
with self.subTest(f'{methname}()'):
interp.exec(dedent(f"""
orig = b'spam'
queue.put(orig, syncobj=True)
obj = queue.{methname}()
assert obj == orig, 'expected: obj == orig'
assert obj is not orig, 'expected: obj is not orig'
"""))
def test_put_get_different_interpreters(self):
interp = interpreters.create()
queue1 = queues.create()
queue2 = queues.create()
self.assertEqual(len(queues.list_all()), 2)
for methname in ('get', 'get_nowait'):
with self.subTest(f'{methname}()'):
obj1 = b'spam'
queue1.put(obj1, syncobj=True)
out = _run_output(
interp,
dedent(f"""
from test.support.interpreters import queues
queue1 = queues.Queue({queue1.id})
queue2 = queues.Queue({queue2.id})
assert queue1.qsize() == 1, 'expected: queue1.qsize() == 1'
obj = queue1.{methname}()
assert queue1.qsize() == 0, 'expected: queue1.qsize() == 0'
assert obj == b'spam', 'expected: obj == obj1'
# When going to another interpreter we get a copy.
assert id(obj) != {id(obj1)}, 'expected: obj is not obj1'
obj2 = b'eggs'
print(id(obj2))
assert queue2.qsize() == 0, 'expected: queue2.qsize() == 0'
queue2.put(obj2, syncobj=True)
assert queue2.qsize() == 1, 'expected: queue2.qsize() == 1'
"""))
self.assertEqual(len(queues.list_all()), 2)
self.assertEqual(queue1.qsize(), 0)
self.assertEqual(queue2.qsize(), 1)
get = getattr(queue2, methname)
obj2 = get()
self.assertEqual(obj2, b'eggs')
self.assertNotEqual(id(obj2), int(out))
def test_put_cleared_with_subinterpreter(self):
def common(queue, unbound=None, presize=0):
if not unbound:
extraargs = ''
elif unbound is queues.UNBOUND:
extraargs = ', unbound=queues.UNBOUND'
elif unbound is queues.UNBOUND_ERROR:
extraargs = ', unbound=queues.UNBOUND_ERROR'
elif unbound is queues.UNBOUND_REMOVE:
extraargs = ', unbound=queues.UNBOUND_REMOVE'
else:
raise NotImplementedError(repr(unbound))
interp = interpreters.create()
_run_output(interp, dedent(f"""
from test.support.interpreters import queues
queue = queues.Queue({queue.id})
obj1 = b'spam'
obj2 = b'eggs'
queue.put(obj1, syncobj=True{extraargs})
queue.put(obj2, syncobj=True{extraargs})
"""))
self.assertEqual(queue.qsize(), presize + 2)
if presize == 0:
obj1 = queue.get()
self.assertEqual(obj1, b'spam')
self.assertEqual(queue.qsize(), presize + 1)
return interp
with self.subTest('default'): # UNBOUND
queue = queues.create()
interp = common(queue)
del interp
obj1 = queue.get()
self.assertIs(obj1, queues.UNBOUND)
self.assertEqual(queue.qsize(), 0)
with self.assertRaises(queues.QueueEmpty):
queue.get_nowait()
with self.subTest('UNBOUND'):
queue = queues.create()
interp = common(queue, queues.UNBOUND)
del interp
obj1 = queue.get()
self.assertIs(obj1, queues.UNBOUND)
self.assertEqual(queue.qsize(), 0)
with self.assertRaises(queues.QueueEmpty):
queue.get_nowait()
with self.subTest('UNBOUND_ERROR'):
queue = queues.create()
interp = common(queue, queues.UNBOUND_ERROR)
del interp
self.assertEqual(queue.qsize(), 1)
with self.assertRaises(queues.ItemInterpreterDestroyed):
queue.get()
self.assertEqual(queue.qsize(), 0)
with self.assertRaises(queues.QueueEmpty):
queue.get_nowait()
with self.subTest('UNBOUND_REMOVE'):
queue = queues.create()
interp = common(queue, queues.UNBOUND_REMOVE)
del interp
self.assertEqual(queue.qsize(), 0)
with self.assertRaises(queues.QueueEmpty):
queue.get_nowait()
queue.put(b'ham', unbound=queues.UNBOUND_REMOVE)
self.assertEqual(queue.qsize(), 1)
interp = common(queue, queues.UNBOUND_REMOVE, 1)
self.assertEqual(queue.qsize(), 3)
queue.put(42, unbound=queues.UNBOUND_REMOVE)
self.assertEqual(queue.qsize(), 4)
del interp
self.assertEqual(queue.qsize(), 2)
obj1 = queue.get()
obj2 = queue.get()
self.assertEqual(obj1, b'ham')
self.assertEqual(obj2, 42)
self.assertEqual(queue.qsize(), 0)
with self.assertRaises(queues.QueueEmpty):
queue.get_nowait()
def test_put_cleared_with_subinterpreter_mixed(self):
queue = queues.create()
interp = interpreters.create()
_run_output(interp, dedent(f"""
from test.support.interpreters import queues
queue = queues.Queue({queue.id})
queue.put(1, syncobj=True, unbound=queues.UNBOUND)
queue.put(2, syncobj=True, unbound=queues.UNBOUND_ERROR)
queue.put(3, syncobj=True)
queue.put(4, syncobj=True, unbound=queues.UNBOUND_REMOVE)
queue.put(5, syncobj=True, unbound=queues.UNBOUND)
"""))
self.assertEqual(queue.qsize(), 5)
del interp
self.assertEqual(queue.qsize(), 4)
obj1 = queue.get()
self.assertIs(obj1, queues.UNBOUND)
self.assertEqual(queue.qsize(), 3)
with self.assertRaises(queues.ItemInterpreterDestroyed):
queue.get()
self.assertEqual(queue.qsize(), 2)
obj2 = queue.get()
self.assertIs(obj2, queues.UNBOUND)
self.assertEqual(queue.qsize(), 1)
obj3 = queue.get()
self.assertIs(obj3, queues.UNBOUND)
self.assertEqual(queue.qsize(), 0)
def test_put_cleared_with_subinterpreter_multiple(self):
queue = queues.create()
interp1 = interpreters.create()
interp2 = interpreters.create()
queue.put(1, syncobj=True)
_run_output(interp1, dedent(f"""
from test.support.interpreters import queues
queue = queues.Queue({queue.id})
obj1 = queue.get()
queue.put(2, syncobj=True, unbound=queues.UNBOUND)
queue.put(obj1, syncobj=True, unbound=queues.UNBOUND_REMOVE)
"""))
_run_output(interp2, dedent(f"""
from test.support.interpreters import queues
queue = queues.Queue({queue.id})
obj2 = queue.get()
obj1 = queue.get()
"""))
self.assertEqual(queue.qsize(), 0)
queue.put(3)
_run_output(interp1, dedent("""
queue.put(4, syncobj=True, unbound=queues.UNBOUND)
# interp closed here
queue.put(5, syncobj=True, unbound=queues.UNBOUND_REMOVE)
queue.put(6, syncobj=True, unbound=queues.UNBOUND)
"""))
_run_output(interp2, dedent("""
queue.put(7, syncobj=True, unbound=queues.UNBOUND_ERROR)
# interp closed here
queue.put(obj1, syncobj=True, unbound=queues.UNBOUND_ERROR)
queue.put(obj2, syncobj=True, unbound=queues.UNBOUND_REMOVE)
queue.put(8, syncobj=True, unbound=queues.UNBOUND)
"""))
_run_output(interp1, dedent("""
queue.put(9, syncobj=True, unbound=queues.UNBOUND_REMOVE)
queue.put(10, syncobj=True, unbound=queues.UNBOUND)
"""))
self.assertEqual(queue.qsize(), 10)
obj3 = queue.get()
self.assertEqual(obj3, 3)
self.assertEqual(queue.qsize(), 9)
obj4 = queue.get()
self.assertEqual(obj4, 4)
self.assertEqual(queue.qsize(), 8)
del interp1
self.assertEqual(queue.qsize(), 6)
# obj5 was removed
obj6 = queue.get()
self.assertIs(obj6, queues.UNBOUND)
self.assertEqual(queue.qsize(), 5)
obj7 = queue.get()
self.assertEqual(obj7, 7)
self.assertEqual(queue.qsize(), 4)
del interp2
self.assertEqual(queue.qsize(), 3)
# obj1
with self.assertRaises(queues.ItemInterpreterDestroyed):
queue.get()
self.assertEqual(queue.qsize(), 2)
# obj2 was removed
obj8 = queue.get()
self.assertIs(obj8, queues.UNBOUND)
self.assertEqual(queue.qsize(), 1)
# obj9 was removed
obj10 = queue.get()
self.assertIs(obj10, queues.UNBOUND)
self.assertEqual(queue.qsize(), 0)
def test_put_get_different_threads(self):
queue1 = queues.create()
queue2 = queues.create()
def f():
while True:
try:
obj = queue1.get(timeout=0.1)
break
except queues.QueueEmpty:
continue
queue2.put(obj, syncobj=True)
t = threading.Thread(target=f)
t.start()
orig = b'spam'
queue1.put(orig, syncobj=True)
obj = queue2.get()
t.join()
self.assertEqual(obj, orig)
self.assertIsNot(obj, orig)
if __name__ == '__main__':
# Test needs to be a package, so we can do relative imports.
unittest.main()
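The file above exercises test.support.interpreters.queues, the queue counterpart to the channel tests (again internal test support, not a public API). A minimal sketch of the put/get pattern the tests drive, including the syncobj flag that distinguishes shareable objects from arbitrary ones:

# A queue is reconstructed by id inside the subinterpreter; objects put with
# syncobj=False may be unshareable and arrive in the other interpreter as copies.
from textwrap import dedent
from test.support import interpreters
from test.support.interpreters import queues

queue = queues.create(2)
interp = interpreters.create()
interp.exec(dedent(f"""
    from test.support.interpreters import queues
    queue = queues.Queue({queue.id})
    queue.put(b'spam', syncobj=True)     # shareable objects only
    queue.put([1, 2, 3], syncobj=False)  # arbitrary objects, received as a copy
    """))
print(queue.get())   # b'spam'
print(queue.get())   # [1, 2, 3], equal to the original but not the same object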

View File

@@ -0,0 +1,68 @@
import threading
import unittest
from test import support
from test.support import import_helper
from test.support import threading_helper
# Raise SkipTest if subinterpreters not supported.
import_helper.import_module('_interpreters')
from test.support import interpreters
from .utils import TestBase
class StressTests(TestBase):
# In these tests we generally want a lot of interpreters,
# but not so many that any test takes too long.
@support.requires_resource('cpu')
def test_create_many_sequential(self):
alive = []
for _ in range(100):
interp = interpreters.create()
alive.append(interp)
@support.requires_resource('cpu')
@threading_helper.requires_working_threading()
def test_create_many_threaded(self):
alive = []
def task():
interp = interpreters.create()
alive.append(interp)
threads = (threading.Thread(target=task) for _ in range(200))
with threading_helper.start_threads(threads):
pass
@support.requires_resource('cpu')
@threading_helper.requires_working_threading()
def test_many_threads_running_interp_in_other_interp(self):
interp = interpreters.create()
script = f"""if True:
import _interpreters
_interpreters.run_string({interp.id}, '1')
"""
def run():
interp = interpreters.create()
alreadyrunning = (f'{interpreters.InterpreterError}: '
'interpreter already running')
success = False
while not success:
try:
interp.exec(script)
except interpreters.ExecutionFailed as exc:
if exc.excinfo.msg != 'interpreter already running':
raise # re-raise
assert exc.excinfo.type.__name__ == 'InterpreterError'
else:
success = True
threads = (threading.Thread(target=run) for _ in range(200))
with threading_helper.start_threads(threads):
pass
if __name__ == '__main__':
# Test needs to be a package, so we can do relative imports.
unittest.main()
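The threaded stress test above has many threads, each with its own subinterpreter, all trying to run code in one shared target interpreter via the low-level _interpreters module. A condensed sketch of that pattern, assuming the same internal modules; unlike the real test, this sketch does not retry when the target is busy:

import threading
from textwrap import dedent
from test.support import interpreters

target = interpreters.create()
script = dedent(f"""
    import _interpreters
    _interpreters.run_string({target.id}, '1')
    """)

def worker():
    interp = interpreters.create()
    try:
        interp.exec(script)
    except interpreters.ExecutionFailed:
        pass  # target interpreter was busy; the real test retries until it succeeds

threads = [threading.Thread(target=worker) for _ in range(8)]
for t in threads:
    t.start()
for t in threads:
    t.join()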

View File

@@ -0,0 +1,690 @@
from collections import namedtuple
import contextlib
import json
import io
import logging
import os
import os.path
import pickle
import queue
#import select
import subprocess
import sys
import tempfile
from textwrap import dedent, indent
import threading
import types
import unittest
import warnings
from test import support
# We would use test.support.import_helper.import_module(),
# but the indirect import of test.support.os_helper causes refleaks.
try:
import _interpreters
except ImportError as exc:
raise unittest.SkipTest(str(exc))
from test.support import interpreters
try:
import _testinternalcapi
import _testcapi
except ImportError:
_testinternalcapi = None
_testcapi = None
def requires_test_modules(func):
return unittest.skipIf(_testinternalcapi is None, "test requires _testinternalcapi module")(func)
def _dump_script(text):
lines = text.splitlines()
print()
print('-' * 20)
for i, line in enumerate(lines, 1):
print(f' {i:>{len(str(len(lines)))}} {line}')
print('-' * 20)
def _close_file(file):
try:
if hasattr(file, 'close'):
file.close()
else:
os.close(file)
except OSError as exc:
if exc.errno != 9:
raise # re-raise
# It was closed already.
def pack_exception(exc=None):
captured = _interpreters.capture_exception(exc)
data = dict(captured.__dict__)
data['type'] = dict(captured.type.__dict__)
return json.dumps(data)
def unpack_exception(packed):
try:
data = json.loads(packed)
except json.decoder.JSONDecodeError as e:
logging.getLogger(__name__).warning('incomplete exception data', exc_info=e)
print(packed if isinstance(packed, str) else packed.decode('utf-8'))
return None
exc = types.SimpleNamespace(**data)
exc.type = types.SimpleNamespace(**exc.type)
return exc
class CapturingResults:
STDIO = dedent("""\
with open({w_pipe}, 'wb', buffering=0) as _spipe_{stream}:
_captured_std{stream} = io.StringIO()
with contextlib.redirect_std{stream}(_captured_std{stream}):
#########################
# begin wrapped script
{indented}
# end wrapped script
#########################
text = _captured_std{stream}.getvalue()
_spipe_{stream}.write(text.encode('utf-8'))
""")[:-1]
EXC = dedent("""\
with open({w_pipe}, 'wb', buffering=0) as _spipe_exc:
try:
#########################
# begin wrapped script
{indented}
# end wrapped script
#########################
except Exception as exc:
text = _interp_utils.pack_exception(exc)
_spipe_exc.write(text.encode('utf-8'))
""")[:-1]
@classmethod
def wrap_script(cls, script, *, stdout=True, stderr=False, exc=False):
script = dedent(script).strip(os.linesep)
imports = [
f'import {__name__} as _interp_utils',
]
wrapped = script
# Handle exc.
if exc:
exc = os.pipe()
r_exc, w_exc = exc
indented = wrapped.replace('\n', '\n ')
wrapped = cls.EXC.format(
w_pipe=w_exc,
indented=indented,
)
else:
exc = None
# Handle stdout.
if stdout:
imports.extend([
'import contextlib, io',
])
stdout = os.pipe()
r_out, w_out = stdout
indented = wrapped.replace('\n', '\n ')
wrapped = cls.STDIO.format(
w_pipe=w_out,
indented=indented,
stream='out',
)
else:
stdout = None
# Handle stderr.
if stderr == 'stdout':
stderr = None
elif stderr:
if not stdout:
imports.extend([
'import contextlib, io',
])
stderr = os.pipe()
r_err, w_err = stderr
indented = wrapped.replace('\n', '\n ')
wrapped = cls.STDIO.format(
w_pipe=w_err,
indented=indented,
stream='err',
)
else:
stderr = None
if wrapped == script:
raise NotImplementedError
else:
for line in imports:
wrapped = f'{line}{os.linesep}{wrapped}'
results = cls(stdout, stderr, exc)
return wrapped, results
def __init__(self, out, err, exc):
self._rf_out = None
self._rf_err = None
self._rf_exc = None
self._w_out = None
self._w_err = None
self._w_exc = None
if out is not None:
r_out, w_out = out
self._rf_out = open(r_out, 'rb', buffering=0)
self._w_out = w_out
if err is not None:
r_err, w_err = err
self._rf_err = open(r_err, 'rb', buffering=0)
self._w_err = w_err
if exc is not None:
r_exc, w_exc = exc
self._rf_exc = open(r_exc, 'rb', buffering=0)
self._w_exc = w_exc
self._buf_out = b''
self._buf_err = b''
self._buf_exc = b''
self._exc = None
self._closed = False
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
@property
def closed(self):
return self._closed
def close(self):
if self._closed:
return
self._closed = True
if self._w_out is not None:
_close_file(self._w_out)
self._w_out = None
if self._w_err is not None:
_close_file(self._w_err)
self._w_err = None
if self._w_exc is not None:
_close_file(self._w_exc)
self._w_exc = None
self._capture()
if self._rf_out is not None:
_close_file(self._rf_out)
self._rf_out = None
if self._rf_err is not None:
_close_file(self._rf_err)
self._rf_err = None
if self._rf_exc is not None:
_close_file(self._rf_exc)
self._rf_exc = None
def _capture(self):
# Ideally this is called only after the script finishes
# (and thus has closed the write end of the pipe).
if self._rf_out is not None:
chunk = self._rf_out.read(100)
while chunk:
self._buf_out += chunk
chunk = self._rf_out.read(100)
if self._rf_err is not None:
chunk = self._rf_err.read(100)
while chunk:
self._buf_err += chunk
chunk = self._rf_err.read(100)
if self._rf_exc is not None:
chunk = self._rf_exc.read(100)
while chunk:
self._buf_exc += chunk
chunk = self._rf_exc.read(100)
def _unpack_stdout(self):
return self._buf_out.decode('utf-8')
def _unpack_stderr(self):
return self._buf_err.decode('utf-8')
def _unpack_exc(self):
if self._exc is not None:
return self._exc
if not self._buf_exc:
return None
self._exc = unpack_exception(self._buf_exc)
return self._exc
def stdout(self):
if self.closed:
return self.final().stdout
self._capture()
return self._unpack_stdout()
def stderr(self):
if self.closed:
return self.final().stderr
self._capture()
return self._unpack_stderr()
def exc(self):
if self.closed:
return self.final().exc
self._capture()
return self._unpack_exc()
def final(self, *, force=False):
try:
return self._final
except AttributeError:
if not self._closed:
if not force:
raise Exception('no final results available yet')
else:
return CapturedResults.Proxy(self)
self._final = CapturedResults(
self._unpack_stdout(),
self._unpack_stderr(),
self._unpack_exc(),
)
return self._final
class CapturedResults(namedtuple('CapturedResults', 'stdout stderr exc')):
class Proxy:
def __init__(self, capturing):
self._capturing = capturing
def _finish(self):
if self._capturing is None:
return
self._final = self._capturing.final()
self._capturing = None
def __iter__(self):
self._finish()
yield from self._final
def __len__(self):
self._finish()
return len(self._final)
def __getattr__(self, name):
self._finish()
if name.startswith('_'):
raise AttributeError(name)
return getattr(self._final, name)
def raise_if_failed(self):
if self.exc is not None:
raise interpreters.ExecutionFailed(self.exc)
def _captured_script(script, *, stdout=True, stderr=False, exc=False):
return CapturingResults.wrap_script(
script,
stdout=stdout,
stderr=stderr,
exc=exc,
)
def clean_up_interpreters():
for interp in interpreters.list_all():
if interp.id == 0: # main
continue
try:
interp.close()
except _interpreters.InterpreterError:
pass # already destroyed
def _run_output(interp, request, init=None):
script, results = _captured_script(request)
with results:
if init:
interp.prepare_main(init)
interp.exec(script)
return results.stdout()
@contextlib.contextmanager
def _running(interp):
r, w = os.pipe()
def run():
interp.exec(dedent(f"""
# wait for "signal"
with open({r}) as rpipe:
rpipe.read()
"""))
t = threading.Thread(target=run)
t.start()
yield
with open(w, 'w') as spipe:
spipe.write('done')
t.join()
class TestBase(unittest.TestCase):
def tearDown(self):
clean_up_interpreters()
def pipe(self):
def ensure_closed(fd):
try:
os.close(fd)
except OSError:
pass
r, w = os.pipe()
self.addCleanup(lambda: ensure_closed(r))
self.addCleanup(lambda: ensure_closed(w))
return r, w
def temp_dir(self):
tempdir = tempfile.mkdtemp()
tempdir = os.path.realpath(tempdir)
from test.support import os_helper
self.addCleanup(lambda: os_helper.rmtree(tempdir))
return tempdir
@contextlib.contextmanager
def captured_thread_exception(self):
ctx = types.SimpleNamespace(caught=None)
def excepthook(args):
ctx.caught = args
orig_excepthook = threading.excepthook
threading.excepthook = excepthook
try:
yield ctx
finally:
threading.excepthook = orig_excepthook
def make_script(self, filename, dirname=None, text=None):
if text:
text = dedent(text)
if dirname is None:
dirname = self.temp_dir()
filename = os.path.join(dirname, filename)
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'w', encoding='utf-8') as outfile:
outfile.write(text or '')
return filename
def make_module(self, name, pathentry=None, text=None):
if text:
text = dedent(text)
if pathentry is None:
pathentry = self.temp_dir()
else:
os.makedirs(pathentry, exist_ok=True)
*subnames, basename = name.split('.')
dirname = pathentry
for subname in subnames:
dirname = os.path.join(dirname, subname)
if os.path.isdir(dirname):
pass
elif os.path.exists(dirname):
raise Exception(dirname)
else:
os.mkdir(dirname)
initfile = os.path.join(dirname, '__init__.py')
if not os.path.exists(initfile):
with open(initfile, 'w'):
pass
filename = os.path.join(dirname, basename + '.py')
with open(filename, 'w', encoding='utf-8') as outfile:
outfile.write(text or '')
return filename
@support.requires_subprocess()
def run_python(self, *argv):
proc = subprocess.run(
[sys.executable, *argv],
capture_output=True,
text=True,
)
return proc.returncode, proc.stdout, proc.stderr
def assert_python_ok(self, *argv):
exitcode, stdout, stderr = self.run_python(*argv)
self.assertNotEqual(exitcode, 1)
return stdout, stderr
def assert_python_failure(self, *argv):
exitcode, stdout, stderr = self.run_python(*argv)
self.assertNotEqual(exitcode, 0)
return stdout, stderr
def assert_ns_equal(self, ns1, ns2, msg=None):
# This is mostly copied from TestCase.assertDictEqual.
self.assertEqual(type(ns1), type(ns2))
if ns1 == ns2:
return
import difflib
import pprint
from unittest.util import _common_shorten_repr
standardMsg = '%s != %s' % _common_shorten_repr(ns1, ns2)
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(vars(ns1)).splitlines(),
pprint.pformat(vars(ns2)).splitlines())))
diff = f'namespace({diff})'
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def _run_string(self, interp, script):
wrapped, results = _captured_script(script, exc=False)
#_dump_script(wrapped)
with results:
if isinstance(interp, interpreters.Interpreter):
interp.exec(script)
else:
err = _interpreters.run_string(interp, wrapped)
if err is not None:
return None, err
return results.stdout(), None
def run_and_capture(self, interp, script):
text, err = self._run_string(interp, script)
if err is not None:
raise interpreters.ExecutionFailed(err)
else:
return text
def interp_exists(self, interpid):
try:
_interpreters.whence(interpid)
except _interpreters.InterpreterNotFoundError:
return False
else:
return True
@requires_test_modules
@contextlib.contextmanager
def interpreter_from_capi(self, config=None, whence=None):
if config is False:
if whence is None:
whence = _interpreters.WHENCE_LEGACY_CAPI
else:
assert whence in (_interpreters.WHENCE_LEGACY_CAPI,
_interpreters.WHENCE_UNKNOWN), repr(whence)
config = None
elif config is True:
config = _interpreters.new_config('default')
elif config is None:
if whence not in (
_interpreters.WHENCE_LEGACY_CAPI,
_interpreters.WHENCE_UNKNOWN,
):
config = _interpreters.new_config('legacy')
elif isinstance(config, str):
config = _interpreters.new_config(config)
if whence is None:
whence = _interpreters.WHENCE_XI
interpid = _testinternalcapi.create_interpreter(config, whence=whence)
try:
yield interpid
finally:
try:
_testinternalcapi.destroy_interpreter(interpid)
except _interpreters.InterpreterNotFoundError:
pass
@contextlib.contextmanager
def interpreter_obj_from_capi(self, config='legacy'):
with self.interpreter_from_capi(config) as interpid:
interp = interpreters.Interpreter(
interpid,
_whence=_interpreters.WHENCE_CAPI,
_ownsref=False,
)
yield interp, interpid
@contextlib.contextmanager
def capturing(self, script):
wrapped, capturing = _captured_script(script, stdout=True, exc=True)
#_dump_script(wrapped)
with capturing:
yield wrapped, capturing.final(force=True)
@requires_test_modules
def run_from_capi(self, interpid, script, *, main=False):
with self.capturing(script) as (wrapped, results):
rc = _testinternalcapi.exec_interpreter(interpid, wrapped, main=main)
assert rc == 0, rc
results.raise_if_failed()
return results.stdout
@contextlib.contextmanager
def _running(self, run_interp, exec_interp):
token = b'\0'
r_in, w_in = self.pipe()
r_out, w_out = self.pipe()
def close():
_close_file(r_in)
_close_file(w_in)
_close_file(r_out)
_close_file(w_out)
# Start running (and wait).
script = dedent(f"""
import os
try:
# handshake
token = os.read({r_in}, 1)
os.write({w_out}, token)
# Wait for the "done" message.
os.read({r_in}, 1)
except BrokenPipeError:
pass
except OSError as exc:
if exc.errno != 9:
raise # re-raise
# It was closed already.
""")
failed = None
def run():
nonlocal failed
try:
run_interp(script)
except Exception as exc:
failed = exc
close()
t = threading.Thread(target=run)
t.start()
# handshake
try:
os.write(w_in, token)
token2 = os.read(r_out, 1)
assert token2 == token, (token2, token)
except OSError:
t.join()
if failed is not None:
raise failed
# CM __exit__()
try:
try:
yield
finally:
# Send "done".
os.write(w_in, b'\0')
finally:
close()
t.join()
if failed is not None:
raise failed
@contextlib.contextmanager
def running(self, interp):
if isinstance(interp, int):
interpid = interp
def exec_interp(script):
exc = _interpreters.exec(interpid, script)
assert exc is None, exc
run_interp = exec_interp
else:
def run_interp(script):
text = self.run_and_capture(interp, script)
assert text == '', repr(text)
def exec_interp(script):
interp.exec(script)
with self._running(run_interp, exec_interp):
yield
@requires_test_modules
@contextlib.contextmanager
def running_from_capi(self, interpid, *, main=False):
def run_interp(script):
text = self.run_from_capi(interpid, script, main=main)
assert text == '', repr(text)
def exec_interp(script):
rc = _testinternalcapi.exec_interpreter(interpid, script)
assert rc == 0, rc
with self._running(run_interp, exec_interp):
yield
@requires_test_modules
def run_temp_from_capi(self, script, config='legacy'):
if config is False:
# Force using Py_NewInterpreter().
run_in_interp = (lambda s, c: _testcapi.run_in_subinterp(s))
config = None
else:
run_in_interp = _testinternalcapi.run_in_subinterp_with_config
if config is True:
config = 'default'
if isinstance(config, str):
config = _interpreters.new_config(config)
with self.capturing(script) as (wrapped, results):
rc = run_in_interp(wrapped, config)
assert rc == 0, rc
results.raise_if_failed()
return results.stdout
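The helpers in this utils module wrap a script so that its stdout (and optionally an exception) is written to a pipe and read back by the test process; _run_output is the entry point the other test files use. A minimal usage sketch, assuming the package path matches upstream CPython (test.test_interpreters.utils):

# Run a script in a subinterpreter and read its captured stdout in the parent.
from textwrap import dedent
from test.support import interpreters
from test.test_interpreters.utils import _run_output  # assumed package path

interp = interpreters.create()
out = _run_output(interp, dedent("""
    print('captured in the parent test process')
    """))
assert out.strip() == 'captured in the parent test process'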