Mirror of https://github.com/bunny-lab-io/Borealis.git (synced 2025-10-27 11:01:57 -06:00)
Removed the Requirement to Install Python and NodeJS (Now Bundled with Borealis)
47  Dependencies/Python/Lib/asyncio/__init__.py  vendored  Normal file
@@ -0,0 +1,47 @@
"""The asyncio package, tracking PEP 3156."""

# flake8: noqa

import sys

# This relies on each of the submodules having an __all__ variable.
from .base_events import *
from .coroutines import *
from .events import *
from .exceptions import *
from .futures import *
from .locks import *
from .protocols import *
from .runners import *
from .queues import *
from .streams import *
from .subprocess import *
from .tasks import *
from .taskgroups import *
from .timeouts import *
from .threads import *
from .transports import *

__all__ = (base_events.__all__ +
           coroutines.__all__ +
           events.__all__ +
           exceptions.__all__ +
           futures.__all__ +
           locks.__all__ +
           protocols.__all__ +
           runners.__all__ +
           queues.__all__ +
           streams.__all__ +
           subprocess.__all__ +
           tasks.__all__ +
           taskgroups.__all__ +
           threads.__all__ +
           timeouts.__all__ +
           transports.__all__)

if sys.platform == 'win32':  # pragma: no cover
    from .windows_events import *
    __all__ += windows_events.__all__
else:
    from .unix_events import *  # pragma: no cover
    __all__ += unix_events.__all__
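The star-imports above exist so that each submodule's __all__ rolls up into the package namespace; a minimal sketch of the observable effect (the exact name count varies by platform and Python version):

import asyncio

# run() is defined in runners.py and Event in locks.py, but both are
# re-exported at package level through the aggregated __all__.
print(asyncio.run.__module__)    # 'asyncio.runners'
print(asyncio.Event.__module__)  # 'asyncio.locks'
print(len(asyncio.__all__))      # platform-dependent total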
204  Dependencies/Python/Lib/asyncio/__main__.py  vendored  Normal file
@@ -0,0 +1,204 @@
import ast
import asyncio
import concurrent.futures
import contextvars
import inspect
import os
import site
import sys
import threading
import types
import warnings

from _colorize import can_colorize, ANSIColors  # type: ignore[import-not-found]
from _pyrepl.console import InteractiveColoredConsole

from . import futures


class AsyncIOInteractiveConsole(InteractiveColoredConsole):

    def __init__(self, locals, loop):
        super().__init__(locals, filename="<stdin>")
        self.compile.compiler.flags |= ast.PyCF_ALLOW_TOP_LEVEL_AWAIT

        self.loop = loop
        self.context = contextvars.copy_context()

    def runcode(self, code):
        global return_code
        future = concurrent.futures.Future()

        def callback():
            global return_code
            global repl_future
            global keyboard_interrupted

            repl_future = None
            keyboard_interrupted = False

            func = types.FunctionType(code, self.locals)
            try:
                coro = func()
            except SystemExit as se:
                return_code = se.code
                self.loop.stop()
                return
            except KeyboardInterrupt as ex:
                keyboard_interrupted = True
                future.set_exception(ex)
                return
            except BaseException as ex:
                future.set_exception(ex)
                return

            if not inspect.iscoroutine(coro):
                future.set_result(coro)
                return

            try:
                repl_future = self.loop.create_task(coro, context=self.context)
                futures._chain_future(repl_future, future)
            except BaseException as exc:
                future.set_exception(exc)

        loop.call_soon_threadsafe(callback, context=self.context)

        try:
            return future.result()
        except SystemExit as se:
            return_code = se.code
            self.loop.stop()
            return
        except BaseException:
            if keyboard_interrupted:
                self.write("\nKeyboardInterrupt\n")
            else:
                self.showtraceback()
            return self.STATEMENT_FAILED

class REPLThread(threading.Thread):

    def run(self):
        global return_code

        try:
            banner = (
                f'asyncio REPL {sys.version} on {sys.platform}\n'
                f'Use "await" directly instead of "asyncio.run()".\n'
                f'Type "help", "copyright", "credits" or "license" '
                f'for more information.\n'
            )

            console.write(banner)

            if startup_path := os.getenv("PYTHONSTARTUP"):
                sys.audit("cpython.run_startup", startup_path)

                import tokenize
                with tokenize.open(startup_path) as f:
                    startup_code = compile(f.read(), startup_path, "exec")
                    exec(startup_code, console.locals)

            ps1 = getattr(sys, "ps1", ">>> ")
            if can_colorize() and CAN_USE_PYREPL:
                ps1 = f"{ANSIColors.BOLD_MAGENTA}{ps1}{ANSIColors.RESET}"
            console.write(f"{ps1}import asyncio\n")

            if CAN_USE_PYREPL:
                from _pyrepl.simple_interact import (
                    run_multiline_interactive_console,
                )
                try:
                    run_multiline_interactive_console(console)
                except SystemExit:
                    # expected via the `exit` and `quit` commands
                    pass
                except BaseException:
                    # unexpected issue
                    console.showtraceback()
                    console.write("Internal error, ")
                    return_code = 1
            else:
                console.interact(banner="", exitmsg="")
        finally:
            warnings.filterwarnings(
                'ignore',
                message=r'^coroutine .* was never awaited$',
                category=RuntimeWarning)

            loop.call_soon_threadsafe(loop.stop)

    def interrupt(self) -> None:
        if not CAN_USE_PYREPL:
            return

        from _pyrepl.simple_interact import _get_reader
        r = _get_reader()
        if r.threading_hook is not None:
            r.threading_hook.add("")  # type: ignore


if __name__ == '__main__':
    sys.audit("cpython.run_stdin")

    if os.getenv('PYTHON_BASIC_REPL'):
        CAN_USE_PYREPL = False
    else:
        from _pyrepl.main import CAN_USE_PYREPL

    return_code = 0
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    repl_locals = {'asyncio': asyncio}
    for key in {'__name__', '__package__',
                '__loader__', '__spec__',
                '__builtins__', '__file__'}:
        repl_locals[key] = locals()[key]

    console = AsyncIOInteractiveConsole(repl_locals, loop)

    repl_future = None
    keyboard_interrupted = False

    try:
        import readline  # NoQA
    except ImportError:
        readline = None

    interactive_hook = getattr(sys, "__interactivehook__", None)

    if interactive_hook is not None:
        sys.audit("cpython.run_interactivehook", interactive_hook)
        interactive_hook()

    if interactive_hook is site.register_readline:
        # Fix the completer function to use the interactive console locals
        try:
            import rlcompleter
        except:
            pass
        else:
            if readline is not None:
                completer = rlcompleter.Completer(console.locals)
                readline.set_completer(completer.complete)

    repl_thread = REPLThread(name="Interactive thread")
    repl_thread.daemon = True
    repl_thread.start()

    while True:
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            keyboard_interrupted = True
            if repl_future and not repl_future.done():
                repl_future.cancel()
                repl_thread.interrupt()
            continue
        else:
            break

    console.write('exiting asyncio REPL...\n')
    sys.exit(return_code)
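Because __main__.py ships in the bundle, the vendored interpreter can start the asyncio REPL directly. A typical session, assuming the bundled interpreter is the one invoked (output abridged):

$ python -m asyncio
asyncio REPL 3.13.x on win32
Use "await" directly instead of "asyncio.run()".
>>> import asyncio
>>> await asyncio.sleep(0.1)  # top-level await, enabled by PyCF_ALLOW_TOP_LEVEL_AWAIT above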
BIN  Dependencies/Python/Lib/asyncio/__pycache__/__init__.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/base_events.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/base_futures.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/base_subprocess.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/base_tasks.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/constants.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/coroutines.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/events.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/exceptions.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/format_helpers.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/futures.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/locks.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/log.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/mixins.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/proactor_events.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/protocols.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/queues.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/runners.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/selector_events.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/sslproto.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/staggered.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/streams.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/subprocess.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/taskgroups.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/tasks.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/threads.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/timeouts.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/transports.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/trsock.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/windows_events.cpython-313.pyc  vendored  Normal file  Binary file not shown.
BIN  Dependencies/Python/Lib/asyncio/__pycache__/windows_utils.cpython-313.pyc  vendored  Normal file  Binary file not shown.
2059  Dependencies/Python/Lib/asyncio/base_events.py  vendored  Normal file
File diff suppressed because it is too large
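Although its diff is suppressed, base_events.py supplies BaseEventLoop, the concrete scheduling core behind the abstract interface declared in events.py below. A minimal sketch of the primitives it implements:

import asyncio

async def main():
    loop = asyncio.get_running_loop()
    # call_soon() and call_later() are implemented by BaseEventLoop
    # and return Handle/TimerHandle objects (defined in events.py).
    loop.call_soon(print, "scheduled callback")
    await asyncio.sleep(0)  # yield once so the callback runs

asyncio.run(main())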
67  Dependencies/Python/Lib/asyncio/base_futures.py  vendored  Normal file
@@ -0,0 +1,67 @@
__all__ = ()

import reprlib

from . import format_helpers

# States for Future.
_PENDING = 'PENDING'
_CANCELLED = 'CANCELLED'
_FINISHED = 'FINISHED'


def isfuture(obj):
    """Check for a Future.

    This returns True when obj is a Future instance or is advertising
    itself as duck-type compatible by setting _asyncio_future_blocking.
    See comment in Future for more details.
    """
    return (hasattr(obj.__class__, '_asyncio_future_blocking') and
            obj._asyncio_future_blocking is not None)


def _format_callbacks(cb):
    """helper function for Future.__repr__"""
    size = len(cb)
    if not size:
        cb = ''

    def format_cb(callback):
        return format_helpers._format_callback_source(callback, ())

    if size == 1:
        cb = format_cb(cb[0][0])
    elif size == 2:
        cb = '{}, {}'.format(format_cb(cb[0][0]), format_cb(cb[1][0]))
    elif size > 2:
        cb = '{}, <{} more>, {}'.format(format_cb(cb[0][0]),
                                        size - 2,
                                        format_cb(cb[-1][0]))
    return f'cb=[{cb}]'


def _future_repr_info(future):
    # (Future) -> str
    """helper function for Future.__repr__"""
    info = [future._state.lower()]
    if future._state == _FINISHED:
        if future._exception is not None:
            info.append(f'exception={future._exception!r}')
        else:
            # use reprlib to limit the length of the output, especially
            # for very long strings
            result = reprlib.repr(future._result)
            info.append(f'result={result}')
    if future._callbacks:
        info.append(_format_callbacks(future._callbacks))
    if future._source_traceback:
        frame = future._source_traceback[-1]
        info.append(f'created at {frame[0]}:{frame[1]}')
    return info


@reprlib.recursive_repr()
def _future_repr(future):
    info = ' '.join(_future_repr_info(future))
    return f'<{future.__class__.__name__} {info}>'
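Note that isfuture() is duck-typed rather than isinstance-based, so third-party futures qualify by setting the marker attribute; a sketch (ThirdPartyFuture is a hypothetical class):

import asyncio

class ThirdPartyFuture:
    # Advertises Future compatibility via the marker checked above.
    _asyncio_future_blocking = False

print(asyncio.isfuture(ThirdPartyFuture()))  # True: duck-typed
print(asyncio.isfuture(42))                  # False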
303  Dependencies/Python/Lib/asyncio/base_subprocess.py  vendored  Normal file
@@ -0,0 +1,303 @@
import collections
import subprocess
import warnings
import os
import signal
import sys

from . import protocols
from . import transports
from .log import logger


class BaseSubprocessTransport(transports.SubprocessTransport):

    def __init__(self, loop, protocol, args, shell,
                 stdin, stdout, stderr, bufsize,
                 waiter=None, extra=None, **kwargs):
        super().__init__(extra)
        self._closed = False
        self._protocol = protocol
        self._loop = loop
        self._proc = None
        self._pid = None
        self._returncode = None
        self._exit_waiters = []
        self._pending_calls = collections.deque()
        self._pipes = {}
        self._finished = False

        if stdin == subprocess.PIPE:
            self._pipes[0] = None
        if stdout == subprocess.PIPE:
            self._pipes[1] = None
        if stderr == subprocess.PIPE:
            self._pipes[2] = None

        # Create the child process: set the _proc attribute
        try:
            self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
                        stderr=stderr, bufsize=bufsize, **kwargs)
        except:
            self.close()
            raise

        self._pid = self._proc.pid
        self._extra['subprocess'] = self._proc

        if self._loop.get_debug():
            if isinstance(args, (bytes, str)):
                program = args
            else:
                program = args[0]
            logger.debug('process %r created: pid %s',
                         program, self._pid)

        self._loop.create_task(self._connect_pipes(waiter))

    def __repr__(self):
        info = [self.__class__.__name__]
        if self._closed:
            info.append('closed')
        if self._pid is not None:
            info.append(f'pid={self._pid}')
        if self._returncode is not None:
            info.append(f'returncode={self._returncode}')
        elif self._pid is not None:
            info.append('running')
        else:
            info.append('not started')

        stdin = self._pipes.get(0)
        if stdin is not None:
            info.append(f'stdin={stdin.pipe}')

        stdout = self._pipes.get(1)
        stderr = self._pipes.get(2)
        if stdout is not None and stderr is stdout:
            info.append(f'stdout=stderr={stdout.pipe}')
        else:
            if stdout is not None:
                info.append(f'stdout={stdout.pipe}')
            if stderr is not None:
                info.append(f'stderr={stderr.pipe}')

        return '<{}>'.format(' '.join(info))

    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
        raise NotImplementedError

    def set_protocol(self, protocol):
        self._protocol = protocol

    def get_protocol(self):
        return self._protocol

    def is_closing(self):
        return self._closed

    def close(self):
        if self._closed:
            return
        self._closed = True

        for proto in self._pipes.values():
            if proto is None:
                continue
            proto.pipe.close()

        if (self._proc is not None and
                # has the child process finished?
                self._returncode is None and
                # the child process has finished, but the
                # transport hasn't been notified yet?
                self._proc.poll() is None):

            if self._loop.get_debug():
                logger.warning('Close running child process: kill %r', self)

            try:
                self._proc.kill()
            except (ProcessLookupError, PermissionError):
                # the process may have already exited or may be running setuid
                pass

        # Don't clear the _proc reference yet: _post_init() may still run

    def __del__(self, _warn=warnings.warn):
        if not self._closed:
            _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
            self.close()

    def get_pid(self):
        return self._pid

    def get_returncode(self):
        return self._returncode

    def get_pipe_transport(self, fd):
        if fd in self._pipes:
            return self._pipes[fd].pipe
        else:
            return None

    def _check_proc(self):
        if self._proc is None:
            raise ProcessLookupError()

    if sys.platform == 'win32':
        def send_signal(self, signal):
            self._check_proc()
            self._proc.send_signal(signal)

        def terminate(self):
            self._check_proc()
            self._proc.terminate()

        def kill(self):
            self._check_proc()
            self._proc.kill()
    else:
        def send_signal(self, signal):
            self._check_proc()
            try:
                os.kill(self._proc.pid, signal)
            except ProcessLookupError:
                pass

        def terminate(self):
            self.send_signal(signal.SIGTERM)

        def kill(self):
            self.send_signal(signal.SIGKILL)

    async def _connect_pipes(self, waiter):
        try:
            proc = self._proc
            loop = self._loop

            if proc.stdin is not None:
                _, pipe = await loop.connect_write_pipe(
                    lambda: WriteSubprocessPipeProto(self, 0),
                    proc.stdin)
                self._pipes[0] = pipe

            if proc.stdout is not None:
                _, pipe = await loop.connect_read_pipe(
                    lambda: ReadSubprocessPipeProto(self, 1),
                    proc.stdout)
                self._pipes[1] = pipe

            if proc.stderr is not None:
                _, pipe = await loop.connect_read_pipe(
                    lambda: ReadSubprocessPipeProto(self, 2),
                    proc.stderr)
                self._pipes[2] = pipe

            assert self._pending_calls is not None

            loop.call_soon(self._protocol.connection_made, self)
            for callback, data in self._pending_calls:
                loop.call_soon(callback, *data)
            self._pending_calls = None
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            if waiter is not None and not waiter.cancelled():
                waiter.set_exception(exc)
        else:
            if waiter is not None and not waiter.cancelled():
                waiter.set_result(None)

    def _call(self, cb, *data):
        if self._pending_calls is not None:
            self._pending_calls.append((cb, data))
        else:
            self._loop.call_soon(cb, *data)

    def _pipe_connection_lost(self, fd, exc):
        self._call(self._protocol.pipe_connection_lost, fd, exc)
        self._try_finish()

    def _pipe_data_received(self, fd, data):
        self._call(self._protocol.pipe_data_received, fd, data)

    def _process_exited(self, returncode):
        assert returncode is not None, returncode
        assert self._returncode is None, self._returncode
        if self._loop.get_debug():
            logger.info('%r exited with return code %r', self, returncode)
        self._returncode = returncode
        if self._proc.returncode is None:
            # asyncio uses a child watcher: copy the status into the Popen
            # object. On Python 3.6, it is required to avoid a ResourceWarning.
            self._proc.returncode = returncode
        self._call(self._protocol.process_exited)

        self._try_finish()

    async def _wait(self):
        """Wait until the process exits and return the process return code.

        This method is a coroutine."""
        if self._returncode is not None:
            return self._returncode

        waiter = self._loop.create_future()
        self._exit_waiters.append(waiter)
        return await waiter

    def _try_finish(self):
        assert not self._finished
        if self._returncode is None:
            return
        if all(p is not None and p.disconnected
               for p in self._pipes.values()):
            self._finished = True
            self._call(self._call_connection_lost, None)

    def _call_connection_lost(self, exc):
        try:
            self._protocol.connection_lost(exc)
        finally:
            # wake up futures waiting for wait()
            for waiter in self._exit_waiters:
                if not waiter.cancelled():
                    waiter.set_result(self._returncode)
            self._exit_waiters = None
            self._loop = None
            self._proc = None
            self._protocol = None


class WriteSubprocessPipeProto(protocols.BaseProtocol):

    def __init__(self, proc, fd):
        self.proc = proc
        self.fd = fd
        self.pipe = None
        self.disconnected = False

    def connection_made(self, transport):
        self.pipe = transport

    def __repr__(self):
        return f'<{self.__class__.__name__} fd={self.fd} pipe={self.pipe!r}>'

    def connection_lost(self, exc):
        self.disconnected = True
        self.proc._pipe_connection_lost(self.fd, exc)
        self.proc = None

    def pause_writing(self):
        self.proc._protocol.pause_writing()

    def resume_writing(self):
        self.proc._protocol.resume_writing()


class ReadSubprocessPipeProto(WriteSubprocessPipeProto,
                              protocols.Protocol):

    def data_received(self, data):
        self.proc._pipe_data_received(self.fd, data)
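Applications drive this transport indirectly through the high-level subprocess API; a minimal sketch (the child command is an arbitrary example):

import asyncio

async def main():
    # create_subprocess_exec builds a subprocess transport like the one
    # above and wires the child's stdout to a read pipe protocol.
    proc = await asyncio.create_subprocess_exec(
        "python", "-c", "print('hello')",
        stdout=asyncio.subprocess.PIPE)
    out, _ = await proc.communicate()
    print(out.decode().strip(), proc.returncode)

asyncio.run(main())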
94  Dependencies/Python/Lib/asyncio/base_tasks.py  vendored  Normal file
@@ -0,0 +1,94 @@
import linecache
import reprlib
import traceback

from . import base_futures
from . import coroutines


def _task_repr_info(task):
    info = base_futures._future_repr_info(task)

    if task.cancelling() and not task.done():
        # replace status
        info[0] = 'cancelling'

    info.insert(1, 'name=%r' % task.get_name())

    if task._fut_waiter is not None:
        info.insert(2, f'wait_for={task._fut_waiter!r}')

    if task._coro:
        coro = coroutines._format_coroutine(task._coro)
        info.insert(2, f'coro=<{coro}>')

    return info


@reprlib.recursive_repr()
def _task_repr(task):
    info = ' '.join(_task_repr_info(task))
    return f'<{task.__class__.__name__} {info}>'


def _task_get_stack(task, limit):
    frames = []
    if hasattr(task._coro, 'cr_frame'):
        # case 1: 'async def' coroutines
        f = task._coro.cr_frame
    elif hasattr(task._coro, 'gi_frame'):
        # case 2: legacy coroutines
        f = task._coro.gi_frame
    elif hasattr(task._coro, 'ag_frame'):
        # case 3: async generators
        f = task._coro.ag_frame
    else:
        # case 4: unknown objects
        f = None
    if f is not None:
        while f is not None:
            if limit is not None:
                if limit <= 0:
                    break
                limit -= 1
            frames.append(f)
            f = f.f_back
        frames.reverse()
    elif task._exception is not None:
        tb = task._exception.__traceback__
        while tb is not None:
            if limit is not None:
                if limit <= 0:
                    break
                limit -= 1
            frames.append(tb.tb_frame)
            tb = tb.tb_next
    return frames


def _task_print_stack(task, limit, file):
    extracted_list = []
    checked = set()
    for f in task.get_stack(limit=limit):
        lineno = f.f_lineno
        co = f.f_code
        filename = co.co_filename
        name = co.co_name
        if filename not in checked:
            checked.add(filename)
            linecache.checkcache(filename)
        line = linecache.getline(filename, lineno, f.f_globals)
        extracted_list.append((filename, lineno, name, line))

    exc = task._exception
    if not extracted_list:
        print(f'No stack for {task!r}', file=file)
    elif exc is not None:
        print(f'Traceback for {task!r} (most recent call last):', file=file)
    else:
        print(f'Stack for {task!r} (most recent call last):', file=file)

    traceback.print_list(extracted_list, file=file)
    if exc is not None:
        for line in traceback.format_exception_only(exc.__class__, exc):
            print(line, file=file, end='')
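These helpers back the public Task.get_stack() and Task.print_stack() methods; a small sketch of the behavior they produce:

import asyncio

async def worker():
    await asyncio.sleep(10)

async def main():
    task = asyncio.create_task(worker(), name="demo")
    await asyncio.sleep(0)  # let the task start and suspend
    task.print_stack()      # rendered by _task_print_stack()
    print(task)             # repr assembled by _task_repr()
    task.cancel()

asyncio.run(main())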
41  Dependencies/Python/Lib/asyncio/constants.py  vendored  Normal file
@@ -0,0 +1,41 @@
# Contains code from https://github.com/MagicStack/uvloop/tree/v0.16.0
# SPDX-License-Identifier: PSF-2.0 AND (MIT OR Apache-2.0)
# SPDX-FileCopyrightText: Copyright (c) 2015-2021 MagicStack Inc.  http://magic.io

import enum

# After the connection is lost, log warnings after this many write()s.
LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5

# Seconds to wait before retrying accept().
ACCEPT_RETRY_DELAY = 1

# Number of stack entries to capture in debug mode.
# The larger the number, the slower the operation in debug mode
# (see extract_stack() in format_helpers.py).
DEBUG_STACK_DEPTH = 10

# Number of seconds to wait for SSL handshake to complete
# The default timeout matches that of Nginx.
SSL_HANDSHAKE_TIMEOUT = 60.0

# Number of seconds to wait for SSL shutdown to complete
# The default timeout mimics lingering_time
SSL_SHUTDOWN_TIMEOUT = 30.0

# Used in sendfile fallback code.  We use fallback for platforms
# that don't support sendfile, or for TLS connections.
SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 256

FLOW_CONTROL_HIGH_WATER_SSL_READ = 256  # KiB
FLOW_CONTROL_HIGH_WATER_SSL_WRITE = 512  # KiB

# Default timeout for joining the threads in the threadpool
THREAD_JOIN_TIMEOUT = 300

# The enum should be here to break circular dependencies between
# base_events and sslproto
class _SendfileMode(enum.Enum):
    UNSUPPORTED = enum.auto()
    TRY_NATIVE = enum.auto()
    FALLBACK = enum.auto()
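Several of these defaults surface as keyword parameters on the public API; for example, the 60-second SSL_HANDSHAKE_TIMEOUT can be overridden per connection (the host below is a placeholder):

import asyncio
import ssl

async def main():
    ctx = ssl.create_default_context()
    # Overrides the module-level default of 60.0 seconds for this call only.
    reader, writer = await asyncio.open_connection(
        "example.com", 443, ssl=ctx, ssl_handshake_timeout=10.0)
    writer.close()
    await writer.wait_closed()

asyncio.run(main())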
109  Dependencies/Python/Lib/asyncio/coroutines.py  vendored  Normal file
@@ -0,0 +1,109 @@
__all__ = 'iscoroutinefunction', 'iscoroutine'

import collections.abc
import inspect
import os
import sys
import types


def _is_debug_mode():
    # See: https://docs.python.org/3/library/asyncio-dev.html#asyncio-debug-mode.
    return sys.flags.dev_mode or (not sys.flags.ignore_environment and
                                  bool(os.environ.get('PYTHONASYNCIODEBUG')))


# A marker for iscoroutinefunction.
_is_coroutine = object()


def iscoroutinefunction(func):
    """Return True if func is a decorated coroutine function."""
    return (inspect.iscoroutinefunction(func) or
            getattr(func, '_is_coroutine', None) is _is_coroutine)


# Prioritize native coroutine check to speed-up
# asyncio.iscoroutine.
_COROUTINE_TYPES = (types.CoroutineType, collections.abc.Coroutine)
_iscoroutine_typecache = set()


def iscoroutine(obj):
    """Return True if obj is a coroutine object."""
    if type(obj) in _iscoroutine_typecache:
        return True

    if isinstance(obj, _COROUTINE_TYPES):
        # Just in case we don't want to cache more than 100
        # positive types.  That shouldn't ever happen, unless
        # someone is stressing the system on purpose.
        if len(_iscoroutine_typecache) < 100:
            _iscoroutine_typecache.add(type(obj))
        return True
    else:
        return False


def _format_coroutine(coro):
    assert iscoroutine(coro)

    def get_name(coro):
        # Coroutines compiled with Cython sometimes don't have
        # proper __qualname__ or __name__.  While that is a bug
        # in Cython, asyncio shouldn't crash with an AttributeError
        # in its __repr__ functions.
        if hasattr(coro, '__qualname__') and coro.__qualname__:
            coro_name = coro.__qualname__
        elif hasattr(coro, '__name__') and coro.__name__:
            coro_name = coro.__name__
        else:
            # Stop masking Cython bugs, expose them in a friendly way.
            coro_name = f'<{type(coro).__name__} without __name__>'
        return f'{coro_name}()'

    def is_running(coro):
        try:
            return coro.cr_running
        except AttributeError:
            try:
                return coro.gi_running
            except AttributeError:
                return False

    coro_code = None
    if hasattr(coro, 'cr_code') and coro.cr_code:
        coro_code = coro.cr_code
    elif hasattr(coro, 'gi_code') and coro.gi_code:
        coro_code = coro.gi_code

    coro_name = get_name(coro)

    if not coro_code:
        # Built-in types might not have __qualname__ or __name__.
        if is_running(coro):
            return f'{coro_name} running'
        else:
            return coro_name

    coro_frame = None
    if hasattr(coro, 'gi_frame') and coro.gi_frame:
        coro_frame = coro.gi_frame
    elif hasattr(coro, 'cr_frame') and coro.cr_frame:
        coro_frame = coro.cr_frame

    # If Cython's coroutine has a fake code object without proper
    # co_filename -- expose that.
    filename = coro_code.co_filename or '<empty co_filename>'

    lineno = 0

    if coro_frame is not None:
        lineno = coro_frame.f_lineno
        coro_repr = f'{coro_name} running at {filename}:{lineno}'

    else:
        lineno = coro_code.co_firstlineno
        coro_repr = f'{coro_name} done, defined at {filename}:{lineno}'

    return coro_repr
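A short sketch of the two predicates exported by this module:

import asyncio

async def fetch():
    return 42

print(asyncio.iscoroutinefunction(fetch))  # True: a coroutine function
coro = fetch()
print(asyncio.iscoroutine(coro))           # True: coroutine object; its type is now cached
print(asyncio.iscoroutine(fetch))          # False: the function itself is not a coroutine
coro.close()                               # avoid a "never awaited" RuntimeWarning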
882  Dependencies/Python/Lib/asyncio/events.py  vendored  Normal file
@@ -0,0 +1,882 @@
"""Event loop and event loop policy."""

# Contains code from https://github.com/MagicStack/uvloop/tree/v0.16.0
# SPDX-License-Identifier: PSF-2.0 AND (MIT OR Apache-2.0)
# SPDX-FileCopyrightText: Copyright (c) 2015-2021 MagicStack Inc.  http://magic.io

__all__ = (
    'AbstractEventLoopPolicy',
    'AbstractEventLoop', 'AbstractServer',
    'Handle', 'TimerHandle',
    'get_event_loop_policy', 'set_event_loop_policy',
    'get_event_loop', 'set_event_loop', 'new_event_loop',
    'get_child_watcher', 'set_child_watcher',
    '_set_running_loop', 'get_running_loop',
    '_get_running_loop',
)

import contextvars
import os
import signal
import socket
import subprocess
import sys
import threading

from . import format_helpers


class Handle:
    """Object returned by callback registration methods."""

    __slots__ = ('_callback', '_args', '_cancelled', '_loop',
                 '_source_traceback', '_repr', '__weakref__',
                 '_context')

    def __init__(self, callback, args, loop, context=None):
        if context is None:
            context = contextvars.copy_context()
        self._context = context
        self._loop = loop
        self._callback = callback
        self._args = args
        self._cancelled = False
        self._repr = None
        if self._loop.get_debug():
            self._source_traceback = format_helpers.extract_stack(
                sys._getframe(1))
        else:
            self._source_traceback = None

    def _repr_info(self):
        info = [self.__class__.__name__]
        if self._cancelled:
            info.append('cancelled')
        if self._callback is not None:
            info.append(format_helpers._format_callback_source(
                self._callback, self._args,
                debug=self._loop.get_debug()))
        if self._source_traceback:
            frame = self._source_traceback[-1]
            info.append(f'created at {frame[0]}:{frame[1]}')
        return info

    def __repr__(self):
        if self._repr is not None:
            return self._repr
        info = self._repr_info()
        return '<{}>'.format(' '.join(info))

    def get_context(self):
        return self._context

    def cancel(self):
        if not self._cancelled:
            self._cancelled = True
            if self._loop.get_debug():
                # Keep a representation in debug mode to keep callback and
                # parameters. For example, to log the warning
                # "Executing <Handle...> took 2.5 second"
                self._repr = repr(self)
            self._callback = None
            self._args = None

    def cancelled(self):
        return self._cancelled

    def _run(self):
        try:
            self._context.run(self._callback, *self._args)
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            cb = format_helpers._format_callback_source(
                self._callback, self._args,
                debug=self._loop.get_debug())
            msg = f'Exception in callback {cb}'
            context = {
                'message': msg,
                'exception': exc,
                'handle': self,
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
        self = None  # Needed to break cycles when an exception occurs.


class TimerHandle(Handle):
    """Object returned by timed callback registration methods."""

    __slots__ = ['_scheduled', '_when']

    def __init__(self, when, callback, args, loop, context=None):
        super().__init__(callback, args, loop, context)
        if self._source_traceback:
            del self._source_traceback[-1]
        self._when = when
        self._scheduled = False

    def _repr_info(self):
        info = super()._repr_info()
        pos = 2 if self._cancelled else 1
        info.insert(pos, f'when={self._when}')
        return info

    def __hash__(self):
        return hash(self._when)

    def __lt__(self, other):
        if isinstance(other, TimerHandle):
            return self._when < other._when
        return NotImplemented

    def __le__(self, other):
        if isinstance(other, TimerHandle):
            return self._when < other._when or self.__eq__(other)
        return NotImplemented

    def __gt__(self, other):
        if isinstance(other, TimerHandle):
            return self._when > other._when
        return NotImplemented

    def __ge__(self, other):
        if isinstance(other, TimerHandle):
            return self._when > other._when or self.__eq__(other)
        return NotImplemented

    def __eq__(self, other):
        if isinstance(other, TimerHandle):
            return (self._when == other._when and
                    self._callback == other._callback and
                    self._args == other._args and
                    self._cancelled == other._cancelled)
        return NotImplemented

    def cancel(self):
        if not self._cancelled:
            self._loop._timer_handle_cancelled(self)
        super().cancel()

    def when(self):
        """Return a scheduled callback time.

        The time is an absolute timestamp, using the same time
        reference as loop.time().
        """
        return self._when


class AbstractServer:
    """Abstract server returned by create_server()."""

    def close(self):
        """Stop serving.  This leaves existing connections open."""
        raise NotImplementedError

    def close_clients(self):
        """Close all active connections."""
        raise NotImplementedError

    def abort_clients(self):
        """Close all active connections immediately."""
        raise NotImplementedError

    def get_loop(self):
        """Get the event loop the Server object is attached to."""
        raise NotImplementedError

    def is_serving(self):
        """Return True if the server is accepting connections."""
        raise NotImplementedError

    async def start_serving(self):
        """Start accepting connections.

        This method is idempotent, so it can be called when
        the server is already serving.
        """
        raise NotImplementedError

    async def serve_forever(self):
        """Start accepting connections until the coroutine is cancelled.

        The server is closed when the coroutine is cancelled.
        """
        raise NotImplementedError

    async def wait_closed(self):
        """Coroutine to wait until service is closed."""
        raise NotImplementedError

    async def __aenter__(self):
        return self

    async def __aexit__(self, *exc):
        self.close()
        await self.wait_closed()


class AbstractEventLoop:
    """Abstract event loop."""

    # Running and stopping the event loop.

    def run_forever(self):
        """Run the event loop until stop() is called."""
        raise NotImplementedError

    def run_until_complete(self, future):
        """Run the event loop until a Future is done.

        Return the Future's result, or raise its exception.
        """
        raise NotImplementedError

    def stop(self):
        """Stop the event loop as soon as reasonable.

        Exactly how soon that is may depend on the implementation, but
        no more I/O callbacks should be scheduled.
        """
        raise NotImplementedError

    def is_running(self):
        """Return whether the event loop is currently running."""
        raise NotImplementedError

    def is_closed(self):
        """Returns True if the event loop was closed."""
        raise NotImplementedError

    def close(self):
        """Close the loop.

        The loop should not be running.

        This is idempotent and irreversible.

        No other methods should be called after this one.
        """
        raise NotImplementedError

    async def shutdown_asyncgens(self):
        """Shutdown all active asynchronous generators."""
        raise NotImplementedError

    async def shutdown_default_executor(self):
        """Schedule the shutdown of the default executor."""
        raise NotImplementedError

    # Methods scheduling callbacks.  All these return Handles.

    def _timer_handle_cancelled(self, handle):
        """Notification that a TimerHandle has been cancelled."""
        raise NotImplementedError

    def call_soon(self, callback, *args, context=None):
        return self.call_later(0, callback, *args, context=context)

    def call_later(self, delay, callback, *args, context=None):
        raise NotImplementedError

    def call_at(self, when, callback, *args, context=None):
        raise NotImplementedError

    def time(self):
        raise NotImplementedError

    def create_future(self):
        raise NotImplementedError

    # Method scheduling a coroutine object: create a task.

    def create_task(self, coro, **kwargs):
        raise NotImplementedError

    # Methods for interacting with threads.

    def call_soon_threadsafe(self, callback, *args, context=None):
        raise NotImplementedError

    def run_in_executor(self, executor, func, *args):
        raise NotImplementedError

    def set_default_executor(self, executor):
        raise NotImplementedError

    # Network I/O methods returning Futures.

    async def getaddrinfo(self, host, port, *,
                          family=0, type=0, proto=0, flags=0):
        raise NotImplementedError

    async def getnameinfo(self, sockaddr, flags=0):
        raise NotImplementedError

    async def create_connection(
            self, protocol_factory, host=None, port=None,
            *, ssl=None, family=0, proto=0,
            flags=0, sock=None, local_addr=None,
            server_hostname=None,
            ssl_handshake_timeout=None,
            ssl_shutdown_timeout=None,
            happy_eyeballs_delay=None, interleave=None):
        raise NotImplementedError

    async def create_server(
            self, protocol_factory, host=None, port=None,
            *, family=socket.AF_UNSPEC,
            flags=socket.AI_PASSIVE, sock=None, backlog=100,
            ssl=None, reuse_address=None, reuse_port=None,
            keep_alive=None,
            ssl_handshake_timeout=None,
            ssl_shutdown_timeout=None,
            start_serving=True):
        """A coroutine which creates a TCP server bound to host and port.

        The return value is a Server object which can be used to stop
        the service.

        If host is an empty string or None all interfaces are assumed
        and a list of multiple sockets will be returned (most likely
        one for IPv4 and another one for IPv6). The host parameter can also be
        a sequence (e.g. list) of hosts to bind to.

        family can be set to either AF_INET or AF_INET6 to force the
        socket to use IPv4 or IPv6. If not set it will be determined
        from host (defaults to AF_UNSPEC).

        flags is a bitmask for getaddrinfo().

        sock can optionally be specified in order to use a preexisting
        socket object.

        backlog is the maximum number of queued connections passed to
        listen() (defaults to 100).

        ssl can be set to an SSLContext to enable SSL over the
        accepted connections.

        reuse_address tells the kernel to reuse a local socket in
        TIME_WAIT state, without waiting for its natural timeout to
        expire. If not specified will automatically be set to True on
        UNIX.

        reuse_port tells the kernel to allow this endpoint to be bound to
        the same port as other existing endpoints are bound to, so long as
        they all set this flag when being created. This option is not
        supported on Windows.

        keep_alive set to True keeps connections active by enabling the
        periodic transmission of messages.

        ssl_handshake_timeout is the time in seconds that an SSL server
        will wait for completion of the SSL handshake before aborting the
        connection. Default is 60s.

        ssl_shutdown_timeout is the time in seconds that an SSL server
        will wait for completion of the SSL shutdown procedure
        before aborting the connection. Default is 30s.

        start_serving set to True (default) causes the created server
        to start accepting connections immediately. When set to False,
        the user should await Server.start_serving() or Server.serve_forever()
        to make the server start accepting connections.
        """
        raise NotImplementedError

    async def sendfile(self, transport, file, offset=0, count=None,
                       *, fallback=True):
        """Send a file through a transport.

        Return an amount of sent bytes.
        """
        raise NotImplementedError

    async def start_tls(self, transport, protocol, sslcontext, *,
                        server_side=False,
                        server_hostname=None,
                        ssl_handshake_timeout=None,
                        ssl_shutdown_timeout=None):
        """Upgrade a transport to TLS.

        Return a new transport that *protocol* should start using
        immediately.
        """
        raise NotImplementedError

    async def create_unix_connection(
            self, protocol_factory, path=None, *,
            ssl=None, sock=None,
            server_hostname=None,
            ssl_handshake_timeout=None,
            ssl_shutdown_timeout=None):
        raise NotImplementedError

    async def create_unix_server(
            self, protocol_factory, path=None, *,
            sock=None, backlog=100, ssl=None,
            ssl_handshake_timeout=None,
            ssl_shutdown_timeout=None,
            start_serving=True):
        """A coroutine which creates a UNIX Domain Socket server.

        The return value is a Server object, which can be used to stop
        the service.

        path is a str, representing a file system path to bind the
        server socket to.

        sock can optionally be specified in order to use a preexisting
        socket object.

        backlog is the maximum number of queued connections passed to
        listen() (defaults to 100).

        ssl can be set to an SSLContext to enable SSL over the
        accepted connections.

        ssl_handshake_timeout is the time in seconds that an SSL server
        will wait for the SSL handshake to complete (defaults to 60s).

        ssl_shutdown_timeout is the time in seconds that an SSL server
        will wait for the SSL shutdown to finish (defaults to 30s).

        start_serving set to True (default) causes the created server
        to start accepting connections immediately. When set to False,
        the user should await Server.start_serving() or Server.serve_forever()
        to make the server start accepting connections.
        """
        raise NotImplementedError

    async def connect_accepted_socket(
            self, protocol_factory, sock,
            *, ssl=None,
            ssl_handshake_timeout=None,
            ssl_shutdown_timeout=None):
        """Handle an accepted connection.

        This is used by servers that accept connections outside of
        asyncio, but use asyncio to handle connections.

        This method is a coroutine.  When completed, the coroutine
        returns a (transport, protocol) pair.
        """
        raise NotImplementedError

    async def create_datagram_endpoint(self, protocol_factory,
                                       local_addr=None, remote_addr=None, *,
                                       family=0, proto=0, flags=0,
                                       reuse_address=None, reuse_port=None,
                                       allow_broadcast=None, sock=None):
        """A coroutine which creates a datagram endpoint.

        This method will try to establish the endpoint in the background.
        When successful, the coroutine returns a (transport, protocol) pair.

        protocol_factory must be a callable returning a protocol instance.

        socket family AF_INET, socket.AF_INET6 or socket.AF_UNIX depending on
        host (or family if specified), socket type SOCK_DGRAM.

        reuse_address tells the kernel to reuse a local socket in
        TIME_WAIT state, without waiting for its natural timeout to
        expire. If not specified it will automatically be set to True on
        UNIX.

        reuse_port tells the kernel to allow this endpoint to be bound to
        the same port as other existing endpoints are bound to, so long as
        they all set this flag when being created. This option is not
        supported on Windows and some UNIX's. If the
        :py:data:`~socket.SO_REUSEPORT` constant is not defined then this
        capability is unsupported.

        allow_broadcast tells the kernel to allow this endpoint to send
        messages to the broadcast address.

        sock can optionally be specified in order to use a preexisting
        socket object.
        """
        raise NotImplementedError

    # Pipes and subprocesses.

    async def connect_read_pipe(self, protocol_factory, pipe):
        """Register read pipe in event loop. Set the pipe to non-blocking mode.

        protocol_factory should instantiate object with Protocol interface.
        pipe is a file-like object.
        Return pair (transport, protocol), where transport supports the
        ReadTransport interface."""
        # The reason to accept file-like object instead of just file descriptor
        # is: we need to own pipe and close it at transport finishing.
        # We can get complicated errors if we pass f.fileno(),
        # close fd in pipe transport then close f and vice versa.
        raise NotImplementedError

    async def connect_write_pipe(self, protocol_factory, pipe):
        """Register write pipe in event loop.

        protocol_factory should instantiate object with BaseProtocol interface.
        Pipe is file-like object already switched to nonblocking.
        Return pair (transport, protocol), where transport support
        WriteTransport interface."""
        # The reason to accept file-like object instead of just file descriptor
        # is: we need to own pipe and close it at transport finishing.
        # We can get complicated errors if we pass f.fileno(),
        # close fd in pipe transport then close f and vice versa.
        raise NotImplementedError

    async def subprocess_shell(self, protocol_factory, cmd, *,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               **kwargs):
        raise NotImplementedError

    async def subprocess_exec(self, protocol_factory, *args,
                              stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              **kwargs):
        raise NotImplementedError

    # Ready-based callback registration methods.
    # The add_*() methods return None.
    # The remove_*() methods return True if something was removed,
    # False if there was nothing to delete.

    def add_reader(self, fd, callback, *args):
        raise NotImplementedError

    def remove_reader(self, fd):
        raise NotImplementedError

    def add_writer(self, fd, callback, *args):
        raise NotImplementedError

    def remove_writer(self, fd):
        raise NotImplementedError

    # Completion based I/O methods returning Futures.

    async def sock_recv(self, sock, nbytes):
        raise NotImplementedError

    async def sock_recv_into(self, sock, buf):
        raise NotImplementedError

    async def sock_recvfrom(self, sock, bufsize):
        raise NotImplementedError

    async def sock_recvfrom_into(self, sock, buf, nbytes=0):
        raise NotImplementedError

    async def sock_sendall(self, sock, data):
        raise NotImplementedError

    async def sock_sendto(self, sock, data, address):
        raise NotImplementedError

    async def sock_connect(self, sock, address):
        raise NotImplementedError

    async def sock_accept(self, sock):
        raise NotImplementedError

    async def sock_sendfile(self, sock, file, offset=0, count=None,
                            *, fallback=None):
        raise NotImplementedError

    # Signal handling.

    def add_signal_handler(self, sig, callback, *args):
        raise NotImplementedError

    def remove_signal_handler(self, sig):
        raise NotImplementedError

    # Task factory.

    def set_task_factory(self, factory):
        raise NotImplementedError

    def get_task_factory(self):
        raise NotImplementedError

    # Error handlers.

    def get_exception_handler(self):
        raise NotImplementedError

    def set_exception_handler(self, handler):
        raise NotImplementedError

    def default_exception_handler(self, context):
        raise NotImplementedError

    def call_exception_handler(self, context):
        raise NotImplementedError

    # Debug flag management.

    def get_debug(self):
        raise NotImplementedError

    def set_debug(self, enabled):
        raise NotImplementedError


class AbstractEventLoopPolicy:
    """Abstract policy for accessing the event loop."""

    def get_event_loop(self):
        """Get the event loop for the current context.

        Returns an event loop object implementing the AbstractEventLoop interface,
        or raises an exception in case no event loop has been set for the
        current context and the current policy does not specify to create one.

        It should never return None."""
        raise NotImplementedError

    def set_event_loop(self, loop):
        """Set the event loop for the current context to loop."""
        raise NotImplementedError

    def new_event_loop(self):
        """Create and return a new event loop object according to this
        policy's rules. If there's need to set this loop as the event loop for
        the current context, set_event_loop must be called explicitly."""
        raise NotImplementedError

    # Child processes handling (Unix only).

    def get_child_watcher(self):
        "Get the watcher for child processes."
        raise NotImplementedError

    def set_child_watcher(self, watcher):
        """Set the watcher for child processes."""
        raise NotImplementedError


class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
    """Default policy implementation for accessing the event loop.

    In this policy, each thread has its own event loop.  However, we
    only automatically create an event loop by default for the main
    thread; other threads by default have no event loop.

    Other policies may have different rules (e.g. a single global
    event loop, or automatically creating an event loop per thread, or
    using some other notion of context to which an event loop is
    associated).
    """

    _loop_factory = None

    class _Local(threading.local):
        _loop = None
        _set_called = False

    def __init__(self):
        self._local = self._Local()

    def get_event_loop(self):
        """Get the event loop for the current context.

        Returns an instance of EventLoop or raises an exception.
        """
        if (self._local._loop is None and
                not self._local._set_called and
                threading.current_thread() is threading.main_thread()):
            stacklevel = 2
            try:
                f = sys._getframe(1)
            except AttributeError:
                pass
            else:
                # Move up the call stack so that the warning is attached
                # to the line outside asyncio itself.
                while f:
                    module = f.f_globals.get('__name__')
                    if not (module == 'asyncio' or module.startswith('asyncio.')):
                        break
                    f = f.f_back
                    stacklevel += 1
            import warnings
            warnings.warn('There is no current event loop',
                          DeprecationWarning, stacklevel=stacklevel)
            self.set_event_loop(self.new_event_loop())

        if self._local._loop is None:
            raise RuntimeError('There is no current event loop in thread %r.'
                               % threading.current_thread().name)

        return self._local._loop

    def set_event_loop(self, loop):
        """Set the event loop."""
        self._local._set_called = True
        if loop is not None and not isinstance(loop, AbstractEventLoop):
            raise TypeError(f"loop must be an instance of AbstractEventLoop or None, not '{type(loop).__name__}'")
        self._local._loop = loop

    def new_event_loop(self):
        """Create a new event loop.

        You must call set_event_loop() to make this the current event
        loop.
        """
        return self._loop_factory()


# Event loop policy.  The policy itself is always global, even if the
# policy's rules say that there is an event loop per thread (or other
# notion of context).  The default policy is installed by the first
# call to get_event_loop_policy().
_event_loop_policy = None

# Lock for protecting the on-the-fly creation of the event loop policy.
_lock = threading.Lock()


# A TLS for the running event loop, used by _get_running_loop.
|
||||
class _RunningLoop(threading.local):
|
||||
loop_pid = (None, None)
|
||||
|
||||
|
||||
_running_loop = _RunningLoop()
|
||||
|
||||
|
||||
def get_running_loop():
|
||||
"""Return the running event loop. Raise a RuntimeError if there is none.
|
||||
|
||||
This function is thread-specific.
|
||||
"""
|
||||
# NOTE: this function is implemented in C (see _asynciomodule.c)
|
||||
loop = _get_running_loop()
|
||||
if loop is None:
|
||||
raise RuntimeError('no running event loop')
|
||||
return loop
|
||||
|
||||
|
||||
def _get_running_loop():
|
||||
"""Return the running event loop or None.
|
||||
|
||||
This is a low-level function intended to be used by event loops.
|
||||
This function is thread-specific.
|
||||
"""
|
||||
# NOTE: this function is implemented in C (see _asynciomodule.c)
|
||||
running_loop, pid = _running_loop.loop_pid
|
||||
if running_loop is not None and pid == os.getpid():
|
||||
return running_loop
|
||||
|
||||
|
||||
def _set_running_loop(loop):
|
||||
"""Set the running event loop.
|
||||
|
||||
This is a low-level function intended to be used by event loops.
|
||||
This function is thread-specific.
|
||||
"""
|
||||
# NOTE: this function is implemented in C (see _asynciomodule.c)
|
||||
_running_loop.loop_pid = (loop, os.getpid())
|
||||
|
||||
|
||||
def _init_event_loop_policy():
|
||||
global _event_loop_policy
|
||||
with _lock:
|
||||
if _event_loop_policy is None: # pragma: no branch
|
||||
from . import DefaultEventLoopPolicy
|
||||
_event_loop_policy = DefaultEventLoopPolicy()
|
||||
|
||||
|
||||
def get_event_loop_policy():
|
||||
"""Get the current event loop policy."""
|
||||
if _event_loop_policy is None:
|
||||
_init_event_loop_policy()
|
||||
return _event_loop_policy
|
||||
|
||||
|
||||
def set_event_loop_policy(policy):
|
||||
"""Set the current event loop policy.
|
||||
|
||||
If policy is None, the default policy is restored."""
|
||||
global _event_loop_policy
|
||||
if policy is not None and not isinstance(policy, AbstractEventLoopPolicy):
|
||||
raise TypeError(f"policy must be an instance of AbstractEventLoopPolicy or None, not '{type(policy).__name__}'")
|
||||
_event_loop_policy = policy
|
||||
|
||||
|
||||
def get_event_loop():
|
||||
"""Return an asyncio event loop.
|
||||
|
||||
When called from a coroutine or a callback (e.g. scheduled with call_soon
|
||||
or similar API), this function will always return the running event loop.
|
||||
|
||||
If there is no running event loop set, the function will return
|
||||
the result of `get_event_loop_policy().get_event_loop()` call.
|
||||
"""
|
||||
# NOTE: this function is implemented in C (see _asynciomodule.c)
|
||||
current_loop = _get_running_loop()
|
||||
if current_loop is not None:
|
||||
return current_loop
|
||||
return get_event_loop_policy().get_event_loop()
|
||||
|
||||
|
||||
def set_event_loop(loop):
|
||||
"""Equivalent to calling get_event_loop_policy().set_event_loop(loop)."""
|
||||
get_event_loop_policy().set_event_loop(loop)
|
||||
|
||||
|
||||
def new_event_loop():
|
||||
"""Equivalent to calling get_event_loop_policy().new_event_loop()."""
|
||||
return get_event_loop_policy().new_event_loop()
|
||||
|
||||
|
||||
def get_child_watcher():
|
||||
"""Equivalent to calling get_event_loop_policy().get_child_watcher()."""
|
||||
return get_event_loop_policy().get_child_watcher()
|
||||
|
||||
|
||||
def set_child_watcher(watcher):
|
||||
"""Equivalent to calling
|
||||
get_event_loop_policy().set_child_watcher(watcher)."""
|
||||
return get_event_loop_policy().set_child_watcher(watcher)
|
||||
|
||||
|
||||
# Alias pure-Python implementations for testing purposes.
|
||||
_py__get_running_loop = _get_running_loop
|
||||
_py__set_running_loop = _set_running_loop
|
||||
_py_get_running_loop = get_running_loop
|
||||
_py_get_event_loop = get_event_loop
|
||||
|
||||
|
||||
try:
|
||||
# get_event_loop() is one of the most frequently called
|
||||
# functions in asyncio. Pure Python implementation is
|
||||
# about 4 times slower than C-accelerated.
|
||||
from _asyncio import (_get_running_loop, _set_running_loop,
|
||||
get_running_loop, get_event_loop)
|
||||
except ImportError:
|
||||
pass
|
||||
else:
|
||||
# Alias C implementations for testing purposes.
|
||||
_c__get_running_loop = _get_running_loop
|
||||
_c__set_running_loop = _set_running_loop
|
||||
_c_get_running_loop = get_running_loop
|
||||
_c_get_event_loop = get_event_loop
|
||||
|
||||
|
||||
if hasattr(os, 'fork'):
|
||||
def on_fork():
|
||||
# Reset the loop and wakeupfd in the forked child process.
|
||||
if _event_loop_policy is not None:
|
||||
_event_loop_policy._local = BaseDefaultEventLoopPolicy._Local()
|
||||
_set_running_loop(None)
|
||||
signal.set_wakeup_fd(-1)
|
||||
|
||||
os.register_at_fork(after_in_child=on_fork)
|
||||
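The policy machinery above is easiest to see from the caller's side. A minimal sketch (illustrative only, not part of the vendored file): inside a coroutine, get_running_loop() returns the loop driving it, while the module-level helpers delegate to the installed policy.

import asyncio

async def main():
    # Always the loop that is executing this coroutine.
    loop = asyncio.get_running_loop()
    print(loop.time())

# new_event_loop()/set_event_loop() delegate to the policy functions above.
loop = asyncio.new_event_loop()
try:
    asyncio.set_event_loop(loop)
    loop.run_until_complete(main())
finally:
    asyncio.set_event_loop(None)
    loop.close()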
62
Dependencies/Python/Lib/asyncio/exceptions.py
vendored
Normal file
@@ -0,0 +1,62 @@
"""asyncio exceptions."""


__all__ = ('BrokenBarrierError',
           'CancelledError', 'InvalidStateError', 'TimeoutError',
           'IncompleteReadError', 'LimitOverrunError',
           'SendfileNotAvailableError')


class CancelledError(BaseException):
    """The Future or Task was cancelled."""


TimeoutError = TimeoutError  # make local alias for the standard exception


class InvalidStateError(Exception):
    """The operation is not allowed in this state."""


class SendfileNotAvailableError(RuntimeError):
    """Sendfile syscall is not available.

    Raised if the OS does not support the sendfile syscall for the given
    socket or file type.
    """


class IncompleteReadError(EOFError):
    """
    Incomplete read error. Attributes:

    - partial: bytes read before the end of stream was reached
    - expected: total number of expected bytes (or None if unknown)
    """
    def __init__(self, partial, expected):
        r_expected = 'undefined' if expected is None else repr(expected)
        super().__init__(f'{len(partial)} bytes read on a total of '
                         f'{r_expected} expected bytes')
        self.partial = partial
        self.expected = expected

    def __reduce__(self):
        return type(self), (self.partial, self.expected)


class LimitOverrunError(Exception):
    """Reached the buffer limit while looking for a separator.

    Attributes:
    - consumed: total number of bytes to be consumed.
    """
    def __init__(self, message, consumed):
        super().__init__(message)
        self.consumed = consumed

    def __reduce__(self):
        return type(self), (self.args[0], self.consumed)


class BrokenBarrierError(RuntimeError):
    """Barrier is broken by a barrier.abort() call."""
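The partial/expected attributes on IncompleteReadError are what callers typically inspect. A minimal sketch (not part of the vendored file), assuming reader is an asyncio.StreamReader:

import asyncio

async def read_header(reader):
    # reader is assumed to be an asyncio.StreamReader.
    try:
        return await reader.readexactly(16)
    except asyncio.IncompleteReadError as exc:
        # exc.partial: bytes read before EOF; exc.expected: requested count.
        return exc.partial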
84
Dependencies/Python/Lib/asyncio/format_helpers.py
vendored
Normal file
@@ -0,0 +1,84 @@
import functools
import inspect
import reprlib
import sys
import traceback

from . import constants


def _get_function_source(func):
    func = inspect.unwrap(func)
    if inspect.isfunction(func):
        code = func.__code__
        return (code.co_filename, code.co_firstlineno)
    if isinstance(func, functools.partial):
        return _get_function_source(func.func)
    if isinstance(func, functools.partialmethod):
        return _get_function_source(func.func)
    return None


def _format_callback_source(func, args, *, debug=False):
    func_repr = _format_callback(func, args, None, debug=debug)
    source = _get_function_source(func)
    if source:
        func_repr += f' at {source[0]}:{source[1]}'
    return func_repr


def _format_args_and_kwargs(args, kwargs, *, debug=False):
    """Format function arguments and keyword arguments.

    Special case for a single parameter: ('hello',) is formatted as ('hello').

    Note that this function only returns argument details when
    debug=True is specified, as arguments may contain sensitive
    information.
    """
    if not debug:
        return '()'

    # use reprlib to limit the length of the output
    items = []
    if args:
        items.extend(reprlib.repr(arg) for arg in args)
    if kwargs:
        items.extend(f'{k}={reprlib.repr(v)}' for k, v in kwargs.items())
    return '({})'.format(', '.join(items))


def _format_callback(func, args, kwargs, *, debug=False, suffix=''):
    if isinstance(func, functools.partial):
        suffix = _format_args_and_kwargs(args, kwargs, debug=debug) + suffix
        return _format_callback(func.func, func.args, func.keywords,
                                debug=debug, suffix=suffix)

    if hasattr(func, '__qualname__') and func.__qualname__:
        func_repr = func.__qualname__
    elif hasattr(func, '__name__') and func.__name__:
        func_repr = func.__name__
    else:
        func_repr = repr(func)

    func_repr += _format_args_and_kwargs(args, kwargs, debug=debug)
    if suffix:
        func_repr += suffix
    return func_repr


def extract_stack(f=None, limit=None):
    """Replacement for traceback.extract_stack() that only does the
    necessary work for asyncio debug mode.
    """
    if f is None:
        f = sys._getframe().f_back
    if limit is None:
        # Limit the amount of work to a reasonable amount, as extract_stack()
        # can be called for each coroutine and future in debug mode.
        limit = constants.DEBUG_STACK_DEPTH
    stack = traceback.StackSummary.extract(traceback.walk_stack(f),
                                           limit=limit,
                                           lookup_lines=False)
    stack.reverse()
    return stack
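These are private helpers, but their output is easy to demonstrate. A sketch (illustrative only; it relies on private API that may change between Python versions):

import functools
from asyncio import format_helpers

def handler(a, b=None):
    pass

p = functools.partial(handler, 1, b=2)
# With debug=True the argument reprs are included; prints something like
# "handler(1, b=2)() at /path/to/module.py:4".
print(format_helpers._format_callback_source(p, (), debug=True))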
425
Dependencies/Python/Lib/asyncio/futures.py
vendored
Normal file
@@ -0,0 +1,425 @@
"""A Future class similar to the one in PEP 3148."""

__all__ = (
    'Future', 'wrap_future', 'isfuture',
)

import concurrent.futures
import contextvars
import logging
import sys
from types import GenericAlias

from . import base_futures
from . import events
from . import exceptions
from . import format_helpers


isfuture = base_futures.isfuture


_PENDING = base_futures._PENDING
_CANCELLED = base_futures._CANCELLED
_FINISHED = base_futures._FINISHED


STACK_DEBUG = logging.DEBUG - 1  # heavy-duty debugging


class Future:
    """This class is *almost* compatible with concurrent.futures.Future.

    Differences:

    - This class is not thread-safe.

    - result() and exception() do not take a timeout argument and
      raise an exception when the future isn't done yet.

    - Callbacks registered with add_done_callback() are always called
      via the event loop's call_soon().

    - This class is not compatible with the wait() and as_completed()
      methods in the concurrent.futures package.

    (In Python 3.4 or later we may be able to unify the implementations.)
    """

    # Class variables serving as defaults for instance variables.
    _state = _PENDING
    _result = None
    _exception = None
    _loop = None
    _source_traceback = None
    _cancel_message = None
    # A saved CancelledError for later chaining as an exception context.
    _cancelled_exc = None

    # This field is used for a dual purpose:
    # - Its presence is a marker to declare that a class implements
    #   the Future protocol (i.e. is intended to be duck-type compatible).
    #   The value must also be not-None, to enable a subclass to declare
    #   that it is not compatible by setting this to None.
    # - It is set by __iter__() below so that Task._step() can tell
    #   the difference between
    #   `await Future()` or `yield from Future()` (correct) vs.
    #   `yield Future()` (incorrect).
    _asyncio_future_blocking = False

    __log_traceback = False

    def __init__(self, *, loop=None):
        """Initialize the future.

        The optional loop argument allows explicitly setting the event
        loop object used by the future. If it's not provided, the future
        uses the default event loop.
        """
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
        self._callbacks = []
        if self._loop.get_debug():
            self._source_traceback = format_helpers.extract_stack(
                sys._getframe(1))

    def __repr__(self):
        return base_futures._future_repr(self)

    def __del__(self):
        if not self.__log_traceback:
            # set_exception() was not called, or result() or exception()
            # has consumed the exception
            return
        exc = self._exception
        context = {
            'message':
                f'{self.__class__.__name__} exception was never retrieved',
            'exception': exc,
            'future': self,
        }
        if self._source_traceback:
            context['source_traceback'] = self._source_traceback
        self._loop.call_exception_handler(context)

    __class_getitem__ = classmethod(GenericAlias)

    @property
    def _log_traceback(self):
        return self.__log_traceback

    @_log_traceback.setter
    def _log_traceback(self, val):
        if val:
            raise ValueError('_log_traceback can only be set to False')
        self.__log_traceback = False

    def get_loop(self):
        """Return the event loop the Future is bound to."""
        loop = self._loop
        if loop is None:
            raise RuntimeError("Future object is not initialized.")
        return loop

    def _make_cancelled_error(self):
        """Create the CancelledError to raise if the Future is cancelled.

        This should only be called once when handling a cancellation since
        it erases the saved context exception value.
        """
        if self._cancelled_exc is not None:
            exc = self._cancelled_exc
            self._cancelled_exc = None
            return exc

        if self._cancel_message is None:
            exc = exceptions.CancelledError()
        else:
            exc = exceptions.CancelledError(self._cancel_message)
        return exc

    def cancel(self, msg=None):
        """Cancel the future and schedule callbacks.

        If the future is already done or cancelled, return False.  Otherwise,
        change the future's state to cancelled, schedule the callbacks and
        return True.
        """
        self.__log_traceback = False
        if self._state != _PENDING:
            return False
        self._state = _CANCELLED
        self._cancel_message = msg
        self.__schedule_callbacks()
        return True

    def __schedule_callbacks(self):
        """Internal: Ask the event loop to call all callbacks.

        The callbacks are scheduled to be called as soon as possible. Also
        clears the callback list.
        """
        callbacks = self._callbacks[:]
        if not callbacks:
            return

        self._callbacks[:] = []
        for callback, ctx in callbacks:
            self._loop.call_soon(callback, self, context=ctx)

    def cancelled(self):
        """Return True if the future was cancelled."""
        return self._state == _CANCELLED

    # Don't implement running(); see http://bugs.python.org/issue18699

    def done(self):
        """Return True if the future is done.

        Done means either that a result / exception are available, or that the
        future was cancelled.
        """
        return self._state != _PENDING

    def result(self):
        """Return the result this future represents.

        If the future has been cancelled, raises CancelledError.  If the
        future's result isn't yet available, raises InvalidStateError.  If
        the future is done and has an exception set, this exception is raised.
        """
        if self._state == _CANCELLED:
            raise self._make_cancelled_error()
        if self._state != _FINISHED:
            raise exceptions.InvalidStateError('Result is not ready.')
        self.__log_traceback = False
        if self._exception is not None:
            raise self._exception.with_traceback(self._exception_tb)
        return self._result

    def exception(self):
        """Return the exception that was set on this future.

        The exception (or None if no exception was set) is returned only if
        the future is done.  If the future has been cancelled, raises
        CancelledError.  If the future isn't done yet, raises
        InvalidStateError.
        """
        if self._state == _CANCELLED:
            raise self._make_cancelled_error()
        if self._state != _FINISHED:
            raise exceptions.InvalidStateError('Exception is not set.')
        self.__log_traceback = False
        return self._exception

    def add_done_callback(self, fn, *, context=None):
        """Add a callback to be run when the future becomes done.

        The callback is called with a single argument - the future object. If
        the future is already done when this is called, the callback is
        scheduled with call_soon.
        """
        if self._state != _PENDING:
            self._loop.call_soon(fn, self, context=context)
        else:
            if context is None:
                context = contextvars.copy_context()
            self._callbacks.append((fn, context))

    # New method not in PEP 3148.

    def remove_done_callback(self, fn):
        """Remove all instances of a callback from the "call when done" list.

        Returns the number of callbacks removed.
        """
        filtered_callbacks = [(f, ctx)
                              for (f, ctx) in self._callbacks
                              if f != fn]
        removed_count = len(self._callbacks) - len(filtered_callbacks)
        if removed_count:
            self._callbacks[:] = filtered_callbacks
        return removed_count

    # So-called internal methods (note: no set_running_or_notify_cancel()).

    def set_result(self, result):
        """Mark the future done and set its result.

        If the future is already done when this method is called, raises
        InvalidStateError.
        """
        if self._state != _PENDING:
            raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
        self._result = result
        self._state = _FINISHED
        self.__schedule_callbacks()

    def set_exception(self, exception):
        """Mark the future done and set an exception.

        If the future is already done when this method is called, raises
        InvalidStateError.
        """
        if self._state != _PENDING:
            raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
        if isinstance(exception, type):
            exception = exception()
        if isinstance(exception, StopIteration):
            new_exc = RuntimeError("StopIteration interacts badly with "
                                   "generators and cannot be raised into a "
                                   "Future")
            new_exc.__cause__ = exception
            new_exc.__context__ = exception
            exception = new_exc
        self._exception = exception
        self._exception_tb = exception.__traceback__
        self._state = _FINISHED
        self.__schedule_callbacks()
        self.__log_traceback = True

    def __await__(self):
        if not self.done():
            self._asyncio_future_blocking = True
            yield self  # This tells Task to wait for completion.
        if not self.done():
            raise RuntimeError("await wasn't used with future")
        return self.result()  # May raise too.

    __iter__ = __await__  # make compatible with 'yield from'.


# Needed for testing purposes.
_PyFuture = Future


def _get_loop(fut):
    # Tries to call Future.get_loop() if it's available.
    # Otherwise falls back to using the old '_loop' property.
    try:
        get_loop = fut.get_loop
    except AttributeError:
        pass
    else:
        return get_loop()
    return fut._loop


def _set_result_unless_cancelled(fut, result):
    """Helper setting the result only if the future was not cancelled."""
    if fut.cancelled():
        return
    fut.set_result(result)


def _convert_future_exc(exc):
    exc_class = type(exc)
    if exc_class is concurrent.futures.CancelledError:
        return exceptions.CancelledError(*exc.args).with_traceback(exc.__traceback__)
    elif exc_class is concurrent.futures.InvalidStateError:
        return exceptions.InvalidStateError(*exc.args).with_traceback(exc.__traceback__)
    else:
        return exc


def _set_concurrent_future_state(concurrent, source):
    """Copy state from a future to a concurrent.futures.Future."""
    assert source.done()
    if source.cancelled():
        concurrent.cancel()
    if not concurrent.set_running_or_notify_cancel():
        return
    exception = source.exception()
    if exception is not None:
        concurrent.set_exception(_convert_future_exc(exception))
    else:
        result = source.result()
        concurrent.set_result(result)


def _copy_future_state(source, dest):
    """Internal helper to copy state from another Future.

    The other Future may be a concurrent.futures.Future.
    """
    assert source.done()
    if dest.cancelled():
        return
    assert not dest.done()
    if source.cancelled():
        dest.cancel()
    else:
        exception = source.exception()
        if exception is not None:
            dest.set_exception(_convert_future_exc(exception))
        else:
            result = source.result()
            dest.set_result(result)


def _chain_future(source, destination):
    """Chain two futures so that when one completes, so does the other.

    The result (or exception) of source will be copied to destination.
    If destination is cancelled, source gets cancelled too.
    Compatible with both asyncio.Future and concurrent.futures.Future.
    """
    if not isfuture(source) and not isinstance(source,
                                               concurrent.futures.Future):
        raise TypeError('A future is required for source argument')
    if not isfuture(destination) and not isinstance(destination,
                                                    concurrent.futures.Future):
        raise TypeError('A future is required for destination argument')
    source_loop = _get_loop(source) if isfuture(source) else None
    dest_loop = _get_loop(destination) if isfuture(destination) else None

    def _set_state(future, other):
        if isfuture(future):
            _copy_future_state(other, future)
        else:
            _set_concurrent_future_state(future, other)

    def _call_check_cancel(destination):
        if destination.cancelled():
            if source_loop is None or source_loop is dest_loop:
                source.cancel()
            else:
                source_loop.call_soon_threadsafe(source.cancel)

    def _call_set_state(source):
        if (destination.cancelled() and
                dest_loop is not None and dest_loop.is_closed()):
            return
        if dest_loop is None or dest_loop is source_loop:
            _set_state(destination, source)
        else:
            if dest_loop.is_closed():
                return
            dest_loop.call_soon_threadsafe(_set_state, destination, source)

    destination.add_done_callback(_call_check_cancel)
    source.add_done_callback(_call_set_state)


def wrap_future(future, *, loop=None):
    """Wrap a concurrent.futures.Future object."""
    if isfuture(future):
        return future
    assert isinstance(future, concurrent.futures.Future), \
        f'concurrent.futures.Future is expected, got {future!r}'
    if loop is None:
        loop = events.get_event_loop()
    new_future = loop.create_future()
    _chain_future(future, new_future)
    return new_future


try:
    import _asyncio
except ImportError:
    pass
else:
    # _CFuture is needed for tests.
    Future = _CFuture = _asyncio.Future
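wrap_future() plus _chain_future() is the bridge between executor threads and the event loop. A minimal sketch (not part of the vendored file) using only public API:

import asyncio
import concurrent.futures

def blocking_io():
    return 42

async def main():
    with concurrent.futures.ThreadPoolExecutor() as pool:
        cf = pool.submit(blocking_io)           # concurrent.futures.Future
        result = await asyncio.wrap_future(cf)  # chained to an asyncio.Future
        print(result)  # 42

asyncio.run(main())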
617
Dependencies/Python/Lib/asyncio/locks.py
vendored
Normal file
@@ -0,0 +1,617 @@
"""Synchronization primitives."""

__all__ = ('Lock', 'Event', 'Condition', 'Semaphore',
           'BoundedSemaphore', 'Barrier')

import collections
import enum

from . import exceptions
from . import mixins

class _ContextManagerMixin:
    async def __aenter__(self):
        await self.acquire()
        # We have no use for the "as ..." clause in the with
        # statement for locks.
        return None

    async def __aexit__(self, exc_type, exc, tb):
        self.release()


class Lock(_ContextManagerMixin, mixins._LoopBoundMixin):
    """Primitive lock objects.

    A primitive lock is a synchronization primitive that is not owned
    by a particular task when locked.  A primitive lock is in one
    of two states, 'locked' or 'unlocked'.

    It is created in the unlocked state.  It has two basic methods,
    acquire() and release().  When the state is unlocked, acquire()
    changes the state to locked and returns immediately.  When the
    state is locked, acquire() blocks until a call to release() in
    another task changes it to unlocked, then the acquire() call
    resets it to locked and returns.  The release() method should only
    be called in the locked state; it changes the state to unlocked
    and returns immediately.  If an attempt is made to release an
    unlocked lock, a RuntimeError will be raised.

    When more than one task is blocked in acquire() waiting for
    the state to turn to unlocked, only one task proceeds when a
    release() call resets the state to unlocked; successive release()
    calls will unblock tasks in FIFO order.

    Locks also support the asynchronous context management protocol.
    The 'async with lock' statement should be used.

    Usage:

        lock = Lock()
        ...
        await lock.acquire()
        try:
            ...
        finally:
            lock.release()

    Context manager usage:

        lock = Lock()
        ...
        async with lock:
            ...

    Lock objects can be tested for locking state:

        if not lock.locked():
            await lock.acquire()
        else:
            # lock is acquired
            ...

    """

    def __init__(self):
        self._waiters = None
        self._locked = False

    def __repr__(self):
        res = super().__repr__()
        extra = 'locked' if self._locked else 'unlocked'
        if self._waiters:
            extra = f'{extra}, waiters:{len(self._waiters)}'
        return f'<{res[1:-1]} [{extra}]>'

    def locked(self):
        """Return True if the lock is acquired."""
        return self._locked

    async def acquire(self):
        """Acquire a lock.

        This method blocks until the lock is unlocked, then sets it to
        locked and returns True.
        """
        # Implement fair scheduling, where a task always waits
        # its turn.  Jumping the queue if all are cancelled is an optimization.
        if (not self._locked and (self._waiters is None or
                all(w.cancelled() for w in self._waiters))):
            self._locked = True
            return True

        if self._waiters is None:
            self._waiters = collections.deque()
        fut = self._get_loop().create_future()
        self._waiters.append(fut)

        try:
            try:
                await fut
            finally:
                self._waiters.remove(fut)
        except exceptions.CancelledError:
            # Currently the only exception designed to be able to occur here.

            # Ensure the lock invariant: If the lock is not claimed (or about
            # to be claimed by us) and there is a Task in waiters,
            # ensure that the Task at the head will run.
            if not self._locked:
                self._wake_up_first()
            raise

        # assert self._locked is False
        self._locked = True
        return True

    def release(self):
        """Release a lock.

        When the lock is locked, reset it to unlocked, and return.
        If any other tasks are blocked waiting for the lock to become
        unlocked, allow exactly one of them to proceed.

        When invoked on an unlocked lock, a RuntimeError is raised.

        There is no return value.
        """
        if self._locked:
            self._locked = False
            self._wake_up_first()
        else:
            raise RuntimeError('Lock is not acquired.')

    def _wake_up_first(self):
        """Ensure that the first waiter will wake up."""
        if not self._waiters:
            return
        try:
            fut = next(iter(self._waiters))
        except StopIteration:
            return

        # .done() means that the waiter is already set to wake up.
        if not fut.done():
            fut.set_result(True)


class Event(mixins._LoopBoundMixin):
    """Asynchronous equivalent to threading.Event.

    Class implementing event objects. An event manages a flag that can be set
    to true with the set() method and reset to false with the clear() method.
    The wait() method blocks until the flag is true. The flag is initially
    false.
    """

    def __init__(self):
        self._waiters = collections.deque()
        self._value = False

    def __repr__(self):
        res = super().__repr__()
        extra = 'set' if self._value else 'unset'
        if self._waiters:
            extra = f'{extra}, waiters:{len(self._waiters)}'
        return f'<{res[1:-1]} [{extra}]>'

    def is_set(self):
        """Return True if and only if the internal flag is true."""
        return self._value

    def set(self):
        """Set the internal flag to true. All tasks waiting for it to
        become true are awakened. Tasks that call wait() once the flag is
        true will not block at all.
        """
        if not self._value:
            self._value = True

            for fut in self._waiters:
                if not fut.done():
                    fut.set_result(True)

    def clear(self):
        """Reset the internal flag to false. Subsequently, tasks calling
        wait() will block until set() is called to set the internal flag
        to true again."""
        self._value = False

    async def wait(self):
        """Block until the internal flag is true.

        If the internal flag is true on entry, return True
        immediately.  Otherwise, block until another task calls
        set() to set the flag to true, then return True.
        """
        if self._value:
            return True

        fut = self._get_loop().create_future()
        self._waiters.append(fut)
        try:
            await fut
            return True
        finally:
            self._waiters.remove(fut)


class Condition(_ContextManagerMixin, mixins._LoopBoundMixin):
    """Asynchronous equivalent to threading.Condition.

    This class implements condition variable objects. A condition variable
    allows one or more tasks to wait until they are notified by another
    task.

    A new Lock object is created and used as the underlying lock.
    """

    def __init__(self, lock=None):
        if lock is None:
            lock = Lock()

        self._lock = lock
        # Export the lock's locked(), acquire() and release() methods.
        self.locked = lock.locked
        self.acquire = lock.acquire
        self.release = lock.release

        self._waiters = collections.deque()

    def __repr__(self):
        res = super().__repr__()
        extra = 'locked' if self.locked() else 'unlocked'
        if self._waiters:
            extra = f'{extra}, waiters:{len(self._waiters)}'
        return f'<{res[1:-1]} [{extra}]>'

    async def wait(self):
        """Wait until notified.

        If the calling task has not acquired the lock when this
        method is called, a RuntimeError is raised.

        This method releases the underlying lock, and then blocks
        until it is awakened by a notify() or notify_all() call for
        the same condition variable in another task.  Once
        awakened, it re-acquires the lock and returns True.

        This method may return spuriously,
        which is why the caller should always
        re-check the state and be prepared to wait() again.
        """
        if not self.locked():
            raise RuntimeError('cannot wait on un-acquired lock')

        fut = self._get_loop().create_future()
        self.release()
        try:
            try:
                self._waiters.append(fut)
                try:
                    await fut
                    return True
                finally:
                    self._waiters.remove(fut)

            finally:
                # Must re-acquire the lock even if wait is cancelled.
                # We only catch CancelledError here, since we don't want any
                # other (fatal) errors with the future to cause us to spin.
                err = None
                while True:
                    try:
                        await self.acquire()
                        break
                    except exceptions.CancelledError as e:
                        err = e

                if err is not None:
                    try:
                        raise err  # Re-raise most recent exception instance.
                    finally:
                        err = None  # Break reference cycles.
        except BaseException:
            # Any error raised out of here _may_ have occurred after this Task
            # believed it had been successfully notified.
            # Make sure to notify another Task instead.  This may result
            # in a "spurious wakeup", which is allowed as part of the
            # Condition Variable protocol.
            self._notify(1)
            raise

    async def wait_for(self, predicate):
        """Wait until a predicate becomes true.

        The predicate should be a callable whose result will be
        interpreted as a boolean value.  The method will repeatedly
        wait() until it evaluates to true.  The final predicate value is
        the return value.
        """
        result = predicate()
        while not result:
            await self.wait()
            result = predicate()
        return result

    def notify(self, n=1):
        """By default, wake up one task waiting on this condition, if any.
        If the calling task has not acquired the lock when this method
        is called, a RuntimeError is raised.

        This method wakes up n of the tasks waiting for the condition
        variable; if fewer than n are waiting, they are all awoken.

        Note: an awakened task does not actually return from its
        wait() call until it can reacquire the lock.  Since notify() does
        not release the lock, its caller should.
        """
        if not self.locked():
            raise RuntimeError('cannot notify on un-acquired lock')
        self._notify(n)

    def _notify(self, n):
        idx = 0
        for fut in self._waiters:
            if idx >= n:
                break

            if not fut.done():
                idx += 1
                fut.set_result(False)

    def notify_all(self):
        """Wake up all tasks waiting on this condition. This method acts
        like notify(), but wakes up all waiting tasks instead of one. If the
        calling task has not acquired the lock when this method is called,
        a RuntimeError is raised.
        """
        self.notify(len(self._waiters))


class Semaphore(_ContextManagerMixin, mixins._LoopBoundMixin):
    """A Semaphore implementation.

    A semaphore manages an internal counter which is decremented by each
    acquire() call and incremented by each release() call. The counter
    can never go below zero; when acquire() finds that it is zero, it blocks,
    waiting until some other task calls release().

    Semaphores also support the context management protocol.

    The optional argument gives the initial value for the internal
    counter; it defaults to 1. If the value given is less than 0,
    ValueError is raised.
    """

    def __init__(self, value=1):
        if value < 0:
            raise ValueError("Semaphore initial value must be >= 0")
        self._waiters = None
        self._value = value

    def __repr__(self):
        res = super().__repr__()
        extra = 'locked' if self.locked() else f'unlocked, value:{self._value}'
        if self._waiters:
            extra = f'{extra}, waiters:{len(self._waiters)}'
        return f'<{res[1:-1]} [{extra}]>'

    def locked(self):
        """Return True if the semaphore cannot be acquired immediately."""
        # Due to state, or FIFO rules (must allow others to run first).
        return self._value == 0 or (
            any(not w.cancelled() for w in (self._waiters or ())))

    async def acquire(self):
        """Acquire a semaphore.

        If the internal counter is larger than zero on entry,
        decrement it by one and return True immediately.  If it is
        zero on entry, block, waiting until some other task has
        called release() to make it larger than 0, and then return
        True.
        """
        if not self.locked():
            # Maintain FIFO, wait for others to start even if _value > 0.
            self._value -= 1
            return True

        if self._waiters is None:
            self._waiters = collections.deque()
        fut = self._get_loop().create_future()
        self._waiters.append(fut)

        try:
            try:
                await fut
            finally:
                self._waiters.remove(fut)
        except exceptions.CancelledError:
            # Currently the only exception designed to be able to occur here.
            if fut.done() and not fut.cancelled():
                # Our Future was successfully set to True via _wake_up_next(),
                # but we are not about to successfully acquire(). Therefore we
                # must undo the bookkeeping already done and attempt to wake
                # up someone else.
                self._value += 1
            raise

        finally:
            # New waiters may have arrived but had to wait due to FIFO.
            # Wake up as many as are allowed.
            while self._value > 0:
                if not self._wake_up_next():
                    break  # There was no-one to wake up.
        return True

    def release(self):
        """Release a semaphore, incrementing the internal counter by one.

        When it was zero on entry and another task is waiting for it to
        become larger than zero again, wake up that task.
        """
        self._value += 1
        self._wake_up_next()

    def _wake_up_next(self):
        """Wake up the first waiter that isn't done."""
        if not self._waiters:
            return False

        for fut in self._waiters:
            if not fut.done():
                self._value -= 1
                fut.set_result(True)
                # `fut` is now `done()` and not `cancelled()`.
                return True
        return False


class BoundedSemaphore(Semaphore):
    """A bounded semaphore implementation.

    This raises ValueError in release() if it would increase the value
    above the initial value.
    """

    def __init__(self, value=1):
        self._bound_value = value
        super().__init__(value)

    def release(self):
        if self._value >= self._bound_value:
            raise ValueError('BoundedSemaphore released too many times')
        super().release()


class _BarrierState(enum.Enum):
    FILLING = 'filling'
    DRAINING = 'draining'
    RESETTING = 'resetting'
    BROKEN = 'broken'


class Barrier(mixins._LoopBoundMixin):
    """Asyncio equivalent to threading.Barrier.

    Implements a Barrier primitive.
    Useful for synchronizing a fixed number of tasks at known synchronization
    points.  Tasks block on 'wait()' and are simultaneously awoken once they
    have all made their call.
    """

    def __init__(self, parties):
        """Create a barrier, initialised to 'parties' tasks."""
        if parties < 1:
            raise ValueError('parties must be >= 1')

        self._cond = Condition()  # notify all tasks when state changes

        self._parties = parties
        self._state = _BarrierState.FILLING
        self._count = 0  # count tasks in Barrier

    def __repr__(self):
        res = super().__repr__()
        extra = f'{self._state.value}'
        if not self.broken:
            extra += f', waiters:{self.n_waiting}/{self.parties}'
        return f'<{res[1:-1]} [{extra}]>'

    async def __aenter__(self):
        # Wait until the barrier reaches the parties number; when it starts
        # draining, release and return the index of the waiting task.
        return await self.wait()

    async def __aexit__(self, *args):
        pass

    async def wait(self):
        """Wait for the barrier.

        When the specified number of tasks have started waiting, they are all
        simultaneously awoken.
        Returns a unique and individual index number from 0 to 'parties-1'.
        """
        async with self._cond:
            await self._block()  # Block while the barrier drains or resets.
            try:
                index = self._count
                self._count += 1
                if index + 1 == self._parties:
                    # We release the barrier
                    await self._release()
                else:
                    await self._wait()
                return index
            finally:
                self._count -= 1
                # Wake up any tasks waiting for the barrier to drain.
                self._exit()

    async def _block(self):
        # Block until the barrier is ready for us,
        # or raise an exception if it is broken.
        #
        # If it is draining or resetting, wait until done,
        # unless a CancelledError occurs.
        await self._cond.wait_for(
            lambda: self._state not in (
                _BarrierState.DRAINING, _BarrierState.RESETTING
            )
        )

        # see if the barrier is in a broken state
        if self._state is _BarrierState.BROKEN:
            raise exceptions.BrokenBarrierError("Barrier aborted")

    async def _release(self):
        # Release the tasks waiting in the barrier.

        # Enter draining state.
        # The next waiting tasks will be blocked until the end of draining.
        self._state = _BarrierState.DRAINING
        self._cond.notify_all()

    async def _wait(self):
        # Wait in the barrier until we are released.  Raise an exception
        # if the barrier is reset or broken.

        # Wait for the end of filling,
        # unless a CancelledError occurs.
        await self._cond.wait_for(lambda: self._state is not _BarrierState.FILLING)

        if self._state in (_BarrierState.BROKEN, _BarrierState.RESETTING):
            raise exceptions.BrokenBarrierError("Abort or reset of barrier")

    def _exit(self):
        # If we are the last task to exit the barrier, signal any tasks
        # waiting for the barrier to drain.
        if self._count == 0:
            if self._state in (_BarrierState.RESETTING, _BarrierState.DRAINING):
                self._state = _BarrierState.FILLING
            self._cond.notify_all()

    async def reset(self):
        """Reset the barrier to the initial state.

        Any tasks currently waiting will get the BrokenBarrier exception
        raised.
        """
        async with self._cond:
            if self._count > 0:
                if self._state is not _BarrierState.RESETTING:
                    # reset the barrier, waking up tasks
                    self._state = _BarrierState.RESETTING
            else:
                self._state = _BarrierState.FILLING
            self._cond.notify_all()

    async def abort(self):
        """Place the barrier into a 'broken' state.

        Useful in case of error.  Any currently waiting tasks and tasks
        attempting to 'wait()' will have BrokenBarrierError raised.
        """
        async with self._cond:
            self._state = _BarrierState.BROKEN
            self._cond.notify_all()

    @property
    def parties(self):
        """Return the number of tasks required to trip the barrier."""
        return self._parties

    @property
    def n_waiting(self):
        """Return the number of tasks currently waiting at the barrier."""
        if self._state is _BarrierState.FILLING:
            return self._count
        return 0

    @property
    def broken(self):
        """Return True if the barrier is in a broken state."""
        return self._state is _BarrierState.BROKEN
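A minimal sketch (not part of the vendored file) of the primitives above working together; Barrier requires Python 3.11+ and the names are illustrative:

import asyncio

async def worker(barrier, sem, lock, results):
    async with sem:                   # Semaphore bounds concurrency.
        index = await barrier.wait()  # Unique index in 0..parties-1.
        async with lock:              # Lock serializes the append.
            results.append(index)

async def main():
    lock, sem, barrier = asyncio.Lock(), asyncio.Semaphore(3), asyncio.Barrier(3)
    results = []
    await asyncio.gather(*(worker(barrier, sem, lock, results)
                           for _ in range(3)))
    print(sorted(results))  # [0, 1, 2]

asyncio.run(main())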
7
Dependencies/Python/Lib/asyncio/log.py
vendored
Normal file
@@ -0,0 +1,7 @@
"""Logging configuration."""

import logging


# Name the logger after the package.
logger = logging.getLogger(__package__)
21
Dependencies/Python/Lib/asyncio/mixins.py
vendored
Normal file
@@ -0,0 +1,21 @@
"""Event loop mixins."""

import threading
from . import events

_global_lock = threading.Lock()


class _LoopBoundMixin:
    _loop = None

    def _get_loop(self):
        loop = events._get_running_loop()

        if self._loop is None:
            with _global_lock:
                if self._loop is None:
                    self._loop = loop
        if loop is not self._loop:
            raise RuntimeError(f'{self!r} is bound to a different event loop')
        return loop
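The loop-binding check in _get_loop() above is what produces the "bound to a different event loop" error when a primitive is reused across loops. A sketch (not part of the vendored file):

import asyncio

event = asyncio.Event()  # No loop yet; binding happens on first wait().

async def run_once():
    waiter = asyncio.create_task(event.wait())
    await asyncio.sleep(0)  # Let wait() run and bind the Event to this loop.
    event.set()
    await waiter

asyncio.run(run_once())
event.clear()
try:
    asyncio.run(run_once())  # Second loop: _get_loop() raises.
except RuntimeError as exc:
    print(exc)  # "... is bound to a different event loop"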
894
Dependencies/Python/Lib/asyncio/proactor_events.py
vendored
Normal file
@@ -0,0 +1,894 @@
"""Event loop using a proactor and related classes.

A proactor is a "notify-on-completion" multiplexer.  Currently a
proactor is only implemented on Windows with IOCP.
"""

__all__ = 'BaseProactorEventLoop',

import io
import os
import socket
import warnings
import signal
import threading
import collections

from . import base_events
from . import constants
from . import futures
from . import exceptions
from . import protocols
from . import sslproto
from . import transports
from . import trsock
from .log import logger


def _set_socket_extra(transport, sock):
    transport._extra['socket'] = trsock.TransportSocket(sock)

    try:
        transport._extra['sockname'] = sock.getsockname()
    except socket.error:
        if transport._loop.get_debug():
            logger.warning(
                "getsockname() failed on %r", sock, exc_info=True)

    if 'peername' not in transport._extra:
        try:
            transport._extra['peername'] = sock.getpeername()
        except socket.error:
            # UDP sockets may not have a peer name
            transport._extra['peername'] = None


class _ProactorBasePipeTransport(transports._FlowControlMixin,
                                 transports.BaseTransport):
    """Base class for pipe and socket transports."""

    def __init__(self, loop, sock, protocol, waiter=None,
                 extra=None, server=None):
        super().__init__(extra, loop)
        self._set_extra(sock)
        self._sock = sock
        self.set_protocol(protocol)
        self._server = server
        self._buffer = None  # None or bytearray.
        self._read_fut = None
        self._write_fut = None
        self._pending_write = 0
        self._conn_lost = 0
        self._closing = False  # Set when close() called.
        self._called_connection_lost = False
        self._eof_written = False
        if self._server is not None:
            self._server._attach(self)
        self._loop.call_soon(self._protocol.connection_made, self)
        if waiter is not None:
            # only wake up the waiter when connection_made() has been called
            self._loop.call_soon(futures._set_result_unless_cancelled,
                                 waiter, None)

    def __repr__(self):
        info = [self.__class__.__name__]
        if self._sock is None:
            info.append('closed')
        elif self._closing:
            info.append('closing')
        if self._sock is not None:
            info.append(f'fd={self._sock.fileno()}')
        if self._read_fut is not None:
            info.append(f'read={self._read_fut!r}')
        if self._write_fut is not None:
            info.append(f'write={self._write_fut!r}')
        if self._buffer:
            info.append(f'write_bufsize={len(self._buffer)}')
        if self._eof_written:
            info.append('EOF written')
        return '<{}>'.format(' '.join(info))

    def _set_extra(self, sock):
        self._extra['pipe'] = sock

    def set_protocol(self, protocol):
        self._protocol = protocol

    def get_protocol(self):
        return self._protocol

    def is_closing(self):
        return self._closing

    def close(self):
        if self._closing:
            return
        self._closing = True
        self._conn_lost += 1
        if not self._buffer and self._write_fut is None:
            self._loop.call_soon(self._call_connection_lost, None)
        if self._read_fut is not None:
            self._read_fut.cancel()
            self._read_fut = None

    def __del__(self, _warn=warnings.warn):
        if self._sock is not None:
            _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
            self._sock.close()

    def _fatal_error(self, exc, message='Fatal error on pipe transport'):
        try:
            if isinstance(exc, OSError):
                if self._loop.get_debug():
                    logger.debug("%r: %s", self, message, exc_info=True)
            else:
                self._loop.call_exception_handler({
                    'message': message,
                    'exception': exc,
                    'transport': self,
                    'protocol': self._protocol,
                })
        finally:
            self._force_close(exc)

    def _force_close(self, exc):
        if self._empty_waiter is not None and not self._empty_waiter.done():
            if exc is None:
                self._empty_waiter.set_result(None)
            else:
                self._empty_waiter.set_exception(exc)
        if self._closing and self._called_connection_lost:
            return
        self._closing = True
        self._conn_lost += 1
        if self._write_fut:
            self._write_fut.cancel()
            self._write_fut = None
        if self._read_fut:
            self._read_fut.cancel()
            self._read_fut = None
        self._pending_write = 0
        self._buffer = None
        self._loop.call_soon(self._call_connection_lost, exc)

    def _call_connection_lost(self, exc):
        if self._called_connection_lost:
            return
        try:
            self._protocol.connection_lost(exc)
        finally:
            # XXX If there is a pending overlapped read on the other
            # end then it may fail with ERROR_NETNAME_DELETED if we
            # just close our end.  First calling shutdown() seems to
            # cure it, but maybe using DisconnectEx() would be better.
            if hasattr(self._sock, 'shutdown') and self._sock.fileno() != -1:
                self._sock.shutdown(socket.SHUT_RDWR)
            self._sock.close()
            self._sock = None
            server = self._server
            if server is not None:
                server._detach(self)
                self._server = None
            self._called_connection_lost = True

    def get_write_buffer_size(self):
        size = self._pending_write
        if self._buffer is not None:
            size += len(self._buffer)
        return size


class _ProactorReadPipeTransport(_ProactorBasePipeTransport,
                                 transports.ReadTransport):
    """Transport for read pipes."""

    def __init__(self, loop, sock, protocol, waiter=None,
                 extra=None, server=None, buffer_size=65536):
        self._pending_data_length = -1
        self._paused = True
        super().__init__(loop, sock, protocol, waiter, extra, server)

        self._data = bytearray(buffer_size)
        self._loop.call_soon(self._loop_reading)
        self._paused = False

    def is_reading(self):
        return not self._paused and not self._closing

    def pause_reading(self):
        if self._closing or self._paused:
            return
        self._paused = True

        # bpo-33694: Don't cancel self._read_fut because cancelling an
        # overlapped WSASend() silently loses data with the current proactor
        # implementation.
        #
        # If CancelIoEx() fails with ERROR_NOT_FOUND, it means that WSASend()
        # completed (even if HasOverlappedIoCompleted() returns 0), but
        # Overlapped.cancel() currently silently ignores the ERROR_NOT_FOUND
        # error. Once the overlapped is ignored, the IOCP loop will ignore the
        # completion I/O event and so not read the result of the overlapped
        # WSARecv().

        if self._loop.get_debug():
            logger.debug("%r pauses reading", self)

    def resume_reading(self):
        if self._closing or not self._paused:
            return

        self._paused = False
        if self._read_fut is None:
            self._loop.call_soon(self._loop_reading, None)

        length = self._pending_data_length
        self._pending_data_length = -1
        if length > -1:
            # Call the protocol method after calling _loop_reading(),
            # since the protocol can decide to pause reading again.
            self._loop.call_soon(self._data_received, self._data[:length], length)

        if self._loop.get_debug():
            logger.debug("%r resumes reading", self)

    def _eof_received(self):
        if self._loop.get_debug():
            logger.debug("%r received EOF", self)

        try:
            keep_open = self._protocol.eof_received()
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            self._fatal_error(
                exc, 'Fatal error: protocol.eof_received() call failed.')
            return

        if not keep_open:
            self.close()

    def _data_received(self, data, length):
        if self._paused:
            # Don't call any protocol method while reading is paused.
            # The protocol will be called on resume_reading().
            assert self._pending_data_length == -1
            self._pending_data_length = length
            return

        if length == 0:
            self._eof_received()
            return

        if isinstance(self._protocol, protocols.BufferedProtocol):
            try:
                protocols._feed_data_to_buffered_proto(self._protocol, data)
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException as exc:
                self._fatal_error(exc,
                                  'Fatal error: protocol.buffer_updated() '
                                  'call failed.')
                return
        else:
            self._protocol.data_received(data)

    def _loop_reading(self, fut=None):
        length = -1
        data = None
        try:
            if fut is not None:
                assert self._read_fut is fut or (self._read_fut is None and
                                                 self._closing)
                self._read_fut = None
                if fut.done():
                    # deliver data later in "finally" clause
                    length = fut.result()
                    if length == 0:
                        # we got end-of-file so no need to reschedule a new read
                        return

                    # It's a new slice, so make it immutable so protocols
                    # upstream don't have problems.
                    data = bytes(memoryview(self._data)[:length])
                else:
                    # the future will be replaced by the next proactor.recv call
                    fut.cancel()

            if self._closing:
                # since close() has been called we ignore any read data
                return

            # bpo-33694: buffer_updated() currently has no fast path because of
            # a data loss issue caused by overlapped WSASend() cancellation.

            if not self._paused:
                # reschedule a new read
                self._read_fut = self._loop._proactor.recv_into(self._sock, self._data)
        except ConnectionAbortedError as exc:
            if not self._closing:
                self._fatal_error(exc, 'Fatal read error on pipe transport')
            elif self._loop.get_debug():
                logger.debug("Read error on pipe transport while closing",
                             exc_info=True)
|
||||
except ConnectionResetError as exc:
|
||||
self._force_close(exc)
|
||||
except OSError as exc:
|
||||
self._fatal_error(exc, 'Fatal read error on pipe transport')
|
||||
except exceptions.CancelledError:
|
||||
if not self._closing:
|
||||
raise
|
||||
else:
|
||||
if not self._paused:
|
||||
self._read_fut.add_done_callback(self._loop_reading)
|
||||
finally:
|
||||
if length > -1:
|
||||
self._data_received(data, length)
|
||||
|
||||
|
||||
class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
|
||||
transports.WriteTransport):
|
||||
"""Transport for write pipes."""
|
||||
|
||||
_start_tls_compatible = True
|
||||
|
||||
def __init__(self, *args, **kw):
|
||||
super().__init__(*args, **kw)
|
||||
self._empty_waiter = None
|
||||
|
||||
def write(self, data):
|
||||
if not isinstance(data, (bytes, bytearray, memoryview)):
|
||||
raise TypeError(
|
||||
f"data argument must be a bytes-like object, "
|
||||
f"not {type(data).__name__}")
|
||||
if self._eof_written:
|
||||
raise RuntimeError('write_eof() already called')
|
||||
if self._empty_waiter is not None:
|
||||
raise RuntimeError('unable to write; sendfile is in progress')
|
||||
|
||||
if not data:
|
||||
return
|
||||
|
||||
if self._conn_lost:
|
||||
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
|
||||
logger.warning('socket.send() raised exception.')
|
||||
self._conn_lost += 1
|
||||
return
|
||||
|
||||
# Observable states:
|
||||
# 1. IDLE: _write_fut and _buffer both None
|
||||
# 2. WRITING: _write_fut set; _buffer None
|
||||
# 3. BACKED UP: _write_fut set; _buffer a bytearray
|
||||
# We always copy the data, so the caller can't modify it
|
||||
# while we're still waiting for the I/O to happen.
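        # Illustrative trace of the state machine above: write(b'a') on an
        # idle transport kicks off _loop_writing() (IDLE -> WRITING); a
        # second write(b'b') before the send completes lands in _buffer
        # (WRITING -> BACKED UP); when the overlapped send finishes,
        # _loop_writing() flushes the buffered bytes with one more send.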
        if self._write_fut is None:  # IDLE -> WRITING
            assert self._buffer is None
            # Pass a copy, except if it's already immutable.
            self._loop_writing(data=bytes(data))
        elif not self._buffer:  # WRITING -> BACKED UP
            # Make a mutable copy which we can extend.
            self._buffer = bytearray(data)
            self._maybe_pause_protocol()
        else:  # BACKED UP
            # Append to buffer (also copies).
            self._buffer.extend(data)
            self._maybe_pause_protocol()

    def _loop_writing(self, f=None, data=None):
        try:
            if f is not None and self._write_fut is None and self._closing:
                # XXX most likely self._force_close() has been called, and
                # it has set self._write_fut to None.
                return
            assert f is self._write_fut
            self._write_fut = None
            self._pending_write = 0
            if f:
                f.result()
            if data is None:
                data = self._buffer
                self._buffer = None
            if not data:
                if self._closing:
                    self._loop.call_soon(self._call_connection_lost, None)
                if self._eof_written:
                    self._sock.shutdown(socket.SHUT_WR)
                # Now that we've reduced the buffer size, tell the
                # protocol to resume writing if it was paused.  Note that
                # we do this last since the callback is called immediately
                # and it may add more data to the buffer (even causing the
                # protocol to be paused again).
                self._maybe_resume_protocol()
            else:
                self._write_fut = self._loop._proactor.send(self._sock, data)
                if not self._write_fut.done():
                    assert self._pending_write == 0
                    self._pending_write = len(data)
                    self._write_fut.add_done_callback(self._loop_writing)
                    self._maybe_pause_protocol()
                else:
                    self._write_fut.add_done_callback(self._loop_writing)
            if self._empty_waiter is not None and self._write_fut is None:
                self._empty_waiter.set_result(None)
        except ConnectionResetError as exc:
            self._force_close(exc)
        except OSError as exc:
            self._fatal_error(exc, 'Fatal write error on pipe transport')

    def can_write_eof(self):
        return True

    def write_eof(self):
        self.close()

    def abort(self):
        self._force_close(None)

    def _make_empty_waiter(self):
        if self._empty_waiter is not None:
            raise RuntimeError("Empty waiter is already set")
        self._empty_waiter = self._loop.create_future()
        if self._write_fut is None:
            self._empty_waiter.set_result(None)
        return self._empty_waiter

    def _reset_empty_waiter(self):
        self._empty_waiter = None


class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport):
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self._read_fut = self._loop._proactor.recv(self._sock, 16)
        self._read_fut.add_done_callback(self._pipe_closed)

    def _pipe_closed(self, fut):
        if fut.cancelled():
            # the transport has been closed
            return
        assert fut.result() == b''
        if self._closing:
            assert self._read_fut is None
            return
        assert fut is self._read_fut, (fut, self._read_fut)
        self._read_fut = None
        if self._write_fut is not None:
            self._force_close(BrokenPipeError())
        else:
            self.close()


class _ProactorDatagramTransport(_ProactorBasePipeTransport,
                                 transports.DatagramTransport):
    max_size = 256 * 1024

    def __init__(self, loop, sock, protocol, address=None,
                 waiter=None, extra=None):
        self._address = address
        self._empty_waiter = None
        self._buffer_size = 0
        # We don't need to call _protocol.connection_made() since our base
        # constructor does it for us.
        super().__init__(loop, sock, protocol, waiter=waiter, extra=extra)

        # The base constructor sets _buffer = None, so we set it here
        self._buffer = collections.deque()
        self._loop.call_soon(self._loop_reading)

    def _set_extra(self, sock):
        _set_socket_extra(self, sock)

    def get_write_buffer_size(self):
        return self._buffer_size

    def abort(self):
        self._force_close(None)

    def sendto(self, data, addr=None):
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError('data argument must be a bytes-like object, '
                            'not %r' % type(data).__name__)

        if self._address is not None and addr not in (None, self._address):
            raise ValueError(
                f'Invalid address: must be None or {self._address}')

        if self._conn_lost and self._address:
            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
                logger.warning('socket.sendto() raised exception.')
            self._conn_lost += 1
            return

        # Ensure that what we buffer is immutable.
        self._buffer.append((bytes(data), addr))
        self._buffer_size += len(data) + 8  # include header bytes

        if self._write_fut is None:
            # No current write operations are active, kick one off
            self._loop_writing()
        # else: A write operation is already kicked off

        self._maybe_pause_protocol()

    def _loop_writing(self, fut=None):
        try:
            if self._conn_lost:
                return

            assert fut is self._write_fut
            self._write_fut = None
            if fut:
                # We are in a _loop_writing() done callback, get the result
                fut.result()

            if not self._buffer or (self._conn_lost and self._address):
                # The connection has been closed
                if self._closing:
                    self._loop.call_soon(self._call_connection_lost, None)
                return

            data, addr = self._buffer.popleft()
            self._buffer_size -= len(data)
            if self._address is not None:
                self._write_fut = self._loop._proactor.send(self._sock,
                                                            data)
            else:
                self._write_fut = self._loop._proactor.sendto(self._sock,
                                                              data,
                                                              addr=addr)
        except OSError as exc:
            self._protocol.error_received(exc)
        except Exception as exc:
            self._fatal_error(exc, 'Fatal write error on datagram transport')
        else:
            self._write_fut.add_done_callback(self._loop_writing)
            self._maybe_resume_protocol()

    def _loop_reading(self, fut=None):
        data = None
        try:
            if self._conn_lost:
                return

            assert self._read_fut is fut or (self._read_fut is None and
                                             self._closing)

            self._read_fut = None
            if fut is not None:
                res = fut.result()

                if self._closing:
                    # since close() has been called we ignore any read data
                    data = None
                    return

                if self._address is not None:
                    data, addr = res, self._address
                else:
                    data, addr = res

            if self._conn_lost:
                return
            if self._address is not None:
                self._read_fut = self._loop._proactor.recv(self._sock,
                                                           self.max_size)
            else:
                self._read_fut = self._loop._proactor.recvfrom(self._sock,
                                                               self.max_size)
        except OSError as exc:
            self._protocol.error_received(exc)
        except exceptions.CancelledError:
            if not self._closing:
                raise
        else:
            if self._read_fut is not None:
                self._read_fut.add_done_callback(self._loop_reading)
        finally:
            if data:
                self._protocol.datagram_received(data, addr)
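# Usage sketch (standard public API; protocol class and port are
# illustrative): this transport is what create_datagram_endpoint() hands
# back under a proactor loop.
#
#     transport, protocol = await loop.create_datagram_endpoint(
#         lambda: MyDatagramProtocol(), local_addr=('127.0.0.1', 9999))
#     transport.sendto(b'ping')   # buffered and sent via sendto() above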
class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport,
                                   _ProactorBaseWritePipeTransport,
                                   transports.Transport):
    """Transport for duplex pipes."""

    def can_write_eof(self):
        return False

    def write_eof(self):
        raise NotImplementedError


class _ProactorSocketTransport(_ProactorReadPipeTransport,
                               _ProactorBaseWritePipeTransport,
                               transports.Transport):
    """Transport for connected sockets."""

    _sendfile_compatible = constants._SendfileMode.TRY_NATIVE

    def __init__(self, loop, sock, protocol, waiter=None,
                 extra=None, server=None):
        super().__init__(loop, sock, protocol, waiter, extra, server)
        base_events._set_nodelay(sock)

    def _set_extra(self, sock):
        _set_socket_extra(self, sock)

    def can_write_eof(self):
        return True

    def write_eof(self):
        if self._closing or self._eof_written:
            return
        self._eof_written = True
        if self._write_fut is None:
            self._sock.shutdown(socket.SHUT_WR)


class BaseProactorEventLoop(base_events.BaseEventLoop):

    def __init__(self, proactor):
        super().__init__()
        logger.debug('Using proactor: %s', proactor.__class__.__name__)
        self._proactor = proactor
        self._selector = proactor  # convenient alias
        self._self_reading_future = None
        self._accept_futures = {}  # socket file descriptor => Future
        proactor.set_loop(self)
        self._make_self_pipe()
        if threading.current_thread() is threading.main_thread():
            # wakeup fd can only be installed to a file descriptor
            # from the main thread
            signal.set_wakeup_fd(self._csock.fileno())

    def _make_socket_transport(self, sock, protocol, waiter=None,
                               extra=None, server=None):
        return _ProactorSocketTransport(self, sock, protocol, waiter,
                                        extra, server)

    def _make_ssl_transport(
            self, rawsock, protocol, sslcontext, waiter=None,
            *, server_side=False, server_hostname=None,
            extra=None, server=None,
            ssl_handshake_timeout=None,
            ssl_shutdown_timeout=None):
        ssl_protocol = sslproto.SSLProtocol(
            self, protocol, sslcontext, waiter,
            server_side, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout,
            ssl_shutdown_timeout=ssl_shutdown_timeout)
        _ProactorSocketTransport(self, rawsock, ssl_protocol,
                                 extra=extra, server=server)
        return ssl_protocol._app_transport

    def _make_datagram_transport(self, sock, protocol,
                                 address=None, waiter=None, extra=None):
        return _ProactorDatagramTransport(self, sock, protocol, address,
                                          waiter, extra)

    def _make_duplex_pipe_transport(self, sock, protocol, waiter=None,
                                    extra=None):
        return _ProactorDuplexPipeTransport(self,
                                            sock, protocol, waiter, extra)

    def _make_read_pipe_transport(self, sock, protocol, waiter=None,
                                  extra=None):
        return _ProactorReadPipeTransport(self, sock, protocol, waiter, extra)

    def _make_write_pipe_transport(self, sock, protocol, waiter=None,
                                   extra=None):
        # We want connection_lost() to be called when other end closes
        return _ProactorWritePipeTransport(self,
                                           sock, protocol, waiter, extra)

    def close(self):
        if self.is_running():
            raise RuntimeError("Cannot close a running event loop")
        if self.is_closed():
            return

        if threading.current_thread() is threading.main_thread():
            signal.set_wakeup_fd(-1)
        # Call these methods before closing the event loop (before calling
        # BaseEventLoop.close), because they can schedule callbacks with
        # call_soon(), which is forbidden when the event loop is closed.
        self._stop_accept_futures()
        self._close_self_pipe()
        self._proactor.close()
        self._proactor = None
        self._selector = None

        # Close the event loop
        super().close()

    async def sock_recv(self, sock, n):
        return await self._proactor.recv(sock, n)

    async def sock_recv_into(self, sock, buf):
        return await self._proactor.recv_into(sock, buf)

    async def sock_recvfrom(self, sock, bufsize):
        return await self._proactor.recvfrom(sock, bufsize)

    async def sock_recvfrom_into(self, sock, buf, nbytes=0):
        if not nbytes:
            nbytes = len(buf)

        return await self._proactor.recvfrom_into(sock, buf, nbytes)

    async def sock_sendall(self, sock, data):
        return await self._proactor.send(sock, data)

    async def sock_sendto(self, sock, data, address):
        return await self._proactor.sendto(sock, data, 0, address)

    async def sock_connect(self, sock, address):
        if self._debug and sock.gettimeout() != 0:
            raise ValueError("the socket must be non-blocking")
        return await self._proactor.connect(sock, address)

    async def sock_accept(self, sock):
        return await self._proactor.accept(sock)

    async def _sock_sendfile_native(self, sock, file, offset, count):
        try:
            fileno = file.fileno()
        except (AttributeError, io.UnsupportedOperation) as err:
            raise exceptions.SendfileNotAvailableError("not a regular file")
        try:
            fsize = os.fstat(fileno).st_size
        except OSError:
            raise exceptions.SendfileNotAvailableError("not a regular file")
        blocksize = count if count else fsize
        if not blocksize:
            return 0  # empty file

        blocksize = min(blocksize, 0xffff_ffff)
        end_pos = min(offset + count, fsize) if count else fsize
        offset = min(offset, fsize)
        total_sent = 0
        try:
            while True:
                blocksize = min(end_pos - offset, blocksize)
                if blocksize <= 0:
                    return total_sent
                await self._proactor.sendfile(sock, file, offset, blocksize)
                offset += blocksize
                total_sent += blocksize
        finally:
            if total_sent > 0:
                file.seek(offset)
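    # The public entry point for the helper above is loop.sock_sendfile();
    # a minimal sketch, assuming `sock` is a connected non-blocking socket
    # and `file` an open regular binary file:
    #
    #     sent = await loop.sock_sendfile(sock, file, offset=0, count=None)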
    async def _sendfile_native(self, transp, file, offset, count):
        resume_reading = transp.is_reading()
        transp.pause_reading()
        await transp._make_empty_waiter()
        try:
            return await self.sock_sendfile(transp._sock, file, offset, count,
                                            fallback=False)
        finally:
            transp._reset_empty_waiter()
            if resume_reading:
                transp.resume_reading()

    def _close_self_pipe(self):
        if self._self_reading_future is not None:
            self._self_reading_future.cancel()
            self._self_reading_future = None
        self._ssock.close()
        self._ssock = None
        self._csock.close()
        self._csock = None
        self._internal_fds -= 1

    def _make_self_pipe(self):
        # A self-socket, really. :-)
        self._ssock, self._csock = socket.socketpair()
        self._ssock.setblocking(False)
        self._csock.setblocking(False)
        self._internal_fds += 1

    def _loop_self_reading(self, f=None):
        try:
            if f is not None:
                f.result()  # may raise
            if self._self_reading_future is not f:
                # When we scheduled this Future, we assigned it to
                # _self_reading_future. If it's not there now, something has
                # tried to cancel the loop while this callback was still in the
                # queue (see windows_events.ProactorEventLoop.run_forever). In
                # that case stop here instead of continuing to schedule a new
                # iteration.
                return
            f = self._proactor.recv(self._ssock, 4096)
        except exceptions.CancelledError:
            # _close_self_pipe() has been called, stop waiting for data
            return
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            self.call_exception_handler({
                'message': 'Error on reading from the event loop self pipe',
                'exception': exc,
                'loop': self,
            })
        else:
            self._self_reading_future = f
            f.add_done_callback(self._loop_self_reading)

    def _write_to_self(self):
        # This may be called from a different thread, possibly after
        # _close_self_pipe() has been called or even while it is
        # running.  Guard for self._csock being None or closed.  When
        # a socket is closed, send() raises OSError (with errno set to
        # EBADF, but let's not rely on the exact error code).
        csock = self._csock
        if csock is None:
            return

        try:
            csock.send(b'\0')
        except OSError:
            if self._debug:
                logger.debug("Failed to write a null byte into the "
                             "self-pipe socket",
                             exc_info=True)

    def _start_serving(self, protocol_factory, sock,
                       sslcontext=None, server=None, backlog=100,
                       ssl_handshake_timeout=None,
                       ssl_shutdown_timeout=None):

        def loop(f=None):
            try:
                if f is not None:
                    conn, addr = f.result()
                    if self._debug:
                        logger.debug("%r got a new connection from %r: %r",
                                     server, addr, conn)
                    protocol = protocol_factory()
                    if sslcontext is not None:
                        self._make_ssl_transport(
                            conn, protocol, sslcontext, server_side=True,
                            extra={'peername': addr}, server=server,
                            ssl_handshake_timeout=ssl_handshake_timeout,
                            ssl_shutdown_timeout=ssl_shutdown_timeout)
                    else:
                        self._make_socket_transport(
                            conn, protocol,
                            extra={'peername': addr}, server=server)
                if self.is_closed():
                    return
                f = self._proactor.accept(sock)
            except OSError as exc:
                if sock.fileno() != -1:
                    self.call_exception_handler({
                        'message': 'Accept failed on a socket',
                        'exception': exc,
                        'socket': trsock.TransportSocket(sock),
                    })
                    sock.close()
                elif self._debug:
                    logger.debug("Accept failed on socket %r",
                                 sock, exc_info=True)
            except exceptions.CancelledError:
                sock.close()
            else:
                self._accept_futures[sock.fileno()] = f
                f.add_done_callback(loop)

        self.call_soon(loop)

    def _process_events(self, event_list):
        # Events are processed in the IocpProactor._poll() method
        pass

    def _stop_accept_futures(self):
        for future in self._accept_futures.values():
            future.cancel()
        self._accept_futures.clear()

    def _stop_serving(self, sock):
        future = self._accept_futures.pop(sock.fileno(), None)
        if future:
            future.cancel()
        self._proactor._stop_serving(sock)
        sock.close()
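The classes above are normally reached through asyncio's public API rather than instantiated directly. A minimal echo-server sketch (host and port are illustrative); on Windows, asyncio.run() drives this through a proactor event loop, so each accepted client is served by the _ProactorSocketTransport defined above:

import asyncio

class EchoProtocol(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport

    def data_received(self, data):
        # Echo the received bytes straight back through the transport.
        self.transport.write(data)

async def main():
    loop = asyncio.get_running_loop()
    server = await loop.create_server(EchoProtocol, '127.0.0.1', 8888)
    async with server:
        await server.serve_forever()

asyncio.run(main())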
216
Dependencies/Python/Lib/asyncio/protocols.py
vendored
Normal file
@@ -0,0 +1,216 @@
"""Abstract Protocol base classes."""
|
||||
|
||||
__all__ = (
|
||||
'BaseProtocol', 'Protocol', 'DatagramProtocol',
|
||||
'SubprocessProtocol', 'BufferedProtocol',
|
||||
)
|
||||
|
||||
|
||||
class BaseProtocol:
|
||||
"""Common base class for protocol interfaces.
|
||||
|
||||
Usually user implements protocols that derived from BaseProtocol
|
||||
like Protocol or ProcessProtocol.
|
||||
|
||||
The only case when BaseProtocol should be implemented directly is
|
||||
write-only transport like write pipe
|
||||
"""
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
def connection_made(self, transport):
|
||||
"""Called when a connection is made.
|
||||
|
||||
The argument is the transport representing the pipe connection.
|
||||
To receive data, wait for data_received() calls.
|
||||
When the connection is closed, connection_lost() is called.
|
||||
"""
|
||||
|
||||
def connection_lost(self, exc):
|
||||
"""Called when the connection is lost or closed.
|
||||
|
||||
The argument is an exception object or None (the latter
|
||||
meaning a regular EOF is received or the connection was
|
||||
aborted or closed).
|
||||
"""
|
||||
|
||||
def pause_writing(self):
|
||||
"""Called when the transport's buffer goes over the high-water mark.
|
||||
|
||||
Pause and resume calls are paired -- pause_writing() is called
|
||||
once when the buffer goes strictly over the high-water mark
|
||||
(even if subsequent writes increases the buffer size even
|
||||
more), and eventually resume_writing() is called once when the
|
||||
buffer size reaches the low-water mark.
|
||||
|
||||
Note that if the buffer size equals the high-water mark,
|
||||
pause_writing() is not called -- it must go strictly over.
|
||||
Conversely, resume_writing() is called when the buffer size is
|
||||
equal or lower than the low-water mark. These end conditions
|
||||
are important to ensure that things go as expected when either
|
||||
mark is zero.
|
||||
|
||||
NOTE: This is the only Protocol callback that is not called
|
||||
through EventLoop.call_soon() -- if it were, it would have no
|
||||
effect when it's most needed (when the app keeps writing
|
||||
without yielding until pause_writing() is called).
|
||||
"""
|
||||
|
||||
def resume_writing(self):
|
||||
"""Called when the transport's buffer drains below the low-water mark.
|
||||
|
||||
See pause_writing() for details.
|
||||
"""
|
||||
|
||||
|
||||
class Protocol(BaseProtocol):
|
||||
"""Interface for stream protocol.
|
||||
|
||||
The user should implement this interface. They can inherit from
|
||||
this class but don't need to. The implementations here do
|
||||
nothing (they don't raise exceptions).
|
||||
|
||||
When the user wants to requests a transport, they pass a protocol
|
||||
factory to a utility function (e.g., EventLoop.create_connection()).
|
||||
|
||||
When the connection is made successfully, connection_made() is
|
||||
called with a suitable transport object. Then data_received()
|
||||
will be called 0 or more times with data (bytes) received from the
|
||||
transport; finally, connection_lost() will be called exactly once
|
||||
with either an exception object or None as an argument.
|
||||
|
||||
State machine of calls:
|
||||
|
||||
start -> CM [-> DR*] [-> ER?] -> CL -> end
|
||||
|
||||
* CM: connection_made()
|
||||
* DR: data_received()
|
||||
* ER: eof_received()
|
||||
* CL: connection_lost()
|
||||
"""
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
def data_received(self, data):
|
||||
"""Called when some data is received.
|
||||
|
||||
The argument is a bytes object.
|
||||
"""
|
||||
|
||||
def eof_received(self):
|
||||
"""Called when the other end calls write_eof() or equivalent.
|
||||
|
||||
If this returns a false value (including None), the transport
|
||||
will close itself. If it returns a true value, closing the
|
||||
transport is up to the protocol.
|
||||
"""
|
||||
|
||||
|
||||
class BufferedProtocol(BaseProtocol):
|
||||
"""Interface for stream protocol with manual buffer control.
|
||||
|
||||
Event methods, such as `create_server` and `create_connection`,
|
||||
accept factories that return protocols that implement this interface.
|
||||
|
||||
The idea of BufferedProtocol is that it allows to manually allocate
|
||||
and control the receive buffer. Event loops can then use the buffer
|
||||
provided by the protocol to avoid unnecessary data copies. This
|
||||
can result in noticeable performance improvement for protocols that
|
||||
receive big amounts of data. Sophisticated protocols can allocate
|
||||
the buffer only once at creation time.
|
||||
|
||||
State machine of calls:
|
||||
|
||||
start -> CM [-> GB [-> BU?]]* [-> ER?] -> CL -> end
|
||||
|
||||
* CM: connection_made()
|
||||
* GB: get_buffer()
|
||||
* BU: buffer_updated()
|
||||
* ER: eof_received()
|
||||
* CL: connection_lost()
|
||||
"""
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
def get_buffer(self, sizehint):
|
||||
"""Called to allocate a new receive buffer.
|
||||
|
||||
*sizehint* is a recommended minimal size for the returned
|
||||
buffer. When set to -1, the buffer size can be arbitrary.
|
||||
|
||||
Must return an object that implements the
|
||||
:ref:`buffer protocol <bufferobjects>`.
|
||||
It is an error to return a zero-sized buffer.
|
||||
"""
|
||||
|
||||
def buffer_updated(self, nbytes):
|
||||
"""Called when the buffer was updated with the received data.
|
||||
|
||||
*nbytes* is the total number of bytes that were written to
|
||||
the buffer.
|
||||
"""
|
||||
|
||||
def eof_received(self):
|
||||
"""Called when the other end calls write_eof() or equivalent.
|
||||
|
||||
If this returns a false value (including None), the transport
|
||||
will close itself. If it returns a true value, closing the
|
||||
transport is up to the protocol.
|
||||
"""
|
||||
|
||||
|
||||
class DatagramProtocol(BaseProtocol):
|
||||
"""Interface for datagram protocol."""
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
def datagram_received(self, data, addr):
|
||||
"""Called when some datagram is received."""
|
||||
|
||||
def error_received(self, exc):
|
||||
"""Called when a send or receive operation raises an OSError.
|
||||
|
||||
(Other than BlockingIOError or InterruptedError.)
|
||||
"""
|
||||
|
||||
|
||||
class SubprocessProtocol(BaseProtocol):
|
||||
"""Interface for protocol for subprocess calls."""
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
def pipe_data_received(self, fd, data):
|
||||
"""Called when the subprocess writes data into stdout/stderr pipe.
|
||||
|
||||
fd is int file descriptor.
|
||||
data is bytes object.
|
||||
"""
|
||||
|
||||
def pipe_connection_lost(self, fd, exc):
|
||||
"""Called when a file descriptor associated with the child process is
|
||||
closed.
|
||||
|
||||
fd is the int file descriptor that was closed.
|
||||
"""
|
||||
|
||||
def process_exited(self):
|
||||
"""Called when subprocess has exited."""
|
||||
|
||||
|
||||
def _feed_data_to_buffered_proto(proto, data):
|
||||
data_len = len(data)
|
||||
while data_len:
|
||||
buf = proto.get_buffer(data_len)
|
||||
buf_len = len(buf)
|
||||
if not buf_len:
|
||||
raise RuntimeError('get_buffer() returned an empty buffer')
|
||||
|
||||
if buf_len >= data_len:
|
||||
buf[:data_len] = data
|
||||
proto.buffer_updated(data_len)
|
||||
return
|
||||
else:
|
||||
buf[:buf_len] = data[:buf_len]
|
||||
proto.buffer_updated(buf_len)
|
||||
data = data[buf_len:]
|
||||
data_len = len(data)
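A minimal BufferedProtocol sketch (class and attribute names are illustrative) showing how get_buffer() and buffer_updated() pair up; _feed_data_to_buffered_proto() above drives exactly this cycle when a transport has no zero-copy fast path:

import asyncio

class FixedBufferProtocol(asyncio.BufferedProtocol):
    def __init__(self, size=4096):
        self._buffer = bytearray(size)   # reused for every read
        self._received = bytearray()

    def get_buffer(self, sizehint):
        # Hand the event loop one preallocated buffer, whatever the hint.
        return self._buffer

    def buffer_updated(self, nbytes):
        # The first nbytes of the buffer now hold freshly received data.
        self._received += self._buffer[:nbytes]

    def eof_received(self):
        return False  # let the transport close itself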
308
Dependencies/Python/Lib/asyncio/queues.py
vendored
Normal file
@@ -0,0 +1,308 @@
__all__ = (
    'Queue',
    'PriorityQueue',
    'LifoQueue',
    'QueueFull',
    'QueueEmpty',
    'QueueShutDown',
)

import collections
import heapq
from types import GenericAlias

from . import locks
from . import mixins


class QueueEmpty(Exception):
    """Raised when Queue.get_nowait() is called on an empty Queue."""
    pass


class QueueFull(Exception):
    """Raised when the Queue.put_nowait() method is called on a full Queue."""
    pass


class QueueShutDown(Exception):
    """Raised when putting on to or getting from a shut-down Queue."""
    pass


class Queue(mixins._LoopBoundMixin):
    """A queue, useful for coordinating producer and consumer coroutines.

    If maxsize is less than or equal to zero, the queue size is infinite. If it
    is an integer greater than 0, then "await put()" will block when the
    queue reaches maxsize, until an item is removed by get().

    Unlike the standard library Queue, you can reliably know this Queue's size
    with qsize(), since your single-threaded asyncio application won't be
    interrupted between calling qsize() and doing an operation on the Queue.
    """

    def __init__(self, maxsize=0):
        self._maxsize = maxsize

        # Futures.
        self._getters = collections.deque()
        # Futures.
        self._putters = collections.deque()
        self._unfinished_tasks = 0
        self._finished = locks.Event()
        self._finished.set()
        self._init(maxsize)
        self._is_shutdown = False

    # These three are overridable in subclasses.

    def _init(self, maxsize):
        self._queue = collections.deque()

    def _get(self):
        return self._queue.popleft()

    def _put(self, item):
        self._queue.append(item)

    # End of the overridable methods.

    def _wakeup_next(self, waiters):
        # Wake up the next waiter (if any) that isn't cancelled.
        while waiters:
            waiter = waiters.popleft()
            if not waiter.done():
                waiter.set_result(None)
                break

    def __repr__(self):
        return f'<{type(self).__name__} at {id(self):#x} {self._format()}>'

    def __str__(self):
        return f'<{type(self).__name__} {self._format()}>'

    __class_getitem__ = classmethod(GenericAlias)

    def _format(self):
        result = f'maxsize={self._maxsize!r}'
        if getattr(self, '_queue', None):
            result += f' _queue={list(self._queue)!r}'
        if self._getters:
            result += f' _getters[{len(self._getters)}]'
        if self._putters:
            result += f' _putters[{len(self._putters)}]'
        if self._unfinished_tasks:
            result += f' tasks={self._unfinished_tasks}'
        if self._is_shutdown:
            result += ' shutdown'
        return result

    def qsize(self):
        """Number of items in the queue."""
        return len(self._queue)

    @property
    def maxsize(self):
        """Number of items allowed in the queue."""
        return self._maxsize

    def empty(self):
        """Return True if the queue is empty, False otherwise."""
        return not self._queue

    def full(self):
        """Return True if there are maxsize items in the queue.

        Note: if the Queue was initialized with maxsize=0 (the default),
        then full() is never True.
        """
        if self._maxsize <= 0:
            return False
        else:
            return self.qsize() >= self._maxsize

    async def put(self, item):
        """Put an item into the queue.

        If the queue is full, wait until a free
        slot is available before adding the item.

        Raises QueueShutDown if the queue has been shut down.
        """
        while self.full():
            if self._is_shutdown:
                raise QueueShutDown
            putter = self._get_loop().create_future()
            self._putters.append(putter)
            try:
                await putter
            except:
                putter.cancel()  # Just in case putter is not done yet.
                try:
                    # Clean self._putters from canceled putters.
                    self._putters.remove(putter)
                except ValueError:
                    # The putter could be removed from self._putters by a
                    # previous get_nowait call or a shutdown call.
                    pass
                if not self.full() and not putter.cancelled():
                    # We were woken up by get_nowait(), but can't take
                    # the call.  Wake up the next in line.
                    self._wakeup_next(self._putters)
                raise
        return self.put_nowait(item)

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        If no free slot is immediately available, raise QueueFull.

        Raises QueueShutDown if the queue has been shut down.
        """
        if self._is_shutdown:
            raise QueueShutDown
        if self.full():
            raise QueueFull
        self._put(item)
        self._unfinished_tasks += 1
        self._finished.clear()
        self._wakeup_next(self._getters)

    async def get(self):
        """Remove and return an item from the queue.

        If queue is empty, wait until an item is available.

        Raises QueueShutDown if the queue has been shut down and is empty, or
        if the queue has been shut down immediately.
        """
        while self.empty():
            if self._is_shutdown and self.empty():
                raise QueueShutDown
            getter = self._get_loop().create_future()
            self._getters.append(getter)
            try:
                await getter
            except:
                getter.cancel()  # Just in case getter is not done yet.
                try:
                    # Clean self._getters from canceled getters.
                    self._getters.remove(getter)
                except ValueError:
                    # The getter could be removed from self._getters by a
                    # previous put_nowait call, or a shutdown call.
                    pass
                if not self.empty() and not getter.cancelled():
                    # We were woken up by put_nowait(), but can't take
                    # the call.  Wake up the next in line.
                    self._wakeup_next(self._getters)
                raise
        return self.get_nowait()

    def get_nowait(self):
        """Remove and return an item from the queue.

        Return an item if one is immediately available, else raise QueueEmpty.

        Raises QueueShutDown if the queue has been shut down and is empty, or
        if the queue has been shut down immediately.
        """
        if self.empty():
            if self._is_shutdown:
                raise QueueShutDown
            raise QueueEmpty
        item = self._get()
        self._wakeup_next(self._putters)
        return item

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by queue consumers.  For each get() used to fetch a task,
        a subsequent call to task_done() tells the queue that the processing
        on the task is complete.

        If a join() is currently blocking, it will resume when all items have
        been processed (meaning that a task_done() call was received for every
        item that had been put() into the queue).

        shutdown(immediate=True) calls task_done() for each remaining item in
        the queue.

        Raises ValueError if called more times than there were items placed in
        the queue.
        """
        if self._unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self._unfinished_tasks -= 1
        if self._unfinished_tasks == 0:
            self._finished.set()

    async def join(self):
        """Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the
        queue.  The count goes down whenever a consumer calls task_done() to
        indicate that the item was retrieved and all work on it is complete.
        When the count of unfinished tasks drops to zero, join() unblocks.
        """
        if self._unfinished_tasks > 0:
            await self._finished.wait()

    def shutdown(self, immediate=False):
        """Shut down the queue, making queue gets and puts raise QueueShutDown.

        By default, gets will only raise once the queue is empty.  Set
        'immediate' to True to make gets raise immediately instead.

        All blocked callers of put() and get() will be unblocked.  If
        'immediate', a task is marked as done for each item remaining in
        the queue, which may unblock callers of join().
        """
        self._is_shutdown = True
        if immediate:
            while not self.empty():
                self._get()
                if self._unfinished_tasks > 0:
                    self._unfinished_tasks -= 1
            if self._unfinished_tasks == 0:
                self._finished.set()
        # All getters need to re-check queue-empty to raise ShutDown
        while self._getters:
            getter = self._getters.popleft()
            if not getter.done():
                getter.set_result(None)
        while self._putters:
            putter = self._putters.popleft()
            if not putter.done():
                putter.set_result(None)


class PriorityQueue(Queue):
    """A subclass of Queue; retrieves entries in priority order (lowest first).

    Entries are typically tuples of the form: (priority number, data).
    """

    def _init(self, maxsize):
        self._queue = []

    def _put(self, item, heappush=heapq.heappush):
        heappush(self._queue, item)

    def _get(self, heappop=heapq.heappop):
        return heappop(self._queue)


class LifoQueue(Queue):
    """A subclass of Queue that retrieves most recently added entries first."""

    def _init(self, maxsize):
        self._queue = []

    def _put(self, item):
        self._queue.append(item)

    def _get(self):
        return self._queue.pop()
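A brief producer/consumer sketch of the API above (item count and maxsize are illustrative); join() returns once task_done() has been called for every put() item:

import asyncio

async def worker(queue):
    while True:
        item = await queue.get()
        # ... process item here ...
        queue.task_done()

async def main():
    queue = asyncio.Queue(maxsize=10)
    consumer = asyncio.create_task(worker(queue))
    for i in range(5):
        await queue.put(i)
    await queue.join()   # blocks until all five items are processed
    consumer.cancel()

asyncio.run(main())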
216
Dependencies/Python/Lib/asyncio/runners.py
vendored
Normal file
@@ -0,0 +1,216 @@
__all__ = ('Runner', 'run')

import contextvars
import enum
import functools
import threading
import signal
from . import coroutines
from . import events
from . import exceptions
from . import tasks
from . import constants

class _State(enum.Enum):
    CREATED = "created"
    INITIALIZED = "initialized"
    CLOSED = "closed"


class Runner:
    """A context manager that controls the event loop life cycle.

    The context manager always creates a new event loop,
    allows running async functions inside it,
    and properly finalizes the loop at the context manager exit.

    If debug is True, the event loop will be run in debug mode.
    If loop_factory is passed, it is used for new event loop creation.

    asyncio.run(main(), debug=True)

    is a shortcut for

    with asyncio.Runner(debug=True) as runner:
        runner.run(main())

    The run() method can be called multiple times within the runner's context.

    This can be useful for interactive consoles (e.g. IPython),
    unittest runners, console tools -- everywhere async code
    is called from an existing sync framework and where the preferred single
    asyncio.run() call doesn't work.

    """

    # Note: the class is final, it is not intended for inheritance.

    def __init__(self, *, debug=None, loop_factory=None):
        self._state = _State.CREATED
        self._debug = debug
        self._loop_factory = loop_factory
        self._loop = None
        self._context = None
        self._interrupt_count = 0
        self._set_event_loop = False

    def __enter__(self):
        self._lazy_init()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def close(self):
        """Shutdown and close event loop."""
        if self._state is not _State.INITIALIZED:
            return
        try:
            loop = self._loop
            _cancel_all_tasks(loop)
            loop.run_until_complete(loop.shutdown_asyncgens())
            loop.run_until_complete(
                loop.shutdown_default_executor(constants.THREAD_JOIN_TIMEOUT))
        finally:
            if self._set_event_loop:
                events.set_event_loop(None)
            loop.close()
            self._loop = None
            self._state = _State.CLOSED

    def get_loop(self):
        """Return embedded event loop."""
        self._lazy_init()
        return self._loop

    def run(self, coro, *, context=None):
        """Run a coroutine inside the embedded event loop."""
        if not coroutines.iscoroutine(coro):
            raise ValueError("a coroutine was expected, got {!r}".format(coro))

        if events._get_running_loop() is not None:
            # fail fast with short traceback
            raise RuntimeError(
                "Runner.run() cannot be called from a running event loop")

        self._lazy_init()

        if context is None:
            context = self._context
        task = self._loop.create_task(coro, context=context)

        if (threading.current_thread() is threading.main_thread()
                and signal.getsignal(signal.SIGINT) is signal.default_int_handler
        ):
            sigint_handler = functools.partial(self._on_sigint, main_task=task)
            try:
                signal.signal(signal.SIGINT, sigint_handler)
            except ValueError:
                # `signal.signal` may throw if `threading.main_thread` does
                # not support signals (e.g. embedded interpreter with signals
                # not registered - see gh-91880)
                sigint_handler = None
        else:
            sigint_handler = None

        self._interrupt_count = 0
        try:
            return self._loop.run_until_complete(task)
        except exceptions.CancelledError:
            if self._interrupt_count > 0:
                uncancel = getattr(task, "uncancel", None)
                if uncancel is not None and uncancel() == 0:
                    raise KeyboardInterrupt()
            raise  # CancelledError
        finally:
            if (sigint_handler is not None
                    and signal.getsignal(signal.SIGINT) is sigint_handler
            ):
                signal.signal(signal.SIGINT, signal.default_int_handler)

    def _lazy_init(self):
        if self._state is _State.CLOSED:
            raise RuntimeError("Runner is closed")
        if self._state is _State.INITIALIZED:
            return
        if self._loop_factory is None:
            self._loop = events.new_event_loop()
            if not self._set_event_loop:
                # Call set_event_loop only once to avoid calling
                # attach_loop multiple times on child watchers
                events.set_event_loop(self._loop)
                self._set_event_loop = True
        else:
            self._loop = self._loop_factory()
        if self._debug is not None:
            self._loop.set_debug(self._debug)
        self._context = contextvars.copy_context()
        self._state = _State.INITIALIZED

    def _on_sigint(self, signum, frame, main_task):
        self._interrupt_count += 1
        if self._interrupt_count == 1 and not main_task.done():
            main_task.cancel()
            # wakeup loop if it is blocked by select() with long timeout
            self._loop.call_soon_threadsafe(lambda: None)
            return
        raise KeyboardInterrupt()


def run(main, *, debug=None, loop_factory=None):
    """Execute the coroutine and return the result.

    This function runs the passed coroutine, taking care of
    managing the asyncio event loop, finalizing asynchronous
    generators and closing the default executor.

    This function cannot be called when another asyncio event loop is
    running in the same thread.

    If debug is True, the event loop will be run in debug mode.
    If loop_factory is passed, it is used for new event loop creation.

    This function always creates a new event loop and closes it at the end.
    It should be used as a main entry point for asyncio programs, and should
    ideally only be called once.

    The executor is given a timeout duration of 5 minutes to shutdown.
    If the executor hasn't finished within that duration, a warning is
    emitted and the executor is closed.

    Example:

        async def main():
            await asyncio.sleep(1)
            print('hello')

        asyncio.run(main())
    """
    if events._get_running_loop() is not None:
        # fail fast with short traceback
        raise RuntimeError(
            "asyncio.run() cannot be called from a running event loop")

    with Runner(debug=debug, loop_factory=loop_factory) as runner:
        return runner.run(main)


def _cancel_all_tasks(loop):
    to_cancel = tasks.all_tasks(loop)
    if not to_cancel:
        return

    for task in to_cancel:
        task.cancel()

    loop.run_until_complete(tasks.gather(*to_cancel, return_exceptions=True))

    for task in to_cancel:
        if task.cancelled():
            continue
        if task.exception() is not None:
            loop.call_exception_handler({
                'message': 'unhandled exception during asyncio.run() shutdown',
                'exception': task.exception(),
                'task': task,
            })
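A short sketch of the Runner lifecycle documented above (the fetch coroutine is illustrative); both calls share one event loop and one contextvars context, which is the point of using Runner over repeated asyncio.run() calls:

import asyncio

async def fetch(value):
    await asyncio.sleep(0)
    return value

with asyncio.Runner(debug=False) as runner:
    first = runner.run(fetch(1))
    second = runner.run(fetch(2))   # reuses the same loop as the first call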
1314
Dependencies/Python/Lib/asyncio/selector_events.py
vendored
Normal file
File diff suppressed because it is too large
929
Dependencies/Python/Lib/asyncio/sslproto.py
vendored
Normal file
@@ -0,0 +1,929 @@
# Contains code from https://github.com/MagicStack/uvloop/tree/v0.16.0
# SPDX-License-Identifier: PSF-2.0 AND (MIT OR Apache-2.0)
# SPDX-FileCopyrightText: Copyright (c) 2015-2021 MagicStack Inc.  http://magic.io

import collections
import enum
import warnings
try:
    import ssl
except ImportError:  # pragma: no cover
    ssl = None

from . import constants
from . import exceptions
from . import protocols
from . import transports
from .log import logger

if ssl is not None:
    SSLAgainErrors = (ssl.SSLWantReadError, ssl.SSLSyscallError)


class SSLProtocolState(enum.Enum):
    UNWRAPPED = "UNWRAPPED"
    DO_HANDSHAKE = "DO_HANDSHAKE"
    WRAPPED = "WRAPPED"
    FLUSHING = "FLUSHING"
    SHUTDOWN = "SHUTDOWN"


class AppProtocolState(enum.Enum):
    # This tracks the state of app protocol (https://git.io/fj59P):
    #
    #     INIT -cm-> CON_MADE [-dr*->] [-er-> EOF?] -cl-> CON_LOST
    #
    # * cm: connection_made()
    # * dr: data_received()
    # * er: eof_received()
    # * cl: connection_lost()

    STATE_INIT = "STATE_INIT"
    STATE_CON_MADE = "STATE_CON_MADE"
    STATE_EOF = "STATE_EOF"
    STATE_CON_LOST = "STATE_CON_LOST"


def _create_transport_context(server_side, server_hostname):
    if server_side:
        raise ValueError('Server side SSL needs a valid SSLContext')

    # Client side may pass ssl=True to use a default
    # context; in that case the sslcontext passed is None.
    # The default is secure for client connections.
    # Python 3.4+: use up-to-date strong settings.
    sslcontext = ssl.create_default_context()
    if not server_hostname:
        sslcontext.check_hostname = False
    return sslcontext


def add_flowcontrol_defaults(high, low, kb):
    if high is None:
        if low is None:
            hi = kb * 1024
        else:
            lo = low
            hi = 4 * lo
    else:
        hi = high
    if low is None:
        lo = hi // 4
    else:
        lo = low

    if not hi >= lo >= 0:
        raise ValueError('high (%r) must be >= low (%r) must be >= 0' %
                         (hi, lo))

    return hi, lo
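# Worked examples of the defaults above (illustrative input values):
#   add_flowcontrol_defaults(None, None, kb=64) -> (65536, 16384)
#       hi = 64 * 1024, lo = hi // 4
#   add_flowcontrol_defaults(None, 256, kb=64)  -> (1024, 256)
#       lo = 256, hi = 4 * lo
#   add_flowcontrol_defaults(100, 200, kb=64)   -> ValueError (hi < lo)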
class _SSLProtocolTransport(transports._FlowControlMixin,
                            transports.Transport):

    _start_tls_compatible = True
    _sendfile_compatible = constants._SendfileMode.FALLBACK

    def __init__(self, loop, ssl_protocol):
        self._loop = loop
        self._ssl_protocol = ssl_protocol
        self._closed = False

    def get_extra_info(self, name, default=None):
        """Get optional transport information."""
        return self._ssl_protocol._get_extra_info(name, default)

    def set_protocol(self, protocol):
        self._ssl_protocol._set_app_protocol(protocol)

    def get_protocol(self):
        return self._ssl_protocol._app_protocol

    def is_closing(self):
        return self._closed or self._ssl_protocol._is_transport_closing()

    def close(self):
        """Close the transport.

        Buffered data will be flushed asynchronously.  No more data
        will be received.  After all buffered data is flushed, the
        protocol's connection_lost() method will (eventually) be called
        with None as its argument.
        """
        if not self._closed:
            self._closed = True
            self._ssl_protocol._start_shutdown()
        else:
            self._ssl_protocol = None

    def __del__(self, _warnings=warnings):
        if not self._closed:
            self._closed = True
            _warnings.warn(
                "unclosed transport <asyncio._SSLProtocolTransport "
                "object>", ResourceWarning)

    def is_reading(self):
        return not self._ssl_protocol._app_reading_paused

    def pause_reading(self):
        """Pause the receiving end.

        No data will be passed to the protocol's data_received()
        method until resume_reading() is called.
        """
        self._ssl_protocol._pause_reading()

    def resume_reading(self):
        """Resume the receiving end.

        Data received will once again be passed to the protocol's
        data_received() method.
        """
        self._ssl_protocol._resume_reading()

    def set_write_buffer_limits(self, high=None, low=None):
        """Set the high- and low-water limits for write flow control.

        These two values control when to call the protocol's
        pause_writing() and resume_writing() methods.  If specified,
        the low-water limit must be less than or equal to the
        high-water limit.  Neither value can be negative.

        The defaults are implementation-specific.  If only the
        high-water limit is given, the low-water limit defaults to an
        implementation-specific value less than or equal to the
        high-water limit.  Setting high to zero forces low to zero as
        well, and causes pause_writing() to be called whenever the
        buffer becomes non-empty.  Setting low to zero causes
        resume_writing() to be called only once the buffer is empty.
        Use of zero for either limit is generally sub-optimal as it
        reduces opportunities for doing I/O and computation
        concurrently.
        """
        self._ssl_protocol._set_write_buffer_limits(high, low)
        self._ssl_protocol._control_app_writing()

    def get_write_buffer_limits(self):
        return (self._ssl_protocol._outgoing_low_water,
                self._ssl_protocol._outgoing_high_water)

    def get_write_buffer_size(self):
        """Return the current size of the write buffers."""
        return self._ssl_protocol._get_write_buffer_size()

    def set_read_buffer_limits(self, high=None, low=None):
        """Set the high- and low-water limits for read flow control.

        These two values control when to call the upstream transport's
        pause_reading() and resume_reading() methods.  If specified,
        the low-water limit must be less than or equal to the
        high-water limit.  Neither value can be negative.

        The defaults are implementation-specific.  If only the
        high-water limit is given, the low-water limit defaults to an
        implementation-specific value less than or equal to the
        high-water limit.  Setting high to zero forces low to zero as
        well, and causes pause_reading() to be called whenever the
        buffer becomes non-empty.  Setting low to zero causes
        resume_reading() to be called only once the buffer is empty.
        Use of zero for either limit is generally sub-optimal as it
        reduces opportunities for doing I/O and computation
        concurrently.
        """
        self._ssl_protocol._set_read_buffer_limits(high, low)
        self._ssl_protocol._control_ssl_reading()

    def get_read_buffer_limits(self):
        return (self._ssl_protocol._incoming_low_water,
                self._ssl_protocol._incoming_high_water)

    def get_read_buffer_size(self):
        """Return the current size of the read buffer."""
        return self._ssl_protocol._get_read_buffer_size()

    @property
    def _protocol_paused(self):
        # Required for sendfile fallback pause_writing/resume_writing logic
        return self._ssl_protocol._app_writing_paused

    def write(self, data):
        """Write some data bytes to the transport.

        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        """
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError(f"data: expecting a bytes-like instance, "
                            f"got {type(data).__name__}")
        if not data:
            return
        self._ssl_protocol._write_appdata((data,))

    def writelines(self, list_of_data):
        """Write a list (or any iterable) of data bytes to the transport.

        The default implementation concatenates the arguments and
        calls write() on the result.
        """
        self._ssl_protocol._write_appdata(list_of_data)

    def write_eof(self):
        """Close the write end after flushing buffered data.

        This raises :exc:`NotImplementedError` right now.
        """
        raise NotImplementedError

    def can_write_eof(self):
        """Return True if this transport supports write_eof(), False if not."""
        return False

    def abort(self):
        """Close the transport immediately.

        Buffered data will be lost.  No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        self._force_close(None)

    def _force_close(self, exc):
        self._closed = True
        if self._ssl_protocol is not None:
            self._ssl_protocol._abort(exc)

    def _test__append_write_backlog(self, data):
        # for test only
        self._ssl_protocol._write_backlog.append(data)
        self._ssl_protocol._write_buffer_size += len(data)
|
||||
|
||||
|
||||
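
# Usage note (editorial, not upstream code): transports of this class are
# what application code receives from, e.g., ``loop.create_connection(...,
# ssl=ctx)`` or ``loop.start_tls()``.  A minimal sketch of tuning the
# flow-control knobs defined above, assuming ``transport`` came from such a
# call:
#
#     transport.set_write_buffer_limits(high=512 * 1024, low=128 * 1024)
#     transport.pause_reading()    # stop data_received() callbacks for now
#     ...
#     transport.resume_reading()   # buffered data is delivered again
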
class SSLProtocol(protocols.BufferedProtocol):
    max_size = 256 * 1024   # Buffer size passed to read()

    _handshake_start_time = None
    _handshake_timeout_handle = None
    _shutdown_timeout_handle = None

    def __init__(self, loop, app_protocol, sslcontext, waiter,
                 server_side=False, server_hostname=None,
                 call_connection_made=True,
                 ssl_handshake_timeout=None,
                 ssl_shutdown_timeout=None):
        if ssl is None:
            raise RuntimeError("stdlib ssl module not available")

        self._ssl_buffer = bytearray(self.max_size)
        self._ssl_buffer_view = memoryview(self._ssl_buffer)

        if ssl_handshake_timeout is None:
            ssl_handshake_timeout = constants.SSL_HANDSHAKE_TIMEOUT
        elif ssl_handshake_timeout <= 0:
            raise ValueError(
                f"ssl_handshake_timeout should be a positive number, "
                f"got {ssl_handshake_timeout}")
        if ssl_shutdown_timeout is None:
            ssl_shutdown_timeout = constants.SSL_SHUTDOWN_TIMEOUT
        elif ssl_shutdown_timeout <= 0:
            raise ValueError(
                f"ssl_shutdown_timeout should be a positive number, "
                f"got {ssl_shutdown_timeout}")

        if not sslcontext:
            sslcontext = _create_transport_context(
                server_side, server_hostname)

        self._server_side = server_side
        if server_hostname and not server_side:
            self._server_hostname = server_hostname
        else:
            self._server_hostname = None
        self._sslcontext = sslcontext
        # SSL-specific extra info. More info are set when the handshake
        # completes.
        self._extra = dict(sslcontext=sslcontext)

        # App data write buffering
        self._write_backlog = collections.deque()
        self._write_buffer_size = 0

        self._waiter = waiter
        self._loop = loop
        self._set_app_protocol(app_protocol)
        self._app_transport = None
        self._app_transport_created = False
        # transport, ex: SelectorSocketTransport
        self._transport = None
        self._ssl_handshake_timeout = ssl_handshake_timeout
        self._ssl_shutdown_timeout = ssl_shutdown_timeout
        # SSL and state machine
        self._incoming = ssl.MemoryBIO()
        self._outgoing = ssl.MemoryBIO()
        self._state = SSLProtocolState.UNWRAPPED
        self._conn_lost = 0  # Set when connection_lost called
        if call_connection_made:
            self._app_state = AppProtocolState.STATE_INIT
        else:
            self._app_state = AppProtocolState.STATE_CON_MADE
        self._sslobj = self._sslcontext.wrap_bio(
            self._incoming, self._outgoing,
            server_side=self._server_side,
            server_hostname=self._server_hostname)

        # Flow Control

        self._ssl_writing_paused = False

        self._app_reading_paused = False

        self._ssl_reading_paused = False
        self._incoming_high_water = 0
        self._incoming_low_water = 0
        self._set_read_buffer_limits()
        self._eof_received = False

        self._app_writing_paused = False
        self._outgoing_high_water = 0
        self._outgoing_low_water = 0
        self._set_write_buffer_limits()
        self._get_app_transport()
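
    # Editorial note (not upstream code): the two MemoryBIO objects created
    # above are the heart of this class.  Plaintext from the application is
    # pushed through ``self._sslobj.write()``, which emits TLS records into
    # ``self._outgoing``; encrypted bytes arriving from the socket are fed
    # into ``self._incoming`` and recovered with ``self._sslobj.read()``.
    # The same pattern in isolation (a sketch, with a hypothetical host):
    #
    #     import ssl
    #     ctx = ssl.create_default_context()
    #     inc, out = ssl.MemoryBIO(), ssl.MemoryBIO()
    #     obj = ctx.wrap_bio(inc, out, server_hostname='example.com')
    #     try:
    #         obj.do_handshake()
    #     except ssl.SSLWantReadError:
    #         client_hello = out.read()   # bytes to send on the wire
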
    def _set_app_protocol(self, app_protocol):
        self._app_protocol = app_protocol
        # Make fast hasattr check first
        if (hasattr(app_protocol, 'get_buffer') and
                isinstance(app_protocol, protocols.BufferedProtocol)):
            self._app_protocol_get_buffer = app_protocol.get_buffer
            self._app_protocol_buffer_updated = app_protocol.buffer_updated
            self._app_protocol_is_buffer = True
        else:
            self._app_protocol_is_buffer = False

    def _wakeup_waiter(self, exc=None):
        if self._waiter is None:
            return
        if not self._waiter.cancelled():
            if exc is not None:
                self._waiter.set_exception(exc)
            else:
                self._waiter.set_result(None)
        self._waiter = None

    def _get_app_transport(self):
        if self._app_transport is None:
            if self._app_transport_created:
                raise RuntimeError('Creating _SSLProtocolTransport twice')
            self._app_transport = _SSLProtocolTransport(self._loop, self)
            self._app_transport_created = True
        return self._app_transport

    def _is_transport_closing(self):
        return self._transport is not None and self._transport.is_closing()

    def connection_made(self, transport):
        """Called when the low-level connection is made.

        Start the SSL handshake.
        """
        self._transport = transport
        self._start_handshake()

    def connection_lost(self, exc):
        """Called when the low-level connection is lost or closed.

        The argument is an exception object or None (the latter
        meaning a regular EOF is received or the connection was
        aborted or closed).
        """
        self._write_backlog.clear()
        self._outgoing.read()
        self._conn_lost += 1

        # Just mark the app transport as closed so that its __dealloc__
        # doesn't complain.
        if self._app_transport is not None:
            self._app_transport._closed = True

        if self._state != SSLProtocolState.DO_HANDSHAKE:
            if (
                self._app_state == AppProtocolState.STATE_CON_MADE or
                self._app_state == AppProtocolState.STATE_EOF
            ):
                self._app_state = AppProtocolState.STATE_CON_LOST
                self._loop.call_soon(self._app_protocol.connection_lost, exc)
        self._set_state(SSLProtocolState.UNWRAPPED)
        self._transport = None
        self._app_transport = None
        self._app_protocol = None
        self._wakeup_waiter(exc)

        if self._shutdown_timeout_handle:
            self._shutdown_timeout_handle.cancel()
            self._shutdown_timeout_handle = None
        if self._handshake_timeout_handle:
            self._handshake_timeout_handle.cancel()
            self._handshake_timeout_handle = None

    def get_buffer(self, n):
        want = n
        if want <= 0 or want > self.max_size:
            want = self.max_size
        if len(self._ssl_buffer) < want:
            self._ssl_buffer = bytearray(want)
            self._ssl_buffer_view = memoryview(self._ssl_buffer)
        return self._ssl_buffer_view

    def buffer_updated(self, nbytes):
        self._incoming.write(self._ssl_buffer_view[:nbytes])

        if self._state == SSLProtocolState.DO_HANDSHAKE:
            self._do_handshake()

        elif self._state == SSLProtocolState.WRAPPED:
            self._do_read()

        elif self._state == SSLProtocolState.FLUSHING:
            self._do_flush()

        elif self._state == SSLProtocolState.SHUTDOWN:
            self._do_shutdown()

    def eof_received(self):
        """Called when the other end of the low-level stream
        is half-closed.

        If this returns a false value (including None), the transport
        will close itself.  If it returns a true value, closing the
        transport is up to the protocol.
        """
        self._eof_received = True
        try:
            if self._loop.get_debug():
                logger.debug("%r received EOF", self)

            if self._state == SSLProtocolState.DO_HANDSHAKE:
                self._on_handshake_complete(ConnectionResetError)

            elif self._state == SSLProtocolState.WRAPPED:
                self._set_state(SSLProtocolState.FLUSHING)
                if self._app_reading_paused:
                    return True
                else:
                    self._do_flush()

            elif self._state == SSLProtocolState.FLUSHING:
                self._do_write()
                self._set_state(SSLProtocolState.SHUTDOWN)
                self._do_shutdown()

            elif self._state == SSLProtocolState.SHUTDOWN:
                self._do_shutdown()

        except Exception:
            self._transport.close()
            raise

    def _get_extra_info(self, name, default=None):
        if name in self._extra:
            return self._extra[name]
        elif self._transport is not None:
            return self._transport.get_extra_info(name, default)
        else:
            return default

    def _set_state(self, new_state):
        allowed = False

        if new_state == SSLProtocolState.UNWRAPPED:
            allowed = True

        elif (
            self._state == SSLProtocolState.UNWRAPPED and
            new_state == SSLProtocolState.DO_HANDSHAKE
        ):
            allowed = True

        elif (
            self._state == SSLProtocolState.DO_HANDSHAKE and
            new_state == SSLProtocolState.WRAPPED
        ):
            allowed = True

        elif (
            self._state == SSLProtocolState.WRAPPED and
            new_state == SSLProtocolState.FLUSHING
        ):
            allowed = True

        elif (
            self._state == SSLProtocolState.FLUSHING and
            new_state == SSLProtocolState.SHUTDOWN
        ):
            allowed = True

        if allowed:
            self._state = new_state

        else:
            raise RuntimeError(
                'cannot switch state from {} to {}'.format(
                    self._state, new_state))
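
    # Editorial note (not upstream code): _set_state() above only permits
    # the forward chain below, plus a reset to UNWRAPPED from any state
    # (used by _abort() and connection_lost()):
    #
    #     UNWRAPPED -> DO_HANDSHAKE -> WRAPPED -> FLUSHING -> SHUTDOWN
    #         ^------------------------------------------------------'
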
    # Handshake flow

    def _start_handshake(self):
        if self._loop.get_debug():
            logger.debug("%r starts SSL handshake", self)
            self._handshake_start_time = self._loop.time()
        else:
            self._handshake_start_time = None

        self._set_state(SSLProtocolState.DO_HANDSHAKE)

        # start handshake timeout count down
        self._handshake_timeout_handle = \
            self._loop.call_later(self._ssl_handshake_timeout,
                                  self._check_handshake_timeout)

        self._do_handshake()

    def _check_handshake_timeout(self):
        if self._state == SSLProtocolState.DO_HANDSHAKE:
            msg = (
                f"SSL handshake is taking longer than "
                f"{self._ssl_handshake_timeout} seconds: "
                f"aborting the connection"
            )
            self._fatal_error(ConnectionAbortedError(msg))

    def _do_handshake(self):
        try:
            self._sslobj.do_handshake()
        except SSLAgainErrors:
            self._process_outgoing()
        except ssl.SSLError as exc:
            self._on_handshake_complete(exc)
        else:
            self._on_handshake_complete(None)

    def _on_handshake_complete(self, handshake_exc):
        if self._handshake_timeout_handle is not None:
            self._handshake_timeout_handle.cancel()
            self._handshake_timeout_handle = None

        sslobj = self._sslobj
        try:
            if handshake_exc is None:
                self._set_state(SSLProtocolState.WRAPPED)
            else:
                raise handshake_exc

            peercert = sslobj.getpeercert()
        except Exception as exc:
            handshake_exc = None
            self._set_state(SSLProtocolState.UNWRAPPED)
            if isinstance(exc, ssl.CertificateError):
                msg = 'SSL handshake failed on verifying the certificate'
            else:
                msg = 'SSL handshake failed'
            self._fatal_error(exc, msg)
            self._wakeup_waiter(exc)
            return

        if self._loop.get_debug():
            dt = self._loop.time() - self._handshake_start_time
            logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)

        # Add extra info that becomes available after handshake.
        self._extra.update(peercert=peercert,
                           cipher=sslobj.cipher(),
                           compression=sslobj.compression(),
                           ssl_object=sslobj)
        if self._app_state == AppProtocolState.STATE_INIT:
            self._app_state = AppProtocolState.STATE_CON_MADE
            self._app_protocol.connection_made(self._get_app_transport())
        self._wakeup_waiter()
        self._do_read()

    # Shutdown flow

    def _start_shutdown(self):
        if (
            self._state in (
                SSLProtocolState.FLUSHING,
                SSLProtocolState.SHUTDOWN,
                SSLProtocolState.UNWRAPPED
            )
        ):
            return
        if self._app_transport is not None:
            self._app_transport._closed = True
        if self._state == SSLProtocolState.DO_HANDSHAKE:
            self._abort(None)
        else:
            self._set_state(SSLProtocolState.FLUSHING)
            self._shutdown_timeout_handle = self._loop.call_later(
                self._ssl_shutdown_timeout,
                self._check_shutdown_timeout
            )
            self._do_flush()

    def _check_shutdown_timeout(self):
        if (
            self._state in (
                SSLProtocolState.FLUSHING,
                SSLProtocolState.SHUTDOWN
            )
        ):
            self._transport._force_close(
                exceptions.TimeoutError('SSL shutdown timed out'))

    def _do_flush(self):
        self._do_read()
        self._set_state(SSLProtocolState.SHUTDOWN)
        self._do_shutdown()

    def _do_shutdown(self):
        try:
            if not self._eof_received:
                self._sslobj.unwrap()
        except SSLAgainErrors:
            self._process_outgoing()
        except ssl.SSLError as exc:
            self._on_shutdown_complete(exc)
        else:
            self._process_outgoing()
            self._call_eof_received()
            self._on_shutdown_complete(None)

    def _on_shutdown_complete(self, shutdown_exc):
        if self._shutdown_timeout_handle is not None:
            self._shutdown_timeout_handle.cancel()
            self._shutdown_timeout_handle = None

        if shutdown_exc:
            self._fatal_error(shutdown_exc)
        else:
            self._loop.call_soon(self._transport.close)

    def _abort(self, exc):
        self._set_state(SSLProtocolState.UNWRAPPED)
        if self._transport is not None:
            self._transport._force_close(exc)

    # Outgoing flow

    def _write_appdata(self, list_of_data):
        if (
            self._state in (
                SSLProtocolState.FLUSHING,
                SSLProtocolState.SHUTDOWN,
                SSLProtocolState.UNWRAPPED
            )
        ):
            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
                logger.warning('SSL connection is closed')
            self._conn_lost += 1
            return

        for data in list_of_data:
            self._write_backlog.append(data)
            self._write_buffer_size += len(data)

        try:
            if self._state == SSLProtocolState.WRAPPED:
                self._do_write()

        except Exception as ex:
            self._fatal_error(ex, 'Fatal error on SSL protocol')

    def _do_write(self):
        try:
            while self._write_backlog:
                data = self._write_backlog[0]
                count = self._sslobj.write(data)
                data_len = len(data)
                if count < data_len:
                    self._write_backlog[0] = data[count:]
                    self._write_buffer_size -= count
                else:
                    del self._write_backlog[0]
                    self._write_buffer_size -= data_len
        except SSLAgainErrors:
            pass
        self._process_outgoing()

    def _process_outgoing(self):
        if not self._ssl_writing_paused:
            data = self._outgoing.read()
            if len(data):
                self._transport.write(data)
        self._control_app_writing()

    # Incoming flow

    def _do_read(self):
        if (
            self._state not in (
                SSLProtocolState.WRAPPED,
                SSLProtocolState.FLUSHING,
            )
        ):
            return
        try:
            if not self._app_reading_paused:
                if self._app_protocol_is_buffer:
                    self._do_read__buffered()
                else:
                    self._do_read__copied()
                if self._write_backlog:
                    self._do_write()
                else:
                    self._process_outgoing()
            self._control_ssl_reading()
        except Exception as ex:
            self._fatal_error(ex, 'Fatal error on SSL protocol')

    def _do_read__buffered(self):
        offset = 0
        count = 1

        buf = self._app_protocol_get_buffer(self._get_read_buffer_size())
        wants = len(buf)

        try:
            count = self._sslobj.read(wants, buf)

            if count > 0:
                offset = count
                while offset < wants:
                    count = self._sslobj.read(wants - offset, buf[offset:])
                    if count > 0:
                        offset += count
                    else:
                        break
                else:
                    self._loop.call_soon(self._do_read)
        except SSLAgainErrors:
            pass
        if offset > 0:
            self._app_protocol_buffer_updated(offset)
        if not count:
            # close_notify
            self._call_eof_received()
            self._start_shutdown()

    def _do_read__copied(self):
        chunk = b'1'
        zero = True
        one = False

        try:
            while True:
                chunk = self._sslobj.read(self.max_size)
                if not chunk:
                    break
                if zero:
                    zero = False
                    one = True
                    first = chunk
                elif one:
                    one = False
                    data = [first, chunk]
                else:
                    data.append(chunk)
        except SSLAgainErrors:
            pass
        if one:
            self._app_protocol.data_received(first)
        elif not zero:
            self._app_protocol.data_received(b''.join(data))
        if not chunk:
            # close_notify
            self._call_eof_received()
            self._start_shutdown()

    def _call_eof_received(self):
        try:
            if self._app_state == AppProtocolState.STATE_CON_MADE:
                self._app_state = AppProtocolState.STATE_EOF
                keep_open = self._app_protocol.eof_received()
                if keep_open:
                    logger.warning('returning true from eof_received() '
                                   'has no effect when using ssl')
        except (KeyboardInterrupt, SystemExit):
            raise
        except BaseException as ex:
            self._fatal_error(ex, 'Error calling eof_received()')

    # Flow control for writes from APP socket

    def _control_app_writing(self):
        size = self._get_write_buffer_size()
        if size >= self._outgoing_high_water and not self._app_writing_paused:
            self._app_writing_paused = True
            try:
                self._app_protocol.pause_writing()
            except (KeyboardInterrupt, SystemExit):
                raise
            except BaseException as exc:
                self._loop.call_exception_handler({
                    'message': 'protocol.pause_writing() failed',
                    'exception': exc,
                    'transport': self._app_transport,
                    'protocol': self,
                })
        elif size <= self._outgoing_low_water and self._app_writing_paused:
            self._app_writing_paused = False
            try:
                self._app_protocol.resume_writing()
            except (KeyboardInterrupt, SystemExit):
                raise
            except BaseException as exc:
                self._loop.call_exception_handler({
                    'message': 'protocol.resume_writing() failed',
                    'exception': exc,
                    'transport': self._app_transport,
                    'protocol': self,
                })

    def _get_write_buffer_size(self):
        return self._outgoing.pending + self._write_buffer_size

    def _set_write_buffer_limits(self, high=None, low=None):
        high, low = add_flowcontrol_defaults(
            high, low, constants.FLOW_CONTROL_HIGH_WATER_SSL_WRITE)
        self._outgoing_high_water = high
        self._outgoing_low_water = low

    # Flow control for reads to APP socket

    def _pause_reading(self):
        self._app_reading_paused = True

    def _resume_reading(self):
        if self._app_reading_paused:
            self._app_reading_paused = False

            def resume():
                if self._state == SSLProtocolState.WRAPPED:
                    self._do_read()
                elif self._state == SSLProtocolState.FLUSHING:
                    self._do_flush()
                elif self._state == SSLProtocolState.SHUTDOWN:
                    self._do_shutdown()
            self._loop.call_soon(resume)
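
    # Editorial note (not upstream code): resume() above is scheduled with
    # call_soon() rather than run synchronously, so a protocol that calls
    # transport.resume_reading() from inside data_received() does not
    # re-enter the SSL read path; buffered plaintext is delivered on the
    # next event-loop iteration instead.
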
    # Flow control for reads from SSL socket

    def _control_ssl_reading(self):
        size = self._get_read_buffer_size()
        if size >= self._incoming_high_water and not self._ssl_reading_paused:
            self._ssl_reading_paused = True
            self._transport.pause_reading()
        elif size <= self._incoming_low_water and self._ssl_reading_paused:
            self._ssl_reading_paused = False
            self._transport.resume_reading()

    def _set_read_buffer_limits(self, high=None, low=None):
        high, low = add_flowcontrol_defaults(
            high, low, constants.FLOW_CONTROL_HIGH_WATER_SSL_READ)
        self._incoming_high_water = high
        self._incoming_low_water = low

    def _get_read_buffer_size(self):
        return self._incoming.pending

    # Flow control for writes to SSL socket

    def pause_writing(self):
        """Called when the low-level transport's buffer goes over
        the high-water mark.
        """
        assert not self._ssl_writing_paused
        self._ssl_writing_paused = True

    def resume_writing(self):
        """Called when the low-level transport's buffer drains below
        the low-water mark.
        """
        assert self._ssl_writing_paused
        self._ssl_writing_paused = False
        self._process_outgoing()

    def _fatal_error(self, exc, message='Fatal error on transport'):
        if self._transport:
            self._transport._force_close(exc)

        if isinstance(exc, OSError):
            if self._loop.get_debug():
                logger.debug("%r: %s", self, message, exc_info=True)
        elif not isinstance(exc, exceptions.CancelledError):
            self._loop.call_exception_handler({
                'message': message,
                'exception': exc,
                'transport': self._transport,
                'protocol': self,
            })
174
Dependencies/Python/Lib/asyncio/staggered.py
vendored
Normal file
@@ -0,0 +1,174 @@
|
||||
"""Support for running coroutines in parallel with staggered start times."""
|
||||
|
||||
__all__ = 'staggered_race',
|
||||
|
||||
import contextlib
|
||||
|
||||
from . import events
|
||||
from . import exceptions as exceptions_mod
|
||||
from . import locks
|
||||
from . import tasks
|
||||
|
||||
|
||||
async def staggered_race(coro_fns, delay, *, loop=None):
|
||||
"""Run coroutines with staggered start times and take the first to finish.
|
||||
|
||||
This method takes an iterable of coroutine functions. The first one is
|
||||
started immediately. From then on, whenever the immediately preceding one
|
||||
fails (raises an exception), or when *delay* seconds has passed, the next
|
||||
coroutine is started. This continues until one of the coroutines complete
|
||||
successfully, in which case all others are cancelled, or until all
|
||||
coroutines fail.
|
||||
|
||||
The coroutines provided should be well-behaved in the following way:
|
||||
|
||||
* They should only ``return`` if completed successfully.
|
||||
|
||||
* They should always raise an exception if they did not complete
|
||||
successfully. In particular, if they handle cancellation, they should
|
||||
probably reraise, like this::
|
||||
|
||||
try:
|
||||
# do work
|
||||
except asyncio.CancelledError:
|
||||
# undo partially completed work
|
||||
raise
|
||||
|
||||
Args:
|
||||
coro_fns: an iterable of coroutine functions, i.e. callables that
|
||||
return a coroutine object when called. Use ``functools.partial`` or
|
||||
lambdas to pass arguments.
|
||||
|
||||
delay: amount of time, in seconds, between starting coroutines. If
|
||||
``None``, the coroutines will run sequentially.
|
||||
|
||||
loop: the event loop to use.
|
||||
|
||||
Returns:
|
||||
tuple *(winner_result, winner_index, exceptions)* where
|
||||
|
||||
- *winner_result*: the result of the winning coroutine, or ``None``
|
||||
if no coroutines won.
|
||||
|
||||
- *winner_index*: the index of the winning coroutine in
|
||||
``coro_fns``, or ``None`` if no coroutines won. If the winning
|
||||
coroutine may return None on success, *winner_index* can be used
|
||||
to definitively determine whether any coroutine won.
|
||||
|
||||
- *exceptions*: list of exceptions returned by the coroutines.
|
||||
``len(exceptions)`` is equal to the number of coroutines actually
|
||||
started, and the order is the same as in ``coro_fns``. The winning
|
||||
coroutine's entry is ``None``.
|
||||
|
||||
"""
|
||||
# TODO: when we have aiter() and anext(), allow async iterables in coro_fns.
|
||||
loop = loop or events.get_running_loop()
|
||||
enum_coro_fns = enumerate(coro_fns)
|
||||
winner_result = None
|
||||
winner_index = None
|
||||
unhandled_exceptions = []
|
||||
exceptions = []
|
||||
running_tasks = set()
|
||||
on_completed_fut = None
|
||||
|
||||
def task_done(task):
|
||||
running_tasks.discard(task)
|
||||
if (
|
||||
on_completed_fut is not None
|
||||
and not on_completed_fut.done()
|
||||
and not running_tasks
|
||||
):
|
||||
on_completed_fut.set_result(None)
|
||||
|
||||
if task.cancelled():
|
||||
return
|
||||
|
||||
exc = task.exception()
|
||||
if exc is None:
|
||||
return
|
||||
unhandled_exceptions.append(exc)
|
||||
|
||||
async def run_one_coro(ok_to_start, previous_failed) -> None:
|
||||
# in eager tasks this waits for the calling task to append this task
|
||||
# to running_tasks, in regular tasks this wait is a no-op that does
|
||||
# not yield a future. See gh-124309.
|
||||
await ok_to_start.wait()
|
||||
# Wait for the previous task to finish, or for delay seconds
|
||||
if previous_failed is not None:
|
||||
with contextlib.suppress(exceptions_mod.TimeoutError):
|
||||
# Use asyncio.wait_for() instead of asyncio.wait() here, so
|
||||
# that if we get cancelled at this point, Event.wait() is also
|
||||
# cancelled, otherwise there will be a "Task destroyed but it is
|
||||
# pending" later.
|
||||
await tasks.wait_for(previous_failed.wait(), delay)
|
||||
# Get the next coroutine to run
|
||||
try:
|
||||
this_index, coro_fn = next(enum_coro_fns)
|
||||
except StopIteration:
|
||||
return
|
||||
# Start task that will run the next coroutine
|
||||
this_failed = locks.Event()
|
||||
next_ok_to_start = locks.Event()
|
||||
next_task = loop.create_task(run_one_coro(next_ok_to_start, this_failed))
|
||||
running_tasks.add(next_task)
|
||||
next_task.add_done_callback(task_done)
|
||||
# next_task has been appended to running_tasks so next_task is ok to
|
||||
# start.
|
||||
next_ok_to_start.set()
|
||||
# Prepare place to put this coroutine's exceptions if not won
|
||||
exceptions.append(None)
|
||||
assert len(exceptions) == this_index + 1
|
||||
|
||||
try:
|
||||
result = await coro_fn()
|
||||
except (SystemExit, KeyboardInterrupt):
|
||||
raise
|
||||
except BaseException as e:
|
||||
exceptions[this_index] = e
|
||||
this_failed.set() # Kickstart the next coroutine
|
||||
else:
|
||||
# Store winner's results
|
||||
nonlocal winner_index, winner_result
|
||||
assert winner_index is None
|
||||
winner_index = this_index
|
||||
winner_result = result
|
||||
# Cancel all other tasks. We take care to not cancel the current
|
||||
# task as well. If we do so, then since there is no `await` after
|
||||
# here and CancelledError are usually thrown at one, we will
|
||||
# encounter a curious corner case where the current task will end
|
||||
# up as done() == True, cancelled() == False, exception() ==
|
||||
# asyncio.CancelledError. This behavior is specified in
|
||||
# https://bugs.python.org/issue30048
|
||||
current_task = tasks.current_task(loop)
|
||||
for t in running_tasks:
|
||||
if t is not current_task:
|
||||
t.cancel()
|
||||
|
||||
propagate_cancellation_error = None
|
||||
try:
|
||||
ok_to_start = locks.Event()
|
||||
first_task = loop.create_task(run_one_coro(ok_to_start, None))
|
||||
running_tasks.add(first_task)
|
||||
first_task.add_done_callback(task_done)
|
||||
# first_task has been appended to running_tasks so first_task is ok to start.
|
||||
ok_to_start.set()
|
||||
propagate_cancellation_error = None
|
||||
# Make sure no tasks are left running if we leave this function
|
||||
while running_tasks:
|
||||
on_completed_fut = loop.create_future()
|
||||
try:
|
||||
await on_completed_fut
|
||||
except exceptions_mod.CancelledError as ex:
|
||||
propagate_cancellation_error = ex
|
||||
for task in running_tasks:
|
||||
task.cancel(*ex.args)
|
||||
on_completed_fut = None
|
||||
if __debug__ and unhandled_exceptions:
|
||||
# If run_one_coro raises an unhandled exception, it's probably a
|
||||
# programming error, and I want to see it.
|
||||
raise ExceptionGroup("staggered race failed", unhandled_exceptions)
|
||||
if propagate_cancellation_error is not None:
|
||||
raise propagate_cancellation_error
|
||||
return winner_result, winner_index, exceptions
|
||||
finally:
|
||||
del exceptions, propagate_cancellation_error, unhandled_exceptions
|
||||
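
# Usage sketch (editorial, not upstream code): race two coroutines, starting
# the second one 0.3 s after the first, and report the winner.
#
#     import asyncio
#     import functools
#     from asyncio.staggered import staggered_race
#
#     async def main():
#         winner, index, excs = await staggered_race(
#             [functools.partial(asyncio.sleep, 1.0, 'slow'),
#              functools.partial(asyncio.sleep, 0.1, 'fast')],
#             delay=0.3)
#         print(winner, index)   # -> fast 1
#
#     asyncio.run(main())
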
787
Dependencies/Python/Lib/asyncio/streams.py
vendored
Normal file
@@ -0,0 +1,787 @@
|
||||
__all__ = (
|
||||
'StreamReader', 'StreamWriter', 'StreamReaderProtocol',
|
||||
'open_connection', 'start_server')
|
||||
|
||||
import collections
|
||||
import socket
|
||||
import sys
|
||||
import warnings
|
||||
import weakref
|
||||
|
||||
if hasattr(socket, 'AF_UNIX'):
|
||||
__all__ += ('open_unix_connection', 'start_unix_server')
|
||||
|
||||
from . import coroutines
|
||||
from . import events
|
||||
from . import exceptions
|
||||
from . import format_helpers
|
||||
from . import protocols
|
||||
from .log import logger
|
||||
from .tasks import sleep
|
||||
|
||||
|
||||
_DEFAULT_LIMIT = 2 ** 16 # 64 KiB
|
||||
|
||||
|
||||
async def open_connection(host=None, port=None, *,
|
||||
limit=_DEFAULT_LIMIT, **kwds):
|
||||
"""A wrapper for create_connection() returning a (reader, writer) pair.
|
||||
|
||||
The reader returned is a StreamReader instance; the writer is a
|
||||
StreamWriter instance.
|
||||
|
||||
The arguments are all the usual arguments to create_connection()
|
||||
except protocol_factory; most common are positional host and port,
|
||||
with various optional keyword arguments following.
|
||||
|
||||
Additional optional keyword arguments are loop (to set the event loop
|
||||
instance to use) and limit (to set the buffer limit passed to the
|
||||
StreamReader).
|
||||
|
||||
(If you want to customize the StreamReader and/or
|
||||
StreamReaderProtocol classes, just copy the code -- there's
|
||||
really nothing special here except some convenience.)
|
||||
"""
|
||||
loop = events.get_running_loop()
|
||||
reader = StreamReader(limit=limit, loop=loop)
|
||||
protocol = StreamReaderProtocol(reader, loop=loop)
|
||||
transport, _ = await loop.create_connection(
|
||||
lambda: protocol, host, port, **kwds)
|
||||
writer = StreamWriter(transport, protocol, reader, loop)
|
||||
return reader, writer
|
||||
|
||||
|
||||
async def start_server(client_connected_cb, host=None, port=None, *,
|
||||
limit=_DEFAULT_LIMIT, **kwds):
|
||||
"""Start a socket server, call back for each client connected.
|
||||
|
||||
The first parameter, `client_connected_cb`, takes two parameters:
|
||||
client_reader, client_writer. client_reader is a StreamReader
|
||||
object, while client_writer is a StreamWriter object. This
|
||||
parameter can either be a plain callback function or a coroutine;
|
||||
if it is a coroutine, it will be automatically converted into a
|
||||
Task.
|
||||
|
||||
The rest of the arguments are all the usual arguments to
|
||||
loop.create_server() except protocol_factory; most common are
|
||||
positional host and port, with various optional keyword arguments
|
||||
following. The return value is the same as loop.create_server().
|
||||
|
||||
Additional optional keyword argument is limit (to set the buffer
|
||||
limit passed to the StreamReader).
|
||||
|
||||
The return value is the same as loop.create_server(), i.e. a
|
||||
Server object which can be used to stop the service.
|
||||
"""
|
||||
loop = events.get_running_loop()
|
||||
|
||||
def factory():
|
||||
reader = StreamReader(limit=limit, loop=loop)
|
||||
protocol = StreamReaderProtocol(reader, client_connected_cb,
|
||||
loop=loop)
|
||||
return protocol
|
||||
|
||||
return await loop.create_server(factory, host, port, **kwds)
|
||||
|
||||
|
||||
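
# Usage sketch (editorial, not upstream code): a minimal echo server and
# client built from the two helpers above.
#
#     import asyncio
#
#     async def handle(reader, writer):
#         writer.write(await reader.readline())   # echo one line back
#         await writer.drain()
#         writer.close()
#         await writer.wait_closed()
#
#     async def main():
#         server = await asyncio.start_server(handle, '127.0.0.1', 8888)
#         async with server:
#             reader, writer = await asyncio.open_connection('127.0.0.1', 8888)
#             writer.write(b'hello\n')
#             await writer.drain()
#             print(await reader.readline())       # b'hello\n'
#             writer.close()
#             await writer.wait_closed()
#
#     asyncio.run(main())
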
if hasattr(socket, 'AF_UNIX'):
    # UNIX Domain Sockets are supported on this platform

    async def open_unix_connection(path=None, *,
                                   limit=_DEFAULT_LIMIT, **kwds):
        """Similar to `open_connection` but works with UNIX Domain Sockets."""
        loop = events.get_running_loop()

        reader = StreamReader(limit=limit, loop=loop)
        protocol = StreamReaderProtocol(reader, loop=loop)
        transport, _ = await loop.create_unix_connection(
            lambda: protocol, path, **kwds)
        writer = StreamWriter(transport, protocol, reader, loop)
        return reader, writer

    async def start_unix_server(client_connected_cb, path=None, *,
                                limit=_DEFAULT_LIMIT, **kwds):
        """Similar to `start_server` but works with UNIX Domain Sockets."""
        loop = events.get_running_loop()

        def factory():
            reader = StreamReader(limit=limit, loop=loop)
            protocol = StreamReaderProtocol(reader, client_connected_cb,
                                            loop=loop)
            return protocol

        return await loop.create_unix_server(factory, path, **kwds)


class FlowControlMixin(protocols.Protocol):
    """Reusable flow control logic for StreamWriter.drain().

    This implements the protocol methods pause_writing(),
    resume_writing() and connection_lost().  If the subclass overrides
    these it must call the super methods.

    StreamWriter.drain() must wait for _drain_helper() coroutine.
    """

    def __init__(self, loop=None):
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
        self._paused = False
        self._drain_waiters = collections.deque()
        self._connection_lost = False

    def pause_writing(self):
        assert not self._paused
        self._paused = True
        if self._loop.get_debug():
            logger.debug("%r pauses writing", self)

    def resume_writing(self):
        assert self._paused
        self._paused = False
        if self._loop.get_debug():
            logger.debug("%r resumes writing", self)

        for waiter in self._drain_waiters:
            if not waiter.done():
                waiter.set_result(None)

    def connection_lost(self, exc):
        self._connection_lost = True
        # Wake up the writer(s) if currently paused.
        if not self._paused:
            return

        for waiter in self._drain_waiters:
            if not waiter.done():
                if exc is None:
                    waiter.set_result(None)
                else:
                    waiter.set_exception(exc)

    async def _drain_helper(self):
        if self._connection_lost:
            raise ConnectionResetError('Connection lost')
        if not self._paused:
            return
        waiter = self._loop.create_future()
        self._drain_waiters.append(waiter)
        try:
            await waiter
        finally:
            self._drain_waiters.remove(waiter)

    def _get_close_waiter(self, stream):
        raise NotImplementedError


class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
    """Helper class to adapt between Protocol and StreamReader.

    (This is a helper class instead of making StreamReader itself a
    Protocol subclass, because the StreamReader has other potential
    uses, and to prevent the user of the StreamReader to accidentally
    call inappropriate methods of the protocol.)
    """

    _source_traceback = None

    def __init__(self, stream_reader, client_connected_cb=None, loop=None):
        super().__init__(loop=loop)
        if stream_reader is not None:
            self._stream_reader_wr = weakref.ref(stream_reader)
            self._source_traceback = stream_reader._source_traceback
        else:
            self._stream_reader_wr = None
        if client_connected_cb is not None:
            # This is a stream created by the `create_server()` function.
            # Keep a strong reference to the reader until a connection
            # is established.
            self._strong_reader = stream_reader
        self._reject_connection = False
        self._task = None
        self._transport = None
        self._client_connected_cb = client_connected_cb
        self._over_ssl = False
        self._closed = self._loop.create_future()

    @property
    def _stream_reader(self):
        if self._stream_reader_wr is None:
            return None
        return self._stream_reader_wr()

    def _replace_transport(self, transport):
        loop = self._loop
        self._transport = transport
        self._over_ssl = transport.get_extra_info('sslcontext') is not None

    def connection_made(self, transport):
        if self._reject_connection:
            context = {
                'message': ('An open stream was garbage collected prior to '
                            'establishing network connection; '
                            'call "stream.close()" explicitly.')
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
            transport.abort()
            return
        self._transport = transport
        reader = self._stream_reader
        if reader is not None:
            reader.set_transport(transport)
        self._over_ssl = transport.get_extra_info('sslcontext') is not None
        if self._client_connected_cb is not None:
            writer = StreamWriter(transport, self, reader, self._loop)
            res = self._client_connected_cb(reader, writer)
            if coroutines.iscoroutine(res):
                def callback(task):
                    if task.cancelled():
                        transport.close()
                        return
                    exc = task.exception()
                    if exc is not None:
                        self._loop.call_exception_handler({
                            'message': 'Unhandled exception in client_connected_cb',
                            'exception': exc,
                            'transport': transport,
                        })
                        transport.close()

                self._task = self._loop.create_task(res)
                self._task.add_done_callback(callback)

        self._strong_reader = None

    def connection_lost(self, exc):
        reader = self._stream_reader
        if reader is not None:
            if exc is None:
                reader.feed_eof()
            else:
                reader.set_exception(exc)
        if not self._closed.done():
            if exc is None:
                self._closed.set_result(None)
            else:
                self._closed.set_exception(exc)
        super().connection_lost(exc)
        self._stream_reader_wr = None
        self._stream_writer = None
        self._task = None
        self._transport = None

    def data_received(self, data):
        reader = self._stream_reader
        if reader is not None:
            reader.feed_data(data)

    def eof_received(self):
        reader = self._stream_reader
        if reader is not None:
            reader.feed_eof()
        if self._over_ssl:
            # Prevent a warning in SSLProtocol.eof_received:
            # "returning true from eof_received()
            # has no effect when using ssl"
            return False
        return True

    def _get_close_waiter(self, stream):
        return self._closed

    def __del__(self):
        # Prevent reports about unhandled exceptions.
        # Better than self._closed._log_traceback = False hack
        try:
            closed = self._closed
        except AttributeError:
            pass  # failed constructor
        else:
            if closed.done() and not closed.cancelled():
                closed.exception()


class StreamWriter:
    """Wraps a Transport.

    This exposes write(), writelines(), [can_]write_eof(),
    get_extra_info() and close().  It adds drain() which returns an
    optional Future on which you can wait for flow control.  It also
    adds a transport property which references the Transport
    directly.
    """

    def __init__(self, transport, protocol, reader, loop):
        self._transport = transport
        self._protocol = protocol
        # drain() expects that the reader has an exception() method
        assert reader is None or isinstance(reader, StreamReader)
        self._reader = reader
        self._loop = loop
        self._complete_fut = self._loop.create_future()
        self._complete_fut.set_result(None)

    def __repr__(self):
        info = [self.__class__.__name__, f'transport={self._transport!r}']
        if self._reader is not None:
            info.append(f'reader={self._reader!r}')
        return '<{}>'.format(' '.join(info))

    @property
    def transport(self):
        return self._transport

    def write(self, data):
        self._transport.write(data)

    def writelines(self, data):
        self._transport.writelines(data)

    def write_eof(self):
        return self._transport.write_eof()

    def can_write_eof(self):
        return self._transport.can_write_eof()

    def close(self):
        return self._transport.close()

    def is_closing(self):
        return self._transport.is_closing()

    async def wait_closed(self):
        await self._protocol._get_close_waiter(self)

    def get_extra_info(self, name, default=None):
        return self._transport.get_extra_info(name, default)

    async def drain(self):
        """Flush the write buffer.

        The intended use is to write

          w.write(data)
          await w.drain()
        """
        if self._reader is not None:
            exc = self._reader.exception()
            if exc is not None:
                raise exc
        if self._transport.is_closing():
            # Wait for protocol.connection_lost() call
            # Raise connection closing error if any,
            # ConnectionResetError otherwise
            # Yield to the event loop so connection_lost() may be
            # called.  Without this, _drain_helper() would return
            # immediately, and code that calls
            #     write(...); await drain()
            # in a loop would never call connection_lost(), so it
            # would not see an error when the socket is closed.
            await sleep(0)
        await self._protocol._drain_helper()

    async def start_tls(self, sslcontext, *,
                        server_hostname=None,
                        ssl_handshake_timeout=None,
                        ssl_shutdown_timeout=None):
        """Upgrade an existing stream-based connection to TLS."""
        server_side = self._protocol._client_connected_cb is not None
        protocol = self._protocol
        await self.drain()
        new_transport = await self._loop.start_tls(  # type: ignore
            self._transport, protocol, sslcontext,
            server_side=server_side, server_hostname=server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout,
            ssl_shutdown_timeout=ssl_shutdown_timeout)
        self._transport = new_transport
        protocol._replace_transport(new_transport)

    def __del__(self, warnings=warnings):
        if not self._transport.is_closing():
            if self._loop.is_closed():
                warnings.warn("loop is closed", ResourceWarning)
            else:
                self.close()
                warnings.warn(f"unclosed {self!r}", ResourceWarning)
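
# Usage sketch (editorial, not upstream code): upgrading an open stream to
# TLS with StreamWriter.start_tls() above, e.g. after a STARTTLS-style
# plaintext negotiation (host and port are hypothetical):
#
#     import ssl
#     ctx = ssl.create_default_context()
#     reader, writer = await asyncio.open_connection('mail.example.com', 587)
#     ...                                   # plaintext STARTTLS exchange
#     await writer.start_tls(ctx, server_hostname='mail.example.com')
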
class StreamReader:

    _source_traceback = None

    def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
        # The line length limit is a security feature;
        # it also doubles as half the buffer limit.

        if limit <= 0:
            raise ValueError('Limit cannot be <= 0')

        self._limit = limit
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
        self._buffer = bytearray()
        self._eof = False    # Whether we're done.
        self._waiter = None  # A future used by _wait_for_data()
        self._exception = None
        self._transport = None
        self._paused = False
        if self._loop.get_debug():
            self._source_traceback = format_helpers.extract_stack(
                sys._getframe(1))

    def __repr__(self):
        info = ['StreamReader']
        if self._buffer:
            info.append(f'{len(self._buffer)} bytes')
        if self._eof:
            info.append('eof')
        if self._limit != _DEFAULT_LIMIT:
            info.append(f'limit={self._limit}')
        if self._waiter:
            info.append(f'waiter={self._waiter!r}')
        if self._exception:
            info.append(f'exception={self._exception!r}')
        if self._transport:
            info.append(f'transport={self._transport!r}')
        if self._paused:
            info.append('paused')
        return '<{}>'.format(' '.join(info))

    def exception(self):
        return self._exception

    def set_exception(self, exc):
        self._exception = exc

        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_exception(exc)

    def _wakeup_waiter(self):
        """Wakeup read*() functions waiting for data or EOF."""
        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_result(None)

    def set_transport(self, transport):
        assert self._transport is None, 'Transport already set'
        self._transport = transport

    def _maybe_resume_transport(self):
        if self._paused and len(self._buffer) <= self._limit:
            self._paused = False
            self._transport.resume_reading()

    def feed_eof(self):
        self._eof = True
        self._wakeup_waiter()

    def at_eof(self):
        """Return True if the buffer is empty and 'feed_eof' was called."""
        return self._eof and not self._buffer

    def feed_data(self, data):
        assert not self._eof, 'feed_data after feed_eof'

        if not data:
            return

        self._buffer.extend(data)
        self._wakeup_waiter()

        if (self._transport is not None and
                not self._paused and
                len(self._buffer) > 2 * self._limit):
            try:
                self._transport.pause_reading()
            except NotImplementedError:
                # The transport can't be paused.
                # We'll just have to buffer all data.
                # Forget the transport so we don't keep trying.
                self._transport = None
            else:
                self._paused = True

    async def _wait_for_data(self, func_name):
        """Wait until feed_data() or feed_eof() is called.

        If stream was paused, automatically resume it.
        """
        # StreamReader uses a future to link the protocol feed_data() method
        # to a read coroutine. Running two read coroutines at the same time
        # would have an unexpected behaviour. It would not possible to know
        # which coroutine would get the next data.
        if self._waiter is not None:
            raise RuntimeError(
                f'{func_name}() called while another coroutine is '
                f'already waiting for incoming data')

        assert not self._eof, '_wait_for_data after EOF'

        # Waiting for data while paused will make deadlock, so prevent it.
        # This is essential for readexactly(n) for case when n > self._limit.
        if self._paused:
            self._paused = False
            self._transport.resume_reading()

        self._waiter = self._loop.create_future()
        try:
            await self._waiter
        finally:
            self._waiter = None

    async def readline(self):
        """Read chunk of data from the stream until newline (b'\n') is found.

        On success, return chunk that ends with newline. If only partial
        line can be read due to EOF, return incomplete line without
        terminating newline. When EOF was reached while no bytes read, empty
        bytes object is returned.

        If limit is reached, ValueError will be raised. In that case, if
        newline was found, complete line including newline will be removed
        from internal buffer. Else, internal buffer will be cleared. Limit is
        compared against part of the line without newline.

        If stream was paused, this function will automatically resume it if
        needed.
        """
        sep = b'\n'
        seplen = len(sep)
        try:
            line = await self.readuntil(sep)
        except exceptions.IncompleteReadError as e:
            return e.partial
        except exceptions.LimitOverrunError as e:
            if self._buffer.startswith(sep, e.consumed):
                del self._buffer[:e.consumed + seplen]
            else:
                self._buffer.clear()
            self._maybe_resume_transport()
            raise ValueError(e.args[0])
        return line

    async def readuntil(self, separator=b'\n'):
        """Read data from the stream until ``separator`` is found.

        On success, the data and separator will be removed from the
        internal buffer (consumed). Returned data will include the
        separator at the end.

        Configured stream limit is used to check result. Limit sets the
        maximal length of data that can be returned, not counting the
        separator.

        If an EOF occurs and the complete separator is still not found,
        an IncompleteReadError exception will be raised, and the internal
        buffer will be reset.  The IncompleteReadError.partial attribute
        may contain the separator partially.

        If the data cannot be read because of over limit, a
        LimitOverrunError exception will be raised, and the data
        will be left in the internal buffer, so it can be read again.

        The ``separator`` may also be a tuple of separators. In this
        case the return value will be the shortest possible that has any
        separator as the suffix. For the purposes of LimitOverrunError,
        the shortest possible separator is considered to be the one that
        matched.
        """
        if isinstance(separator, tuple):
            # Makes sure shortest matches wins
            separator = sorted(separator, key=len)
        else:
            separator = [separator]
        if not separator:
            raise ValueError('Separator should contain at least one element')
        min_seplen = len(separator[0])
        max_seplen = len(separator[-1])
        if min_seplen == 0:
            raise ValueError('Separator should be at least one-byte string')

        if self._exception is not None:
            raise self._exception

        # Consume whole buffer except last bytes, which length is
        # one less than max_seplen. Let's check corner cases with
        # separator[-1]='SEPARATOR':
        # * we have received almost complete separator (without last
        #   byte). i.e buffer='some textSEPARATO'. In this case we
        #   can safely consume max_seplen - 1 bytes.
        # * last byte of buffer is first byte of separator, i.e.
        #   buffer='abcdefghijklmnopqrS'. We may safely consume
        #   everything except that last byte, but this require to
        #   analyze bytes of buffer that match partial separator.
        #   This is slow and/or require FSM. For this case our
        #   implementation is not optimal, since require rescanning
        #   of data that is known to not belong to separator. In
        #   real world, separator will not be so long to notice
        #   performance problems. Even when reading MIME-encoded
        #   messages :)

        # `offset` is the number of bytes from the beginning of the buffer
        # where there is no occurrence of any `separator`.
        offset = 0

        # Loop until we find a `separator` in the buffer, exceed the buffer size,
        # or an EOF has happened.
        while True:
            buflen = len(self._buffer)

            # Check if we now have enough data in the buffer for shortest
            # separator to fit.
            if buflen - offset >= min_seplen:
                match_start = None
                match_end = None
                for sep in separator:
                    isep = self._buffer.find(sep, offset)

                    if isep != -1:
                        # `separator` is in the buffer. `match_start` and
                        # `match_end` will be used later to retrieve the
                        # data.
                        end = isep + len(sep)
                        if match_end is None or end < match_end:
                            match_end = end
                            match_start = isep
                if match_end is not None:
                    break

                # see upper comment for explanation.
                offset = max(0, buflen + 1 - max_seplen)
                if offset > self._limit:
                    raise exceptions.LimitOverrunError(
                        'Separator is not found, and chunk exceed the limit',
                        offset)

            # Complete message (with full separator) may be present in buffer
            # even when EOF flag is set. This may happen when the last chunk
            # adds data which makes separator be found. That's why we check for
            # EOF *after* inspecting the buffer.
            if self._eof:
                chunk = bytes(self._buffer)
                self._buffer.clear()
                raise exceptions.IncompleteReadError(chunk, None)

            # _wait_for_data() will resume reading if stream was paused.
            await self._wait_for_data('readuntil')

        if match_start > self._limit:
            raise exceptions.LimitOverrunError(
                'Separator is found, but chunk is longer than limit', match_start)

        chunk = self._buffer[:match_end]
        del self._buffer[:match_end]
        self._maybe_resume_transport()
        return bytes(chunk)
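
    # Usage sketch (editorial, not upstream code): with a tuple of
    # separators passed to readuntil() above, the shortest match wins.
    #
    #     reader.feed_data(b'key=value;rest')
    #     reader.feed_eof()
    #     await reader.readuntil((b'=', b';'))   # -> b'key='
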
async def read(self, n=-1):
|
||||
"""Read up to `n` bytes from the stream.
|
||||
|
||||
If `n` is not provided or set to -1,
|
||||
read until EOF, then return all read bytes.
|
||||
If EOF was received and the internal buffer is empty,
|
||||
return an empty bytes object.
|
||||
|
||||
If `n` is 0, return an empty bytes object immediately.
|
||||
|
||||
If `n` is positive, return at most `n` available bytes
|
||||
as soon as at least 1 byte is available in the internal buffer.
|
||||
If EOF is received before any byte is read, return an empty
|
||||
bytes object.
|
||||
|
||||
Returned value is not limited with limit, configured at stream
|
||||
creation.
|
||||
|
||||
If stream was paused, this function will automatically resume it if
|
||||
needed.
|
||||
"""
|
||||
|
||||
if self._exception is not None:
|
||||
raise self._exception
|
||||
|
||||
if n == 0:
|
||||
return b''
|
||||
|
||||
if n < 0:
|
||||
# This used to just loop creating a new waiter hoping to
|
||||
# collect everything in self._buffer, but that would
|
||||
# deadlock if the subprocess sends more than self.limit
|
||||
# bytes. So just call self.read(self._limit) until EOF.
|
||||
blocks = []
|
||||
while True:
|
||||
block = await self.read(self._limit)
|
||||
if not block:
|
||||
break
|
||||
blocks.append(block)
|
||||
return b''.join(blocks)
|
||||
|
||||
if not self._buffer and not self._eof:
|
||||
await self._wait_for_data('read')
|
||||
|
||||
# This will work right even if buffer is less than n bytes
|
||||
data = bytes(memoryview(self._buffer)[:n])
|
||||
del self._buffer[:n]
|
||||
|
||||
self._maybe_resume_transport()
|
||||
return data
|
||||
|
||||
async def readexactly(self, n):
|
||||
"""Read exactly `n` bytes.
|
||||
|
||||
Raise an IncompleteReadError if EOF is reached before `n` bytes can be
|
||||
read. The IncompleteReadError.partial attribute of the exception will
|
||||
contain the partial read bytes.
|
||||
|
||||
If n is zero, return an empty bytes object.
|
||||
|
||||
The returned value is not limited by the limit configured at
|
||||
stream creation.
|
||||
|
||||
If the stream was paused, this function will automatically resume it if
|
||||
needed.
|
||||
"""
|
||||
if n < 0:
|
||||
raise ValueError('readexactly size can not be less than zero')
|
||||
|
||||
if self._exception is not None:
|
||||
raise self._exception
|
||||
|
||||
if n == 0:
|
||||
return b''
|
||||
|
||||
while len(self._buffer) < n:
|
||||
if self._eof:
|
||||
incomplete = bytes(self._buffer)
|
||||
self._buffer.clear()
|
||||
raise exceptions.IncompleteReadError(incomplete, n)
|
||||
|
||||
await self._wait_for_data('readexactly')
|
||||
|
||||
if len(self._buffer) == n:
|
||||
data = bytes(self._buffer)
|
||||
self._buffer.clear()
|
||||
else:
|
||||
data = bytes(memoryview(self._buffer)[:n])
|
||||
del self._buffer[:n]
|
||||
self._maybe_resume_transport()
|
||||
return data
|
||||
|
||||
def __aiter__(self):
|
||||
return self
|
||||
|
||||
async def __anext__(self):
|
||||
val = await self.readline()
|
||||
if val == b'':
|
||||
raise StopAsyncIteration
|
||||
return val
|
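The reader API above composes: __aiter__()/__anext__() delegate to readline(), so a StreamReader can be drained with async for. A minimal usage sketch, not part of the vendored module (the host and port are illustrative assumptions):

import asyncio

async def main():
    # Assumes a line-oriented server is listening on this address.
    reader, writer = await asyncio.open_connection('127.0.0.1', 8888)
    # Each iteration awaits readline(); iteration stops at EOF (b'').
    async for line in reader:
        print(line.decode().rstrip())
    writer.close()
    await writer.wait_closed()

asyncio.run(main())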
||||
229
Dependencies/Python/Lib/asyncio/subprocess.py
vendored
Normal file
@@ -0,0 +1,229 @@
|
||||
__all__ = 'create_subprocess_exec', 'create_subprocess_shell'
|
||||
|
||||
import subprocess
|
||||
|
||||
from . import events
|
||||
from . import protocols
|
||||
from . import streams
|
||||
from . import tasks
|
||||
from .log import logger
|
||||
|
||||
|
||||
PIPE = subprocess.PIPE
|
||||
STDOUT = subprocess.STDOUT
|
||||
DEVNULL = subprocess.DEVNULL
|
||||
|
||||
|
||||
class SubprocessStreamProtocol(streams.FlowControlMixin,
|
||||
protocols.SubprocessProtocol):
|
||||
"""Like StreamReaderProtocol, but for a subprocess."""
|
||||
|
||||
def __init__(self, limit, loop):
|
||||
super().__init__(loop=loop)
|
||||
self._limit = limit
|
||||
self.stdin = self.stdout = self.stderr = None
|
||||
self._transport = None
|
||||
self._process_exited = False
|
||||
self._pipe_fds = []
|
||||
self._stdin_closed = self._loop.create_future()
|
||||
|
||||
def __repr__(self):
|
||||
info = [self.__class__.__name__]
|
||||
if self.stdin is not None:
|
||||
info.append(f'stdin={self.stdin!r}')
|
||||
if self.stdout is not None:
|
||||
info.append(f'stdout={self.stdout!r}')
|
||||
if self.stderr is not None:
|
||||
info.append(f'stderr={self.stderr!r}')
|
||||
return '<{}>'.format(' '.join(info))
|
||||
|
||||
def connection_made(self, transport):
|
||||
self._transport = transport
|
||||
|
||||
stdout_transport = transport.get_pipe_transport(1)
|
||||
if stdout_transport is not None:
|
||||
self.stdout = streams.StreamReader(limit=self._limit,
|
||||
loop=self._loop)
|
||||
self.stdout.set_transport(stdout_transport)
|
||||
self._pipe_fds.append(1)
|
||||
|
||||
stderr_transport = transport.get_pipe_transport(2)
|
||||
if stderr_transport is not None:
|
||||
self.stderr = streams.StreamReader(limit=self._limit,
|
||||
loop=self._loop)
|
||||
self.stderr.set_transport(stderr_transport)
|
||||
self._pipe_fds.append(2)
|
||||
|
||||
stdin_transport = transport.get_pipe_transport(0)
|
||||
if stdin_transport is not None:
|
||||
self.stdin = streams.StreamWriter(stdin_transport,
|
||||
protocol=self,
|
||||
reader=None,
|
||||
loop=self._loop)
|
||||
|
||||
def pipe_data_received(self, fd, data):
|
||||
if fd == 1:
|
||||
reader = self.stdout
|
||||
elif fd == 2:
|
||||
reader = self.stderr
|
||||
else:
|
||||
reader = None
|
||||
if reader is not None:
|
||||
reader.feed_data(data)
|
||||
|
||||
def pipe_connection_lost(self, fd, exc):
|
||||
if fd == 0:
|
||||
pipe = self.stdin
|
||||
if pipe is not None:
|
||||
pipe.close()
|
||||
self.connection_lost(exc)
|
||||
if exc is None:
|
||||
self._stdin_closed.set_result(None)
|
||||
else:
|
||||
self._stdin_closed.set_exception(exc)
|
||||
# Since calling `wait_closed()` is not mandatory,
|
||||
# we shouldn't log the traceback if this is not awaited.
|
||||
self._stdin_closed._log_traceback = False
|
||||
return
|
||||
if fd == 1:
|
||||
reader = self.stdout
|
||||
elif fd == 2:
|
||||
reader = self.stderr
|
||||
else:
|
||||
reader = None
|
||||
if reader is not None:
|
||||
if exc is None:
|
||||
reader.feed_eof()
|
||||
else:
|
||||
reader.set_exception(exc)
|
||||
|
||||
if fd in self._pipe_fds:
|
||||
self._pipe_fds.remove(fd)
|
||||
self._maybe_close_transport()
|
||||
|
||||
def process_exited(self):
|
||||
self._process_exited = True
|
||||
self._maybe_close_transport()
|
||||
|
||||
def _maybe_close_transport(self):
|
||||
if len(self._pipe_fds) == 0 and self._process_exited:
|
||||
self._transport.close()
|
||||
self._transport = None
|
||||
|
||||
def _get_close_waiter(self, stream):
|
||||
if stream is self.stdin:
|
||||
return self._stdin_closed
|
||||
|
||||
|
||||
class Process:
|
||||
def __init__(self, transport, protocol, loop):
|
||||
self._transport = transport
|
||||
self._protocol = protocol
|
||||
self._loop = loop
|
||||
self.stdin = protocol.stdin
|
||||
self.stdout = protocol.stdout
|
||||
self.stderr = protocol.stderr
|
||||
self.pid = transport.get_pid()
|
||||
|
||||
def __repr__(self):
|
||||
return f'<{self.__class__.__name__} {self.pid}>'
|
||||
|
||||
@property
|
||||
def returncode(self):
|
||||
return self._transport.get_returncode()
|
||||
|
||||
async def wait(self):
|
||||
"""Wait until the process exit and return the process return code."""
|
||||
return await self._transport._wait()
|
||||
|
||||
def send_signal(self, signal):
|
||||
self._transport.send_signal(signal)
|
||||
|
||||
def terminate(self):
|
||||
self._transport.terminate()
|
||||
|
||||
def kill(self):
|
||||
self._transport.kill()
|
||||
|
||||
async def _feed_stdin(self, input):
|
||||
debug = self._loop.get_debug()
|
||||
try:
|
||||
if input is not None:
|
||||
self.stdin.write(input)
|
||||
if debug:
|
||||
logger.debug(
|
||||
'%r communicate: feed stdin (%s bytes)', self, len(input))
|
||||
|
||||
await self.stdin.drain()
|
||||
except (BrokenPipeError, ConnectionResetError) as exc:
|
||||
# communicate() ignores BrokenPipeError and ConnectionResetError.
|
||||
# write() and drain() can raise these exceptions.
|
||||
if debug:
|
||||
logger.debug('%r communicate: stdin got %r', self, exc)
|
||||
|
||||
if debug:
|
||||
logger.debug('%r communicate: close stdin', self)
|
||||
self.stdin.close()
|
||||
|
||||
async def _noop(self):
|
||||
return None
|
||||
|
||||
async def _read_stream(self, fd):
|
||||
transport = self._transport.get_pipe_transport(fd)
|
||||
if fd == 2:
|
||||
stream = self.stderr
|
||||
else:
|
||||
assert fd == 1
|
||||
stream = self.stdout
|
||||
if self._loop.get_debug():
|
||||
name = 'stdout' if fd == 1 else 'stderr'
|
||||
logger.debug('%r communicate: read %s', self, name)
|
||||
output = await stream.read()
|
||||
if self._loop.get_debug():
|
||||
name = 'stdout' if fd == 1 else 'stderr'
|
||||
logger.debug('%r communicate: close %s', self, name)
|
||||
transport.close()
|
||||
return output
|
||||
|
||||
async def communicate(self, input=None):
|
||||
if self.stdin is not None:
|
||||
stdin = self._feed_stdin(input)
|
||||
else:
|
||||
stdin = self._noop()
|
||||
if self.stdout is not None:
|
||||
stdout = self._read_stream(1)
|
||||
else:
|
||||
stdout = self._noop()
|
||||
if self.stderr is not None:
|
||||
stderr = self._read_stream(2)
|
||||
else:
|
||||
stderr = self._noop()
|
||||
stdin, stdout, stderr = await tasks.gather(stdin, stdout, stderr)
|
||||
await self.wait()
|
||||
return (stdout, stderr)
|
||||
|
||||
|
||||
async def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
|
||||
limit=streams._DEFAULT_LIMIT, **kwds):
|
||||
loop = events.get_running_loop()
|
||||
protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
|
||||
loop=loop)
|
||||
transport, protocol = await loop.subprocess_shell(
|
||||
protocol_factory,
|
||||
cmd, stdin=stdin, stdout=stdout,
|
||||
stderr=stderr, **kwds)
|
||||
return Process(transport, protocol, loop)
|
||||
|
||||
|
||||
async def create_subprocess_exec(program, *args, stdin=None, stdout=None,
|
||||
stderr=None, limit=streams._DEFAULT_LIMIT,
|
||||
**kwds):
|
||||
loop = events.get_running_loop()
|
||||
protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
|
||||
loop=loop)
|
||||
transport, protocol = await loop.subprocess_exec(
|
||||
protocol_factory,
|
||||
program, *args,
|
||||
stdin=stdin, stdout=stdout,
|
||||
stderr=stderr, **kwds)
|
||||
return Process(transport, protocol, loop)
|
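Taken together, SubprocessStreamProtocol, Process, and the two factory coroutines above form the high-level subprocess API. A minimal usage sketch, not part of the vendored module (the POSIX 'echo' command is an illustrative assumption):

import asyncio

async def main():
    proc = await asyncio.create_subprocess_exec(
        'echo', 'hello',
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    # communicate() gathers _feed_stdin()/_read_stream() and then wait()s,
    # as defined in Process.communicate() above.
    stdout, stderr = await proc.communicate()
    print(proc.returncode, stdout.decode().strip())

asyncio.run(main())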
||||
278
Dependencies/Python/Lib/asyncio/taskgroups.py
vendored
Normal file
@@ -0,0 +1,278 @@
|
||||
# Adapted with permission from the EdgeDB project;
|
||||
# license: PSFL.
|
||||
|
||||
|
||||
__all__ = ("TaskGroup",)
|
||||
|
||||
from . import events
|
||||
from . import exceptions
|
||||
from . import tasks
|
||||
|
||||
|
||||
class TaskGroup:
|
||||
"""Asynchronous context manager for managing groups of tasks.
|
||||
|
||||
Example use:
|
||||
|
||||
async with asyncio.TaskGroup() as group:
|
||||
task1 = group.create_task(some_coroutine(...))
|
||||
task2 = group.create_task(other_coroutine(...))
|
||||
print("Both tasks have completed now.")
|
||||
|
||||
All tasks are awaited when the context manager exits.
|
||||
|
||||
Any exceptions other than `asyncio.CancelledError` raised within
|
||||
a task will cancel all remaining tasks and wait for them to exit.
|
||||
The exceptions are then combined and raised as an `ExceptionGroup`.
|
||||
"""
|
||||
def __init__(self):
|
||||
self._entered = False
|
||||
self._exiting = False
|
||||
self._aborting = False
|
||||
self._loop = None
|
||||
self._parent_task = None
|
||||
self._parent_cancel_requested = False
|
||||
self._tasks = set()
|
||||
self._errors = []
|
||||
self._base_error = None
|
||||
self._on_completed_fut = None
|
||||
|
||||
def __repr__(self):
|
||||
info = ['']
|
||||
if self._tasks:
|
||||
info.append(f'tasks={len(self._tasks)}')
|
||||
if self._errors:
|
||||
info.append(f'errors={len(self._errors)}')
|
||||
if self._aborting:
|
||||
info.append('cancelling')
|
||||
elif self._entered:
|
||||
info.append('entered')
|
||||
|
||||
info_str = ' '.join(info)
|
||||
return f'<TaskGroup{info_str}>'
|
||||
|
||||
async def __aenter__(self):
|
||||
if self._entered:
|
||||
raise RuntimeError(
|
||||
f"TaskGroup {self!r} has already been entered")
|
||||
if self._loop is None:
|
||||
self._loop = events.get_running_loop()
|
||||
self._parent_task = tasks.current_task(self._loop)
|
||||
if self._parent_task is None:
|
||||
raise RuntimeError(
|
||||
f'TaskGroup {self!r} cannot determine the parent task')
|
||||
self._entered = True
|
||||
|
||||
return self
|
||||
|
||||
async def __aexit__(self, et, exc, tb):
|
||||
tb = None
|
||||
try:
|
||||
return await self._aexit(et, exc)
|
||||
finally:
|
||||
# Exceptions are heavy objects that can have object
|
||||
# cycles (bad for GC); let's not keep a reference to
|
||||
# a bunch of them. It would be nicer to use a try/finally
|
||||
# in __aexit__ directly but that introduced some diff noise
|
||||
self._parent_task = None
|
||||
self._errors = None
|
||||
self._base_error = None
|
||||
exc = None
|
||||
|
||||
async def _aexit(self, et, exc):
|
||||
self._exiting = True
|
||||
|
||||
if (exc is not None and
|
||||
self._is_base_error(exc) and
|
||||
self._base_error is None):
|
||||
self._base_error = exc
|
||||
|
||||
if et is not None and issubclass(et, exceptions.CancelledError):
|
||||
propagate_cancellation_error = exc
|
||||
else:
|
||||
propagate_cancellation_error = None
|
||||
|
||||
if et is not None:
|
||||
if not self._aborting:
|
||||
# Our parent task is being cancelled:
|
||||
#
|
||||
# async with TaskGroup() as g:
|
||||
# g.create_task(...)
|
||||
# await ... # <- CancelledError
|
||||
#
|
||||
# or there's an exception in "async with":
|
||||
#
|
||||
# async with TaskGroup() as g:
|
||||
# g.create_task(...)
|
||||
# 1 / 0
|
||||
#
|
||||
self._abort()
|
||||
|
||||
# We use a while-loop here because "self._on_completed_fut"
|
||||
# can be cancelled multiple times if our parent task
|
||||
# is being cancelled repeatedly (or even once, when
|
||||
# our own cancellation is already in progress)
|
||||
while self._tasks:
|
||||
if self._on_completed_fut is None:
|
||||
self._on_completed_fut = self._loop.create_future()
|
||||
|
||||
try:
|
||||
await self._on_completed_fut
|
||||
except exceptions.CancelledError as ex:
|
||||
if not self._aborting:
|
||||
# Our parent task is being cancelled:
|
||||
#
|
||||
# async def wrapper():
|
||||
# async with TaskGroup() as g:
|
||||
# g.create_task(foo)
|
||||
#
|
||||
# "wrapper" is being cancelled while "foo" is
|
||||
# still running.
|
||||
propagate_cancellation_error = ex
|
||||
self._abort()
|
||||
|
||||
self._on_completed_fut = None
|
||||
|
||||
assert not self._tasks
|
||||
|
||||
if self._base_error is not None:
|
||||
try:
|
||||
raise self._base_error
|
||||
finally:
|
||||
exc = None
|
||||
|
||||
if self._parent_cancel_requested:
|
||||
# If this flag is set we *must* call uncancel().
|
||||
if self._parent_task.uncancel() == 0:
|
||||
# If there are no pending cancellations left,
|
||||
# don't propagate CancelledError.
|
||||
propagate_cancellation_error = None
|
||||
|
||||
# Propagate CancelledError if there is one, except if there
|
||||
# are other errors -- those have priority.
|
||||
try:
|
||||
if propagate_cancellation_error is not None and not self._errors:
|
||||
try:
|
||||
raise propagate_cancellation_error
|
||||
finally:
|
||||
exc = None
|
||||
finally:
|
||||
propagate_cancellation_error = None
|
||||
|
||||
if et is not None and not issubclass(et, exceptions.CancelledError):
|
||||
self._errors.append(exc)
|
||||
|
||||
if self._errors:
|
||||
# If the parent task is being cancelled from the outside
|
||||
# of the taskgroup, un-cancel and re-cancel the parent task,
|
||||
# which will keep the cancel count stable.
|
||||
if self._parent_task.cancelling():
|
||||
self._parent_task.uncancel()
|
||||
self._parent_task.cancel()
|
||||
try:
|
||||
raise BaseExceptionGroup(
|
||||
'unhandled errors in a TaskGroup',
|
||||
self._errors,
|
||||
) from None
|
||||
finally:
|
||||
exc = None
|
||||
|
||||
|
||||
def create_task(self, coro, *, name=None, context=None):
|
||||
"""Create a new task in this group and return it.
|
||||
|
||||
Similar to `asyncio.create_task`.
|
||||
"""
|
||||
if not self._entered:
|
||||
coro.close()
|
||||
raise RuntimeError(f"TaskGroup {self!r} has not been entered")
|
||||
if self._exiting and not self._tasks:
|
||||
coro.close()
|
||||
raise RuntimeError(f"TaskGroup {self!r} is finished")
|
||||
if self._aborting:
|
||||
coro.close()
|
||||
raise RuntimeError(f"TaskGroup {self!r} is shutting down")
|
||||
if context is None:
|
||||
task = self._loop.create_task(coro, name=name)
|
||||
else:
|
||||
task = self._loop.create_task(coro, name=name, context=context)
|
||||
|
||||
# Always schedule the done callback even if the task is
|
||||
# already done (e.g. if the coro was able to complete eagerly),
|
||||
# otherwise if the task completes with an exception then it will cancel
|
||||
# the current task too early. gh-128550, gh-128588
|
||||
self._tasks.add(task)
|
||||
task.add_done_callback(self._on_task_done)
|
||||
try:
|
||||
return task
|
||||
finally:
|
||||
# gh-128552: prevent a refcycle of
|
||||
# task.exception().__traceback__->TaskGroup.create_task->task
|
||||
del task
|
||||
|
||||
# Since Python 3.8 Tasks propagate all exceptions correctly,
|
||||
# except for KeyboardInterrupt and SystemExit which are
|
||||
# still considered special.
|
||||
|
||||
def _is_base_error(self, exc: BaseException) -> bool:
|
||||
assert isinstance(exc, BaseException)
|
||||
return isinstance(exc, (SystemExit, KeyboardInterrupt))
|
||||
|
||||
def _abort(self):
|
||||
self._aborting = True
|
||||
|
||||
for t in self._tasks:
|
||||
if not t.done():
|
||||
t.cancel()
|
||||
|
||||
def _on_task_done(self, task):
|
||||
self._tasks.discard(task)
|
||||
|
||||
if self._on_completed_fut is not None and not self._tasks:
|
||||
if not self._on_completed_fut.done():
|
||||
self._on_completed_fut.set_result(True)
|
||||
|
||||
if task.cancelled():
|
||||
return
|
||||
|
||||
exc = task.exception()
|
||||
if exc is None:
|
||||
return
|
||||
|
||||
self._errors.append(exc)
|
||||
if self._is_base_error(exc) and self._base_error is None:
|
||||
self._base_error = exc
|
||||
|
||||
if self._parent_task.done():
|
||||
# Not sure if this case is possible, but we want to handle
|
||||
# it anyway.
|
||||
self._loop.call_exception_handler({
|
||||
'message': f'Task {task!r} has errored out but its parent '
|
||||
f'task {self._parent_task} is already completed',
|
||||
'exception': exc,
|
||||
'task': task,
|
||||
})
|
||||
return
|
||||
|
||||
if not self._aborting and not self._parent_cancel_requested:
|
||||
# If parent task *is not* being cancelled, it means that we want
|
||||
# to manually cancel it to abort whatever is being run right now
|
||||
# in the TaskGroup. But we want to mark parent task as
|
||||
# "not cancelled" later in __aexit__. Example situation that
|
||||
# we need to handle:
|
||||
#
|
||||
# async def foo():
|
||||
# try:
|
||||
# async with TaskGroup() as g:
|
||||
# g.create_task(crash_soon())
|
||||
# await something # <- this needs to be canceled
|
||||
# # by the TaskGroup, e.g.
|
||||
# # foo() needs to be cancelled
|
||||
# except Exception:
|
||||
# # Ignore any exceptions raised in the TaskGroup
|
||||
# pass
|
||||
# await something_else # this line has to be called
|
||||
# # after TaskGroup is finished.
|
||||
self._abort()
|
||||
self._parent_cancel_requested = True
|
||||
self._parent_task.cancel()
|
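Because _aexit() re-raises the collected errors as a BaseExceptionGroup, callers typically pair a TaskGroup with except*. A minimal sketch, not part of the vendored module (both coroutines are illustrative assumptions):

import asyncio

async def ok():
    await asyncio.sleep(1)  # cancelled when the sibling task fails

async def boom():
    raise ValueError('failed')

async def main():
    try:
        async with asyncio.TaskGroup() as tg:
            tg.create_task(ok())
            tg.create_task(boom())
    except* ValueError as eg:
        # The group's errors arrive wrapped, as raised from _aexit() above.
        print('caught:', [str(e) for e in eg.exceptions])

asyncio.run(main())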
||||
1118
Dependencies/Python/Lib/asyncio/tasks.py
vendored
Normal file
File diff suppressed because it is too large
25
Dependencies/Python/Lib/asyncio/threads.py
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
"""High-level support for working with threads in asyncio"""
|
||||
|
||||
import functools
|
||||
import contextvars
|
||||
|
||||
from . import events
|
||||
|
||||
|
||||
__all__ = "to_thread",
|
||||
|
||||
|
||||
async def to_thread(func, /, *args, **kwargs):
|
||||
"""Asynchronously run function *func* in a separate thread.
|
||||
|
||||
Any *args and **kwargs supplied for this function are directly passed
|
||||
to *func*. Also, the current :class:`contextvars.Context` is propagated,
|
||||
allowing context variables from the main thread to be accessed in the
|
||||
separate thread.
|
||||
|
||||
Return a coroutine that can be awaited to get the eventual result of *func*.
|
||||
"""
|
||||
loop = events.get_running_loop()
|
||||
ctx = contextvars.copy_context()
|
||||
func_call = functools.partial(ctx.run, func, *args, **kwargs)
|
||||
return await loop.run_in_executor(None, func_call)
|
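Since to_thread() is run_in_executor() plus a copied contextvars.Context, a blocking call can be offloaded without stalling the event loop. A minimal sketch, not part of the vendored module (blocking_io is an illustrative assumption):

import asyncio
import time

def blocking_io(seconds):
    time.sleep(seconds)  # stands in for any blocking call
    return f'slept {seconds}s'

async def main():
    # Runs in the default ThreadPoolExecutor; the loop stays responsive.
    print(await asyncio.to_thread(blocking_io, 0.5))

asyncio.run(main())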
||||
184
Dependencies/Python/Lib/asyncio/timeouts.py
vendored
Normal file
@@ -0,0 +1,184 @@
|
||||
import enum
|
||||
|
||||
from types import TracebackType
|
||||
from typing import final, Optional, Type
|
||||
|
||||
from . import events
|
||||
from . import exceptions
|
||||
from . import tasks
|
||||
|
||||
|
||||
__all__ = (
|
||||
"Timeout",
|
||||
"timeout",
|
||||
"timeout_at",
|
||||
)
|
||||
|
||||
|
||||
class _State(enum.Enum):
|
||||
CREATED = "created"
|
||||
ENTERED = "active"
|
||||
EXPIRING = "expiring"
|
||||
EXPIRED = "expired"
|
||||
EXITED = "finished"
|
||||
|
||||
|
||||
@final
|
||||
class Timeout:
|
||||
"""Asynchronous context manager for cancelling overdue coroutines.
|
||||
|
||||
Use `timeout()` or `timeout_at()` rather than instantiating this class directly.
|
||||
"""
|
||||
|
||||
def __init__(self, when: Optional[float]) -> None:
|
||||
"""Schedule a timeout that will trigger at a given loop time.
|
||||
|
||||
- If `when` is `None`, the timeout will never trigger.
|
||||
- If `when < loop.time()`, the timeout will trigger on the next
|
||||
iteration of the event loop.
|
||||
"""
|
||||
self._state = _State.CREATED
|
||||
|
||||
self._timeout_handler: Optional[events.TimerHandle] = None
|
||||
self._task: Optional[tasks.Task] = None
|
||||
self._when = when
|
||||
|
||||
def when(self) -> Optional[float]:
|
||||
"""Return the current deadline."""
|
||||
return self._when
|
||||
|
||||
def reschedule(self, when: Optional[float]) -> None:
|
||||
"""Reschedule the timeout."""
|
||||
if self._state is not _State.ENTERED:
|
||||
if self._state is _State.CREATED:
|
||||
raise RuntimeError("Timeout has not been entered")
|
||||
raise RuntimeError(
|
||||
f"Cannot change state of {self._state.value} Timeout",
|
||||
)
|
||||
|
||||
self._when = when
|
||||
|
||||
if self._timeout_handler is not None:
|
||||
self._timeout_handler.cancel()
|
||||
|
||||
if when is None:
|
||||
self._timeout_handler = None
|
||||
else:
|
||||
loop = events.get_running_loop()
|
||||
if when <= loop.time():
|
||||
self._timeout_handler = loop.call_soon(self._on_timeout)
|
||||
else:
|
||||
self._timeout_handler = loop.call_at(when, self._on_timeout)
|
||||
|
||||
def expired(self) -> bool:
|
||||
"""Is timeout expired during execution?"""
|
||||
return self._state in (_State.EXPIRING, _State.EXPIRED)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
info = ['']
|
||||
if self._state is _State.ENTERED:
|
||||
when = round(self._when, 3) if self._when is not None else None
|
||||
info.append(f"when={when}")
|
||||
info_str = ' '.join(info)
|
||||
return f"<Timeout [{self._state.value}]{info_str}>"
|
||||
|
||||
async def __aenter__(self) -> "Timeout":
|
||||
if self._state is not _State.CREATED:
|
||||
raise RuntimeError("Timeout has already been entered")
|
||||
task = tasks.current_task()
|
||||
if task is None:
|
||||
raise RuntimeError("Timeout should be used inside a task")
|
||||
self._state = _State.ENTERED
|
||||
self._task = task
|
||||
self._cancelling = self._task.cancelling()
|
||||
self.reschedule(self._when)
|
||||
return self
|
||||
|
||||
async def __aexit__(
|
||||
self,
|
||||
exc_type: Optional[Type[BaseException]],
|
||||
exc_val: Optional[BaseException],
|
||||
exc_tb: Optional[TracebackType],
|
||||
) -> Optional[bool]:
|
||||
assert self._state in (_State.ENTERED, _State.EXPIRING)
|
||||
|
||||
if self._timeout_handler is not None:
|
||||
self._timeout_handler.cancel()
|
||||
self._timeout_handler = None
|
||||
|
||||
if self._state is _State.EXPIRING:
|
||||
self._state = _State.EXPIRED
|
||||
|
||||
if self._task.uncancel() <= self._cancelling and exc_type is not None:
|
||||
# Since there are no new cancel requests, we're
|
||||
# handling this.
|
||||
if issubclass(exc_type, exceptions.CancelledError):
|
||||
raise TimeoutError from exc_val
|
||||
elif exc_val is not None:
|
||||
self._insert_timeout_error(exc_val)
|
||||
if isinstance(exc_val, ExceptionGroup):
|
||||
for exc in exc_val.exceptions:
|
||||
self._insert_timeout_error(exc)
|
||||
elif self._state is _State.ENTERED:
|
||||
self._state = _State.EXITED
|
||||
|
||||
return None
|
||||
|
||||
def _on_timeout(self) -> None:
|
||||
assert self._state is _State.ENTERED
|
||||
self._task.cancel()
|
||||
self._state = _State.EXPIRING
|
||||
# drop the reference early
|
||||
self._timeout_handler = None
|
||||
|
||||
@staticmethod
|
||||
def _insert_timeout_error(exc_val: BaseException) -> None:
|
||||
while exc_val.__context__ is not None:
|
||||
if isinstance(exc_val.__context__, exceptions.CancelledError):
|
||||
te = TimeoutError()
|
||||
te.__context__ = te.__cause__ = exc_val.__context__
|
||||
exc_val.__context__ = te
|
||||
break
|
||||
exc_val = exc_val.__context__
|
||||
|
||||
|
||||
def timeout(delay: Optional[float]) -> Timeout:
|
||||
"""Timeout async context manager.
|
||||
|
||||
Useful in cases when you want to apply timeout logic around a block
|
||||
of code or in cases when asyncio.wait_for is not suitable. For example:
|
||||
|
||||
>>> async with asyncio.timeout(10): # 10 seconds timeout
|
||||
... await long_running_task()
|
||||
|
||||
|
||||
delay - value in seconds or None to disable timeout logic
|
||||
|
||||
If long_running_task() is interrupted by raising asyncio.CancelledError,
|
||||
the top-most affected timeout() context manager converts CancelledError
|
||||
into TimeoutError.
|
||||
"""
|
||||
loop = events.get_running_loop()
|
||||
return Timeout(loop.time() + delay if delay is not None else None)
|
||||
|
||||
|
||||
def timeout_at(when: Optional[float]) -> Timeout:
|
||||
"""Schedule the timeout at absolute time.
|
||||
|
||||
Like timeout() but argument gives absolute time in the same clock system
|
||||
as loop.time().
|
||||
|
||||
Please note: it is not POSIX time but a time with
|
||||
an undefined starting base, e.g. the time since the system powered on.
|
||||
|
||||
>>> async with asyncio.timeout_at(loop.time() + 10):
|
||||
... await long_running_task()
|
||||
|
||||
|
||||
when - a deadline when timeout occurs or None to disable timeout logic
|
||||
|
||||
If long_running_task() is interrupted by raising asyncio.CancelledError,
|
||||
the top-most affected timeout() context manager converts CancelledError
|
||||
into TimeoutError.
|
||||
"""
|
||||
return Timeout(when)
|
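timeout() converts the delay into an absolute deadline on the running loop's clock, and expired() reports whether the cancellation injected by _on_timeout() was the cause. A minimal sketch, not part of the vendored module (the sleep durations are illustrative):

import asyncio

async def main():
    try:
        async with asyncio.timeout(0.1) as cm:
            await asyncio.sleep(1)  # cancelled by _on_timeout()
    except TimeoutError:
        # __aexit__() absorbed the CancelledError and raised TimeoutError.
        print('timed out; expired =', cm.expired())

asyncio.run(main())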
||||
337
Dependencies/Python/Lib/asyncio/transports.py
vendored
Normal file
@@ -0,0 +1,337 @@
|
||||
"""Abstract Transport class."""
|
||||
|
||||
__all__ = (
|
||||
'BaseTransport', 'ReadTransport', 'WriteTransport',
|
||||
'Transport', 'DatagramTransport', 'SubprocessTransport',
|
||||
)
|
||||
|
||||
|
||||
class BaseTransport:
|
||||
"""Base class for transports."""
|
||||
|
||||
__slots__ = ('_extra',)
|
||||
|
||||
def __init__(self, extra=None):
|
||||
if extra is None:
|
||||
extra = {}
|
||||
self._extra = extra
|
||||
|
||||
def get_extra_info(self, name, default=None):
|
||||
"""Get optional transport information."""
|
||||
return self._extra.get(name, default)
|
||||
|
||||
def is_closing(self):
|
||||
"""Return True if the transport is closing or closed."""
|
||||
raise NotImplementedError
|
||||
|
||||
def close(self):
|
||||
"""Close the transport.
|
||||
|
||||
Buffered data will be flushed asynchronously. No more data
|
||||
will be received. After all buffered data is flushed, the
|
||||
protocol's connection_lost() method will (eventually) be
|
||||
called with None as its argument.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def set_protocol(self, protocol):
|
||||
"""Set a new protocol."""
|
||||
raise NotImplementedError
|
||||
|
||||
def get_protocol(self):
|
||||
"""Return the current protocol."""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class ReadTransport(BaseTransport):
|
||||
"""Interface for read-only transports."""
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
def is_reading(self):
|
||||
"""Return True if the transport is receiving."""
|
||||
raise NotImplementedError
|
||||
|
||||
def pause_reading(self):
|
||||
"""Pause the receiving end.
|
||||
|
||||
No data will be passed to the protocol's data_received()
|
||||
method until resume_reading() is called.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def resume_reading(self):
|
||||
"""Resume the receiving end.
|
||||
|
||||
Data received will once again be passed to the protocol's
|
||||
data_received() method.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class WriteTransport(BaseTransport):
|
||||
"""Interface for write-only transports."""
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
def set_write_buffer_limits(self, high=None, low=None):
|
||||
"""Set the high- and low-water limits for write flow control.
|
||||
|
||||
These two values control when to call the protocol's
|
||||
pause_writing() and resume_writing() methods. If specified,
|
||||
the low-water limit must be less than or equal to the
|
||||
high-water limit. Neither value can be negative.
|
||||
|
||||
The defaults are implementation-specific. If only the
|
||||
high-water limit is given, the low-water limit defaults to an
|
||||
implementation-specific value less than or equal to the
|
||||
high-water limit. Setting high to zero forces low to zero as
|
||||
well, and causes pause_writing() to be called whenever the
|
||||
buffer becomes non-empty. Setting low to zero causes
|
||||
resume_writing() to be called only once the buffer is empty.
|
||||
Use of zero for either limit is generally sub-optimal as it
|
||||
reduces opportunities for doing I/O and computation
|
||||
concurrently.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def get_write_buffer_size(self):
|
||||
"""Return the current size of the write buffer."""
|
||||
raise NotImplementedError
|
||||
|
||||
def get_write_buffer_limits(self):
|
||||
"""Get the high and low watermarks for write flow control.
|
||||
Return a tuple (low, high) where low and high are
|
||||
positive numbers of bytes."""
|
||||
raise NotImplementedError
|
||||
|
||||
def write(self, data):
|
||||
"""Write some data bytes to the transport.
|
||||
|
||||
This does not block; it buffers the data and arranges for it
|
||||
to be sent out asynchronously.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def writelines(self, list_of_data):
|
||||
"""Write a list (or any iterable) of data bytes to the transport.
|
||||
|
||||
The default implementation concatenates the arguments and
|
||||
calls write() on the result.
|
||||
"""
|
||||
data = b''.join(list_of_data)
|
||||
self.write(data)
|
||||
|
||||
def write_eof(self):
|
||||
"""Close the write end after flushing buffered data.
|
||||
|
||||
(This is like typing ^D into a UNIX program reading from stdin.)
|
||||
|
||||
Data may still be received.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def can_write_eof(self):
|
||||
"""Return True if this transport supports write_eof(), False if not."""
|
||||
raise NotImplementedError
|
||||
|
||||
def abort(self):
|
||||
"""Close the transport immediately.
|
||||
|
||||
Buffered data will be lost. No more data will be received.
|
||||
The protocol's connection_lost() method will (eventually) be
|
||||
called with None as its argument.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class Transport(ReadTransport, WriteTransport):
|
||||
"""Interface representing a bidirectional transport.
|
||||
|
||||
There may be several implementations, but typically, the user does
|
||||
not implement new transports; rather, the platform provides some
|
||||
useful transports that are implemented using the platform's best
|
||||
practices.
|
||||
|
||||
The user never instantiates a transport directly; they call a
|
||||
utility function, passing it a protocol factory and other
|
||||
information necessary to create the transport and protocol. (E.g.
|
||||
EventLoop.create_connection() or EventLoop.create_server().)
|
||||
|
||||
The utility function will asynchronously create a transport and a
|
||||
protocol and hook them up by calling the protocol's
|
||||
connection_made() method, passing it the transport.
|
||||
|
||||
The implementation here raises NotImplementedError for every method
|
||||
except writelines(), which concatenates the arguments and calls write().
|
||||
"""
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
|
||||
class DatagramTransport(BaseTransport):
|
||||
"""Interface for datagram (UDP) transports."""
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
def sendto(self, data, addr=None):
|
||||
"""Send data to the transport.
|
||||
|
||||
This does not block; it buffers the data and arranges for it
|
||||
to be sent out asynchronously.
|
||||
addr is target socket address.
|
||||
If addr is None, use the target address given at transport creation.
|
||||
If data is an empty bytes object, a zero-length datagram will be
|
||||
sent.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def abort(self):
|
||||
"""Close the transport immediately.
|
||||
|
||||
Buffered data will be lost. No more data will be received.
|
||||
The protocol's connection_lost() method will (eventually) be
|
||||
called with None as its argument.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class SubprocessTransport(BaseTransport):
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
def get_pid(self):
|
||||
"""Get subprocess id."""
|
||||
raise NotImplementedError
|
||||
|
||||
def get_returncode(self):
|
||||
"""Get subprocess returncode.
|
||||
|
||||
See also
|
||||
http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def get_pipe_transport(self, fd):
|
||||
"""Get transport for pipe with number fd."""
|
||||
raise NotImplementedError
|
||||
|
||||
def send_signal(self, signal):
|
||||
"""Send signal to subprocess.
|
||||
|
||||
See also:
|
||||
docs.python.org/3/library/subprocess#subprocess.Popen.send_signal
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def terminate(self):
|
||||
"""Stop the subprocess.
|
||||
|
||||
Alias for close() method.
|
||||
|
||||
On Posix OSs the method sends SIGTERM to the subprocess.
|
||||
On Windows the Win32 API function TerminateProcess()
|
||||
is called to stop the subprocess.
|
||||
|
||||
See also:
|
||||
http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def kill(self):
|
||||
"""Kill the subprocess.
|
||||
|
||||
On Posix OSs the function sends SIGKILL to the subprocess.
|
||||
On Windows kill() is an alias for terminate().
|
||||
|
||||
See also:
|
||||
http://docs.python.org/3/library/subprocess#subprocess.Popen.kill
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class _FlowControlMixin(Transport):
|
||||
"""All the logic for (write) flow control in a mix-in base class.
|
||||
|
||||
The subclass must implement get_write_buffer_size(). It must call
|
||||
_maybe_pause_protocol() whenever the write buffer size increases,
|
||||
and _maybe_resume_protocol() whenever it decreases. It may also
|
||||
override set_write_buffer_limits() (e.g. to specify different
|
||||
defaults).
|
||||
|
||||
The subclass constructor must call super().__init__(extra). This
|
||||
will call set_write_buffer_limits().
|
||||
|
||||
The user may call set_write_buffer_limits() and
|
||||
get_write_buffer_size(), and their protocol's pause_writing() and
|
||||
resume_writing() may be called.
|
||||
"""
|
||||
|
||||
__slots__ = ('_loop', '_protocol_paused', '_high_water', '_low_water')
|
||||
|
||||
def __init__(self, extra=None, loop=None):
|
||||
super().__init__(extra)
|
||||
assert loop is not None
|
||||
self._loop = loop
|
||||
self._protocol_paused = False
|
||||
self._set_write_buffer_limits()
|
||||
|
||||
def _maybe_pause_protocol(self):
|
||||
size = self.get_write_buffer_size()
|
||||
if size <= self._high_water:
|
||||
return
|
||||
if not self._protocol_paused:
|
||||
self._protocol_paused = True
|
||||
try:
|
||||
self._protocol.pause_writing()
|
||||
except (SystemExit, KeyboardInterrupt):
|
||||
raise
|
||||
except BaseException as exc:
|
||||
self._loop.call_exception_handler({
|
||||
'message': 'protocol.pause_writing() failed',
|
||||
'exception': exc,
|
||||
'transport': self,
|
||||
'protocol': self._protocol,
|
||||
})
|
||||
|
||||
def _maybe_resume_protocol(self):
|
||||
if (self._protocol_paused and
|
||||
self.get_write_buffer_size() <= self._low_water):
|
||||
self._protocol_paused = False
|
||||
try:
|
||||
self._protocol.resume_writing()
|
||||
except (SystemExit, KeyboardInterrupt):
|
||||
raise
|
||||
except BaseException as exc:
|
||||
self._loop.call_exception_handler({
|
||||
'message': 'protocol.resume_writing() failed',
|
||||
'exception': exc,
|
||||
'transport': self,
|
||||
'protocol': self._protocol,
|
||||
})
|
||||
|
||||
def get_write_buffer_limits(self):
|
||||
return (self._low_water, self._high_water)
|
||||
|
||||
def _set_write_buffer_limits(self, high=None, low=None):
|
||||
if high is None:
|
||||
if low is None:
|
||||
high = 64 * 1024
|
||||
else:
|
||||
high = 4 * low
|
||||
if low is None:
|
||||
low = high // 4
|
||||
|
||||
if not high >= low >= 0:
|
||||
raise ValueError(
|
||||
f'high ({high!r}) must be >= low ({low!r}) must be >= 0')
|
||||
|
||||
self._high_water = high
|
||||
self._low_water = low
|
||||
|
||||
def set_write_buffer_limits(self, high=None, low=None):
|
||||
self._set_write_buffer_limits(high=high, low=low)
|
||||
self._maybe_pause_protocol()
|
||||
|
||||
def get_write_buffer_size(self):
|
||||
raise NotImplementedError
|
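The default watermark arithmetic in _set_write_buffer_limits() is easy to check by hand: with no arguments, high is 64 KiB and low is high // 4; given only low, high becomes 4 * low. A standalone sketch of just that logic, not part of the vendored module:

def write_buffer_limits(high=None, low=None):
    # Mirrors _FlowControlMixin._set_write_buffer_limits() above.
    if high is None:
        high = 64 * 1024 if low is None else 4 * low
    if low is None:
        low = high // 4
    if not high >= low >= 0:
        raise ValueError(
            f'high ({high!r}) must be >= low ({low!r}) must be >= 0')
    return low, high

assert write_buffer_limits() == (16 * 1024, 64 * 1024)
assert write_buffer_limits(low=1000) == (1000, 4000)
assert write_buffer_limits(high=8192) == (2048, 8192)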
||||
98
Dependencies/Python/Lib/asyncio/trsock.py
vendored
Normal file
@@ -0,0 +1,98 @@
|
||||
import socket
|
||||
|
||||
|
||||
class TransportSocket:
|
||||
|
||||
"""A socket-like wrapper for exposing real transport sockets.
|
||||
|
||||
These objects can be safely returned by APIs like
|
||||
`transport.get_extra_info('socket')`. All potentially disruptive
|
||||
operations (like "socket.close()") are banned.
|
||||
"""
|
||||
|
||||
__slots__ = ('_sock',)
|
||||
|
||||
def __init__(self, sock: socket.socket):
|
||||
self._sock = sock
|
||||
|
||||
@property
|
||||
def family(self):
|
||||
return self._sock.family
|
||||
|
||||
@property
|
||||
def type(self):
|
||||
return self._sock.type
|
||||
|
||||
@property
|
||||
def proto(self):
|
||||
return self._sock.proto
|
||||
|
||||
def __repr__(self):
|
||||
s = (
|
||||
f"<asyncio.TransportSocket fd={self.fileno()}, "
|
||||
f"family={self.family!s}, type={self.type!s}, "
|
||||
f"proto={self.proto}"
|
||||
)
|
||||
|
||||
if self.fileno() != -1:
|
||||
try:
|
||||
laddr = self.getsockname()
|
||||
if laddr:
|
||||
s = f"{s}, laddr={laddr}"
|
||||
except socket.error:
|
||||
pass
|
||||
try:
|
||||
raddr = self.getpeername()
|
||||
if raddr:
|
||||
s = f"{s}, raddr={raddr}"
|
||||
except socket.error:
|
||||
pass
|
||||
|
||||
return f"{s}>"
|
||||
|
||||
def __getstate__(self):
|
||||
raise TypeError("Cannot serialize asyncio.TransportSocket object")
|
||||
|
||||
def fileno(self):
|
||||
return self._sock.fileno()
|
||||
|
||||
def dup(self):
|
||||
return self._sock.dup()
|
||||
|
||||
def get_inheritable(self):
|
||||
return self._sock.get_inheritable()
|
||||
|
||||
def shutdown(self, how):
|
||||
# asyncio doesn't currently provide a high-level transport API
|
||||
# to shut down the connection.
|
||||
self._sock.shutdown(how)
|
||||
|
||||
def getsockopt(self, *args, **kwargs):
|
||||
return self._sock.getsockopt(*args, **kwargs)
|
||||
|
||||
def setsockopt(self, *args, **kwargs):
|
||||
self._sock.setsockopt(*args, **kwargs)
|
||||
|
||||
def getpeername(self):
|
||||
return self._sock.getpeername()
|
||||
|
||||
def getsockname(self):
|
||||
return self._sock.getsockname()
|
||||
|
||||
def getsockbyname(self):
|
||||
return self._sock.getsockbyname()
|
||||
|
||||
def settimeout(self, value):
|
||||
if value == 0:
|
||||
return
|
||||
raise ValueError(
|
||||
'settimeout(): only 0 timeout is allowed on transport sockets')
|
||||
|
||||
def gettimeout(self):
|
||||
return 0
|
||||
|
||||
def setblocking(self, flag):
|
||||
if not flag:
|
||||
return
|
||||
raise ValueError(
|
||||
'setblocking(): transport sockets cannot be blocking')
|
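TransportSocket forwards read-only queries but rejects mode changes that would break the event loop. A minimal sketch of what a caller of transport.get_extra_info('socket') can expect, not part of the vendored module (asyncio.trsock is an internal module, so the direct import is for illustration only):

import socket
from asyncio.trsock import TransportSocket

raw = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
raw.setblocking(False)
ts = TransportSocket(raw)

print(ts.family, ts.type, ts.fileno())  # read-only queries are forwarded
ts.setblocking(False)  # no-op: already the required mode
ts.settimeout(0)       # no-op: only a 0 timeout is allowed
try:
    ts.setblocking(True)  # banned on transport sockets
except ValueError as exc:
    print(exc)
raw.close()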
||||
1536
Dependencies/Python/Lib/asyncio/unix_events.py
vendored
Normal file
File diff suppressed because it is too large
903
Dependencies/Python/Lib/asyncio/windows_events.py
vendored
Normal file
@@ -0,0 +1,903 @@
|
||||
"""Selector and proactor event loops for Windows."""
|
||||
|
||||
import sys
|
||||
|
||||
if sys.platform != 'win32': # pragma: no cover
|
||||
raise ImportError('win32 only')
|
||||
|
||||
import _overlapped
|
||||
import _winapi
|
||||
import errno
|
||||
from functools import partial
|
||||
import math
|
||||
import msvcrt
|
||||
import socket
|
||||
import struct
|
||||
import time
|
||||
import weakref
|
||||
|
||||
from . import events
|
||||
from . import base_subprocess
|
||||
from . import futures
|
||||
from . import exceptions
|
||||
from . import proactor_events
|
||||
from . import selector_events
|
||||
from . import tasks
|
||||
from . import windows_utils
|
||||
from .log import logger
|
||||
|
||||
|
||||
__all__ = (
|
||||
'SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor',
|
||||
'DefaultEventLoopPolicy', 'WindowsSelectorEventLoopPolicy',
|
||||
'WindowsProactorEventLoopPolicy', 'EventLoop',
|
||||
)
|
||||
|
||||
|
||||
NULL = _winapi.NULL
|
||||
INFINITE = _winapi.INFINITE
|
||||
ERROR_CONNECTION_REFUSED = 1225
|
||||
ERROR_CONNECTION_ABORTED = 1236
|
||||
|
||||
# Initial delay in seconds for connect_pipe() before retrying to connect
|
||||
CONNECT_PIPE_INIT_DELAY = 0.001
|
||||
|
||||
# Maximum delay in seconds for connect_pipe() before retrying to connect
|
||||
CONNECT_PIPE_MAX_DELAY = 0.100
|
||||
|
||||
|
||||
class _OverlappedFuture(futures.Future):
|
||||
"""Subclass of Future which represents an overlapped operation.
|
||||
|
||||
Cancelling it will immediately cancel the overlapped operation.
|
||||
"""
|
||||
|
||||
def __init__(self, ov, *, loop=None):
|
||||
super().__init__(loop=loop)
|
||||
if self._source_traceback:
|
||||
del self._source_traceback[-1]
|
||||
self._ov = ov
|
||||
|
||||
def _repr_info(self):
|
||||
info = super()._repr_info()
|
||||
if self._ov is not None:
|
||||
state = 'pending' if self._ov.pending else 'completed'
|
||||
info.insert(1, f'overlapped=<{state}, {self._ov.address:#x}>')
|
||||
return info
|
||||
|
||||
def _cancel_overlapped(self):
|
||||
if self._ov is None:
|
||||
return
|
||||
try:
|
||||
self._ov.cancel()
|
||||
except OSError as exc:
|
||||
context = {
|
||||
'message': 'Cancelling an overlapped future failed',
|
||||
'exception': exc,
|
||||
'future': self,
|
||||
}
|
||||
if self._source_traceback:
|
||||
context['source_traceback'] = self._source_traceback
|
||||
self._loop.call_exception_handler(context)
|
||||
self._ov = None
|
||||
|
||||
def cancel(self, msg=None):
|
||||
self._cancel_overlapped()
|
||||
return super().cancel(msg=msg)
|
||||
|
||||
def set_exception(self, exception):
|
||||
super().set_exception(exception)
|
||||
self._cancel_overlapped()
|
||||
|
||||
def set_result(self, result):
|
||||
super().set_result(result)
|
||||
self._ov = None
|
||||
|
||||
|
||||
class _BaseWaitHandleFuture(futures.Future):
|
||||
"""Subclass of Future which represents a wait handle."""
|
||||
|
||||
def __init__(self, ov, handle, wait_handle, *, loop=None):
|
||||
super().__init__(loop=loop)
|
||||
if self._source_traceback:
|
||||
del self._source_traceback[-1]
|
||||
# Keep a reference to the Overlapped object to keep it alive until the
|
||||
# wait is unregistered
|
||||
self._ov = ov
|
||||
self._handle = handle
|
||||
self._wait_handle = wait_handle
|
||||
|
||||
# Should we call UnregisterWaitEx() if the wait completes
|
||||
# or is cancelled?
|
||||
self._registered = True
|
||||
|
||||
def _poll(self):
|
||||
# Non-blocking wait: use a timeout of 0 milliseconds.
|
||||
return (_winapi.WaitForSingleObject(self._handle, 0) ==
|
||||
_winapi.WAIT_OBJECT_0)
|
||||
|
||||
def _repr_info(self):
|
||||
info = super()._repr_info()
|
||||
info.append(f'handle={self._handle:#x}')
|
||||
if self._handle is not None:
|
||||
state = 'signaled' if self._poll() else 'waiting'
|
||||
info.append(state)
|
||||
if self._wait_handle is not None:
|
||||
info.append(f'wait_handle={self._wait_handle:#x}')
|
||||
return info
|
||||
|
||||
def _unregister_wait_cb(self, fut):
|
||||
# The wait was unregistered: it's not safe to destroy the Overlapped
|
||||
# object
|
||||
self._ov = None
|
||||
|
||||
def _unregister_wait(self):
|
||||
if not self._registered:
|
||||
return
|
||||
self._registered = False
|
||||
|
||||
wait_handle = self._wait_handle
|
||||
self._wait_handle = None
|
||||
try:
|
||||
_overlapped.UnregisterWait(wait_handle)
|
||||
except OSError as exc:
|
||||
if exc.winerror != _overlapped.ERROR_IO_PENDING:
|
||||
context = {
|
||||
'message': 'Failed to unregister the wait handle',
|
||||
'exception': exc,
|
||||
'future': self,
|
||||
}
|
||||
if self._source_traceback:
|
||||
context['source_traceback'] = self._source_traceback
|
||||
self._loop.call_exception_handler(context)
|
||||
return
|
||||
# ERROR_IO_PENDING means that the unregister is pending
|
||||
|
||||
self._unregister_wait_cb(None)
|
||||
|
||||
def cancel(self, msg=None):
|
||||
self._unregister_wait()
|
||||
return super().cancel(msg=msg)
|
||||
|
||||
def set_exception(self, exception):
|
||||
self._unregister_wait()
|
||||
super().set_exception(exception)
|
||||
|
||||
def set_result(self, result):
|
||||
self._unregister_wait()
|
||||
super().set_result(result)
|
||||
|
||||
|
||||
class _WaitCancelFuture(_BaseWaitHandleFuture):
|
||||
"""Subclass of Future which represents a wait for the cancellation of a
|
||||
_WaitHandleFuture using an event.
|
||||
"""
|
||||
|
||||
def __init__(self, ov, event, wait_handle, *, loop=None):
|
||||
super().__init__(ov, event, wait_handle, loop=loop)
|
||||
|
||||
self._done_callback = None
|
||||
|
||||
def cancel(self):
|
||||
raise RuntimeError("_WaitCancelFuture must not be cancelled")
|
||||
|
||||
def set_result(self, result):
|
||||
super().set_result(result)
|
||||
if self._done_callback is not None:
|
||||
self._done_callback(self)
|
||||
|
||||
def set_exception(self, exception):
|
||||
super().set_exception(exception)
|
||||
if self._done_callback is not None:
|
||||
self._done_callback(self)
|
||||
|
||||
|
||||
class _WaitHandleFuture(_BaseWaitHandleFuture):
|
||||
def __init__(self, ov, handle, wait_handle, proactor, *, loop=None):
|
||||
super().__init__(ov, handle, wait_handle, loop=loop)
|
||||
self._proactor = proactor
|
||||
self._unregister_proactor = True
|
||||
self._event = _overlapped.CreateEvent(None, True, False, None)
|
||||
self._event_fut = None
|
||||
|
||||
def _unregister_wait_cb(self, fut):
|
||||
if self._event is not None:
|
||||
_winapi.CloseHandle(self._event)
|
||||
self._event = None
|
||||
self._event_fut = None
|
||||
|
||||
# If the wait was cancelled, the wait may never be signalled, so
|
||||
# it's required to unregister it. Otherwise, IocpProactor.close() will
|
||||
# wait forever for an event which will never come.
|
||||
#
|
||||
# If the IocpProactor already received the event, it's safe to call
|
||||
# _unregister() because we kept a reference to the Overlapped object
|
||||
# which is used as a unique key.
|
||||
self._proactor._unregister(self._ov)
|
||||
self._proactor = None
|
||||
|
||||
super()._unregister_wait_cb(fut)
|
||||
|
||||
def _unregister_wait(self):
|
||||
if not self._registered:
|
||||
return
|
||||
self._registered = False
|
||||
|
||||
wait_handle = self._wait_handle
|
||||
self._wait_handle = None
|
||||
try:
|
||||
_overlapped.UnregisterWaitEx(wait_handle, self._event)
|
||||
except OSError as exc:
|
||||
if exc.winerror != _overlapped.ERROR_IO_PENDING:
|
||||
context = {
|
||||
'message': 'Failed to unregister the wait handle',
|
||||
'exception': exc,
|
||||
'future': self,
|
||||
}
|
||||
if self._source_traceback:
|
||||
context['source_traceback'] = self._source_traceback
|
||||
self._loop.call_exception_handler(context)
|
||||
return
|
||||
# ERROR_IO_PENDING is not an error, the wait was unregistered
|
||||
|
||||
self._event_fut = self._proactor._wait_cancel(self._event,
|
||||
self._unregister_wait_cb)
|
||||
|
||||
|
||||
class PipeServer(object):
|
||||
"""Class representing a pipe server.
|
||||
|
||||
This is much like a bound, listening socket.
|
||||
"""
|
||||
def __init__(self, address):
|
||||
self._address = address
|
||||
self._free_instances = weakref.WeakSet()
|
||||
# initialize the pipe attribute before calling _server_pipe_handle()
|
||||
# because this function can raise an exception and the destructor calls
|
||||
# the close() method
|
||||
self._pipe = None
|
||||
self._accept_pipe_future = None
|
||||
self._pipe = self._server_pipe_handle(True)
|
||||
|
||||
def _get_unconnected_pipe(self):
|
||||
# Create a new instance and return the previous one. This ensures
|
||||
# that (until the server is closed) there is always at least
|
||||
# one pipe handle for the address. Therefore if a client attempts
|
||||
# to connect, it will not fail with FileNotFoundError.
|
||||
tmp, self._pipe = self._pipe, self._server_pipe_handle(False)
|
||||
return tmp
|
||||
|
||||
def _server_pipe_handle(self, first):
|
||||
# Return a wrapper for a new pipe handle.
|
||||
if self.closed():
|
||||
return None
|
||||
flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
|
||||
if first:
|
||||
flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
|
||||
h = _winapi.CreateNamedPipe(
|
||||
self._address, flags,
|
||||
_winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
|
||||
_winapi.PIPE_WAIT,
|
||||
_winapi.PIPE_UNLIMITED_INSTANCES,
|
||||
windows_utils.BUFSIZE, windows_utils.BUFSIZE,
|
||||
_winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
|
||||
pipe = windows_utils.PipeHandle(h)
|
||||
self._free_instances.add(pipe)
|
||||
return pipe
|
||||
|
||||
def closed(self):
|
||||
return (self._address is None)
|
||||
|
||||
def close(self):
|
||||
if self._accept_pipe_future is not None:
|
||||
self._accept_pipe_future.cancel()
|
||||
self._accept_pipe_future = None
|
||||
# Close all instances which have not been connected to by a client.
|
||||
if self._address is not None:
|
||||
for pipe in self._free_instances:
|
||||
pipe.close()
|
||||
self._pipe = None
|
||||
self._address = None
|
||||
self._free_instances.clear()
|
||||
|
||||
__del__ = close
|
||||
|
||||
|
||||
class _WindowsSelectorEventLoop(selector_events.BaseSelectorEventLoop):
|
||||
"""Windows version of selector event loop."""
|
||||
|
||||
|
||||
class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
|
||||
"""Windows version of proactor event loop using IOCP."""
|
||||
|
||||
def __init__(self, proactor=None):
|
||||
if proactor is None:
|
||||
proactor = IocpProactor()
|
||||
super().__init__(proactor)
|
||||
|
||||
def _run_forever_setup(self):
|
||||
assert self._self_reading_future is None
|
||||
self.call_soon(self._loop_self_reading)
|
||||
super()._run_forever_setup()
|
||||
|
||||
def _run_forever_cleanup(self):
|
||||
super()._run_forever_cleanup()
|
||||
if self._self_reading_future is not None:
|
||||
ov = self._self_reading_future._ov
|
||||
self._self_reading_future.cancel()
|
||||
# self_reading_future always uses IOCP, so even though it's
|
||||
# been cancelled, we need to make sure that the IOCP message
|
||||
# is received so that the kernel is not holding on to the
|
||||
# memory, possibly causing memory corruption later. Only
|
||||
# unregister it if IO is complete in all respects. Otherwise
|
||||
# we need another _poll() later to complete the IO.
|
||||
if ov is not None and not ov.pending:
|
||||
self._proactor._unregister(ov)
|
||||
self._self_reading_future = None
|
||||
|
||||
async def create_pipe_connection(self, protocol_factory, address):
|
||||
f = self._proactor.connect_pipe(address)
|
||||
pipe = await f
|
||||
protocol = protocol_factory()
|
||||
trans = self._make_duplex_pipe_transport(pipe, protocol,
|
||||
extra={'addr': address})
|
||||
return trans, protocol
|
||||
|
||||
async def start_serving_pipe(self, protocol_factory, address):
|
||||
server = PipeServer(address)
|
||||
|
||||
def loop_accept_pipe(f=None):
|
||||
pipe = None
|
||||
try:
|
||||
if f:
|
||||
pipe = f.result()
|
||||
server._free_instances.discard(pipe)
|
||||
|
||||
if server.closed():
|
||||
# A client connected before the server was closed:
|
||||
# drop the client (close the pipe) and exit
|
||||
pipe.close()
|
||||
return
|
||||
|
||||
protocol = protocol_factory()
|
||||
self._make_duplex_pipe_transport(
|
||||
pipe, protocol, extra={'addr': address})
|
||||
|
||||
pipe = server._get_unconnected_pipe()
|
||||
if pipe is None:
|
||||
return
|
||||
|
||||
f = self._proactor.accept_pipe(pipe)
|
||||
except BrokenPipeError:
|
||||
if pipe and pipe.fileno() != -1:
|
||||
pipe.close()
|
||||
self.call_soon(loop_accept_pipe)
|
||||
except OSError as exc:
|
||||
if pipe and pipe.fileno() != -1:
|
||||
self.call_exception_handler({
|
||||
'message': 'Pipe accept failed',
|
||||
'exception': exc,
|
||||
'pipe': pipe,
|
||||
})
|
||||
pipe.close()
|
||||
elif self._debug:
|
||||
logger.warning("Accept pipe failed on pipe %r",
|
||||
pipe, exc_info=True)
|
||||
self.call_soon(loop_accept_pipe)
|
||||
except exceptions.CancelledError:
|
||||
if pipe:
|
||||
pipe.close()
|
||||
else:
|
||||
server._accept_pipe_future = f
|
||||
f.add_done_callback(loop_accept_pipe)
|
||||
|
||||
self.call_soon(loop_accept_pipe)
|
||||
return [server]
|
||||
|
||||
async def _make_subprocess_transport(self, protocol, args, shell,
|
||||
stdin, stdout, stderr, bufsize,
|
||||
extra=None, **kwargs):
|
||||
waiter = self.create_future()
|
||||
transp = _WindowsSubprocessTransport(self, protocol, args, shell,
|
||||
stdin, stdout, stderr, bufsize,
|
||||
waiter=waiter, extra=extra,
|
||||
**kwargs)
|
||||
try:
|
||||
await waiter
|
||||
except (SystemExit, KeyboardInterrupt):
|
||||
raise
|
||||
except BaseException:
|
||||
transp.close()
|
||||
await transp._wait()
|
||||
raise
|
||||
|
||||
return transp
|
||||
|
||||
|
||||
class IocpProactor:
|
||||
"""Proactor implementation using IOCP."""
|
||||
|
||||
def __init__(self, concurrency=INFINITE):
|
||||
self._loop = None
|
||||
self._results = []
|
||||
self._iocp = _overlapped.CreateIoCompletionPort(
|
||||
_overlapped.INVALID_HANDLE_VALUE, NULL, 0, concurrency)
|
||||
self._cache = {}
|
||||
self._registered = weakref.WeakSet()
|
||||
self._unregistered = []
|
||||
self._stopped_serving = weakref.WeakSet()
|
||||
|
||||
def _check_closed(self):
|
||||
if self._iocp is None:
|
||||
raise RuntimeError('IocpProactor is closed')
|
||||
|
||||
def __repr__(self):
|
||||
info = ['overlapped#=%s' % len(self._cache),
|
||||
'result#=%s' % len(self._results)]
|
||||
if self._iocp is None:
|
||||
info.append('closed')
|
||||
return '<%s %s>' % (self.__class__.__name__, " ".join(info))
|
||||
|
||||
def set_loop(self, loop):
|
||||
self._loop = loop
|
||||
|
||||
def select(self, timeout=None):
|
||||
if not self._results:
|
||||
self._poll(timeout)
|
||||
tmp = self._results
|
||||
self._results = []
|
||||
try:
|
||||
return tmp
|
||||
finally:
|
||||
# Needed to break cycles when an exception occurs.
|
||||
tmp = None
|
||||
|
||||
def _result(self, value):
|
||||
fut = self._loop.create_future()
|
||||
fut.set_result(value)
|
||||
return fut
|
||||
|
||||
@staticmethod
|
||||
def finish_socket_func(trans, key, ov):
|
||||
try:
|
||||
return ov.getresult()
|
||||
except OSError as exc:
|
||||
if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
|
||||
_overlapped.ERROR_OPERATION_ABORTED):
|
||||
raise ConnectionResetError(*exc.args)
|
||||
else:
|
||||
raise
|
||||
|
||||
@classmethod
|
||||
def _finish_recvfrom(cls, trans, key, ov, *, empty_result):
|
||||
try:
|
||||
return cls.finish_socket_func(trans, key, ov)
|
||||
except OSError as exc:
|
||||
# WSARecvFrom will report ERROR_PORT_UNREACHABLE when the same
|
||||
# socket is used to send to an address that is not listening.
|
||||
if exc.winerror == _overlapped.ERROR_PORT_UNREACHABLE:
|
||||
return empty_result, None
|
||||
else:
|
||||
raise
|
||||
|
||||
    def recv(self, conn, nbytes, flags=0):
        self._register_with_iocp(conn)
        ov = _overlapped.Overlapped(NULL)
        try:
            if isinstance(conn, socket.socket):
                ov.WSARecv(conn.fileno(), nbytes, flags)
            else:
                ov.ReadFile(conn.fileno(), nbytes)
        except BrokenPipeError:
            return self._result(b'')

        return self._register(ov, conn, self.finish_socket_func)

    def recv_into(self, conn, buf, flags=0):
        self._register_with_iocp(conn)
        ov = _overlapped.Overlapped(NULL)
        try:
            if isinstance(conn, socket.socket):
                ov.WSARecvInto(conn.fileno(), buf, flags)
            else:
                ov.ReadFileInto(conn.fileno(), buf)
        except BrokenPipeError:
            return self._result(0)

        return self._register(ov, conn, self.finish_socket_func)

    def recvfrom(self, conn, nbytes, flags=0):
        self._register_with_iocp(conn)
        ov = _overlapped.Overlapped(NULL)
        try:
            ov.WSARecvFrom(conn.fileno(), nbytes, flags)
        except BrokenPipeError:
            return self._result((b'', None))

        return self._register(ov, conn, partial(self._finish_recvfrom,
                                                empty_result=b''))

    def recvfrom_into(self, conn, buf, flags=0):
        self._register_with_iocp(conn)
        ov = _overlapped.Overlapped(NULL)
        try:
            ov.WSARecvFromInto(conn.fileno(), buf, flags)
        except BrokenPipeError:
            return self._result((0, None))

        return self._register(ov, conn, partial(self._finish_recvfrom,
                                                empty_result=0))

    def sendto(self, conn, buf, flags=0, addr=None):
        self._register_with_iocp(conn)
        ov = _overlapped.Overlapped(NULL)

        ov.WSASendTo(conn.fileno(), buf, flags, addr)

        return self._register(ov, conn, self.finish_socket_func)

    def send(self, conn, buf, flags=0):
        self._register_with_iocp(conn)
        ov = _overlapped.Overlapped(NULL)
        if isinstance(conn, socket.socket):
            ov.WSASend(conn.fileno(), buf, flags)
        else:
            ov.WriteFile(conn.fileno(), buf)

        return self._register(ov, conn, self.finish_socket_func)

    def accept(self, listener):
        self._register_with_iocp(listener)
        conn = self._get_accept_socket(listener.family)
        ov = _overlapped.Overlapped(NULL)
        ov.AcceptEx(listener.fileno(), conn.fileno())

        def finish_accept(trans, key, ov):
            ov.getresult()
            # Use SO_UPDATE_ACCEPT_CONTEXT so getsockname() etc work.
            buf = struct.pack('@P', listener.fileno())
            conn.setsockopt(socket.SOL_SOCKET,
                            _overlapped.SO_UPDATE_ACCEPT_CONTEXT, buf)
            conn.settimeout(listener.gettimeout())
            return conn, conn.getpeername()

        async def accept_coro(future, conn):
            # Coroutine closing the accept socket if the future is cancelled
            try:
                await future
            except exceptions.CancelledError:
                conn.close()
                raise

        future = self._register(ov, listener, finish_accept)
        coro = accept_coro(future, conn)
        tasks.ensure_future(coro, loop=self._loop)
        return future
    def connect(self, conn, address):
        if conn.type == socket.SOCK_DGRAM:
            # WSAConnect will complete immediately for UDP sockets, so we
            # don't need to register any IOCP operation.
            _overlapped.WSAConnect(conn.fileno(), address)
            fut = self._loop.create_future()
            fut.set_result(None)
            return fut

        self._register_with_iocp(conn)
        # The socket needs to be locally bound before we call ConnectEx().
        try:
            _overlapped.BindLocal(conn.fileno(), conn.family)
        except OSError as e:
            if e.winerror != errno.WSAEINVAL:
                raise
            # Probably already locally bound; check using getsockname().
            if conn.getsockname()[1] == 0:
                raise
        ov = _overlapped.Overlapped(NULL)
        ov.ConnectEx(conn.fileno(), address)

        def finish_connect(trans, key, ov):
            ov.getresult()
            # Use SO_UPDATE_CONNECT_CONTEXT so getsockname() etc work.
            conn.setsockopt(socket.SOL_SOCKET,
                            _overlapped.SO_UPDATE_CONNECT_CONTEXT, 0)
            return conn

        return self._register(ov, conn, finish_connect)

    def sendfile(self, sock, file, offset, count):
        self._register_with_iocp(sock)
        ov = _overlapped.Overlapped(NULL)
        offset_low = offset & 0xffff_ffff
        offset_high = (offset >> 32) & 0xffff_ffff
        ov.TransmitFile(sock.fileno(),
                        msvcrt.get_osfhandle(file.fileno()),
                        offset_low, offset_high,
                        count, 0, 0)

        return self._register(ov, sock, self.finish_socket_func)

    def accept_pipe(self, pipe):
        self._register_with_iocp(pipe)
        ov = _overlapped.Overlapped(NULL)
        connected = ov.ConnectNamedPipe(pipe.fileno())

        if connected:
            # ConnectNamedPipe() failed with ERROR_PIPE_CONNECTED, which means
            # that the pipe is already connected. There is no need to wait
            # for the completion of the connection.
            return self._result(pipe)

        def finish_accept_pipe(trans, key, ov):
            ov.getresult()
            return pipe

        return self._register(ov, pipe, finish_accept_pipe)

    async def connect_pipe(self, address):
        delay = CONNECT_PIPE_INIT_DELAY
        while True:
            # Unfortunately there is no way to do an overlapped connect to
            # a pipe. Call CreateFile() in a loop until it doesn't fail with
            # ERROR_PIPE_BUSY.
            try:
                handle = _overlapped.ConnectPipe(address)
                break
            except OSError as exc:
                if exc.winerror != _overlapped.ERROR_PIPE_BUSY:
                    raise

            # ConnectPipe() failed with ERROR_PIPE_BUSY: retry later
            delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)
            await tasks.sleep(delay)

        return windows_utils.PipeHandle(handle)
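
    # RegisterWaitWithQueue() arranges for a completion packet to be posted
    # to the IOCP once the handle is signalled, which lets arbitrary waitable
    # handles (e.g. process handles) be awaited through the same port.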
    def wait_for_handle(self, handle, timeout=None):
        """Wait for a handle.

        Return a Future object. The result of the future is True if the wait
        completed, or False if the wait did not complete (on timeout).
        """
        return self._wait_for_handle(handle, timeout, False)

    def _wait_cancel(self, event, done_callback):
        fut = self._wait_for_handle(event, None, True)
        # add_done_callback() cannot be used because the wait may only complete
        # in IocpProactor.close(), while the event loop is not running.
        fut._done_callback = done_callback
        return fut

    def _wait_for_handle(self, handle, timeout, _is_cancel):
        self._check_closed()

        if timeout is None:
            ms = _winapi.INFINITE
        else:
            # RegisterWaitForSingleObject() has a resolution of 1 millisecond,
            # round away from zero to wait *at least* timeout seconds.
            ms = math.ceil(timeout * 1e3)

        # We only create ov so we can use ov.address as a key for the cache.
        ov = _overlapped.Overlapped(NULL)
        wait_handle = _overlapped.RegisterWaitWithQueue(
            handle, self._iocp, ov.address, ms)
        if _is_cancel:
            f = _WaitCancelFuture(ov, handle, wait_handle, loop=self._loop)
        else:
            f = _WaitHandleFuture(ov, handle, wait_handle, self,
                                  loop=self._loop)
        if f._source_traceback:
            del f._source_traceback[-1]

        def finish_wait_for_handle(trans, key, ov):
            # Note that this second wait means that we should only use
            # this with handle types where a successful wait has no
            # effect. So events or processes are all right, but locks
            # or semaphores are not. Also note that if the handle is
            # signalled and then quickly reset, we may return False
            # even though we have not timed out.
            return f._poll()

        self._cache[ov.address] = (f, ov, 0, finish_wait_for_handle)
        return f

    def _register_with_iocp(self, obj):
        # To get notifications of finished ops on this object sent to the
        # completion port, we must register its handle.
        if obj not in self._registered:
            self._registered.add(obj)
            _overlapped.CreateIoCompletionPort(obj.fileno(), self._iocp, 0, 0)
            # XXX We could also use SetFileCompletionNotificationModes()
            # to avoid sending notifications to the completion port for ops
            # that succeed immediately.

    def _register(self, ov, obj, callback):
        self._check_closed()

        # Return a future which will be set with the result of the
        # operation when it completes. The future's value is actually
        # the value returned by callback().
        f = _OverlappedFuture(ov, loop=self._loop)
        if f._source_traceback:
            del f._source_traceback[-1]
        if not ov.pending:
            # The operation has completed, so no need to postpone the
            # work. We cannot take this short cut if we need the
            # NumberOfBytes, CompletionKey values returned by
            # PostQueuedCompletionStatus().
            try:
                value = callback(None, None, ov)
            except OSError as e:
                f.set_exception(e)
            else:
                f.set_result(value)
            # Even if GetOverlappedResult() was called, we have to wait for the
            # notification of the completion in GetQueuedCompletionStatus().
            # Register the overlapped operation to keep a reference to the
            # OVERLAPPED object, otherwise the memory is freed and Windows may
            # read uninitialized memory.

        # Register the overlapped operation for later. Note that
        # we only store obj to prevent it from being garbage
        # collected too early.
        self._cache[ov.address] = (f, ov, obj, callback)
        return f

    def _unregister(self, ov):
        """Unregister an overlapped object.

        Call this method when its future has been cancelled. The event can
        already be signalled (pending in the proactor event queue). It is also
        safe if the event is never signalled (because it was cancelled).
        """
        self._check_closed()
        self._unregistered.append(ov)

    def _get_accept_socket(self, family):
        s = socket.socket(family)
        s.settimeout(0)
        return s
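
    # _poll() drains the completion port: the first call to
    # GetQueuedCompletionStatus() may block for up to the given timeout,
    # after which ms is set to 0 so every already-queued completion is
    # collected without blocking again.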
    def _poll(self, timeout=None):
        if timeout is None:
            ms = INFINITE
        elif timeout < 0:
            raise ValueError("negative timeout")
        else:
            # GetQueuedCompletionStatus() has a resolution of 1 millisecond,
            # round away from zero to wait *at least* timeout seconds.
            ms = math.ceil(timeout * 1e3)
            if ms >= INFINITE:
                raise ValueError("timeout too big")

        while True:
            status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms)
            if status is None:
                break
            ms = 0

            err, transferred, key, address = status
            try:
                f, ov, obj, callback = self._cache.pop(address)
            except KeyError:
                if self._loop.get_debug():
                    self._loop.call_exception_handler({
                        'message': ('GetQueuedCompletionStatus() returned an '
                                    'unexpected event'),
                        'status': ('err=%s transferred=%s key=%#x address=%#x'
                                   % (err, transferred, key, address)),
                    })

                # key is either zero, or it is used to return a pipe
                # handle which should be closed to avoid a leak.
                if key not in (0, _overlapped.INVALID_HANDLE_VALUE):
                    _winapi.CloseHandle(key)
                continue

            if obj in self._stopped_serving:
                f.cancel()
            # Don't call the callback if _register() already read the result or
            # if the overlapped has been cancelled
            elif not f.done():
                try:
                    value = callback(transferred, key, ov)
                except OSError as e:
                    f.set_exception(e)
                    self._results.append(f)
                else:
                    f.set_result(value)
                    self._results.append(f)
                finally:
                    f = None

        # Remove unregistered futures
        for ov in self._unregistered:
            self._cache.pop(ov.address, None)
        self._unregistered.clear()

    def _stop_serving(self, obj):
        # obj is a socket or pipe handle. It will be closed in
        # BaseProactorEventLoop._stop_serving() which will make any
        # pending operations fail quickly.
        self._stopped_serving.add(obj)

    def close(self):
        if self._iocp is None:
            # already closed
            return

        # Cancel remaining registered operations.
        for fut, ov, obj, callback in list(self._cache.values()):
            if fut.cancelled():
                # Nothing to do with cancelled futures
                pass
            elif isinstance(fut, _WaitCancelFuture):
                # _WaitCancelFuture must not be cancelled
                pass
            else:
                try:
                    fut.cancel()
                except OSError as exc:
                    if self._loop is not None:
                        context = {
                            'message': 'Cancelling a future failed',
                            'exception': exc,
                            'future': fut,
                        }
                        if fut._source_traceback:
                            context['source_traceback'] = fut._source_traceback
                        self._loop.call_exception_handler(context)

        # Wait until all cancelled overlapped complete: don't exit with running
        # overlapped to prevent a crash. Display progress every second if the
        # loop is still running.
        msg_update = 1.0
        start_time = time.monotonic()
        next_msg = start_time + msg_update
        while self._cache:
            if next_msg <= time.monotonic():
                logger.debug('%r is running after closing for %.1f seconds',
                             self, time.monotonic() - start_time)
                next_msg = time.monotonic() + msg_update

            # handle a few events, or timeout
            self._poll(msg_update)

        self._results = []

        _winapi.CloseHandle(self._iocp)
        self._iocp = None

    def __del__(self):
        self.close()

class _WindowsSubprocessTransport(base_subprocess.BaseSubprocessTransport):

    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
        self._proc = windows_utils.Popen(
            args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
            bufsize=bufsize, **kwargs)

        def callback(f):
            returncode = self._proc.poll()
            self._process_exited(returncode)

        f = self._loop._proactor.wait_for_handle(int(self._proc._handle))
        f.add_done_callback(callback)


SelectorEventLoop = _WindowsSelectorEventLoop


class WindowsSelectorEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
    _loop_factory = SelectorEventLoop


class WindowsProactorEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
    _loop_factory = ProactorEventLoop


DefaultEventLoopPolicy = WindowsProactorEventLoopPolicy
EventLoop = ProactorEventLoop
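
# Illustrative sketch (assumes Python 3.8+ on win32): the proactor loop is
# the default policy above, so plain asyncio.run() already drives subprocess
# I/O through IOCP without any extra setup:
#
#     import asyncio
#
#     async def main():
#         proc = await asyncio.create_subprocess_exec('cmd', '/c', 'echo hi')
#         await proc.wait()
#
#     asyncio.run(main())  # runs on ProactorEventLoop on Windows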
173
Dependencies/Python/Lib/asyncio/windows_utils.py
vendored
Normal file
@@ -0,0 +1,173 @@
"""Various Windows specific bits and pieces."""

import sys

if sys.platform != 'win32':  # pragma: no cover
    raise ImportError('win32 only')

import _winapi
import itertools
import msvcrt
import os
import subprocess
import tempfile
import warnings


__all__ = 'pipe', 'Popen', 'PIPE', 'PipeHandle'


# Constants/globals


BUFSIZE = 8192
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
_mmap_counter = itertools.count()


# Replacement for os.pipe() using handles instead of fds


def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
    """Like os.pipe() but with overlapped support and using handles not fds."""
    address = tempfile.mktemp(
        prefix=r'\\.\pipe\python-pipe-{:d}-{:d}-'.format(
            os.getpid(), next(_mmap_counter)))

    if duplex:
        openmode = _winapi.PIPE_ACCESS_DUPLEX
        access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
        obsize, ibsize = bufsize, bufsize
    else:
        openmode = _winapi.PIPE_ACCESS_INBOUND
        access = _winapi.GENERIC_WRITE
        obsize, ibsize = 0, bufsize

    openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE

    if overlapped[0]:
        openmode |= _winapi.FILE_FLAG_OVERLAPPED

    if overlapped[1]:
        flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED
    else:
        flags_and_attribs = 0

    h1 = h2 = None
    try:
        h1 = _winapi.CreateNamedPipe(
            address, openmode, _winapi.PIPE_WAIT,
            1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)

        h2 = _winapi.CreateFile(
            address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
            flags_and_attribs, _winapi.NULL)

        ov = _winapi.ConnectNamedPipe(h1, overlapped=True)
        ov.GetOverlappedResult(True)
        return h1, h2
    except:
        if h1 is not None:
            _winapi.CloseHandle(h1)
        if h2 is not None:
            _winapi.CloseHandle(h2)
        raise
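
# Although the pipe name comes from tempfile.mktemp(), the race that makes
# mktemp() unsafe for regular files does not apply here:
# FILE_FLAG_FIRST_PIPE_INSTANCE makes CreateNamedPipe() fail if a pipe with
# that name already exists.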


# Wrapper for a pipe handle


class PipeHandle:
    """Wrapper for an overlapped pipe handle which is vaguely file-object like.

    The IOCP event loop can use these instead of socket objects.
    """
    def __init__(self, handle):
        self._handle = handle

    def __repr__(self):
        if self._handle is not None:
            handle = f'handle={self._handle!r}'
        else:
            handle = 'closed'
        return f'<{self.__class__.__name__} {handle}>'

    @property
    def handle(self):
        return self._handle

    def fileno(self):
        if self._handle is None:
            raise ValueError("I/O operation on closed pipe")
        return self._handle

    def close(self, *, CloseHandle=_winapi.CloseHandle):
        if self._handle is not None:
            CloseHandle(self._handle)
            self._handle = None

    def __del__(self, _warn=warnings.warn):
        if self._handle is not None:
            _warn(f"unclosed {self!r}", ResourceWarning, source=self)
            self.close()

    def __enter__(self):
        return self

    def __exit__(self, t, v, tb):
        self.close()


# Replacement for subprocess.Popen using overlapped pipe handles
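
# The parent keeps the overlapped end of each pipe (wrapped in PipeHandle for
# use with the proactor), while the child inherits a regular fd created with
# msvcrt.open_osfhandle(); the parent's copies of the child-side fds are
# closed in the finally block once the child has been spawned.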


class Popen(subprocess.Popen):
    """Replacement for subprocess.Popen using overlapped pipe handles.

    The stdin, stdout, stderr are None or instances of PipeHandle.
    """
    def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwds):
        assert not kwds.get('universal_newlines')
        assert kwds.get('bufsize', 0) == 0
        stdin_rfd = stdout_wfd = stderr_wfd = None
        stdin_wh = stdout_rh = stderr_rh = None
        if stdin == PIPE:
            stdin_rh, stdin_wh = pipe(overlapped=(False, True), duplex=True)
            stdin_rfd = msvcrt.open_osfhandle(stdin_rh, os.O_RDONLY)
        else:
            stdin_rfd = stdin
        if stdout == PIPE:
            stdout_rh, stdout_wh = pipe(overlapped=(True, False))
            stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0)
        else:
            stdout_wfd = stdout
        if stderr == PIPE:
            stderr_rh, stderr_wh = pipe(overlapped=(True, False))
            stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0)
        elif stderr == STDOUT:
            stderr_wfd = stdout_wfd
        else:
            stderr_wfd = stderr
        try:
            super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd,
                             stderr=stderr_wfd, **kwds)
        except:
            for h in (stdin_wh, stdout_rh, stderr_rh):
                if h is not None:
                    _winapi.CloseHandle(h)
            raise
        else:
            if stdin_wh is not None:
                self.stdin = PipeHandle(stdin_wh)
            if stdout_rh is not None:
                self.stdout = PipeHandle(stdout_rh)
            if stderr_rh is not None:
                self.stderr = PipeHandle(stderr_rh)
        finally:
            if stdin == PIPE:
                os.close(stdin_rfd)
            if stdout == PIPE:
                os.close(stdout_wfd)
            if stderr == PIPE:
                os.close(stderr_wfd)
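
# Minimal sketch of direct use (assumes a console command; asyncio normally
# drives this class through _WindowsSubprocessTransport instead):
#
#     p = Popen(['cmd', '/c', 'echo hi'], stdout=PIPE)
#     # p.stdout is a PipeHandle suitable for overlapped reads via the
#     # proactor; p.wait() still works as in subprocess.Popen.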