| signature (string, lengths 8–3.44k) | body (string, lengths 0–1.41M) | docstring (string, lengths 1–122k) | id (string, lengths 5–17) |
|---|---|---|---|
def start(self, messages_to_driver, messages_to_bundle_engine):
|
os.setpgrp()<EOL>self.service_bundle.hitch_dir.save_pgid("<STR_LIT>", os.getpgid(os.getpid()))<EOL>self.service_engine = ServiceEngine(self, self.service_bundle.values())<EOL>self.start_time = time.time()<EOL>self.messages_to_driver = messages_to_driver<EOL>self.messages_to_bundle_engine = messages_to_bundle_engine<EOL>os.chdir(self.service_bundle.hitch_dir.project_directory)<EOL>self.logfile = open(self.service_bundle.hitch_dir.testlog(), "<STR_LIT:a>")<EOL>self.loop = pyuv.Loop.default_loop()<EOL>self.pipe_logfile = pyuv.Pipe(self.loop)<EOL>self.pipe_logfile.open(self.logfile.fileno())<EOL>if not self.service_bundle.quiet:<EOL><INDENT>self.pipe_stdout = pyuv.Pipe(self.loop)<EOL>self.pipe_stdout.open(sys.stdout.fileno())<EOL><DEDENT>self.service_engine.start_services_without_prerequisites()<EOL>self.timer_handler = pyuv.Timer(self.loop)<EOL>self.timer_handler.start(self.poll_handler, <NUM_LIT>, <NUM_LIT>)<EOL>self.tail_handles = []<EOL>self.tail_positions = {}<EOL>self.initiate_tail("<STR_LIT>", "<STR_LIT>", None)<EOL>self.initiate_tail("<STR_LIT>", "<STR_LIT>", Fore.YELLOW)<EOL>self.service_engine.tail_setup_and_poststart()<EOL>self.signal_h = pyuv.Signal(self.loop)<EOL>self.signal_h.start(self.signal_cb, signal.SIGINT)<EOL>self.signal_h.start(self.signal_cb, signal.SIGTERM)<EOL>self.loop.run()<EOL>self.loop = None<EOL>os.kill(os.getpid(), signal.SIGKILL)<EOL>
|
Orchestrate processes and I/O pipes.
|
f5346:c0:m4
|
def _close_pipes(self):
|
if not self.service_bundle.quiet:<EOL><INDENT>if not self.pipe_stdout.closed:<EOL><INDENT>self.pipe_stdout.close()<EOL><DEDENT><DEDENT>for pipe in self.service_engine.pipes():<EOL><INDENT>if pipe is not None and not pipe.closed:<EOL><INDENT>pipe.close()<EOL><DEDENT><DEDENT>if not self.signal_h.closed:<EOL><INDENT>self.signal_h.close()<EOL><DEDENT>if not self.timer_handler.closed:<EOL><INDENT>self.timer_handler.close()<EOL><DEDENT>for handle in self.tail_handles:<EOL><INDENT>handle.close()<EOL><DEDENT>self.logfile.close()<EOL>
|
Close all the pipes in order to shut the engine down.
|
f5346:c0:m5
|
def writeline(self, identifier, line, color='<STR_LIT>'):
|
reset_all = Fore.RESET + Back.RESET + Style.RESET_ALL<EOL>full_line = "<STR_LIT>".format(<EOL>reset_all,<EOL>color,<EOL>identifier.rjust(self.service_engine.longest_service_name() + <NUM_LIT:10>),<EOL>line,<EOL>reset_all<EOL>)<EOL>if not self.ipython_on and not self.service_bundle.quiet:<EOL><INDENT>self.pipe_stdout.write(full_line.encode('<STR_LIT:utf-8>'))<EOL><DEDENT>self.pipe_logfile.write(full_line.encode('<STR_LIT:utf-8>'))<EOL>
|
Log a line to the log file and/or stdout.
|
f5346:c0:m6
|
def poll_handler(self, timer_handle):
|
self.service_engine.poll()<EOL>if not self._ready and self.service_engine.all_services_ready():<EOL><INDENT>startup_duration = time.time() - self.start_time<EOL>self.logline("<STR_LIT>".format(startup_duration), color=Style.BRIGHT)<EOL>self.messages_to_driver.put("<STR_LIT>")<EOL>self._ready = True<EOL><DEDENT>if not self.messages_to_bundle_engine.empty():<EOL><INDENT>msg = self.messages_to_bundle_engine.get()<EOL>if not self._driver_sent_shutdown_signal and msg == "<STR_LIT>":<EOL><INDENT>self._driver_sent_shutdown_signal = True<EOL>self.stop()<EOL><DEDENT>if msg == "<STR_LIT>":<EOL><INDENT>self.ipython_on = True<EOL><DEDENT>elif msg == "<STR_LIT>":<EOL><INDENT>self.ipython_on = False<EOL><DEDENT><DEDENT>if not self._ready and not self._timedout:<EOL><INDENT>if time.time() - self.start_time > self.service_bundle.startup_timeout:<EOL><INDENT>self.warnline("<STR_LIT>")<EOL>self._timedout = True<EOL>self.messages_to_driver.put(<EOL>ServiceStartupTimeoutException(<EOL>"<STR_LIT>".format(<EOL>self.service_bundle.startup_timeout,<EOL>"<STR_LIT:U+002CU+0020>".join([x.name for x in self.service_engine.not_ready_services()]),<EOL>)<EOL>)<EOL>)<EOL><DEDENT><DEDENT>
|
Handle messages from the test thread and check for startup timeout.
|
f5346:c0:m10
|
def stop(self):
|
if not self._shutdown_triggered:<EOL><INDENT>self._shutdown_triggered = True<EOL>self.ipython_on = False<EOL>start_shutdown_time = time.time()<EOL>for child in psutil.Process(os.getppid()).children():<EOL><INDENT>if child is not None and child.is_running() and child.pid != os.getpid():<EOL><INDENT>for grandchild in psutil.Process(child.pid).children(recursive=True):<EOL><INDENT>try:<EOL><INDENT>grandchild.send_signal(signal.SIGINT)<EOL>self.logline("<STR_LIT>".format(grandchild.pid, "<STR_LIT:U+0020>".join(grandchild.cmdline())))<EOL><DEDENT>except psutil.NoSuchProcess:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>try:<EOL><INDENT>child.send_signal(signal.SIGINT)<EOL>self.logline("<STR_LIT>".format(child.pid, "<STR_LIT:U+0020>".join(child.cmdline())))<EOL><DEDENT>except psutil.NoSuchProcess:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>self.service_engine.stop()<EOL>still_alive = False<EOL>if time.time() < start_shutdown_time + self.service_bundle.shutdown_timeout:<EOL><INDENT>sleep_time = self.service_bundle.shutdown_timeout - (time.time() - start_shutdown_time)<EOL>for i in range(<NUM_LIT:0>, int(sleep_time * <NUM_LIT:100>)):<EOL><INDENT>still_alive = False<EOL>for child in psutil.Process(os.getppid()).children():<EOL><INDENT>if child is not None and child.is_running() and child.pid != os.getpid():<EOL><INDENT>for grandchild in psutil.Process(child.pid).children(recursive=True):<EOL><INDENT>try:<EOL><INDENT>if grandchild.status != "<STR_LIT>":<EOL><INDENT>still_alive = True<EOL><DEDENT><DEDENT>except psutil.NoSuchProcess:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if not still_alive:<EOL><INDENT>break<EOL><DEDENT>time.sleep(<NUM_LIT>)<EOL><DEDENT><DEDENT>if still_alive:<EOL><INDENT>for child in psutil.Process(os.getppid()).children():<EOL><INDENT>if child is not None and child.is_running() and child.pid != os.getpid():<EOL><INDENT>for grandchild in psutil.Process(child.pid).children(recursive=True):<EOL><INDENT>try:<EOL><INDENT>fullname = "<STR_LIT:U+0020>".join(grandchild.cmdline())<EOL>grandchild.send_signal(signal.SIGKILL)<EOL>self.logline("<STR_LIT>".format(grandchild.pid, grandchild.name))<EOL><DEDENT>except psutil.NoSuchProcess:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>try:<EOL><INDENT>child.send_signal(signal.SIGKILL)<EOL>self.logline("<STR_LIT>".format(child.pid, child.name()))<EOL><DEDENT>except psutil.NoSuchProcess:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT><DEDENT>self.logline("<STR_LIT>".format(time.time() - start_shutdown_time))<EOL>self._close_pipes()<EOL><DEDENT>
|
Shut down all hitchserve services and all processes started from the driver process, killing them if necessary.
|
f5346:c0:m11
|
def log(message):
|
import sys<EOL>_write(sys.stdout, message)<EOL>
|
Output to stdout.
|
f5347:m1
|
def warn(message):
|
import sys<EOL>_write(sys.stderr, message)<EOL>
|
Output to stderr.
|
f5347:m2
|
def __init__(self, *args, **kwargs):
|
self.command = list(args)<EOL>self.directory = kwargs['<STR_LIT>'] if '<STR_LIT>' in kwargs else None<EOL>self.env_vars = kwargs['<STR_LIT>'] if '<STR_LIT>' in kwargs else None<EOL>
|
Define a subcommand.
Args:
*args (str): Sequence of program arguments needed to run the command.
directory (Optional[str]): Directory the command is run in.
env_vars (Optional[dict]): Environment variables to feed to the subcommand.
|
f5348:c1:m0
|
@property<EOL><INDENT>def directory(self):<DEDENT>
|
return self._directory<EOL>
|
str: Directory subcommand runs in.
|
f5348:c1:m1
|
def run(self, shell=False, ignore_errors=False, stdin=False, check_output=False):
|
previous_directory = os.getcwd()<EOL>os.chdir(self.directory)<EOL>try:<EOL><INDENT>kwargs = {<EOL>'<STR_LIT>': sys.stderr,<EOL>'<STR_LIT>': sys.stdin if stdin else None,<EOL>'<STR_LIT>': self.env_vars,<EOL>'<STR_LIT>': shell,<EOL>}<EOL>if check_output:<EOL><INDENT>return subprocess.check_output(self.command, **kwargs).decode("<STR_LIT:utf8>")<EOL><DEDENT>else:<EOL><INDENT>kwargs['<STR_LIT>'] = sys.stdout<EOL>return subprocess.check_call(self.command, **kwargs)<EOL><DEDENT><DEDENT>except subprocess.CalledProcessError:<EOL><INDENT>if ignore_errors:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>os.chdir(previous_directory)<EOL>
|
Run subcommand.
Args:
shell (Optional[bool]): Run command using shell (default False)
ignore_errors (Optional[bool]): If the command has a non-zero return code, don't raise an exception (default False)
stdin (Optional[bool]): Plug input from stdin when running command (default False)
check_output (Optional[bool]): Return command output as string (default False)
Returns:
String if check_output is True, else None.
Raises:
subprocess.CalledProcessError when the command exits with a non-zero return code, unless ignore_errors is True.
|
f5348:c1:m3
|
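The `run` body in this row wraps `subprocess.check_call`/`check_output` with a temporary directory change. A minimal, self-contained sketch of the same pattern (the helper name is made up, and the directory restore is moved into a `finally` block):

```python
import os
import subprocess
import sys

def run_command(command, directory=None, env_vars=None,
                ignore_errors=False, check_output=False):
    """Run a command in a directory, optionally capturing its output."""
    previous_directory = os.getcwd()
    if directory is not None:
        os.chdir(directory)
    try:
        if check_output:
            return subprocess.check_output(
                command, stderr=sys.stderr, env=env_vars
            ).decode("utf8")
        return subprocess.check_call(
            command, stdout=sys.stdout, stderr=sys.stderr, env=env_vars
        )
    except subprocess.CalledProcessError:
        if not ignore_errors:
            raise
    finally:
        # Always return to the original directory, even on error.
        os.chdir(previous_directory)

# Example: capture the output of a trivial command.
print(run_command(["echo", "hello"], check_output=True))
```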
def __init__(self, command=None, log_line_ready_checker=None, directory=None, no_libfaketime=False, env_vars=None, stop_signal=signal.SIGINT, needs=None):
|
if type(command) is str:<EOL><INDENT>raise ServiceMisconfiguration((<EOL>"<STR_LIT>"<EOL>"<STR_LIT>"<EOL>))<EOL><DEDENT>if isinstance(command, commandlib.Command):<EOL><INDENT>self.command = command.arguments<EOL>if env_vars is None:<EOL><INDENT>self.env_vars = command.env<EOL><DEDENT>else:<EOL><INDENT>self.env_vars = command.env<EOL>self.env_vars.update(env_vars)<EOL><DEDENT>self.directory = str(command.directory) if command.directory is not None else str(directory)<EOL><DEDENT>else:<EOL><INDENT>self.command = [str(arg) for arg in command] if command is not None else command<EOL>self.env_vars = {} if env_vars is None else env_vars<EOL>self.directory = directory<EOL><DEDENT>self.no_libfaketime = no_libfaketime<EOL>self.needs = needs<EOL>self.log_line_ready_checker = log_line_ready_checker<EOL>self.stop_signal = stop_signal<EOL>self._pid = multiprocessing.Value('<STR_LIT:i>', <NUM_LIT:0>)<EOL>if not inspect.isfunction(log_line_ready_checker):<EOL><INDENT>raise ServiceMisconfiguration(<EOL>"<STR_LIT>"<EOL>)<EOL><DEDENT>if len(inspect.getargspec(log_line_ready_checker).args) != <NUM_LIT:1>:<EOL><INDENT>raise ServiceMisconfiguration(<EOL>"<STR_LIT>"<EOL>)<EOL><DEDENT>if needs is not None:<EOL><INDENT>if type(needs) != list:<EOL><INDENT>raise ServiceMisconfiguration(<EOL>"<STR_LIT>"<EOL>)<EOL><DEDENT>for need in needs:<EOL><INDENT>if not isinstance(need, Service):<EOL><INDENT>raise ServiceMisconfiguration(<EOL>"<STR_LIT>"<EOL>)<EOL><DEDENT><DEDENT><DEDENT>
|
Define and configure a service.
Each service has a command, directory and function which checks
each line of the logs to ascertain readiness.
Args:
command (List[str] or commandlib.Command): Sequence of program arguments needed to run the service.
log_line_ready_checker (Function[str]): Function which returns True when passed a line which indicates service readiness.
directory (Optional[str]): Directory the service command is run in. Defaults to project directory specified in service bundle.
no_libfaketime (Optional[bool]): If True, don't run service with libfaketime. Useful if libfaketime breaks the service.
env_vars (Optional[dict]): Dictionary of environment variables to feed to the service when running it.
needs (Optional[List[Service]]): List of services which must be started before this service will run.
stop_signal (Optional[int]): First signal to send to service when shutting it down (default: signal.SIGINT).
Raises:
ServiceMisconfiguration when the wrong parameters are passed.
|
f5348:c2:m0
|
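The constructor in this row requires `log_line_ready_checker` to be a one-argument function. A hedged sketch of such a checker and of the argument-count validation (the service name and log text are made up, and `inspect.getfullargspec` stands in for the deprecated `getargspec` used in the row):

```python
import inspect

def postgres_ready(line):
    """Return True when a log line indicates the service is ready."""
    return "database system is ready to accept connections" in line

def validate_checker(checker):
    """Mirror the validation done in the constructor above."""
    if not inspect.isfunction(checker):
        raise ValueError("log_line_ready_checker must be a function")
    if len(inspect.getfullargspec(checker).args) != 1:
        raise ValueError("log_line_ready_checker must take exactly one argument")

validate_checker(postgres_ready)
print(postgres_ready("LOG:  database system is ready to accept connections"))
```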
def setup(self):
|
pass<EOL>
|
Method that is run before starting the service.
|
f5348:c2:m1
|
def poststart(self):
|
pass<EOL>
|
Method that is run immediately after detecting that the service has started.
|
f5348:c2:m2
|
@property<EOL><INDENT>def pid(self):<DEDENT>
|
return self._pid.value<EOL>
|
int: UNIX process id of the service process.
|
f5348:c2:m3
|
@property<EOL><INDENT>def process(self):<DEDENT>
|
return psutil.Process(self.pid)<EOL>
|
psutil.Process: psutil Process object of the service.
|
f5348:c2:m5
|
@property<EOL><INDENT>def name(self):<DEDENT>
|
return self._name<EOL>
|
str: Service name.
|
f5348:c2:m6
|
@property<EOL><INDENT>def env_vars(self):<DEDENT>
|
if not self.no_libfaketime:<EOL><INDENT>faketime_filename = self.service_group.hitch_dir.faketime()<EOL>env_vars = dict(os.environ)<EOL>env_vars.update(self._env_vars)<EOL>env_vars.update(faketime.get_environment_vars(faketime_filename))<EOL><DEDENT>else:<EOL><INDENT>env_vars = dict(os.environ)<EOL>env_vars.update(self._env_vars)<EOL><DEDENT>return env_vars<EOL>
|
dict: All environment variables fed to the service.
|
f5348:c2:m8
|
@property<EOL><INDENT>def directory(self):<DEDENT>
|
if self._directory is None:<EOL><INDENT>return self.service_group.hitch_dir.hitch_dir<EOL><DEDENT>else:<EOL><INDENT>return self._directory<EOL><DEDENT>
|
str: Directory that the service is run in.
|
f5348:c2:m10
|
@property<EOL><INDENT>def command(self):<DEDENT>
|
return self._command<EOL>
|
List[str]: Command used to run the service.
|
f5348:c2:m12
|
def subcommand(self, *args):
|
return Subcommand(*args, directory=self.directory, env_vars=self.env_vars)<EOL>
|
Get a subcommand acting on this service. The subcommand will run in the service's directory
and with the environment variables used to run the service itself.
Args:
*args: Arguments to run command (e.g. "redis-cli", "-n", "1")
Returns:
Subcommand object.
|
f5348:c2:m16
|
def __init__(self, address, protocol, auth):
|
if not isinstance(address, NetAddress):<EOL><INDENT>address = NetAddress.from_string(address)<EOL><DEDENT>self.address = address<EOL>self.protocol = protocol<EOL>self.auth = auth<EOL>self.peername = None<EOL>
|
A SOCKS proxy at a NetAddress following a SOCKS protocol.
auth is an authentication method to use when connecting, or None.
|
f5358:c8:m0
|
async def _connect_one(self, remote_address):
|
loop = asyncio.get_event_loop()<EOL>for info in await loop.getaddrinfo(str(self.address.host), self.address.port,<EOL>type=socket.SOCK_STREAM):<EOL><INDENT>client = self.protocol(remote_address, self.auth)<EOL>sock = socket.socket(family=info[<NUM_LIT:0>])<EOL>try:<EOL><INDENT>sock.setblocking(False)<EOL>await loop.sock_connect(sock, info[<NUM_LIT:4>])<EOL>await self._handshake(client, sock, loop)<EOL>self.peername = sock.getpeername()<EOL>return sock<EOL><DEDENT>except (OSError, SOCKSProtocolError) as e:<EOL><INDENT>exception = e<EOL><DEDENT><DEDENT>return exception<EOL>
|
Connect to the proxy and perform a handshake requesting a connection.
Return the open socket on success, or the exception on failure.
|
f5358:c8:m3
|
async def _connect(self, remote_addresses):
|
assert remote_addresses<EOL>exceptions = []<EOL>for remote_address in remote_addresses:<EOL><INDENT>sock = await self._connect_one(remote_address)<EOL>if isinstance(sock, socket.socket):<EOL><INDENT>return sock, remote_address<EOL><DEDENT>exceptions.append(sock)<EOL><DEDENT>strings = set(f'<STR_LIT>' for exc in exceptions)<EOL>raise (exceptions[<NUM_LIT:0>] if len(strings) == <NUM_LIT:1> else<EOL>OSError(f'<STR_LIT>'))<EOL>
|
Connect to the proxy and perform a handshake requesting a connection to each address in
remote_addresses in turn, until one succeeds.
Return an (open_socket, remote_address) pair on success.
|
f5358:c8:m4
|
async def _detect_proxy(self):
|
if self.protocol is SOCKS4a:<EOL><INDENT>remote_address = NetAddress('<STR_LIT>', <NUM_LIT>)<EOL><DEDENT>else:<EOL><INDENT>remote_address = NetAddress('<STR_LIT>', <NUM_LIT>)<EOL><DEDENT>sock = await self._connect_one(remote_address)<EOL>if isinstance(sock, socket.socket):<EOL><INDENT>sock.close()<EOL>return True<EOL><DEDENT>return isinstance(sock, SOCKSFailure)<EOL>
|
Return True if it appears we can connect to a SOCKS proxy,
otherwise False.
|
f5358:c8:m5
|
@classmethod<EOL><INDENT>async def auto_detect_at_address(cls, address, auth):<DEDENT>
|
for protocol in (SOCKS5, SOCKS4a, SOCKS4):<EOL><INDENT>proxy = cls(address, protocol, auth)<EOL>if await proxy._detect_proxy():<EOL><INDENT>return proxy<EOL><DEDENT><DEDENT>return None<EOL>
|
Try to detect a SOCKS proxy at address using the authentication method (or None).
SOCKS5, SOCKS4a and SOCKS4 are tried in order. If a SOCKS proxy is detected, a
SOCKSProxy object is returned.
Returning a SOCKSProxy does not mean it is functioning - for example, it may have
no network connectivity.
If no proxy is detected return None.
|
f5358:c8:m6
|
@classmethod<EOL><INDENT>async def auto_detect_at_host(cls, host, ports, auth):<DEDENT>
|
for port in ports:<EOL><INDENT>proxy = await cls.auto_detect_at_address(NetAddress(host, port), auth)<EOL>if proxy:<EOL><INDENT>return proxy<EOL><DEDENT><DEDENT>return None<EOL>
|
Try to detect a SOCKS proxy on a host on one of the ports.
Calls auto_detect_at_address for the ports in order. Returning a SOCKSProxy does not
mean it is functioning - for example, it may have no network connectivity.
If no proxy is detected return None.
|
f5358:c8:m7
|
async def create_connection(self, protocol_factory, host, port, *,<EOL>resolve=False, ssl=None,<EOL>family=<NUM_LIT:0>, proto=<NUM_LIT:0>, flags=<NUM_LIT:0>):
|
loop = asyncio.get_event_loop()<EOL>if resolve:<EOL><INDENT>remote_addresses = [NetAddress(info[<NUM_LIT:4>][<NUM_LIT:0>], info[<NUM_LIT:4>][<NUM_LIT:1>]) for info in<EOL>await loop.getaddrinfo(host, port, family=family, proto=proto,<EOL>type=socket.SOCK_STREAM, flags=flags)]<EOL><DEDENT>else:<EOL><INDENT>remote_addresses = [NetAddress(host, port)]<EOL><DEDENT>sock, remote_address = await self._connect(remote_addresses)<EOL>def set_address():<EOL><INDENT>protocol = protocol_factory()<EOL>protocol._proxy = self<EOL>protocol._remote_address = remote_address<EOL>return protocol<EOL><DEDENT>return await loop.create_connection(set_address, sock=sock, ssl=ssl,<EOL>server_hostname=host if ssl else None)<EOL>
|
Set up a connection to (host, port) through the proxy.
If resolve is True then host is resolved locally with
getaddrinfo using family, proto and flags, otherwise the proxy
is asked to resolve host.
The function signature is similar to loop.create_connection()
with the same result. The attribute _remote_address is set on the
protocol to the address of the successful remote connection.
Additionally raises SOCKSError if something goes wrong with
the proxy handshake.
|
f5358:c8:m8
|
def is_valid_hostname(hostname):
|
if not isinstance(hostname, str):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if hostname and hostname[-<NUM_LIT:1>] == "<STR_LIT:.>":<EOL><INDENT>hostname = hostname[:-<NUM_LIT:1>]<EOL><DEDENT>if not hostname or len(hostname) > <NUM_LIT>:<EOL><INDENT>return False<EOL><DEDENT>labels = hostname.split('<STR_LIT:.>')<EOL>if re.match(NUMERIC_REGEX, labels[-<NUM_LIT:1>]):<EOL><INDENT>return False<EOL><DEDENT>return all(LABEL_REGEX.match(label) for label in labels)<EOL>
|
Return True if hostname is valid, otherwise False.
|
f5359:m0
|
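The body in this row relies on `LABEL_REGEX` and `NUMERIC_REGEX`, which are masked in the dataset; a sketch with assumed RFC-1123-style patterns and an assumed 253-character length limit:

```python
import re

# Assumed patterns and limits -- the dataset row does not show the real literals.
LABEL_REGEX = re.compile(r'^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$', re.IGNORECASE)
NUMERIC_REGEX = re.compile(r'^[0-9]+$')

def is_valid_hostname(hostname):
    """Return True if hostname looks like a valid DNS hostname."""
    if hostname and hostname[-1] == ".":      # allow one trailing dot
        hostname = hostname[:-1]
    if not hostname or len(hostname) > 253:   # overall length limit (assumed)
        return False
    labels = hostname.split(".")
    if NUMERIC_REGEX.match(labels[-1]):       # last label must not be purely numeric
        return False
    return all(LABEL_REGEX.match(label) for label in labels)

print(is_valid_hostname("example.com"), is_valid_hostname("256.1.1.1"))
```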
def classify_host(host):
|
if isinstance(host, (IPv4Address, IPv6Address)):<EOL><INDENT>return host<EOL><DEDENT>if is_valid_hostname(host):<EOL><INDENT>return host<EOL><DEDENT>return ip_address(host)<EOL>
|
Host is an IPv4Address, IPv6Address or a string.
If an IPv4Address or IPv6Address return it. Otherwise convert the string to an
IPv4Address or IPv6Address object if possible and return it. Otherwise return the
original string if it is a valid hostname.
Raise ValueError if a string cannot be interpreted as an IP address and it is not
a valid hostname.
|
f5359:m1
|
def validate_port(port):
|
if not isinstance(port, (str, int)):<EOL><INDENT>raise TypeError(f'<STR_LIT>')<EOL><DEDENT>if isinstance(port, str) and port.isdigit():<EOL><INDENT>port = int(port)<EOL><DEDENT>if isinstance(port, int) and <NUM_LIT:0> < port <= <NUM_LIT>:<EOL><INDENT>return port<EOL><DEDENT>raise ValueError(f'<STR_LIT>')<EOL>
|
Validate a port and return it as an integer.
An integer, or its string representation, is accepted.
|
f5359:m2
|
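A self-contained sketch of the port check described above; the 65535 upper bound is the standard TCP/UDP port range, assumed here because the row's numeric literal is masked:

```python
def validate_port(port):
    """Return the port as an int; accept an int or a digit string."""
    if not isinstance(port, (str, int)):
        raise TypeError(f"port must be an integer or string: {port}")
    if isinstance(port, str) and port.isdigit():
        port = int(port)
    if isinstance(port, int) and 0 < port <= 65535:
        return port
    raise ValueError(f"invalid port: {port}")

print(validate_port("8080"), validate_port(443))
```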
def validate_protocol(protocol):
|
if not re.match(PROTOCOL_REGEX, protocol):<EOL><INDENT>raise ValueError(f'<STR_LIT>')<EOL><DEDENT>return protocol.lower()<EOL>
|
Validate a protocol string and return it in lower case.
|
f5359:m3
|
def is_async_call(func):
|
while isinstance(func, partial):<EOL><INDENT>func = func.func<EOL><DEDENT>return inspect.iscoroutinefunction(func)<EOL>
|
inspect.iscoroutinefunction that looks through partials.
|
f5359:m6
|
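A runnable illustration of the partial-unwrapping trick in the row above (the `fetch` coroutine is made up):

```python
import inspect
from functools import partial

def is_async_call(func):
    """inspect.iscoroutinefunction that looks through functools.partial."""
    while isinstance(func, partial):
        func = func.func
    return inspect.iscoroutinefunction(func)

async def fetch(url, timeout):
    return url, timeout

# Even a doubly-wrapped partial is unwrapped down to the coroutine function.
wrapped = partial(partial(fetch, "https://example.org"), timeout=5)
print(is_async_call(wrapped))   # True
print(is_async_call(print))     # False
```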
def __init__(self, host, port):
|
self._host = classify_host(host)<EOL>self._port = validate_port(port)<EOL>
|
Construct a NetAddress from a host and a port.
The host is classified with classify_host() and the port is validated as an integer.
|
f5359:c1:m0
|
@classmethod<EOL><INDENT>def from_string(cls, string, *, default_func=None):<DEDENT>
|
if not isinstance(string, str):<EOL><INDENT>raise TypeError(f'<STR_LIT>')<EOL><DEDENT>host, port = _split_address(string)<EOL>if default_func:<EOL><INDENT>host = host or default_func(ServicePart.HOST)<EOL>port = port or default_func(ServicePart.PORT)<EOL>if not host or not port:<EOL><INDENT>raise ValueError(f'<STR_LIT>')<EOL><DEDENT><DEDENT>return cls(host, port)<EOL>
|
Construct a NetAddress from a 'host:port' string.
If either (or both) is missing and default_func is provided, it is called with
ServicePart.HOST or ServicePart.PORT to get a default.
|
f5359:c1:m3
|
def __init__(self, protocol, address):
|
self._protocol = validate_protocol(protocol)<EOL>if not isinstance(address, NetAddress):<EOL><INDENT>address = NetAddress.from_string(address)<EOL><DEDENT>self._address = address<EOL>
|
Construct a Service from a protocol string and a NetAddress object (or a string that can be converted to one).
|
f5359:c2:m0
|
@classmethod<EOL><INDENT>def from_string(cls, string, *, default_func=None):<DEDENT>
|
if not isinstance(string, str):<EOL><INDENT>raise TypeError(f'<STR_LIT>')<EOL><DEDENT>parts = string.split('<STR_LIT>', <NUM_LIT:1>)<EOL>if len(parts) == <NUM_LIT:2>:<EOL><INDENT>protocol, address = parts<EOL><DEDENT>else:<EOL><INDENT>item, = parts<EOL>protocol = None<EOL>if default_func:<EOL><INDENT>if default_func(item, ServicePart.HOST) and default_func(item, ServicePart.PORT):<EOL><INDENT>protocol, address = item, '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>protocol, address = default_func(None, ServicePart.PROTOCOL), item<EOL><DEDENT><DEDENT>if not protocol:<EOL><INDENT>raise ValueError(f'<STR_LIT>')<EOL><DEDENT><DEDENT>if default_func:<EOL><INDENT>default_func = partial(default_func, protocol.lower())<EOL><DEDENT>address = NetAddress.from_string(address, default_func=default_func)<EOL>return cls(protocol, address)<EOL>
|
Construct a Service from a string.
If default_func is provided and any ServicePart is missing, it is called with
default_func(protocol, part) to obtain the missing part.
|
f5359:c2:m7
|
@classmethod<EOL><INDENT>def _message_id(cls, message, require_id):<DEDENT>
|
raise NotImplementedError<EOL>
|
Validate the message is a dictionary and return its ID.
Raise an error if the message is invalid or the ID is of an
invalid type. If it has no ID, raise an error if require_id
is True, otherwise return None.
|
f5360:c8:m0
|
@classmethod<EOL><INDENT>def _validate_message(cls, message):<DEDENT>
|
Validate parts of the message other than those
checked in _message_id.
|
f5360:c8:m1
|
|
@classmethod<EOL><INDENT>def _request_args(cls, request):<DEDENT>
|
raise NotImplementedError<EOL>
|
Validate the existence and type of the arguments passed
in the request dictionary.
|
f5360:c8:m2
|
@classmethod<EOL><INDENT>def _message_to_payload(cls, message):<DEDENT>
|
try:<EOL><INDENT>return json.loads(message.decode())<EOL><DEDENT>except UnicodeDecodeError:<EOL><INDENT>message = '<STR_LIT>'<EOL><DEDENT>except json.JSONDecodeError:<EOL><INDENT>message = '<STR_LIT>'<EOL><DEDENT>raise cls._error(cls.PARSE_ERROR, message, True, None)<EOL>
|
Return the decoded Python object, or raise a ProtocolError if the message cannot be parsed.
|
f5360:c8:m5
|
@classmethod<EOL><INDENT>def message_to_item(cls, message):<DEDENT>
|
payload = cls._message_to_payload(message)<EOL>if isinstance(payload, dict):<EOL><INDENT>if '<STR_LIT>' in payload:<EOL><INDENT>return cls._process_request(payload)<EOL><DEDENT>else:<EOL><INDENT>return cls._process_response(payload)<EOL><DEDENT><DEDENT>elif isinstance(payload, list) and cls.allow_batches:<EOL><INDENT>if not payload:<EOL><INDENT>raise cls._error(JSONRPC.INVALID_REQUEST, '<STR_LIT>',<EOL>True, None)<EOL><DEDENT>return payload, None<EOL><DEDENT>raise cls._error(cls.INVALID_REQUEST,<EOL>'<STR_LIT>', True, None)<EOL>
|
Translate an unframed received message and return an
(item, request_id) pair.
The item can be a Request, Notification, Response or a list.
A JSON RPC error response is returned as an RPCError inside a
Response object.
If a Batch is returned, request_id is an iterable of request
ids, one per batch member.
If the message violates the protocol in some way a
ProtocolError is returned, except if the message was
determined to be a response, in which case the ProtocolError
is placed inside a Response object. This is so that client
code can mark a request as having been responded to even if
the response was bad.
raises: ProtocolError
|
f5360:c8:m7
|
@classmethod<EOL><INDENT>def request_message(cls, item, request_id):<DEDENT>
|
assert isinstance(item, Request)<EOL>return cls.encode_payload(cls.request_payload(item, request_id))<EOL>
|
Convert a Request item to a message.
|
f5360:c8:m8
|
@classmethod<EOL><INDENT>def notification_message(cls, item):<DEDENT>
|
assert isinstance(item, Notification)<EOL>return cls.encode_payload(cls.request_payload(item, None))<EOL>
|
Convert a Notification item to a message.
|
f5360:c8:m9
|
@classmethod<EOL><INDENT>def response_message(cls, result, request_id):<DEDENT>
|
if isinstance(result, CodeMessageError):<EOL><INDENT>payload = cls.error_payload(result, request_id)<EOL><DEDENT>else:<EOL><INDENT>payload = cls.response_payload(result, request_id)<EOL><DEDENT>return cls.encode_payload(payload)<EOL>
|
Convert a response result (or RPCError) to a message.
|
f5360:c8:m10
|
@classmethod<EOL><INDENT>def batch_message(cls, batch, request_ids):<DEDENT>
|
assert isinstance(batch, Batch)<EOL>if not cls.allow_batches:<EOL><INDENT>raise ProtocolError.invalid_request(<EOL>'<STR_LIT>')<EOL><DEDENT>id_iter = iter(request_ids)<EOL>rm = cls.request_message<EOL>nm = cls.notification_message<EOL>parts = (rm(request, next(id_iter)) if isinstance(request, Request)<EOL>else nm(request) for request in batch)<EOL>return cls.batch_message_from_parts(parts)<EOL>
|
Convert a request Batch to a message.
|
f5360:c8:m11
|
@classmethod<EOL><INDENT>def batch_message_from_parts(cls, messages):<DEDENT>
|
<EOL>middle = b'<STR_LIT:U+002CU+0020>'.join(messages)<EOL>if not middle:<EOL><INDENT>raise ProtocolError.empty_batch()<EOL><DEDENT>return b'<STR_LIT>'.join([b'<STR_LIT:[>', middle, b'<STR_LIT:]>'])<EOL>
|
Convert messages, one per batch item, into a batch message. At
least one message must be passed.
|
f5360:c8:m12
|
@classmethod<EOL><INDENT>def encode_payload(cls, payload):<DEDENT>
|
try:<EOL><INDENT>return json.dumps(payload).encode()<EOL><DEDENT>except TypeError:<EOL><INDENT>msg = f'<STR_LIT>'<EOL>raise ProtocolError(cls.INTERNAL_ERROR, msg) from None<EOL><DEDENT>
|
Encode a Python object as JSON and convert it to bytes.
|
f5360:c8:m13
|
@classmethod<EOL><INDENT>def request_payload(cls, request, request_id):<DEDENT>
|
if isinstance(request.args, dict):<EOL><INDENT>raise ProtocolError.invalid_args(<EOL>'<STR_LIT>')<EOL><DEDENT>return {<EOL>'<STR_LIT>': request.method,<EOL>'<STR_LIT>': request.args,<EOL>'<STR_LIT:id>': request_id<EOL>}<EOL>
|
JSON v1 request (or notification) payload.
|
f5360:c9:m4
|
@classmethod<EOL><INDENT>def response_payload(cls, result, request_id):<DEDENT>
|
return {<EOL>'<STR_LIT:result>': result,<EOL>'<STR_LIT:error>': None,<EOL>'<STR_LIT:id>': request_id<EOL>}<EOL>
|
JSON v1 response payload.
|
f5360:c9:m5
|
@classmethod<EOL><INDENT>def request_payload(cls, request, request_id):<DEDENT>
|
payload = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': request.method,<EOL>}<EOL>if request_id is not None:<EOL><INDENT>payload['<STR_LIT:id>'] = request_id<EOL><DEDENT>if request.args or request.args == {}:<EOL><INDENT>payload['<STR_LIT>'] = request.args<EOL><DEDENT>return payload<EOL>
|
JSON v2 request (or notification) payload.
|
f5360:c10:m4
|
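The payload built in this row follows the JSON-RPC 2.0 wire format; a minimal sketch of constructing and serialising such a request with the standard library (the method name and params are made up, not part of the dataset):

```python
import json

def request_payload(method, args, request_id=None):
    """Build a JSON-RPC 2.0 request (or a notification when request_id is None)."""
    payload = {"jsonrpc": "2.0", "method": method}
    if request_id is not None:
        payload["id"] = request_id
    if args or args == {}:          # keep an explicit empty dict, drop empty sequences
        payload["params"] = args
    return payload

message = json.dumps(request_payload("sum", [1, 2, 3], request_id=7)).encode()
print(message)  # b'{"jsonrpc": "2.0", "method": "sum", "id": 7, "params": [1, 2, 3]}'
```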
@classmethod<EOL><INDENT>def response_payload(cls, result, request_id):<DEDENT>
|
return {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT:result>': result,<EOL>'<STR_LIT:id>': request_id<EOL>}<EOL>
|
JSON v2 response payload.
|
f5360:c10:m5
|
@classmethod<EOL><INDENT>def detect_protocol(cls, message):<DEDENT>
|
main = cls._message_to_payload(message)<EOL>def protocol_for_payload(payload):<EOL><INDENT>if not isinstance(payload, dict):<EOL><INDENT>return JSONRPCLoose <EOL><DEDENT>version = payload.get('<STR_LIT>')<EOL>if version == '<STR_LIT>':<EOL><INDENT>return JSONRPCv2<EOL><DEDENT>if version == '<STR_LIT:1.0>':<EOL><INDENT>return JSONRPCv1<EOL><DEDENT>if '<STR_LIT:result>' in payload and '<STR_LIT:error>' in payload:<EOL><INDENT>return JSONRPCv1<EOL><DEDENT>return JSONRPCLoose<EOL><DEDENT>if isinstance(main, list):<EOL><INDENT>parts = set(protocol_for_payload(payload) for payload in main)<EOL>if len(parts) == <NUM_LIT:1>:<EOL><INDENT>return parts.pop()<EOL><DEDENT>for protocol in (JSONRPCv2, JSONRPCv1):<EOL><INDENT>if protocol in parts:<EOL><INDENT>return protocol<EOL><DEDENT><DEDENT>return JSONRPCLoose<EOL><DEDENT>return protocol_for_payload(main)<EOL>
|
Attempt to detect the protocol from the message.
|
f5360:c12:m0
|
def send_request(self, request):
|
request_id = next(self._id_counter)<EOL>message = self._protocol.request_message(request, request_id)<EOL>return message, self._event(request, request_id)<EOL>
|
Send a Request. Return a (message, event) pair.
The message is an unframed message to send over the network.
Wait on the event for the response; which will be in the
"result" attribute.
Raises: ProtocolError if the request violates the protocol
in some way.
|
f5360:c13:m7
|
def receive_message(self, message):
|
if self._protocol is JSONRPCAutoDetect:<EOL><INDENT>self._protocol = JSONRPCAutoDetect.detect_protocol(message)<EOL><DEDENT>try:<EOL><INDENT>item, request_id = self._protocol.message_to_item(message)<EOL><DEDENT>except ProtocolError as e:<EOL><INDENT>if e.response_msg_id is not id:<EOL><INDENT>return self._receive_response(e, e.response_msg_id)<EOL><DEDENT>raise<EOL><DEDENT>if isinstance(item, Request):<EOL><INDENT>item.send_result = partial(self._send_result, request_id)<EOL>return [item]<EOL><DEDENT>if isinstance(item, Notification):<EOL><INDENT>return [item]<EOL><DEDENT>if isinstance(item, Response):<EOL><INDENT>return self._receive_response(item.result, request_id)<EOL><DEDENT>assert isinstance(item, list)<EOL>if all(isinstance(payload, dict) and ('<STR_LIT:result>' in payload or '<STR_LIT:error>' in payload)<EOL>for payload in item):<EOL><INDENT>return self._receive_response_batch(item)<EOL><DEDENT>else:<EOL><INDENT>return self._receive_request_batch(item)<EOL><DEDENT>
|
Call with an unframed message received from the network.
Raises: ProtocolError if the message violates the protocol in
some way. However, if it happened in a response that can be
paired with a request, the ProtocolError is instead set in the
result attribute of the send_request() that caused the error.
|
f5360:c13:m10
|
def cancel_pending_requests(self):
|
exception = CancelledError()<EOL>for _request, event in self._requests.values():<EOL><INDENT>event.result = exception<EOL>event.set()<EOL><DEDENT>self._requests.clear()<EOL>
|
Cancel all pending requests.
|
f5360:c13:m11
|
def pending_requests(self):
|
return [request for request, event in self._requests.values()]<EOL>
|
All sent requests that have not received a response.
|
f5360:c13:m12
|
async def run_in_thread(func, *args):
|
return await get_event_loop().run_in_executor(None, func, *args)<EOL>
|
Run a function in a separate thread, and await its completion.
|
f5362:m0
|
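A runnable illustration of offloading blocking work to the default executor, as the body above does (using `get_running_loop` rather than the row's `get_event_loop`; the blocking function is made up):

```python
import asyncio
import time

def blocking_io(seconds):
    time.sleep(seconds)       # stands in for any blocking call
    return f"slept {seconds}s"

async def run_in_thread(func, *args):
    """Run func(*args) in the default thread-pool executor and await the result."""
    return await asyncio.get_running_loop().run_in_executor(None, func, *args)

async def main():
    print(await run_in_thread(blocking_io, 0.1))

asyncio.run(main())
```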
def timeout_after(seconds, coro=None, *args):
|
if coro:<EOL><INDENT>return _timeout_after_func(seconds, False, coro, args)<EOL><DEDENT>return TimeoutAfter(seconds)<EOL>
|
Execute the specified coroutine and return its result. However,
issue a cancellation request to the calling task after seconds
have elapsed. When this happens, a TaskTimeout exception is
raised. If coro is None, the result of this function serves
as an asynchronous context manager that applies a timeout to a
block of statements.
timeout_after() may be composed with other timeout_after()
operations (i.e., nested timeouts). If an outer timeout expires
first, then TimeoutCancellationError is raised instead of
TaskTimeout. If an inner timeout expires and fails to properly
raise TaskTimeout, an UncaughtTimeoutError is raised in the outer
timeout.
|
f5362:m7
|
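The semantics described above resemble the standard library's timeout helpers. A hedged sketch of the analogous pattern using plain `asyncio.wait_for`, not the documented `timeout_after` itself (the coroutine and timeout value are made up):

```python
import asyncio

async def slow_operation():
    await asyncio.sleep(10)
    return "done"

async def main():
    try:
        # Cancel slow_operation() if it takes longer than 0.1 seconds.
        result = await asyncio.wait_for(slow_operation(), timeout=0.1)
    except asyncio.TimeoutError:
        result = "timed out"
    print(result)

asyncio.run(main())
```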
def timeout_at(clock, coro=None, *args):
|
if coro:<EOL><INDENT>return _timeout_after_func(clock, True, coro, args)<EOL><DEDENT>return TimeoutAfter(clock, absolute=True)<EOL>
|
Execute the specified coroutine and return its result. However,
issue a cancellation request to the calling task when the clock
reaches the given absolute value. When this happens, a TaskTimeout
exception is raised. If coro is None, the result of this function
serves as an asynchronous context manager that applies a timeout
to a block of statements.
timeout_at() may be composed with other timeout operations
(i.e., nested timeouts). If an outer timeout expires first, then
TimeoutCancellationError is raised instead of TaskTimeout. If an
inner timeout expires and fails to properly raise TaskTimeout, an
UncaughtTimeoutError is raised in the outer timeout.
|
f5362:m8
|
def ignore_after(seconds, coro=None, *args, timeout_result=None):
|
if coro:<EOL><INDENT>return _ignore_after_func(seconds, False, coro, args, timeout_result)<EOL><DEDENT>return TimeoutAfter(seconds, ignore=True)<EOL>
|
Execute the specified coroutine and return its result. Issue a
cancellation request after seconds have elapsed. When a timeout
occurs, no exception is raised. Instead, timeout_result is
returned.
If coro is None, the result is an asynchronous context manager
that applies a timeout to a block of statements. For the context
manager case, the resulting context manager object has an expired
attribute set to True if time expired.
Note: ignore_after() may also be composed with other timeout
operations. TimeoutCancellationError and UncaughtTimeoutError
exceptions might be raised according to the same rules as for
timeout_after().
|
f5362:m10
|
def ignore_at(clock, coro=None, *args, timeout_result=None):
|
if coro:<EOL><INDENT>return _ignore_after_func(clock, True, coro, args, timeout_result)<EOL><DEDENT>return TimeoutAfter(clock, absolute=True, ignore=True)<EOL>
|
Stop the enclosed task or block of code at an absolute
clock value. Same usage as ignore_after().
|
f5362:m11
|
def _add_task(self, task):
|
if hasattr(task, '<STR_LIT>'):<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>if self._closed:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>task._task_group = self<EOL>if task.done():<EOL><INDENT>self._done.append(task)<EOL><DEDENT>else:<EOL><INDENT>self._pending.add(task)<EOL>task.add_done_callback(self._on_done)<EOL><DEDENT>
|
Add an already existing task to the task group.
|
f5362:c1:m1
|
async def spawn(self, coro, *args):
|
task = await spawn(coro, *args, report_crash=False)<EOL>self._add_task(task)<EOL>return task<EOL>
|
Create a new task that’s part of the group. Returns a Task
instance.
|
f5362:c1:m3
|
async def add_task(self, task):
|
self._add_task(task)<EOL>
|
Add an already existing task to the task group.
|
f5362:c1:m4
|
async def next_done(self):
|
if not self._done and self._pending:<EOL><INDENT>self._done_event.clear()<EOL>await self._done_event.wait()<EOL><DEDENT>if self._done:<EOL><INDENT>return self._done.popleft()<EOL><DEDENT>return None<EOL>
|
Returns the next completed task. Returns None if no more tasks
remain. A TaskGroup may also be used as an asynchronous iterator.
|
f5362:c1:m5
|
async def next_result(self):
|
task = await self.next_done()<EOL>if not task:<EOL><INDENT>raise NoRemainingTasksError('<STR_LIT>')<EOL><DEDENT>return task.result()<EOL>
|
Returns the result of the next completed task. If the task failed
with an exception, that exception is raised. A RuntimeError
exception is raised if this is called when no remaining tasks
are available.
|
f5362:c1:m6
|
async def join(self):
|
def errored(task):<EOL><INDENT>return not task.cancelled() and task.exception()<EOL><DEDENT>try:<EOL><INDENT>if self._wait in (all, object):<EOL><INDENT>while True:<EOL><INDENT>task = await self.next_done()<EOL>if task is None:<EOL><INDENT>return<EOL><DEDENT>if errored(task):<EOL><INDENT>break<EOL><DEDENT>if self._wait is object:<EOL><INDENT>if task.cancelled() or task.result() is not None:<EOL><INDENT>return<EOL><DEDENT><DEDENT><DEDENT><DEDENT>else: <EOL><INDENT>task = await self.next_done()<EOL>if task is None or not errored(task):<EOL><INDENT>return<EOL><DEDENT><DEDENT><DEDENT>finally:<EOL><INDENT>await self.cancel_remaining()<EOL><DEDENT>if errored(task):<EOL><INDENT>raise task.exception()<EOL><DEDENT>
|
Wait for tasks in the group to terminate according to the wait
policy for the group.
If the join() operation itself is cancelled, all remaining
tasks in the group are also cancelled.
If a TaskGroup is used as a context manager, the join() method
is called on context-exit.
Once join() returns, no more tasks may be added to the task
group. Tasks can be added while join() is running.
|
f5362:c1:m7
|
async def cancel_remaining(self):
|
self._closed = True<EOL>task_list = list(self._pending)<EOL>for task in task_list:<EOL><INDENT>task.cancel()<EOL><DEDENT>for task in task_list:<EOL><INDENT>with suppress(CancelledError):<EOL><INDENT>await task<EOL><DEDENT><DEDENT>
|
Cancel all remaining tasks.
|
f5362:c1:m8
|
async def create_connection(self):
|
connector = self.proxy or self.loop<EOL>return await connector.create_connection(<EOL>self.session_factory, self.host, self.port, **self.kwargs)<EOL>
|
Initiate a connection.
|
f5363:c1:m1
|
def data_received(self, framed_message):
|
if self.verbosity >= <NUM_LIT:4>:<EOL><INDENT>self.logger.debug(f'<STR_LIT>')<EOL><DEDENT>self.recv_size += len(framed_message)<EOL>self.bump_cost(len(framed_message) * self.bw_cost_per_byte)<EOL>self.framer.received_bytes(framed_message)<EOL>
|
Called by asyncio when a message comes in.
|
f5363:c4:m6
|
def pause_writing(self):
|
if not self.is_closing():<EOL><INDENT>self._can_send.clear()<EOL>self.transport.pause_reading()<EOL><DEDENT>
|
Called by the transport when the send buffer is full.
|
f5363:c4:m7
|
def resume_writing(self):
|
if not self._can_send.is_set():<EOL><INDENT>self._can_send.set()<EOL>self.transport.resume_reading()<EOL><DEDENT>
|
Called by the transport when the send buffer has room.
|
f5363:c4:m8
|
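`pause_writing`/`resume_writing` are standard asyncio flow-control callbacks. A minimal sketch of the same event-based gate used in the rows above; the class and its `send()` helper are illustrative, not the dataset's own session class:

```python
import asyncio

class FlowControlledProtocol(asyncio.Protocol):
    """Pause reads while the send buffer is full, resume when it drains."""

    def __init__(self):
        self.transport = None
        self._can_send = asyncio.Event()
        self._can_send.set()

    def connection_made(self, transport):
        self.transport = transport

    def pause_writing(self):
        # Called by the transport when its send buffer passes the high-water mark.
        self._can_send.clear()
        self.transport.pause_reading()

    def resume_writing(self):
        # Called by the transport when the buffer drains below the low-water mark.
        if not self._can_send.is_set():
            self._can_send.set()
            self.transport.resume_reading()

    async def send(self, data):
        # Wait until the transport says it is safe to send more.
        await self._can_send.wait()
        self.transport.write(data)
```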
def connection_made(self, transport):
|
self.transport = transport<EOL>if self._proxy is None:<EOL><INDENT>peername = transport.get_extra_info('<STR_LIT>')<EOL>self._remote_address = NetAddress(peername[<NUM_LIT:0>], peername[<NUM_LIT:1>])<EOL><DEDENT>self._task = spawn_sync(self._process_messages(), loop=self.loop)<EOL>
|
Called by asyncio when a connection is established.
Derived classes overriding this method must call this first.
|
f5363:c4:m9
|
def connection_lost(self, exc):
|
<EOL>if self.transport:<EOL><INDENT>self.transport = None<EOL>self.closed_event.set()<EOL>self._can_send.set()<EOL>self.loop.call_soon(self._task.cancel)<EOL><DEDENT>
|
Called by asyncio when the connection closes.
Tear down things done in connection_made.
|
f5363:c4:m10
|
def is_send_buffer_full(self):
|
return not self._can_send.is_set()<EOL>
|
Return True if the send socket buffer is full.
|
f5363:c4:m11
|
def recalc_concurrency(self):
|
<EOL>now = time.time()<EOL>self.cost = max(<NUM_LIT:0>, self.cost - (now - self._cost_time) * self.cost_decay_per_sec)<EOL>self._cost_time = now<EOL>self._cost_last = self.cost<EOL>value = self._incoming_concurrency.max_concurrent<EOL>cost_soft_range = self.cost_hard_limit - self.cost_soft_limit<EOL>if cost_soft_range <= <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>cost = self.cost + self.extra_cost()<EOL>self._cost_fraction = max(<NUM_LIT:0.0>, (cost - self.cost_soft_limit) / cost_soft_range)<EOL>target = max(<NUM_LIT:0>, ceil((<NUM_LIT:1.0> - self._cost_fraction) * self.initial_concurrent))<EOL>if abs(target - value) > <NUM_LIT:1>:<EOL><INDENT>self.logger.info(f'<STR_LIT>')<EOL><DEDENT>self._incoming_concurrency.set_target(target)<EOL>
|
Call to recalculate sleeps and concurrency for the session. Called automatically if
cost has drifted significantly. Otherwise can be called at regular intervals if
desired.
|
f5363:c4:m13
|
def unanswered_request_count(self):
|
<EOL>return max(<NUM_LIT:0>, len(self._group._pending) - <NUM_LIT:1>)<EOL>
|
The number of requests received but not yet answered.
|
f5363:c4:m14
|
def extra_cost(self):
|
return <NUM_LIT:0.0><EOL>
|
A dynamic value added to this session's cost when deciding how much to throttle
requests. Can be negative.
|
f5363:c4:m15
|
def default_framer(self):
|
raise NotImplementedError<EOL>
|
Return a default framer.
|
f5363:c4:m16
|
def proxy(self):
|
return self._proxy<EOL>
|
Returns the proxy used, or None.
|
f5363:c4:m17
|
def remote_address(self):
|
return self._remote_address<EOL>
|
Returns a NetAddress or None if not connected.
|
f5363:c4:m18
|
def is_closing(self):
|
return not self.transport or self.transport.is_closing()<EOL>
|
Return True if the connection is closing.
|
f5363:c4:m19
|
def abort(self):
|
if self.transport:<EOL><INDENT>self.transport.abort()<EOL><DEDENT>
|
Forcefully close the connection.
|
f5363:c4:m21
|
async def close(self, *, force_after=<NUM_LIT:30>):
|
if self.transport:<EOL><INDENT>self.transport.close()<EOL>try:<EOL><INDENT>async with timeout_after(force_after):<EOL><INDENT>await self.closed_event.wait()<EOL><DEDENT><DEDENT>except TaskTimeout:<EOL><INDENT>self.abort()<EOL>await self.closed_event.wait()<EOL><DEDENT><DEDENT>
|
Close the connection and return when closed.
|
f5363:c4:m22
|
async def _throttled_message(self, message):
|
try:<EOL><INDENT>timeout = self.processing_timeout<EOL>async with timeout_after(timeout):<EOL><INDENT>async with self._incoming_concurrency:<EOL><INDENT>if self._cost_fraction:<EOL><INDENT>await sleep(self._cost_fraction * self.cost_sleep)<EOL><DEDENT>await self.handle_message(message)<EOL><DEDENT><DEDENT><DEDENT>except ProtocolError as e:<EOL><INDENT>self.logger.error(f'<STR_LIT>')<EOL>self._bump_errors(e)<EOL><DEDENT>except TaskTimeout:<EOL><INDENT>self.logger.info(f'<STR_LIT>')<EOL>self._bump_errors()<EOL><DEDENT>except ExcessiveSessionCostError:<EOL><INDENT>await self.close()<EOL><DEDENT>except CancelledError:<EOL><INDENT>raise<EOL><DEDENT>except Exception:<EOL><INDENT>self.logger.exception(f'<STR_LIT>')<EOL>self._bump_errors()<EOL><DEDENT>
|
Process a single request, respecting the concurrency limit.
|
f5363:c5:m1
|
def default_framer(self):
|
return BitcoinFramer(bytes.fromhex('<STR_LIT>'), <NUM_LIT>)<EOL>
|
Return a bitcoin framer.
|
f5363:c5:m2
|
async def handle_message(self, message):
|
message is a (command, payload) pair.
|
f5363:c5:m3
|
|
async def send_message(self, message):
|
await self._send_message(message)<EOL>
|
Send a message (command, payload) over the network.
|
f5363:c5:m4
|
async def _throttled_request(self, request):
|
disconnect = False<EOL>try:<EOL><INDENT>timeout = self.processing_timeout<EOL>async with timeout_after(timeout):<EOL><INDENT>async with self._incoming_concurrency:<EOL><INDENT>if self.is_closing():<EOL><INDENT>return<EOL><DEDENT>if self._cost_fraction:<EOL><INDENT>await sleep(self._cost_fraction * self.cost_sleep)<EOL><DEDENT>result = await self.handle_request(request)<EOL><DEDENT><DEDENT><DEDENT>except (ProtocolError, RPCError) as e:<EOL><INDENT>result = e<EOL><DEDENT>except TaskTimeout:<EOL><INDENT>self.logger.info(f'<STR_LIT>')<EOL>result = RPCError(JSONRPC.SERVER_BUSY, '<STR_LIT>')<EOL><DEDENT>except ReplyAndDisconnect as e:<EOL><INDENT>result = e.result<EOL>disconnect = True<EOL><DEDENT>except ExcessiveSessionCostError:<EOL><INDENT>result = RPCError(JSONRPC.EXCESSIVE_RESOURCE_USAGE, '<STR_LIT>')<EOL>disconnect = True<EOL><DEDENT>except CancelledError:<EOL><INDENT>raise<EOL><DEDENT>except Exception:<EOL><INDENT>self.logger.exception(f'<STR_LIT>')<EOL>result = RPCError(JSONRPC.INTERNAL_ERROR, '<STR_LIT>')<EOL><DEDENT>if isinstance(request, Request):<EOL><INDENT>message = request.send_result(result)<EOL>if message:<EOL><INDENT>await self._send_message(message)<EOL><DEDENT><DEDENT>if isinstance(result, Exception):<EOL><INDENT>self._bump_errors(result)<EOL><DEDENT>if disconnect:<EOL><INDENT>await self.close()<EOL><DEDENT>
|
Process a single request, respecting the concurrency limit.
|
f5363:c8:m3
|
def default_connection(self):
|
return JSONRPCConnection(JSONRPCv2)<EOL>
|
Return a default connection if the user provides none.
|
f5363:c8:m6
|
def default_framer(self):
|
return NewlineFramer()<EOL>
|
Return a default framer.
|
f5363:c8:m7
|
async def send_request(self, method, args=()):
|
message, event = self.connection.send_request(Request(method, args))<EOL>return await self._send_concurrent(message, event, <NUM_LIT:1>)<EOL>
|
Send an RPC request over the network.
|
f5363:c8:m9
|
async def send_notification(self, method, args=()):
|
message = self.connection.send_notification(Notification(method, args))<EOL>await self._send_message(message)<EOL>
|
Send an RPC notification over the network.
|
f5363:c8:m10
|
def send_batch(self, raise_errors=False):
|
return BatchRequest(self, raise_errors)<EOL>
|
Return a BatchRequest. Intended to be used like so:
    async with session.send_batch() as batch:
        batch.add_request("method1")
        batch.add_request("sum", (x, y))
        batch.add_notification("updated")
    for result in batch.results:
        ...
Note that in some circumstances exceptions can be raised; see
BatchRequest doc string.
|
f5363:c8:m11
|
async def close(self):
|
if self.server:<EOL><INDENT>self.server.close()<EOL>await self.server.wait_closed()<EOL>self.server = None<EOL><DEDENT>
|
Close the listening socket. This does not close any ServerSession
objects created to handle incoming connections.
|
f5363:c9:m2
|
def sha256(x):
|
return _sha256(x).digest()<EOL>
|
Simple wrapper of hashlib sha256.
|
f5364:m0
|
def double_sha256(x):
|
return sha256(sha256(x))<EOL>
|
SHA-256 of SHA-256, as used extensively in bitcoin.
|
f5364:m1
|
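A self-contained check of the double-SHA-256 wrapper above, using only `hashlib`:

```python
from hashlib import sha256 as _sha256

def sha256(x):
    """Single SHA-256 digest as bytes."""
    return _sha256(x).digest()

def double_sha256(x):
    """SHA-256 of SHA-256, as used extensively in Bitcoin."""
    return sha256(sha256(x))

print(double_sha256(b"hello").hex())
```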
def frame(self, message):
|
raise NotImplementedError<EOL>
|
Return the framed message.
|
f5364:c0:m0
|
def received_bytes(self, data):
|
raise NotImplementedError<EOL>
|
Pass incoming network bytes.
|
f5364:c0:m1
|
async def receive_message(self):
|
raise NotImplementedError<EOL>
|
Wait for a complete unframed message to arrive, and return it.
|
f5364:c0:m2
|