| source (string, 3-86 chars) | python (string, 75-1.04M chars) |
|---|---|
test_http.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import queue
import threading
import pretend
import warehouse.http
_REQUEST = pretend.stub(
log=pretend.stub(debug=pretend.call_recorder(lambda *args: None))
)
class TestSession:
def test_create(self):
config = {"verify": "foo"}
factory = warehouse.http.ThreadLocalSessionFactory(config)
session_a, session_b = factory(_REQUEST), factory(_REQUEST)
assert session_a is session_b
assert session_a.verify == session_b.verify == config["verify"]
def test_threads(self):
def _test_factory(fifo, start):
start.wait()
factory = warehouse.http.ThreadLocalSessionFactory()
            # the actual session instance is put into the queue here so as to
            # maintain a reference and keep it from being gc'd (which could
            # result in id reuse)
fifo.put((threading.get_ident(), factory(_REQUEST)))
start = threading.Event()
fifo = queue.Queue()
threads = [
threading.Thread(target=_test_factory, args=(fifo, start))
for _ in range(10)
]
for thread in threads:
thread.start()
start.set()
for thread in threads:
thread.join()
# data pushed into the queue is (threadid, session).
# this basically proves that the session object id is different per
# thread
results = [fifo.get() for _ in range(len(threads))]
idents, objects = zip(*results)
assert len(set(idents)) == len(threads)
assert len(set(id(obj) for obj in objects)) == len(threads)
def test_includeme():
config = pretend.stub(
registry=pretend.stub(settings={}),
add_request_method=pretend.call_recorder(lambda *args, **kwargs: None),
)
warehouse.http.includeme(config)
assert len(config.add_request_method.calls) == 1
call = config.add_request_method.calls[0]
assert isinstance(call.args[0], warehouse.http.ThreadLocalSessionFactory)
assert call.kwargs == {"name": "http", "reify": True}
|
vpp_papi.py
|
#!/usr/bin/env python
#
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import os
import logging
import collections
import struct
import json
import threading
import fnmatch
import atexit
from cffi import FFI
import cffi
if sys.version[0] == '2':
import Queue as queue
else:
    import queue
ffi = FFI()
ffi.cdef("""
typedef void (*vac_callback_t)(unsigned char * data, int len);
typedef void (*vac_error_callback_t)(void *, unsigned char *, int);
int vac_connect(char * name, char * chroot_prefix, vac_callback_t cb,
int rx_qlen);
int vac_disconnect(void);
int vac_read(char **data, int *l, unsigned short timeout);
int vac_write(char *data, int len);
void vac_free(void * msg);
int vac_get_msg_index(unsigned char * name);
int vac_msg_table_size(void);
int vac_msg_table_max_index(void);
void vac_rx_suspend (void);
void vac_rx_resume (void);
void vac_set_error_handler(vac_error_callback_t);
""")
# Barfs on failure, no need to check success.
vpp_api = ffi.dlopen('libvppapiclient.so')
def vpp_atexit(self):
"""Clean up VPP connection on shutdown."""
if self.connected:
self.logger.debug('Cleaning up VPP on exit')
self.disconnect()
vpp_object = None
def vpp_iterator(d):
if sys.version[0] == '2':
return d.iteritems()
else:
return d.items()
@ffi.callback("void(unsigned char *, int)")
def vac_callback_sync(data, len):
vpp_object.msg_handler_sync(ffi.buffer(data, len))
@ffi.callback("void(unsigned char *, int)")
def vac_callback_async(data, len):
vpp_object.msg_handler_async(ffi.buffer(data, len))
@ffi.callback("void(void *, unsigned char *, int)")
def vac_error_handler(arg, msg, msg_len):
vpp_object.logger.warning("VPP API client:: %s", ffi.string(msg, msg_len))
class Empty(object):
pass
class FuncWrapper(object):
def __init__(self, func):
self._func = func
self.__name__ = func.__name__
def __call__(self, **kwargs):
return self._func(**kwargs)
class VPP():
"""VPP interface.
    This class provides the APIs to VPP. The APIs are loaded
    from the provided .api.json files, and functions are created
    accordingly. Because these functions are generated dynamically,
    they are documented in the VPP .api files rather than here.
    Additionally, VPP can send callback messages; this class
    provides a means to register a callback function to receive
    these messages in a background thread. A usage sketch appears
    at the end of this module.
"""
def __init__(self, apifiles=None, testmode=False, async_thread=True,
logger=logging.getLogger('vpp_papi'), loglevel='debug', read_timeout=0):
"""Create a VPP API object.
apifiles is a list of files containing API
descriptions that will be loaded - methods will be
dynamically created reflecting these APIs. If not
provided this will load the API files from VPP's
default install location.
"""
global vpp_object
vpp_object = self
self.logger = logger
logging.basicConfig(level=getattr(logging, loglevel.upper()))
self.messages = {}
self.id_names = []
self.id_msgdef = []
self.connected = False
self.header = struct.Struct('>HI')
self.apifiles = []
self.event_callback = None
self.message_queue = queue.Queue()
self.read_timeout = read_timeout
self.vpp_api = vpp_api
if async_thread:
self.event_thread = threading.Thread(
target=self.thread_msg_handler)
self.event_thread.daemon = True
self.event_thread.start()
if not apifiles:
# Pick up API definitions from default directory
try:
apifiles = self.find_api_files()
except RuntimeError:
# In test mode we don't care that we can't find the API files
if testmode:
apifiles = []
else:
raise
for file in apifiles:
with open(file) as apidef_file:
api = json.load(apidef_file)
for t in api['types']:
self.add_type(t[0], t[1:])
for m in api['messages']:
self.add_message(m[0], m[1:])
self.apifiles = apifiles
# Basic sanity check
if len(self.messages) == 0 and not testmode:
raise ValueError(1, 'Missing JSON message definitions')
# Make sure we allow VPP to clean up the message rings.
atexit.register(vpp_atexit, self)
# Register error handler
vpp_api.vac_set_error_handler(vac_error_handler)
# Support legacy CFFI
# from_buffer supported from 1.8.0
        (major, minor) = [int(s) for s in cffi.__version__.split('.')[:2]]
        if (major, minor) >= (1, 8):
self._write = self._write_new_cffi
else:
self._write = self._write_legacy_cffi
class ContextId(object):
"""Thread-safe provider of unique context IDs."""
def __init__(self):
self.context = 0
self.lock = threading.Lock()
def __call__(self):
"""Get a new unique (or, at least, not recently used) context."""
with self.lock:
self.context += 1
return self.context
get_context = ContextId()
@classmethod
def find_api_dir(cls):
"""Attempt to find the best directory in which API definition
files may reside. If the value VPP_API_DIR exists in the environment
then it is first on the search list. If we're inside a recognized
location in a VPP source tree (src/scripts and src/vpp-api/python)
then entries from there to the likely locations in build-root are
added. Finally the location used by system packages is added.
:returns: A single directory name, or None if no such directory
could be found.
"""
dirs = []
if 'VPP_API_DIR' in os.environ:
dirs.append(os.environ['VPP_API_DIR'])
# perhaps we're in the 'src/scripts' or 'src/vpp-api/python' dir;
# in which case, plot a course to likely places in the src tree
import __main__ as main
if hasattr(main, '__file__'):
# get the path of the calling script
localdir = os.path.dirname(os.path.realpath(main.__file__))
else:
# use cwd if there is no calling script
            localdir = os.getcwd()
localdir_s = localdir.split(os.path.sep)
def dmatch(dir):
"""Match dir against right-hand components of the script dir"""
d = dir.split('/') # param 'dir' assumes a / separator
l = len(d)
return len(localdir_s) > l and localdir_s[-l:] == d
def sdir(srcdir, variant):
"""Build a path from srcdir to the staged API files of
'variant' (typically '' or '_debug')"""
# Since 'core' and 'plugin' files are staged
# in separate directories, we target the parent dir.
return os.path.sep.join((
srcdir,
'build-root',
'install-vpp%s-native' % variant,
'vpp',
'share',
'vpp',
'api',
))
srcdir = None
if dmatch('src/scripts'):
srcdir = os.path.sep.join(localdir_s[:-2])
elif dmatch('src/vpp-api/python'):
srcdir = os.path.sep.join(localdir_s[:-3])
elif dmatch('test'):
# we're apparently running tests
srcdir = os.path.sep.join(localdir_s[:-1])
if srcdir:
# we're in the source tree, try both the debug and release
# variants.
x = 'vpp/share/vpp/api'
dirs.append(sdir(srcdir, '_debug'))
dirs.append(sdir(srcdir, ''))
# Test for staged copies of the scripts
# For these, since we explicitly know if we're running a debug versus
# release variant, target only the relevant directory
if dmatch('build-root/install-vpp_debug-native/vpp/bin'):
srcdir = os.path.sep.join(localdir_s[:-4])
dirs.append(sdir(srcdir, '_debug'))
if dmatch('build-root/install-vpp-native/vpp/bin'):
srcdir = os.path.sep.join(localdir_s[:-4])
dirs.append(sdir(srcdir, ''))
# finally, try the location system packages typically install into
dirs.append(os.path.sep.join(('', 'usr', 'share', 'vpp', 'api')))
        # check the directories for existence; first one wins
for dir in dirs:
if os.path.isdir(dir):
return dir
return None
@classmethod
def find_api_files(cls, api_dir=None, patterns='*'):
"""Find API definition files from the given directory tree with the
given pattern. If no directory is given then find_api_dir() is used
to locate one. If no pattern is given then all definition files found
in the directory tree are used.
:param api_dir: A directory tree in which to locate API definition
files; subdirectories are descended into.
If this is None then find_api_dir() is called to discover it.
:param patterns: A list of patterns to use in each visited directory
when looking for files.
This can be a list/tuple object or a comma-separated string of
            patterns. Each value in the list will have leading/trailing
whitespace stripped.
The pattern specifies the first part of the filename, '.api.json'
is appended.
The results are de-duplicated, thus overlapping patterns are fine.
If this is None it defaults to '*' meaning "all API files".
:returns: A list of file paths for the API files found.
"""
if api_dir is None:
api_dir = cls.find_api_dir()
if api_dir is None:
raise RuntimeError("api_dir cannot be located")
if isinstance(patterns, list) or isinstance(patterns, tuple):
patterns = [p.strip() + '.api.json' for p in patterns]
else:
patterns = [p.strip() + '.api.json' for p in patterns.split(",")]
api_files = []
for root, dirnames, files in os.walk(api_dir):
# iterate all given patterns and de-dup the result
files = set(sum([fnmatch.filter(files, p) for p in patterns], []))
for filename in files:
api_files.append(os.path.join(root, filename))
return api_files
def status(self):
"""Debug function: report current VPP API status to stdout."""
        print('Connected' if self.connected else 'Not Connected')
print('Read API definitions from', ', '.join(self.apifiles))
def __struct(self, t, n=None, e=-1, vl=None):
"""Create a packing structure for a message."""
base_types = {'u8': 'B',
'u16': 'H',
'u32': 'I',
'i32': 'i',
'u64': 'Q',
'f64': 'd', }
pack = None
if t in base_types:
pack = base_types[t]
if not vl:
if e > 0 and t == 'u8':
# Fixed byte array
s = struct.Struct('>' + str(e) + 's')
return s.size, s
if e > 0:
# Fixed array of base type
s = struct.Struct('>' + base_types[t])
return s.size, [e, s]
elif e == 0:
# Old style variable array
s = struct.Struct('>' + base_types[t])
return s.size, [-1, s]
else:
# Variable length array
if t == 'u8':
s = struct.Struct('>s')
return s.size, [vl, s]
else:
s = struct.Struct('>' + base_types[t])
return s.size, [vl, s]
s = struct.Struct('>' + base_types[t])
return s.size, s
if t in self.messages:
size = self.messages[t]['sizes'][0]
# Return a list in case of array
if e > 0 and not vl:
return size, [e, lambda self, encode, buf, offset, args: (
self.__struct_type(encode, self.messages[t], buf, offset,
args))]
if vl:
return size, [vl, lambda self, encode, buf, offset, args: (
self.__struct_type(encode, self.messages[t], buf, offset,
args))]
elif e == 0:
# Old style VLA
raise NotImplementedError(1,
'No support for compound types ' + t)
return size, lambda self, encode, buf, offset, args: (
self.__struct_type(encode, self.messages[t], buf, offset, args)
)
raise ValueError(1, 'Invalid message type: ' + t)
def __struct_type(self, encode, msgdef, buf, offset, kwargs):
"""Get a message packer or unpacker."""
if encode:
return self.__struct_type_encode(msgdef, buf, offset, kwargs)
else:
return self.__struct_type_decode(msgdef, buf, offset)
def __struct_type_encode(self, msgdef, buf, offset, kwargs):
off = offset
size = 0
for k in kwargs:
if k not in msgdef['args']:
                raise ValueError(1, 'Non-existent argument [' + k + ']' +
                                 ' used in call to: ' +
                                 self.id_names[kwargs['_vl_msg_id']] + '()')
for k, v in vpp_iterator(msgdef['args']):
off += size
if k in kwargs:
if type(v) is list:
if callable(v[1]):
e = kwargs[v[0]] if v[0] in kwargs else v[0]
if e != len(kwargs[k]):
raise (ValueError(1, 'Input list length mismatch: %s (%s != %s)' % (k, e, len(kwargs[k]))))
size = 0
for i in range(e):
size += v[1](self, True, buf, off + size,
kwargs[k][i])
else:
if v[0] in kwargs:
l = kwargs[v[0]]
if l != len(kwargs[k]):
raise ValueError(1, 'Input list length mismatch: %s (%s != %s)' % (k, l, len(kwargs[k])))
else:
l = len(kwargs[k])
if v[1].size == 1:
buf[off:off + l] = bytearray(kwargs[k])
size = l
else:
size = 0
for i in kwargs[k]:
v[1].pack_into(buf, off + size, i)
size += v[1].size
else:
if callable(v):
size = v(self, True, buf, off, kwargs[k])
else:
if type(kwargs[k]) is str and v.size < len(kwargs[k]):
raise ValueError(1, 'Input list length mismatch: %s (%s < %s)' % (k, v.size, len(kwargs[k])))
v.pack_into(buf, off, kwargs[k])
size = v.size
else:
size = v.size if not type(v) is list else 0
return off + size - offset
def __getitem__(self, name):
if name in self.messages:
return self.messages[name]
return None
def get_size(self, sizes, kwargs):
total_size = sizes[0]
for e in sizes[1]:
if e in kwargs and type(kwargs[e]) is list:
total_size += len(kwargs[e]) * sizes[1][e]
return total_size
def encode(self, msgdef, kwargs):
# Make suitably large buffer
size = self.get_size(msgdef['sizes'], kwargs)
buf = bytearray(size)
offset = 0
size = self.__struct_type(True, msgdef, buf, offset, kwargs)
return buf[:offset + size]
def decode(self, msgdef, buf):
return self.__struct_type(False, msgdef, buf, 0, None)[1]
def __struct_type_decode(self, msgdef, buf, offset):
res = []
off = offset
size = 0
for k, v in vpp_iterator(msgdef['args']):
off += size
if type(v) is list:
lst = []
if callable(v[1]): # compound type
size = 0
if v[0] in msgdef['args']: # vla
e = res[v[2]]
else: # fixed array
e = v[0]
res.append(lst)
for i in range(e):
(s, l) = v[1](self, False, buf, off + size, None)
lst.append(l)
size += s
continue
if v[1].size == 1:
if type(v[0]) is int:
size = len(buf) - off
else:
size = res[v[2]]
res.append(buf[off:off + size])
else:
e = v[0] if type(v[0]) is int else res[v[2]]
if e == -1:
                        e = (len(buf) - off) // v[1].size
lst = []
res.append(lst)
size = 0
for i in range(e):
lst.append(v[1].unpack_from(buf, off + size)[0])
size += v[1].size
else:
if callable(v):
size = 0
(s, l) = v(self, False, buf, off, None)
res.append(l)
size += s
else:
res.append(v.unpack_from(buf, off)[0])
size = v.size
return off + size - offset, msgdef['return_tuple']._make(res)
def ret_tup(self, name):
if name in self.messages and 'return_tuple' in self.messages[name]:
return self.messages[name]['return_tuple']
return None
def add_message(self, name, msgdef, typeonly=False):
if name in self.messages:
raise ValueError('Duplicate message name: ' + name)
args = collections.OrderedDict()
argtypes = collections.OrderedDict()
fields = []
msg = {}
total_size = 0
sizes = {}
for i, f in enumerate(msgdef):
if type(f) is dict and 'crc' in f:
msg['crc'] = f['crc']
continue
field_type = f[0]
field_name = f[1]
if len(f) == 3 and f[2] == 0 and i != len(msgdef) - 2:
raise ValueError('Variable Length Array must be last: ' + name)
size, s = self.__struct(*f)
args[field_name] = s
if type(s) == list and type(s[0]) == int and type(s[1]) == struct.Struct:
if s[0] < 0:
sizes[field_name] = size
else:
sizes[field_name] = size
total_size += s[0] * size
else:
sizes[field_name] = size
total_size += size
argtypes[field_name] = field_type
if len(f) == 4: # Find offset to # elements field
idx = list(args.keys()).index(f[3]) - i
args[field_name].append(idx)
fields.append(field_name)
msg['return_tuple'] = collections.namedtuple(name, fields,
rename=True)
self.messages[name] = msg
self.messages[name]['args'] = args
self.messages[name]['argtypes'] = argtypes
self.messages[name]['typeonly'] = typeonly
self.messages[name]['sizes'] = [total_size, sizes]
return self.messages[name]
def add_type(self, name, typedef):
return self.add_message('vl_api_' + name + '_t', typedef,
typeonly=True)
def make_function(self, name, i, msgdef, multipart, async):
if (async):
f = lambda **kwargs: (self._call_vpp_async(i, msgdef, **kwargs))
else:
f = lambda **kwargs: (self._call_vpp(i, msgdef, multipart,
**kwargs))
args = self.messages[name]['args']
argtypes = self.messages[name]['argtypes']
f.__name__ = str(name)
f.__doc__ = ", ".join(["%s %s" %
(argtypes[k], k) for k in args.keys()])
return f
@property
def api(self):
if not hasattr(self, "_api"):
raise Exception("Not connected, api definitions not available")
return self._api
def _register_functions(self, async=False):
self.id_names = [None] * (self.vpp_dictionary_maxid + 1)
self.id_msgdef = [None] * (self.vpp_dictionary_maxid + 1)
self._api = Empty()
for name, msgdef in vpp_iterator(self.messages):
if self.messages[name]['typeonly']:
continue
crc = self.messages[name]['crc']
n = name + '_' + crc[2:]
i = vpp_api.vac_get_msg_index(n.encode())
if i > 0:
self.id_msgdef[i] = msgdef
self.id_names[i] = name
multipart = True if name.find('_dump') > 0 else False
f = self.make_function(name, i, msgdef, multipart, async)
setattr(self._api, name, FuncWrapper(f))
# old API stuff starts here - will be removed in 17.07
if hasattr(self, name):
raise NameError(
3, "Conflicting name in JSON definition: `%s'" % name)
setattr(self, name, f)
# old API stuff ends here
else:
self.logger.debug(
'No such message type or failed CRC checksum: %s', n)
def _write_new_cffi(self, buf):
"""Send a binary-packed message to VPP."""
if not self.connected:
raise IOError(1, 'Not connected')
return vpp_api.vac_write(ffi.from_buffer(buf), len(buf))
def _write_legacy_cffi(self, buf):
"""Send a binary-packed message to VPP."""
if not self.connected:
raise IOError(1, 'Not connected')
return vpp_api.vac_write(str(buf), len(buf))
def _read(self):
if not self.connected:
raise IOError(1, 'Not connected')
mem = ffi.new("char **")
size = ffi.new("int *")
rv = vpp_api.vac_read(mem, size, self.read_timeout)
if rv:
raise IOError(rv, 'vac_read failed')
msg = bytes(ffi.buffer(mem[0], size[0]))
vpp_api.vac_free(mem[0])
return msg
def connect_internal(self, name, msg_handler, chroot_prefix, rx_qlen,
async):
pfx = chroot_prefix.encode() if chroot_prefix else ffi.NULL
rv = vpp_api.vac_connect(name.encode(), pfx, msg_handler, rx_qlen)
if rv != 0:
raise IOError(2, 'Connect failed')
self.connected = True
self.vpp_dictionary_maxid = vpp_api.vac_msg_table_max_index()
self._register_functions(async=async)
# Initialise control ping
crc = self.messages['control_ping']['crc']
self.control_ping_index = vpp_api.vac_get_msg_index(
('control_ping' + '_' + crc[2:]).encode())
self.control_ping_msgdef = self.messages['control_ping']
return rv
def connect(self, name, chroot_prefix=None, async=False, rx_qlen=32):
"""Attach to VPP.
name - the name of the client.
chroot_prefix - if VPP is chroot'ed, the prefix of the jail
async - if true, messages are sent without waiting for a reply
rx_qlen - the length of the VPP message receive queue between
client and server.
"""
msg_handler = vac_callback_sync if not async else vac_callback_async
return self.connect_internal(name, msg_handler, chroot_prefix, rx_qlen,
async)
def connect_sync(self, name, chroot_prefix=None, rx_qlen=32):
"""Attach to VPP in synchronous mode. Application must poll for events.
name - the name of the client.
chroot_prefix - if VPP is chroot'ed, the prefix of the jail
rx_qlen - the length of the VPP message receive queue between
client and server.
"""
return self.connect_internal(name, ffi.NULL, chroot_prefix, rx_qlen,
async=False)
def disconnect(self):
"""Detach from VPP."""
rv = vpp_api.vac_disconnect()
self.connected = False
return rv
def msg_handler_sync(self, msg):
"""Process an incoming message from VPP in sync mode.
The message may be a reply or it may be an async notification.
"""
r = self.decode_incoming_msg(msg)
if r is None:
return
# If we have a context, then use the context to find any
# request waiting for a reply
context = 0
if hasattr(r, 'context') and r.context > 0:
context = r.context
msgname = type(r).__name__
if context == 0:
# No context -> async notification that we feed to the callback
self.message_queue.put_nowait(r)
else:
raise IOError(2, 'RPC reply message received in event handler')
def decode_incoming_msg(self, msg):
if not msg:
self.logger.warning('vpp_api.read failed')
return
i, ci = self.header.unpack_from(msg, 0)
if self.id_names[i] == 'rx_thread_exit':
return
#
# Decode message and returns a tuple.
#
msgdef = self.id_msgdef[i]
if not msgdef:
raise IOError(2, 'Reply message undefined')
r = self.decode(msgdef, msg)
return r
def msg_handler_async(self, msg):
"""Process a message from VPP in async mode.
In async mode, all messages are returned to the callback.
"""
r = self.decode_incoming_msg(msg)
if r is None:
return
msgname = type(r).__name__
if self.event_callback:
self.event_callback(msgname, r)
def _control_ping(self, context):
"""Send a ping command."""
self._call_vpp_async(self.control_ping_index,
self.control_ping_msgdef,
context=context)
def _call_vpp(self, i, msgdef, multipart, **kwargs):
"""Given a message, send the message and await a reply.
msgdef - the message packing definition
i - the message type index
multipart - True if the message returns multiple
messages in return.
context - context number - chosen at random if not
supplied.
The remainder of the kwargs are the arguments to the API call.
The return value is the message or message array containing
the response. It will raise an IOError exception if there was
no response within the timeout window.
"""
if 'context' not in kwargs:
context = self.get_context()
kwargs['context'] = context
else:
context = kwargs['context']
kwargs['_vl_msg_id'] = i
b = self.encode(msgdef, kwargs)
vpp_api.vac_rx_suspend()
self._write(b)
if multipart:
# Send a ping after the request - we use its response
# to detect that we have seen all results.
self._control_ping(context)
# Block until we get a reply.
rl = []
while (True):
msg = self._read()
if not msg:
raise IOError(2, 'VPP API client: read failed')
r = self.decode_incoming_msg(msg)
msgname = type(r).__name__
            if not hasattr(r, 'context') or r.context == 0 or context != r.context:
self.message_queue.put_nowait(r)
continue
if not multipart:
rl = r
break
if msgname == 'control_ping_reply':
break
rl.append(r)
vpp_api.vac_rx_resume()
return rl
def _call_vpp_async(self, i, msgdef, **kwargs):
"""Given a message, send the message and await a reply.
msgdef - the message packing definition
i - the message type index
context - context number - chosen at random if not
supplied.
The remainder of the kwargs are the arguments to the API call.
"""
if 'context' not in kwargs:
context = self.get_context()
kwargs['context'] = context
else:
context = kwargs['context']
kwargs['_vl_msg_id'] = i
b = self.encode(msgdef, kwargs)
self._write(b)
def register_event_callback(self, callback):
"""Register a callback for async messages.
This will be called for async notifications in sync mode,
and all messages in async mode. In sync mode, replies to
requests will not come here.
callback is a fn(msg_type_name, msg_type) that will be
called when a message comes in. While this function is
executing, note that (a) you are in a background thread and
may wish to use threading.Lock to protect your datastructures,
and (b) message processing from VPP will stop (so if you take
a long while about it you may provoke reply timeouts or cause
VPP to fill the RX buffer). Passing None will disable the
callback.
"""
self.event_callback = callback
def thread_msg_handler(self):
"""Python thread calling the user registerd message handler.
This is to emulate the old style event callback scheme. Modern
clients should provide their own thread to poll the event
queue.
"""
while True:
r = self.message_queue.get()
msgname = type(r).__name__
if self.event_callback:
self.event_callback(msgname, r)
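if __name__ == '__main__':
    # Minimal usage sketch, illustrative only: it assumes a running VPP
    # instance, API files discoverable in one of the default locations, and
    # that the loaded definitions include a 'show_version' message (an
    # assumption, not something this module guarantees).
    vpp = VPP()
    vpp.connect('example-client')

    def example_event_handler(msg_type_name, msg):
        # Runs on the background event thread; keep the work here short.
        print('event:', msg_type_name)

    vpp.register_event_callback(example_event_handler)
    print(vpp.api.show_version())  # 'show_version' is an assumed API name
    vpp.disconnect()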
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
GetCourse.py
|
import requests as reqs
import json
import datetime as dt
import telebot
import threading
import os
import configparser as cp
import time
from telebot import types
from multiprocessing import Value
from timeloop import Timeloop
from datetime import timedelta
from requests.models import Response
# Initialization #
config = cp.ConfigParser(interpolation=None)
config.read('settings.conf')
# Save users and settings to the config file
def saveToConfig(users, file = 'settings.conf', config = config):
commitUsersToConfig(users)
with open(file, 'w') as configfile:
config.write(configfile)
# Write a single value to the config
def commitToConfig(section, key, value, config = config):
sect = str(section)
try:
config[sect][key] = str(value)
except KeyError:
config.add_section(sect)
config[sect][key] = str(value)
# Write the user structures to the config
def commitUsersToConfig(users, config = config):
listStr = ''
for id in usersList:
listStr += ',' + str(id)
if listStr != '':
commitToConfig('users', 'list', listStr[1:], config)
for user in users:
section = user['id']
for key in user:
commitToConfig(section, key, user[key], config)
# Read a value from the config
def getFromConfig(section, key, defaultValue, config = config):
try:
return config[section][key]
except KeyError:
return defaultValue
# Read a UTC date/time from the config
def getDateTimeFromConfig(section, key, defaultValue, config = config):
return dt.datetime.fromtimestamp(float(getFromConfig(section, key, defaultValue, config)))
# Preload user settings
def preloadUserSettings(usersList):
users = []
#
for user in usersList:
if user.isnumeric():
obj = {}
obj['id'] = user
obj['usdSub'] = getFromConfig(user, 'usdSub', 0)
obj['eurSub'] = getFromConfig(user, 'eurSub', 0)
obj['usdLimit'] = getFromConfig(user, 'usdLimit', 0.01)
obj['eurLimit'] = getFromConfig(user, 'eurLimit', 0.01)
obj['chart'] = getFromConfig(user, 'chart', 0)
users.append(obj)
return users
# Read the config
getCourseUrl = getFromConfig('main', 'url', '')
token = getFromConfig('telegram', 'token', 'Empty-Token')
usersList = getFromConfig('users', 'list', '').split(',')
lastDateTime = getDateTimeFromConfig('tmp', 'lastDateTime', 0)
lastUsdSell = float(getFromConfig('tmp', 'lastUsdSell', 0.01))
lastUsdBuy = float(getFromConfig('tmp', 'lastUsdBuy', 0.01))
lastEurSell = float(getFromConfig('tmp', 'lastEurSell', 0.01))
lastEurBuy = float(getFromConfig('tmp', 'lastEurBuy', 0.01))
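# Illustrative settings.conf layout matching the keys read above (every value
# below is a made-up placeholder, not a default shipped with this script):
#
#   [main]
#   url = https://bank.example/api/rates
#
#   [telegram]
#   token = 123456789:EXAMPLE-TOKEN
#
#   [users]
#   list = 111111,222222
#
#   [tmp]
#   lastDateTime = 0
#   lastUsdSell = 0.01
#   lastUsdBuy = 0.01
#   lastEurSell = 0.01
#   lastEurBuy = 0.01
#
#   [111111]
#   usdSub = 1
#   eurSub = 0
#   usdLimit = 0.01
#   eurLimit = 0.01
#   chart = 0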
bot = telebot.TeleBot(token)
usdSell = Value('d', lastUsdSell)
eurSell = Value('d', lastEurSell)
usdCsvDir = os.getcwd()+'\\USD\\'
if not os.path.exists(usdCsvDir):
os.mkdir(usdCsvDir)
eurCsvDir = os.getcwd()+'\\EUR\\'
if not os.path.exists(eurCsvDir):
os.mkdir(eurCsvDir)
rubleSign = '\u20bd'
eurSign = '\u20ac'
usdSign = '\u0024'
tempDateUsd = lastDateTime
tempDateEur = lastDateTime
usdRate = []
eurRate = []
users = preloadUserSettings(usersList)
print(usersList)
print(users)
setUsdLimitFlag = False
setEurLimitFlag = False
# Create inline keyboard buttons
getCourseButtonSet = types.InlineKeyboardMarkup()
currentCourseBtn = types.InlineKeyboardButton(text='Показать курс', callback_data='current')
getCourseButtonSet.add(currentCourseBtn)
subscribeButtonsSet = types.InlineKeyboardMarkup()
subscribeAllBtn = types.InlineKeyboardButton(text='всё', callback_data='sub_all')
subscribeUsdBtn = types.InlineKeyboardButton(text='доллары', callback_data='sub_usd')
subscribeEurBtn = types.InlineKeyboardButton(text='евро', callback_data='sub_eur')
subscribeButtonsSet.add(subscribeUsdBtn)
subscribeButtonsSet.add(subscribeEurBtn)
subscribeButtonsSet.add(subscribeAllBtn)
unsubscribeButtonsSet = types.InlineKeyboardMarkup()
unsubscribeAllBtn = types.InlineKeyboardButton(text='всего', callback_data='unsub_all')
unsubscribeUsdBtn = types.InlineKeyboardButton(text='долларов', callback_data='unsub_usd')
unsubscribeEurBtn = types.InlineKeyboardButton(text='евро', callback_data='unsub_eur')
unsubscribeButtonsSet.add(unsubscribeUsdBtn)
unsubscribeButtonsSet.add(unsubscribeEurBtn)
unsubscribeButtonsSet.add(unsubscribeAllBtn)
menuButtonsSet = types.InlineKeyboardMarkup()
menuSubBtn = types.InlineKeyboardButton(text='Подписка', callback_data='subscribe')
menuUnsubBtn = types.InlineKeyboardButton(text='Отписка', callback_data='unsubscribe')
menuInfoBtn = types.InlineKeyboardButton(text='Информирование', callback_data='inform')
menuExitBtn = types.InlineKeyboardButton(text='Выход из настроек', callback_data='exit_settings')
menuButtonsSet.add(menuSubBtn)
menuButtonsSet.add(menuUnsubBtn)
menuButtonsSet.add(menuInfoBtn)
menuButtonsSet.add(menuExitBtn)
infoButtonsSet = types.InlineKeyboardMarkup()
infoLimitBtn = types.InlineKeyboardButton(text='Установка предела', callback_data='limit')
infoGraphBtn = types.InlineKeyboardButton(text='График в конце дня', callback_data='graph')
infoButtonsSet.add(infoLimitBtn)
infoButtonsSet.add(infoGraphBtn)
limitButtonsSet = types.InlineKeyboardMarkup()
limitSetBtn = types.InlineKeyboardButton(text='Установить предел', callback_data='limit_set')
limitCancelBtn = types.InlineKeyboardButton(text='Отменить предел', callback_data='limit_cancel')
limitButtonsSet.add(limitSetBtn)
limitButtonsSet.add(limitCancelBtn)
graphButtonsSet = types.InlineKeyboardMarkup()
graphSetBtn = types.InlineKeyboardButton(text='Формировать график', callback_data='graph_set')
graphCancelBtn = types.InlineKeyboardButton(text='Отменить формирование', callback_data='graph_cancel')
graphButtonsSet.add(graphSetBtn)
graphButtonsSet.add(graphCancelBtn)
# End of initialization #
# Check whether the message contains any of the given substrings
def findArrayInMessage(substrings, message):
for item in substrings:
if item in message:
return True
return False
# Save the exchange rate to a CSV file
def saveCourseToCsv(currency, date, data):
raw = ''
csvPath=''
for item in data:
raw += item + ','
raw += '\n'
if currency == 'USD':
csvPath = usdCsvDir + date + '.csv'
if currency == 'EUR':
csvPath = eurCsvDir + date + '.csv'
if not os.path.exists(csvPath):
raw = 'Текущее время,Актуальность,Продажа,Покупка\n' + raw
csv = open(csvPath, 'a')
csv.write(raw)
csv.close()
# Currency converter
def convert(raw, course, currencyName):
array = raw.split()
if len(array) != 2:
return 'Неверный формат данных.\n Попробуйте отправить, например, '"'100 rub'"'\n'
return format(float(array[0]) * course, '.2f') + ' ' + currencyName + '\n'
# Save a user setting
def saveUserSettings(id, key, value, users = users):
for user in users:
if user['id'] == str(id):
user[key] = str(value)
print(users)
return
users.append({'id': str(id), key: value})
print(users)
# Load a user setting
def loadUserSettings(id, key, default=0, users = users):
for user in users:
if user['id'] == str(id):
try:
return user[key]
except KeyError:
return default
return default
# Create a new user
def createNewUser(id, users = users, list = usersList):
obj = {}
obj['id'] = id
obj['usdSub'] = 0
obj['eurSub'] = 0
obj['usdLimit'] = 0.01
obj['eurLimit'] = 0.01
obj['chart'] = 0
users.append(obj)
list.append(id)
# Convert text to a floating-point number
def toFloat(str):
num = str.replace(',', '.')
try:
return float(num)
except ValueError:
return 0.0
# Format an exchange rate as text
def courseToText(course, rawData):
sell = lastUsdSell
buy = lastUsdBuy
if len(rawData) == 4:
sell = rawData[2]
buy = rawData[3]
elif course == 'EUR':
sell = lastEurSell
buy = lastEurBuy
result = course + ': \n'
result += 'Продажа: ' + str(sell) + ' ' + rubleSign + '\n'
result += 'Покупка: ' + str(buy) + ' ' + rubleSign
return result
# Find a user by id
def findUserById(id, users):
for item in users:
if item['id'] == str(id):
return item
# Check the lower rate limits
def checkLimits(id, rawData):
result = {}
#
usdSub = loadUserSettings(id, 'usdSub', False)
eurSub = loadUserSettings(id, 'eurSub', False)
if usdSub == '1' and rawData.get('USD', False) and len(rawData['USD']) == 4:
sell = float(rawData['USD'][2])
sellLimit = float(loadUserSettings(id, 'usdLimit'))
if sell <= sellLimit:
result['USD'] = str(rawData['USD'][2])
            findUserById(id, users)['usdLimit'] = format(sell - 0.01, '.2f')
if eurSub == '1' and rawData.get('EUR', False) and len(rawData['EUR']) == 4:
sell = float(rawData['EUR'][2])
sellLimit = float(loadUserSettings(id, 'eurLimit'))
if sell <= sellLimit:
result['EUR'] = str(rawData['EUR'][2])
            findUserById(str(id), users)['eurLimit'] = format(sell - 0.01, '.2f')
return result
# Fetch the current exchange rates
def getCourse():
global tempDateUsd
global tempDateEur
global usdRate
global eurRate
global lastDateTime
global lastUsdSell
global lastUsdBuy
global lastEurSell
global lastEurBuy
    # Request the exchange rates
while True:
try:
resp = reqs.get(getCourseUrl)
break
        except reqs.exceptions.ConnectionError:
time.sleep(10)
data = resp.json()
    # Parse the received JSON and store the results in usdRate and eurRate
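    # The response is expected to look roughly like the sketch below (field
    # names are taken from the accesses that follow; the overall shape is an
    # assumption about the bank's API, and the values are placeholders):
    #
    #   {"GroupedRates": [
    #       {"MoneyRates": [
    #           {"FromCurrency": {"Code": "USD"},
    #            "BankSellAt": 92.50, "BankBuyAt": 88.00,
    #            "StartDate": "2021-01-01T10:00:00"}]},
    #       ...]}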
usd = 'USD'
eur = 'EUR'
breakCount = 2
currencies = data['GroupedRates']
for i in range(len(currencies)):
moneyRate = currencies[i]['MoneyRates']
fromCurrency = moneyRate[0]['FromCurrency']
fromCode = fromCurrency['Code']
sell = moneyRate[0]['BankSellAt']
buy = moneyRate[0]['BankBuyAt']
date = dt.datetime.fromisoformat(moneyRate[0]['StartDate'])
if fromCode == usd and tempDateUsd < date:
with usdSell.get_lock():
usdSell.value = sell
usdRate.clear()
usdRate.append(dt.datetime.now().strftime('%X'))
usdRate.append(date.strftime('%X'))
usdRate.append(str(sell))
usdRate.append(str(buy))
saveCourseToCsv(fromCode, date.strftime('%Y-%m-%d'), usdRate)
tempDateUsd = date
lastDateTime = date
lastUsdSell = sell
lastUsdBuy = buy
breakCount -= 1
if fromCode == eur and tempDateEur < date:
with eurSell.get_lock():
eurSell.value = sell
eurRate.clear()
eurRate.append(dt.datetime.now().strftime('%X'))
eurRate.append(date.strftime('%X'))
eurRate.append(str(sell))
eurRate.append(str(buy))
saveCourseToCsv(fromCode, date.strftime('%Y-%m-%d'), eurRate)
tempDateEur = date
lastDateTime = date
lastEurSell = sell
lastEurBuy = buy
breakCount -= 1
if breakCount == 0:
commitToConfig('tmp', 'lastDateTime', str(lastDateTime.timestamp()))
commitToConfig('tmp', 'lastUsdSell', str(lastUsdSell))
commitToConfig('tmp', 'lastUsdBuy', str(lastUsdBuy))
commitToConfig('tmp', 'lastEurSell', str(lastEurSell))
commitToConfig('tmp', 'lastEurBuy', str(lastEurBuy))
break
result = {usd: usdRate, eur: eurRate}
return result
@bot.message_handler(commands=['start'])
def get_course(message):
id = message.chat.id
isKnownId = False
for knownId in usersList:
if str(id) == knownId:
isKnownId = True
break
if isKnownId == False:
createNewUser(id)
bot.send_message(id, 'Настройка работы:', reply_markup=menuButtonsSet)
# Handle incoming text messages
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
global setUsdLimitFlag
global setEurLimitFlag
#
factor = 0.0
response = ''
#
id = message.from_user.id
#
if setUsdLimitFlag:
saveUserSettings(id, 'usdLimit', toFloat(message.text))
setUsdLimitFlag = False
if setEurLimitFlag:
response = 'Установите нижний лимит для евро:'
bot.send_message(id, response)
else:
response = 'Установка лимитов завершена.'
bot.send_message(id, response, reply_markup=menuButtonsSet)
elif setEurLimitFlag:
saveUserSettings(id, 'eurLimit', toFloat(message.text))
setEurLimitFlag = False
response = 'Установка лимитов завершена.'
bot.send_message(id, response, reply_markup=menuButtonsSet)
elif findArrayInMessage(['rub', 'rur', 'руб'], message.text.lower()):
factor = 1/usdSell.value
bot.send_message(id, convert(message.text, factor, usdSign))
factor = 1/eurSell.value
bot.send_message(id, convert(message.text, factor, eurSign), reply_markup=getCourseButtonSet)
elif findArrayInMessage(['usd', 'доллар'], message.text.lower()):
factor = usdSell.value
bot.send_message(id, convert(message.text, factor, rubleSign), reply_markup=getCourseButtonSet)
elif findArrayInMessage(['eur', 'евро'], message.text.lower()):
factor = eurSell.value
bot.send_message(id, convert(message.text, factor, rubleSign), reply_markup=getCourseButtonSet)
else:
bot.send_message(id, 'Ошибка!', reply_markup=getCourseButtonSet)
# Handler for inline button presses
@bot.callback_query_handler(func=lambda call: True)
def callback_worker(call):
global setUsdLimitFlag
global setEurLimitFlag
#
id = call.message.chat.id
if call.data == 'current':
data = getCourse()
response = 'Подпишитесь хотя бы на одну валюту!'
buttonsSet = subscribeButtonsSet
isUsdResponse = False
print(loadUserSettings(id, 'usdSub'))
print(loadUserSettings(id, 'eurSub'))
if loadUserSettings(id, 'usdSub') == '1':
response = courseToText('USD', data['USD'])
isUsdResponse = True
buttonsSet = getCourseButtonSet
if loadUserSettings(id, 'eurSub') == '1':
if isUsdResponse:
response += '\n\n'
response += courseToText('EUR', data['EUR'])
else:
response = courseToText('EUR', data['EUR'])
buttonsSet = getCourseButtonSet
bot.send_message(id, response, reply_markup=buttonsSet)
elif call.data == 'subscribe':
bot.send_message(id, 'Подписаться на ...', reply_markup=subscribeButtonsSet)
elif call.data == 'unsubscribe':
bot.send_message(id, 'Отписаться от ...', reply_markup=unsubscribeButtonsSet)
elif call.data == 'inform':
bot.send_message(id, 'Информирование:', reply_markup=infoButtonsSet)
elif call.data == 'sub_all':
response = 'Вы подписаны на все валюты.'
saveUserSettings(id, 'usdSub', 1)
saveUserSettings(id, 'eurSub', 1)
bot.send_message(id, response, reply_markup=menuButtonsSet)
elif call.data == 'sub_usd':
response = 'Вы подписаны на курс доллара.'
saveUserSettings(id, 'usdSub', 1)
bot.send_message(id, response, reply_markup=menuButtonsSet)
elif call.data == 'sub_eur':
response = 'Вы подписаны на курс евро.'
saveUserSettings(id, 'eurSub', 1)
bot.send_message(id, response, reply_markup=menuButtonsSet)
elif call.data == 'unsub_all':
response = 'Вы отписались от всех валют.'
saveUserSettings(id, 'usdSub', 0)
saveUserSettings(id, 'eurSub', 0)
bot.send_message(id, response, reply_markup=menuButtonsSet)
elif call.data == 'unsub_usd':
response = 'Вы отписались от курса доллара.'
saveUserSettings(id, 'usdSub', 0)
bot.send_message(id, response, reply_markup=menuButtonsSet)
elif call.data == 'unsub_eur':
response = 'Вы отписались от курса евро.'
saveUserSettings(id, 'eurSub', 0)
bot.send_message(id, response, reply_markup=menuButtonsSet)
elif call.data == 'limit':
        setUsdLimitFlag = loadUserSettings(id, 'usdSub', 0) in (1, '1')
        setEurLimitFlag = loadUserSettings(id, 'eurSub', 0) in (1, '1')
response = 'Установите нижний лимит для доллара:'
if not setUsdLimitFlag:
response = 'Установите нижний лимит для евро:'
bot.send_message(id, response)
elif call.data == 'graph':
response = 'Слать график в конце дня:'
bot.send_message(id, response, reply_markup=graphButtonsSet)
elif call.data == 'limit_set':
response='Включено уведомление при достижении нижнего лимита.'
saveUserSettings(id, 'usdLimit', 1)
saveUserSettings(id, 'eurSub', 1)
bot.send_message(id, response, reply_markup=menuButtonsSet)
elif call.data == 'limit_cancel':
        response='Уведомления по валютам отключены.'
saveUserSettings(id, 'usdLimit', 0)
saveUserSettings(id, 'eurSub', 0)
bot.send_message(id, response, reply_markup=menuButtonsSet)
elif call.data == 'graph_set':
response = 'График будет формироваться в конце дня.'
saveUserSettings(id, 'chart', 1)
bot.send_message(id, response, reply_markup=menuButtonsSet)
elif call.data == 'graph_cancel':
response = 'Формирование графика отменено.'
saveUserSettings(id, 'chart', 0)
bot.send_message(id, response, reply_markup=menuButtonsSet)
elif call.data == 'exit_settings':
response = 'Настройка завершена.'
bot.send_message(id, response, reply_markup=getCourseButtonSet)
# Start the bot
def start_bot_polling():
print('polling start\n')
bot.infinity_polling(interval=0)
# __run__
getCourse()
t = threading.Thread(target=start_bot_polling, daemon=True)
t.start()
# Start periodic polling of the exchange rates
tl = Timeloop()
@tl.job(interval=timedelta(seconds=300))
def auto_send_message():
rawData = getCourse()
for user in users:
id = user['id']
result = checkLimits(id, rawData)
if len(result) > 0:
for cur in result:
response = 'Выгодный курс ' + cur + ':\n'
response += 'Продажа: ' + result[cur] + ' ' + rubleSign
bot.send_message(id, response)
tl.start(block=True)
# Script shutdown
bot.stop_polling()
t.join()
print('polling end\n')
saveToConfig(users)
|
test_async.py
|
import threading
import time
import unittest
from common import TestCase
import pyuv
class AsyncTest(TestCase):
def test_async1(self):
self.async_cb_called = 0
self.prepare_cb_called = 0
def async_cb(async):
with self.lock:
self.async_cb_called += 1
n = self.async_cb_called
if n == 3:
self.async.close()
self.prepare.close()
def prepare_cb(prepare):
if self.prepare_cb_called:
return
self.prepare_cb_called += 1
self.thread = threading.Thread(target=thread_cb)
self.thread.start()
def thread_cb():
while True:
with self.lock:
n = self.async_cb_called
if n == 3:
break
self.async.send()
self.async = pyuv.Async(self.loop, async_cb)
self.prepare = pyuv.Prepare(self.loop)
self.prepare.start(prepare_cb)
self.lock = threading.Lock()
self.loop.run()
self.assertEqual(self.async_cb_called, 3)
self.assertEqual(self.prepare_cb_called, 1)
def test_async2(self):
self.prepare_cb_called = 0
self.check_cb_called = 0
def prepare_cb(prepare):
self.prepare_cb_called += 1
self.thread = threading.Thread(target=thread_cb)
self.thread.start()
def check_cb(check):
self.check_cb_called += 1
self.loop.stop()
def thread_cb():
time.sleep(0.01)
self.async.send()
self.async = pyuv.Async(self.loop)
self.prepare = pyuv.Prepare(self.loop)
self.prepare.start(prepare_cb)
self.check = pyuv.Check(self.loop)
self.check.start(check_cb)
self.loop.run()
self.assertEqual(self.prepare_cb_called, 1)
self.assertEqual(self.check_cb_called, 1)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
test_sentry.py
|
import multiprocessing
import numpy as np
import pytest
import sentry_sdk
import sentry_sdk.serializer
import sentry_sdk.utils
from sentry_sdk.client import Client
from sentry_sdk.hub import Hub
from sentry_sdk.serializer import serialize
from PartSeg.common_backend.base_argparser import safe_repr
from PartSegCore.analysis.batch_processing.batch_backend import prepare_error_data
def test_message_clip(monkeypatch):
message = "a" * 5000
assert len(sentry_sdk.utils.strip_string(message).value) == 512
monkeypatch.setattr(sentry_sdk.utils, "MAX_STRING_LENGTH", 10 ** 4)
assert len(sentry_sdk.utils.strip_string(message)) == 5000
def test_sentry_serialize_clip(monkeypatch):
message = "a" * 5000
try:
raise ValueError("eeee")
except ValueError as e:
event, hint = sentry_sdk.utils.event_from_exception(e)
event["message"] = message
cliped = serialize(event)
assert len(cliped["message"]) == 512
monkeypatch.setattr(sentry_sdk.utils, "MAX_STRING_LENGTH", 10 ** 4)
cliped = serialize(event)
assert len(cliped["message"]) == 5000
def test_sentry_report(monkeypatch):
message = "a" * 5000
happen = [False]
def check_event(event):
happen[0] = True
assert len(event["message"]) == 512
assert len(event["extra"]["lorem"]) == 512
try:
raise ValueError("eeee")
except ValueError as e:
event, hint = sentry_sdk.utils.event_from_exception(e)
event["message"] = message
client = Client("https://aaa@test.pl/77")
Hub.current.bind_client(client)
monkeypatch.setattr(client.transport, "capture_event", check_event)
with sentry_sdk.push_scope() as scope:
scope.set_extra("lorem", message)
sentry_sdk.capture_event(event, hint=hint)
assert happen[0] is True
def test_sentry_report_no_clip(monkeypatch):
message = "a" * 5000
happen = [False]
monkeypatch.setattr(sentry_sdk.utils, "MAX_STRING_LENGTH", 10 ** 4)
def check_event(event):
happen[0] = True
assert len(event["message"]) == 5000
assert len(event["extra"]["lorem"]) == 5000
try:
raise ValueError("eeee")
except ValueError as e:
event, hint = sentry_sdk.utils.event_from_exception(e)
event["message"] = message
client = Client("https://aaa@test.pl/77")
Hub.current.bind_client(client)
monkeypatch.setattr(client.transport, "capture_event", check_event)
with sentry_sdk.push_scope() as scope:
scope.set_extra("lorem", message)
event_id = sentry_sdk.capture_event(event, hint=hint)
assert event_id is not None
assert happen[0] is True
def exception_fun(num: int):
if num < 1:
raise ValueError("test")
exception_fun(num - 1)
def executor_fun(que: multiprocessing.Queue):
try:
exception_fun(10)
except ValueError as e:
ex, (event, tr) = prepare_error_data(e)
que.put((ex, event, tr))
def test_exception_pass(monkeypatch):
def check_event(event):
assert len(event["exception"]["values"][0]["stacktrace"]["frames"]) == 12
message_queue = multiprocessing.Queue()
p = multiprocessing.Process(target=executor_fun, args=(message_queue,))
p.start()
p.join()
ex, event, tr = message_queue.get()
assert isinstance(ex, ValueError)
assert isinstance(event, dict)
client = Client("https://aaa@test.pl/77")
Hub.current.bind_client(client)
monkeypatch.setattr(client.transport, "capture_event", check_event)
event_id = sentry_sdk.capture_event(event)
assert event_id is not None
@pytest.mark.parametrize("dtype", [np.uint8, np.int8, np.float32])
def test_numpy_array_serialize(monkeypatch, dtype):
arr = np.zeros((10, 10), dtype=dtype)
arr[1, 5] = 10
monkeypatch.setattr(sentry_sdk.serializer, "safe_repr", safe_repr)
res = serialize(arr)
assert res == f"array(size={arr.size}, shape={arr.shape}, dtype={arr.dtype}, min={arr.min()}, max={arr.max()})"
|
login.py
|
import os, sys, time, re, io
import threading
import json, xml.dom.minidom
import copy, pickle, random
import traceback, logging
import requests
from pyqrcode import QRCode
from .. import config, utils
from ..returnvalues import ReturnValue
from ..storage.templates import wrap_user_dict
from .contact import update_local_chatrooms, update_local_friends
from .messages import produce_msg
logger = logging.getLogger('itchat')
def load_login(core):
core.login = login
core.get_QRuuid = get_QRuuid
core.get_QR = get_QR
core.check_login = check_login
core.web_init = web_init
core.show_mobile_login = show_mobile_login
core.start_receiving = start_receiving
core.get_msg = get_msg
core.logout = logout
def login(self, enableCmdQR=False, picDir=None, qrCallback=None,
loginCallback=None, exitCallback=None):
if self.alive or self.isLogging:
logger.warning('itchat has already logged in.')
return
self.isLogging = True
while self.isLogging:
uuid = push_login(self)
if uuid:
qrStorage = io.BytesIO()
else:
logger.info('Getting uuid of QR code.')
while not self.get_QRuuid():
time.sleep(1)
logger.info('Downloading QR code.')
qrStorage = self.get_QR(enableCmdQR=enableCmdQR,
picDir=picDir, qrCallback=qrCallback)
logger.info('Please scan the QR code to log in.')
isLoggedIn = False
while not isLoggedIn:
status = self.check_login()
if hasattr(qrCallback, '__call__'):
qrCallback(uuid=self.uuid, status=status, qrcode=qrStorage.getvalue())
if status == '200':
isLoggedIn = True
elif status == '201':
if isLoggedIn is not None:
logger.info('Please press confirm on your phone.')
isLoggedIn = None
elif status != '408':
break
if isLoggedIn:
break
logger.info('Log in time out, reloading QR code.')
else:
return # log in process is stopped by user
logger.info('Loading the contact, this may take a little while.')
self.web_init()
self.show_mobile_login()
self.get_contact(True)
if hasattr(loginCallback, '__call__'):
r = loginCallback()
else:
utils.clear_screen()
if os.path.exists(picDir or config.DEFAULT_QR):
os.remove(picDir or config.DEFAULT_QR)
        logger.info('Logged in successfully as %s' % self.storageClass.nickName)
self.start_receiving(exitCallback)
self.isLogging = False
def push_login(core):
cookiesDict = core.s.cookies.get_dict()
if 'wxuin' in cookiesDict:
url = '%s/cgi-bin/mmwebwx-bin/webwxpushloginurl?uin=%s' % (
config.BASE_URL, cookiesDict['wxuin'])
headers = { 'User-Agent' : config.USER_AGENT }
r = core.s.get(url, headers=headers).json()
if 'uuid' in r and r.get('ret') in (0, '0'):
core.uuid = r['uuid']
return r['uuid']
return False
def get_QRuuid(self):
url = '%s/jslogin' % config.BASE_URL
params = {
'appid' : 'wx782c26e4c19acffb',
'fun' : 'new', }
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)";'
data = re.search(regx, r.text)
if data and data.group(1) == '200':
self.uuid = data.group(2)
return self.uuid
def get_QR(self, uuid=None, enableCmdQR=False, picDir=None, qrCallback=None):
uuid = uuid or self.uuid
picDir = picDir or config.DEFAULT_QR
qrStorage = io.BytesIO()
qrCode = QRCode('https://login.weixin.qq.com/l/' + uuid)
qrCode.png(qrStorage, scale=10)
if hasattr(qrCallback, '__call__'):
qrCallback(uuid=uuid, status='0', qrcode=qrStorage.getvalue())
else:
if enableCmdQR:
utils.print_cmd_qr(qrCode.text(1), enableCmdQR=enableCmdQR)
else:
with open(picDir, 'wb') as f:
f.write(qrStorage.getvalue())
utils.print_qr(picDir)
return qrStorage
def check_login(self, uuid=None):
uuid = uuid or self.uuid
url = '%s/cgi-bin/mmwebwx-bin/login' % config.BASE_URL
localTime = int(time.time())
params = 'loginicon=true&uuid=%s&tip=0&r=%s&_=%s' % (
uuid, localTime / 1579, localTime)
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.code=(\d+)'
data = re.search(regx, r.text)
if data and data.group(1) == '200':
process_login_info(self, r.text)
return '200'
elif data:
return data.group(1)
else:
return '400'
def process_login_info(core, loginContent):
    ''' when login finishes (after the QR code is scanned)
* syncUrl and fileUploadingUrl will be fetched
* deviceid and msgid will be generated
* skey, wxsid, wxuin, pass_ticket will be fetched
'''
regx = r'window.redirect_uri="(\S+)";'
core.loginInfo['url'] = re.search(regx, loginContent).group(1)
headers = { 'User-Agent' : config.USER_AGENT }
r = core.s.get(core.loginInfo['url'], headers=headers, allow_redirects=False)
core.loginInfo['url'] = core.loginInfo['url'][:core.loginInfo['url'].rfind('/')]
for indexUrl, detailedUrl in (
("wx.qq.com" , ("file.wx.qq.com", "webpush.wx.qq.com")),
("wx2.qq.com" , ("file.wx2.qq.com", "webpush.wx2.qq.com")),
("wx8.qq.com" , ("file.wx8.qq.com", "webpush.wx8.qq.com")),
("qq.com" , ("file.wx.qq.com", "webpush.wx.qq.com")),
("web2.wechat.com" , ("file.web2.wechat.com", "webpush.web2.wechat.com")),
("wechat.com" , ("file.web.wechat.com", "webpush.web.wechat.com"))):
fileUrl, syncUrl = ['https://%s/cgi-bin/mmwebwx-bin' % url for url in detailedUrl]
if indexUrl in core.loginInfo['url']:
core.loginInfo['fileUrl'], core.loginInfo['syncUrl'] = \
fileUrl, syncUrl
break
else:
core.loginInfo['fileUrl'] = core.loginInfo['syncUrl'] = core.loginInfo['url']
core.loginInfo['deviceid'] = 'e' + repr(random.random())[2:17]
core.loginInfo['BaseRequest'] = {}
for node in xml.dom.minidom.parseString(r.text).documentElement.childNodes:
if node.nodeName == 'skey':
core.loginInfo['skey'] = core.loginInfo['BaseRequest']['Skey'] = node.childNodes[0].data
elif node.nodeName == 'wxsid':
core.loginInfo['wxsid'] = core.loginInfo['BaseRequest']['Sid'] = node.childNodes[0].data
elif node.nodeName == 'wxuin':
core.loginInfo['wxuin'] = core.loginInfo['BaseRequest']['Uin'] = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
core.loginInfo['pass_ticket'] = core.loginInfo['BaseRequest']['DeviceID'] = node.childNodes[0].data
def web_init(self):
url = '%s/webwxinit?r=%s' % (self.loginInfo['url'], int(time.time()))
data = { 'BaseRequest': self.loginInfo['BaseRequest'], }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT, }
r = self.s.post(url, data=json.dumps(data), headers=headers)
dic = json.loads(r.content.decode('utf-8', 'replace'))
# deal with login info
utils.emoji_formatter(dic['User'], 'NickName')
self.loginInfo['InviteStartCount'] = int(dic['InviteStartCount'])
self.loginInfo['User'] = wrap_user_dict(utils.struct_friend_info(dic['User']))
self.memberList.append(self.loginInfo['User'])
self.loginInfo['SyncKey'] = dic['SyncKey']
self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
for item in dic['SyncKey']['List']])
self.storageClass.userName = dic['User']['UserName']
self.storageClass.nickName = dic['User']['NickName']
# deal with contact list returned when init
contactList = dic.get('ContactList', [])
chatroomList, otherList = [], []
for m in contactList:
if m['Sex'] != 0:
otherList.append(m)
elif '@@' in m['UserName']:
m['MemberList'] = [] # don't let dirty info pollute the list
chatroomList.append(m)
elif '@' in m['UserName']:
# mp will be dealt in update_local_friends as well
otherList.append(m)
if chatroomList:
update_local_chatrooms(self, chatroomList)
if otherList:
update_local_friends(self, otherList)
return dic
def show_mobile_login(self):
url = '%s/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % (
self.loginInfo['url'], self.loginInfo['pass_ticket'])
data = {
'BaseRequest' : self.loginInfo['BaseRequest'],
'Code' : 3,
'FromUserName' : self.storageClass.userName,
'ToUserName' : self.storageClass.userName,
'ClientMsgId' : int(time.time()), }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT, }
r = self.s.post(url, data=json.dumps(data), headers=headers)
return ReturnValue(rawResponse=r)
def start_receiving(self, exitCallback=None, getReceivingFnOnly=False):
self.alive = True
def maintain_loop():
retryCount = 0
while self.alive:
try:
i = sync_check(self)
if i is None:
self.alive = False
elif i == '0':
pass
else:
msgList, contactList = self.get_msg()
if msgList:
msgList = produce_msg(self, msgList)
for msg in msgList:
self.msgList.put(msg)
if contactList:
chatroomList, otherList = [], []
for contact in contactList:
if '@@' in contact['UserName']:
chatroomList.append(contact)
else:
otherList.append(contact)
chatroomMsg = update_local_chatrooms(self, chatroomList)
chatroomMsg['User'] = self.loginInfo['User']
self.msgList.put(chatroomMsg)
update_local_friends(self, otherList)
retryCount = 0
except:
retryCount += 1
logger.error(traceback.format_exc())
if self.receivingRetryCount < retryCount:
self.alive = False
else:
time.sleep(1)
self.logout()
if hasattr(exitCallback, '__call__'):
exitCallback()
else:
logger.info('LOG OUT!')
if getReceivingFnOnly:
return maintain_loop
else:
maintainThread = threading.Thread(target=maintain_loop)
maintainThread.setDaemon(True)
maintainThread.start()
def sync_check(self):
url = '%s/synccheck' % self.loginInfo.get('syncUrl', self.loginInfo['url'])
params = {
'r' : int(time.time() * 1000),
'skey' : self.loginInfo['skey'],
'sid' : self.loginInfo['wxsid'],
'uin' : self.loginInfo['wxuin'],
'deviceid' : self.loginInfo['deviceid'],
'synckey' : self.loginInfo['synckey'],
'_' : int(time.time() * 1000),}
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers, timeout=config.TIMEOUT)
regx = r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}'
pm = re.search(regx, r.text)
if pm is None or pm.group(1) != '0':
logger.debug('Unexpected sync check result: %s' % r.text)
return None
return pm.group(2)
def get_msg(self):
url = '%s/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
self.loginInfo['url'], self.loginInfo['wxsid'],
self.loginInfo['skey'],self.loginInfo['pass_ticket'])
data = {
'BaseRequest' : self.loginInfo['BaseRequest'],
'SyncKey' : self.loginInfo['SyncKey'],
'rr' : ~int(time.time()), }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT }
r = self.s.post(url, data=json.dumps(data), headers=headers, timeout=config.TIMEOUT)
dic = json.loads(r.content.decode('utf-8', 'replace'))
if dic['BaseResponse']['Ret'] != 0: return None, None
self.loginInfo['SyncKey'] = dic['SyncCheckKey']
self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
for item in dic['SyncCheckKey']['List']])
return dic['AddMsgList'], dic['ModContactList']
def logout(self):
if self.alive:
url = '%s/webwxlogout' % self.loginInfo['url']
params = {
'redirect' : 1,
'type' : 1,
'skey' : self.loginInfo['skey'], }
headers = { 'User-Agent' : config.USER_AGENT }
self.s.get(url, params=params, headers=headers)
self.alive = False
self.isLogging = False
self.s.cookies.clear()
del self.chatroomList[:]
del self.memberList[:]
del self.mpList[:]
return ReturnValue({'BaseResponse': {
'ErrMsg': 'logout successfully.',
'Ret': 0, }})
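# Hedged usage sketch (not part of the original module), using only methods
# defined above: once web_init() has populated loginInfo, the caller notifies
# the server, starts the daemon receiving thread, and consumes messages from
# the msgList queue until logout() (or a failed sync_check) clears `alive`.
# `core` and `handle` are illustrative names, not part of this file.
#
#   core.show_mobile_login()
#   core.start_receiving(exitCallback=lambda: print('logged out'))
#   while core.alive:
#       handle(core.msgList.get())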
|
kvstore_client_tests.py
|
#!/usr/bin/env python
#
# Copyright (c) 2014-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from openr.utils import socket
from openr.clients import kvstore_client
from openr.KvStore import ttypes as kv_store_types
import zmq
import unittest
from multiprocessing import Process
value1 = kv_store_types.Value()
value1.originatorId = 'san jose 1'
value2 = kv_store_types.Value()
value2.originatorId = 'san jose 2'
value3 = kv_store_types.Value()
value3.originatorId = 'san jose 3'
value4 = kv_store_types.Value()
value4.originatorId = 'san jose 4'
value5 = kv_store_types.Value()
value5.originatorId = 'san francisco 1'
kv_store_cache = {'san jose 1': value1, 'san jose 2': value2,
'san jose 3': value3, 'san jose 4': value4,
'san francisco 1': value5}
class KVStore():
def __init__(self, zmq_ctx, url):
self._kv_store_server_socket = socket.Socket(zmq_ctx, zmq.REP)
self._kv_store_server_socket.bind(url)
self._kv_store = kv_store_cache
def _get_keys(self, request):
keys = request.keyGetParams.keys
publication = kv_store_types.Publication({})
for key in keys:
if key in self._kv_store:
publication.keyVals[key] = self._kv_store[key]
return publication
def _dump_all_with_prefix(self, request):
prefix = request.keyDumpParams.prefix
publication = kv_store_types.Publication({})
for key in self._kv_store:
if key.startswith(prefix):
publication.keyVals[key] = self._kv_store[key]
return publication
def process_request(self):
request = self._kv_store_server_socket.recv_thrift_obj(kv_store_types.Request)
options = {kv_store_types.Command.KEY_GET: self._get_keys,
kv_store_types.Command.KEY_DUMP: self._dump_all_with_prefix}
publication = options[request.cmd](request)
self._kv_store_server_socket.send_thrift_obj(publication)
class TestKVStoreClient(unittest.TestCase):
def test(self):
num_req = 5
def _kv_store_server():
kv_store_server = KVStore(zmq.Context(), "tcp://*:5000")
for _ in range(num_req):
kv_store_server.process_request()
def _kv_store_client():
kv_store_client_inst = kvstore_client.KvStoreClient(
zmq.Context(), "tcp://localhost:5000")
publication = kv_store_client_inst.get_keys(
['san jose 1', 'san francisco 1', 'virginia'])
key_values = publication.keyVals
self.assertEqual(
key_values, {'san jose 1': value1, 'san francisco 1': value5})
publication = kv_store_client_inst.dump_all_with_prefix('san jose 3')
key_values = publication.keyVals
self.assertEqual(key_values, {'san jose 3': value3})
publication = kv_store_client_inst.dump_all_with_prefix('san jose')
key_values = publication.keyVals
self.assertEqual(len(key_values), 4)
publication = kv_store_client_inst.dump_all_with_prefix('')
key_values = publication.keyVals
self.assertEqual(len(key_values), 5)
publication = kv_store_client_inst.dump_all_with_prefix('virginia')
key_values = publication.keyVals
self.assertEqual(len(key_values), 0)
p = Process(target=_kv_store_server)
p.start()
q = Process(target=_kv_store_client)
q.start()
p.join()
q.join()
|
log.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import io
import logging
import os
import re
import sys
import threading
import time
from types import TracebackType
from typing import Iterable, Optional, Pattern, Sequence
PERFORMANCE: int = 15
PROMPT: int = 50
SUCCESS: int = 60
LOG: logging.Logger = logging.getLogger(__name__)
stdout: io.StringIO = io.StringIO(newline="")
__handler: Optional["TimedStreamHandler"] = None
class Color:
YELLOW: str = "\033[33m"
RED: str = "\033[31m"
GREEN: str = "\033[32m"
class Format:
BOLD: str = "\033[1m"
CLEAR_LINE: str = "\x1b[0G\x1b[K"
CLEAR: str = "\033[0m"
TRUNCATE_OVERFLOW: str = "\033[?7l"
WRAP_OVERFLOW: str = "\033[?7h"
NEWLINE: str = "\n"
CURSOR_UP_LINE: str = "\x1b[1A"
HIDE_CURSOR: str = "\x1b[?25l"
SHOW_CURSOR: str = "\x1b[?25h"
class Character:
LAMBDA: str = "ƛ"
class SectionFormatter(logging.Formatter):
def __init__(self) -> None:
super(SectionFormatter, self).__init__("%(asctime)s %(levelname)s %(message)s")
def format(self, record: logging.LogRecord) -> str:
formatted = super(SectionFormatter, self).format(record)
return re.sub(r"DEBUG \[(.*)\]", r"\1", formatted)
class TimedStreamHandler(logging.StreamHandler):
THRESHOLD: float = 0.5
LINE_BREAKING_LEVELS: Sequence[str] = ["ERROR", "WARNING", "SUCCESS"]
_terminate: bool = False
_last_update: float = 0.0
def __init__(self) -> None:
super(TimedStreamHandler, self).__init__()
self.setFormatter(logging.Formatter("%(message)s"))
self.terminator: str = ""
self.setLevel(logging.INFO)
self._record: Optional[logging.LogRecord] = None
self._active_lines: int = 0
# Preamble preparing terminal.
sys.stderr.write(
Format.NEWLINE
+ Format.CLEAR_LINE
+ Format.CURSOR_UP_LINE
+ Format.HIDE_CURSOR
)
thread = threading.Thread(target=self._thread)
thread.daemon = True
thread.start()
def clear_lines(self) -> str:
if self._active_lines == 0:
return ""
return Format.CLEAR_LINE + "".join(
[
Format.CURSOR_UP_LINE + Format.CLEAR_LINE
for n in range(self._active_lines - 1)
]
)
def emit(self, record: logging.LogRecord, age: Optional[float] = None) -> None:
suffix = ""
color = ""
active_lines = record.msg.count("\n") + 1
truncate = Format.TRUNCATE_OVERFLOW
if record.levelname in self.LINE_BREAKING_LEVELS:
record.msg += "\n"
if record.levelname == "ERROR":
color = Color.RED
self._record = None
active_lines = 0
truncate = Format.WRAP_OVERFLOW
elif record.levelname == "WARNING":
color = Color.YELLOW
self._record = None
active_lines = 0
truncate = Format.WRAP_OVERFLOW
elif record.levelname == "PROMPT":
color = Color.YELLOW
self._record = None
active_lines = 0
truncate = Format.WRAP_OVERFLOW
elif record.levelname == "SUCCESS":
self._record = None
active_lines = 0
truncate = Format.WRAP_OVERFLOW
elif age:
if age > 10:
color = Color.YELLOW
if age > 30:
color = Color.RED
suffix = " {}[{:.1f}s]{}".format(
color if color else "", age, Format.CLEAR if color else ""
)
else:
self._record = record
self._last_update = time.time()
timed_record = copy.copy(record)
timed_record.msg = (
"{clear_line}{color} {cursor}{clear} {truncate}{message}{suffix}"
).format(
clear_line=self.clear_lines(),
color=color,
cursor=Character.LAMBDA,
clear=Format.CLEAR,
truncate=truncate,
message=record.msg,
suffix=suffix,
)
self._active_lines = active_lines
super(TimedStreamHandler, self).emit(timed_record)
def _thread(self) -> None:
while not self._terminate:
record = self._record
if record:
age = time.time() - self._last_update
if age > self.THRESHOLD:
self.emit(record, age)
time.sleep(0.1)
def terminate(self) -> None:
self._terminate = True
if self._active_lines > 0:
sys.stderr.write(self.clear_lines())
self._active_lines = 0
# Reset terminal.
sys.stderr.write(Format.WRAP_OVERFLOW + Format.SHOW_CURSOR)
sys.stderr.flush()
def initialize(noninteractive: bool) -> None:
global __handler
if __handler:
LOG.debug("Log handler already exists, skipping initialization.")
return
if noninteractive:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(SectionFormatter())
stream_handler.setLevel(logging.DEBUG)
__handler = None
else:
stream_handler = TimedStreamHandler()
__handler = stream_handler
logging.addLevelName(PERFORMANCE, "PERFORMANCE")
logging.addLevelName(PROMPT, "PROMPT")
logging.addLevelName(SUCCESS, "SUCCESS")
logging.basicConfig(level=logging.DEBUG, handlers=[stream_handler])
def start_logging_to_directory(noninteractive: bool, log_directory: str) -> None:
if not noninteractive and log_directory is not None:
if not os.path.exists(log_directory):
os.makedirs(log_directory)
handler = logging.FileHandler(os.path.join(log_directory, "pyre.stderr"))
handler.setFormatter(SectionFormatter())
handler.setLevel(logging.DEBUG)
logger = logging.getLogger()
logger.addHandler(handler)
def cleanup() -> None:
global __handler
handler = __handler
if handler:
handler.terminate()
__handler = None
output = stdout.getvalue()
if output:
sys.stdout.write(output)
if not output.endswith("\n"):
sys.stdout.write("\n")
class StreamLogger:
_should_stop_reading_stream = False
_current_section: Optional[str]
_server_log_pattern: Pattern[str] = re.compile(
r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} (\w+) (.*)"
)
def __init__(self, stream: Iterable[str]) -> None:
self._reader = threading.Thread(target=self._read_stream, args=(stream,))
self._reader.daemon = True
self._current_section = None
def join(self) -> None:
self._reader.join()
def _log_server_stderr_message(self, server_message: str) -> None:
line = server_message.rstrip()
match = self._server_log_pattern.match(line)
if match:
section = match.groups()[0]
message = match.groups()[1]
self._current_section = section
else:
section = self._current_section
message = line
if section == "ERROR":
LOG.error(message)
elif section == "INFO":
LOG.info(message)
elif section == "DUMP":
LOG.warning(message)
elif section == "WARNING":
LOG.warning(message)
elif section == "PROGRESS":
LOG.info(message)
elif section == "PARSER":
LOG.error(message)
elif section is not None:
LOG.debug("[%s] %s", section, message)
else:
LOG.debug(line)
def _read_stream(self, stream: Iterable[str]) -> None:
try:
for line in stream:
if self._should_stop_reading_stream:
return
self._log_server_stderr_message(line)
except Exception:
pass
def __enter__(self) -> "StreamLogger":
self._should_stop_reading_stream = False
self._reader.start()
return self
def __exit__(
self,
_type: Optional[BaseException],
_value: Optional[BaseException],
_traceback: Optional[TracebackType],
) -> None:
self._should_stop_reading_stream = True
def get_yes_no_input(prompt: str) -> bool:
choice = get_input(prompt, suffix=" [Y/n] ")
return choice.lower() in ["", "y", "ye", "yes"]
def get_optional_input(prompt: str, default: str) -> str:
result = get_input(prompt, suffix=" (Default: `{}`): ".format(default))
if result == "":
return default
return result
def get_input(prompt: str, suffix: str = "") -> str:
LOG.log(PROMPT, prompt + suffix)
return input().strip()
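# Hedged usage sketch (not part of the original module): the intended lifecycle
# of this logging setup. initialize() installs either a plain StreamHandler or
# the TimedStreamHandler, StreamLogger relays a server's stderr lines into the
# matching log levels, and cleanup() restores the terminal and flushes any
# buffered stdout. The `server_stderr` iterable is an assumption here.
#
#   initialize(noninteractive=False)
#   try:
#       with StreamLogger(server_stderr) as stream_logger:
#           LOG.info("waiting for server output")
#           stream_logger.join()
#   finally:
#       cleanup()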
|
process_replay.py
|
#!/usr/bin/env python3
import importlib
import os
import sys
import threading
import time
import signal
from collections import namedtuple
import capnp
from tqdm import tqdm
import cereal.messaging as messaging
from cereal import car, log
from cereal.services import service_list
from common.params import Params
from common.timeout import Timeout
from selfdrive.car.fingerprints import FW_VERSIONS
from selfdrive.car.car_helpers import get_car, interfaces
from selfdrive.manager.process import PythonProcess
from selfdrive.manager.process_config import managed_processes
# Numpy gives different results based on CPU features after version 19
NUMPY_TOLERANCE = 1e-7
CI = "CI" in os.environ
TIMEOUT = 15
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback', 'tolerance', 'fake_pubsubmaster'])
def wait_for_event(evt):
if not evt.wait(TIMEOUT):
if threading.currentThread().getName() == "MainThread":
# tested process likely died. don't let test just hang
raise Exception("Timeout reached. Tested process likely crashed.")
else:
# done testing this process, let it die
sys.exit(0)
class FakeSocket:
def __init__(self, wait=True):
self.data = []
self.wait = wait
self.recv_called = threading.Event()
self.recv_ready = threading.Event()
def receive(self, non_blocking=False):
if non_blocking:
return None
if self.wait:
self.recv_called.set()
wait_for_event(self.recv_ready)
self.recv_ready.clear()
return self.data.pop()
def send(self, data):
if self.wait:
wait_for_event(self.recv_called)
self.recv_called.clear()
self.data.append(data)
if self.wait:
self.recv_ready.set()
def wait_for_recv(self):
wait_for_event(self.recv_called)
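# Added commentary (not original code): when wait=True the two events above form
# a rendezvous that lock-steps the process under test with the replay harness.
# receive() signals recv_called and blocks on recv_ready; send() blocks until
# recv_called is seen, queues the payload, then sets recv_ready, so exactly one
# message is handed over per receive() call.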
class DumbSocket:
def __init__(self, s=None):
if s is not None:
try:
dat = messaging.new_message(s)
except capnp.lib.capnp.KjException: # pylint: disable=c-extension-no-member
        # list-type messages require an explicit size
dat = messaging.new_message(s, 0)
self.data = dat.to_bytes()
def receive(self, non_blocking=False):
return self.data
def send(self, dat):
pass
class FakeSubMaster(messaging.SubMaster):
def __init__(self, services):
super(FakeSubMaster, self).__init__(services, addr=None)
self.sock = {s: DumbSocket(s) for s in services}
self.update_called = threading.Event()
self.update_ready = threading.Event()
self.wait_on_getitem = False
def __getitem__(self, s):
# hack to know when fingerprinting is done
if self.wait_on_getitem:
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
return self.data[s]
def update(self, timeout=-1):
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
def update_msgs(self, cur_time, msgs):
wait_for_event(self.update_called)
self.update_called.clear()
super(FakeSubMaster, self).update_msgs(cur_time, msgs)
self.update_ready.set()
def wait_for_update(self):
wait_for_event(self.update_called)
class FakePubMaster(messaging.PubMaster):
def __init__(self, services): # pylint: disable=super-init-not-called
self.data = {}
self.sock = {}
self.last_updated = None
for s in services:
try:
data = messaging.new_message(s)
except capnp.lib.capnp.KjException:
data = messaging.new_message(s, 0)
self.data[s] = data.as_reader()
self.sock[s] = DumbSocket()
self.send_called = threading.Event()
self.get_called = threading.Event()
def send(self, s, dat):
self.last_updated = s
if isinstance(dat, bytes):
self.data[s] = log.Event.from_bytes(dat)
else:
self.data[s] = dat.as_reader()
self.send_called.set()
wait_for_event(self.get_called)
self.get_called.clear()
def wait_for_msg(self):
wait_for_event(self.send_called)
self.send_called.clear()
dat = self.data[self.last_updated]
self.get_called.set()
return dat
def fingerprint(msgs, fsm, can_sock, fingerprint):
print("start fingerprinting")
fsm.wait_on_getitem = True
# populate fake socket with data for fingerprinting
canmsgs = [msg for msg in msgs if msg.which() == "can"]
wait_for_event(can_sock.recv_called)
can_sock.recv_called.clear()
can_sock.data = [msg.as_builder().to_bytes() for msg in canmsgs[:300]]
can_sock.recv_ready.set()
can_sock.wait = False
# we know fingerprinting is done when controlsd sets sm['lateralPlan'].sensorValid
wait_for_event(fsm.update_called)
fsm.update_called.clear()
fsm.wait_on_getitem = False
can_sock.wait = True
can_sock.data = []
fsm.update_ready.set()
print("finished fingerprinting")
def get_car_params(msgs, fsm, can_sock, fingerprint):
if fingerprint:
CarInterface, _, _ = interfaces[fingerprint]
CP = CarInterface.get_params(fingerprint)
else:
can = FakeSocket(wait=False)
sendcan = FakeSocket(wait=False)
canmsgs = [msg for msg in msgs if msg.which() == 'can']
for m in canmsgs[:300]:
can.send(m.as_builder().to_bytes())
_, CP = get_car(can, sendcan)
Params().put("CarParams", CP.to_bytes())
def controlsd_rcv_callback(msg, CP, cfg, fsm):
# no sendcan until controlsd is initialized
socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
if "sendcan" in socks and fsm.frame < 2000:
socks.remove("sendcan")
return socks, len(socks) > 0
def radar_rcv_callback(msg, CP, cfg, fsm):
if msg.which() != "can":
return [], False
elif CP.radarOffCan:
return ["radarState", "liveTracks"], True
radar_msgs = {"honda": [0x445], "toyota": [0x19f, 0x22f], "gm": [0x474],
"chrysler": [0x2d4]}.get(CP.carName, None)
if radar_msgs is None:
raise NotImplementedError
for m in msg.can:
if m.src == 1 and m.address in radar_msgs:
return ["radarState", "liveTracks"], True
return [], False
def calibration_rcv_callback(msg, CP, cfg, fsm):
# calibrationd publishes 1 calibrationData every 5 cameraOdometry packets.
# should_recv always true to increment frame
recv_socks = []
frame = fsm.frame + 1 # incrementing hasn't happened yet in SubMaster
if frame == 0 or (msg.which() == 'cameraOdometry' and (frame % 5) == 0):
recv_socks = ["liveCalibration"]
return recv_socks, fsm.frame == 0 or msg.which() == 'cameraOdometry'
def ublox_rcv_callback(msg):
msg_class, msg_id = msg.ubloxRaw[2:4]
if (msg_class, msg_id) in {(1, 7 * 16)}:
return ["gpsLocationExternal"]
elif (msg_class, msg_id) in {(2, 1 * 16 + 5), (10, 9)}:
return ["ubloxGnss"]
else:
return []
CONFIGS = [
ProcessConfig(
proc_name="controlsd",
pub_sub={
"can": ["controlsState", "carState", "carControl", "sendcan", "carEvents", "carParams"],
"deviceState": [], "pandaState": [], "liveCalibration": [], "driverMonitoringState": [], "longitudinalPlan": [], "lateralPlan": [], "liveLocationKalman": [], "liveParameters": [], "radarState": [],
"modelV2": [], "driverCameraState": [], "roadCameraState": [], "ubloxRaw": [], "managerState": [],
},
ignore=["logMonoTime", "valid", "controlsState.startMonoTime", "controlsState.cumLagMs"],
init_callback=fingerprint,
should_recv_callback=controlsd_rcv_callback,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="radard",
pub_sub={
"can": ["radarState", "liveTracks"],
"liveParameters": [], "carState": [], "modelV2": [],
},
ignore=["logMonoTime", "valid", "radarState.cumLagMs"],
init_callback=get_car_params,
should_recv_callback=radar_rcv_callback,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="plannerd",
pub_sub={
"modelV2": ["lateralPlan"], "radarState": ["longitudinalPlan"],
"carState": [], "controlsState": [],
},
ignore=["logMonoTime", "valid", "longitudinalPlan.processingDelay"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="calibrationd",
pub_sub={
"carState": ["liveCalibration"],
"cameraOdometry": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=calibration_rcv_callback,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="dmonitoringd",
pub_sub={
"driverState": ["driverMonitoringState"],
"liveCalibration": [], "carState": [], "modelV2": [], "controlsState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="locationd",
pub_sub={
"cameraOdometry": ["liveLocationKalman"],
"sensorEvents": [], "gpsLocationExternal": [], "liveCalibration": [], "carState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=False,
),
ProcessConfig(
proc_name="paramsd",
pub_sub={
"liveLocationKalman": ["liveParameters"],
"carState": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="ubloxd",
pub_sub={
"ubloxRaw": ["ubloxGnss", "gpsLocationExternal"],
},
ignore=["logMonoTime"],
init_callback=None,
should_recv_callback=ublox_rcv_callback,
tolerance=None,
fake_pubsubmaster=False,
),
]
def replay_process(cfg, lr, fingerprint=None):
if cfg.fake_pubsubmaster:
return python_replay_process(cfg, lr, fingerprint)
else:
return cpp_replay_process(cfg, lr, fingerprint)
def python_replay_process(cfg, lr, fingerprint=None):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]
pub_sockets = [s for s in cfg.pub_sub.keys() if s != 'can']
fsm = FakeSubMaster(pub_sockets)
fpm = FakePubMaster(sub_sockets)
args = (fsm, fpm)
if 'can' in list(cfg.pub_sub.keys()):
can_sock = FakeSocket()
args = (fsm, fpm, can_sock)
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
params = Params()
params.clear_all()
params.put_bool("OpenpilotEnabledToggle", True)
params.put_bool("Passive", False)
params.put_bool("CommunityFeaturesToggle", True)
os.environ['NO_RADAR_SLEEP'] = "1"
# TODO: remove after getting new route for civic & accord
migration = {
"HONDA CIVIC 2016 TOURING": "HONDA CIVIC 2016",
"HONDA ACCORD 2018 SPORT 2T": "HONDA ACCORD 2018",
"HONDA ACCORD 2T 2018": "HONDA ACCORD 2018",
}
if fingerprint is not None:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = fingerprint
else:
os.environ['SKIP_FW_QUERY'] = ""
os.environ['FINGERPRINT'] = ""
for msg in lr:
if msg.which() == 'carParams':
car_fingerprint = migration.get(msg.carParams.carFingerprint, msg.carParams.carFingerprint)
if len(msg.carParams.carFw) and (car_fingerprint in FW_VERSIONS):
params.put("CarParamsCache", msg.carParams.as_builder().to_bytes())
else:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = car_fingerprint
assert(type(managed_processes[cfg.proc_name]) is PythonProcess)
managed_processes[cfg.proc_name].prepare()
mod = importlib.import_module(managed_processes[cfg.proc_name].module)
thread = threading.Thread(target=mod.main, args=args)
thread.daemon = True
thread.start()
if cfg.init_callback is not None:
if 'can' not in list(cfg.pub_sub.keys()):
can_sock = None
cfg.init_callback(all_msgs, fsm, can_sock, fingerprint)
CP = car.CarParams.from_bytes(params.get("CarParams", block=True))
# wait for started process to be ready
if 'can' in list(cfg.pub_sub.keys()):
can_sock.wait_for_recv()
else:
fsm.wait_for_update()
log_msgs, msg_queue = [], []
for msg in tqdm(pub_msgs, disable=CI):
if cfg.should_recv_callback is not None:
recv_socks, should_recv = cfg.should_recv_callback(msg, CP, cfg, fsm)
else:
recv_socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
should_recv = bool(len(recv_socks))
if msg.which() == 'can':
can_sock.send(msg.as_builder().to_bytes())
else:
msg_queue.append(msg.as_builder())
if should_recv:
fsm.update_msgs(0, msg_queue)
msg_queue = []
recv_cnt = len(recv_socks)
while recv_cnt > 0:
m = fpm.wait_for_msg()
log_msgs.append(m)
recv_cnt -= m.which() in recv_socks
return log_msgs
def cpp_replay_process(cfg, lr, fingerprint=None):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub] # We get responses here
pm = messaging.PubMaster(cfg.pub_sub.keys())
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
log_msgs = []
os.environ["SIMULATION"] = "1" # Disable submaster alive checks
managed_processes[cfg.proc_name].prepare()
managed_processes[cfg.proc_name].start()
try:
with Timeout(TIMEOUT):
while not all(pm.all_readers_updated(s) for s in cfg.pub_sub.keys()):
time.sleep(0)
# Make sure all subscribers are connected
sockets = {s: messaging.sub_sock(s, timeout=2000) for s in sub_sockets}
for s in sub_sockets:
messaging.recv_one_or_none(sockets[s])
for i, msg in enumerate(tqdm(pub_msgs, disable=False)):
pm.send(msg.which(), msg.as_builder())
resp_sockets = cfg.pub_sub[msg.which()] if cfg.should_recv_callback is None else cfg.should_recv_callback(msg)
for s in resp_sockets:
response = messaging.recv_one(sockets[s])
if response is None:
print(f"Warning, no response received {i}")
else:
log_msgs.append(response)
if not len(resp_sockets): # We only need to wait if we didn't already wait for a response
while not pm.all_readers_updated(msg.which()):
time.sleep(0)
finally:
managed_processes[cfg.proc_name].signal(signal.SIGKILL)
managed_processes[cfg.proc_name].stop()
return log_msgs
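# Hedged usage sketch (not part of this module): replaying a single process over
# the messages of a logged route. The LogReader import path and the segment name
# are assumptions here, not taken from this file.
#
#   from tools.lib.logreader import LogReader
#   lr = list(LogReader("route_segment--rlog.bz2"))
#   cfg = next(c for c in CONFIGS if c.proc_name == "radard")
#   replayed_msgs = replay_process(cfg, lr)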
|
base.py
|
"""
Module containing base classes that represent object entities that can accept
configuration, start/stop/run/abort, create results and have some state.
"""
import os
import sys
import signal
import time
import uuid
import threading
import inspect
import psutil
import functools
from collections import deque, OrderedDict
from schema import Or, And, Use
from testplan.common.globals import get_logger
from testplan.common.config import Config
from testplan.common.config import ConfigOption
from testplan.common.utils.exceptions import format_trace
from testplan.common.utils.thread import execute_as_thread
from testplan.common.utils.timing import wait
from testplan.common.utils.path import makeemptydirs, makedirs, default_runpath
class Environment(object):
"""
A collection of resources that can be started/stopped.
:param parent: Reference to parent object.
:type parent: :py:class:`Entity <testplan.common.entity.base.Entity>`
"""
def __init__(self, parent=None):
self._resources = OrderedDict()
self.parent = parent
self.start_exceptions = OrderedDict()
self.stop_exceptions = OrderedDict()
self._logger = None
@property
    def cfg(self):
        """Configuration object of the parent object."""
return self.parent.cfg if self.parent else None
@property
def runpath(self):
"""Runpath of parent object."""
return self.parent.runpath if self.parent else None
@property
def logger(self):
if self._logger is None:
if self.parent is not None:
self._logger = self.parent.logger
else:
self._logger = Entity.logger
return self._logger
def add(self, item, uid=None):
"""
Adds a :py:class:`Resource <testplan.common.entity.base.Resource>` to
the Environment.
:param item: Resource to be added.
:type item: :py:class:`Resource <testplan.common.entity.base.Resource>`
:param uid: Unique identifier.
:type uid: ``str`` or ``NoneType``
:return: Unique identifier assigned to item added.
:rtype: ``str``
"""
if uid is None:
uid = item.uid()
item.context = self
if uid in self._resources:
raise RuntimeError('Uid {} already in context.'.format(uid))
self._resources[uid] = item
return uid
def remove(self, uid):
"""
Remove resource with the given uid from the environment.
"""
del self._resources[uid]
def first(self):
return next(uid for uid in self._resources.keys())
def __getattr__(self, item):
context = self.__getattribute__('_resources')
if item in context:
return context[item]
if self.parent and self.parent.cfg.initial_context:
if item in self.parent.cfg.initial_context:
return self.parent.cfg.initial_context[item]
return self.__getattribute__(item)
def __getitem__(self, item):
return getattr(self, item)
def __contains__(self, item):
return item in self._resources
def __iter__(self):
return iter(self._resources.values())
def __repr__(self):
if self.parent and self.parent.cfg.initial_context:
ctx = self.parent.cfg.initial_context
initial = {key: val for key, val in ctx.items()}
res = {key: val for key, val in self._resources.items()}
initial.update(res)
return '{}[{}]'.format(self.__class__.__name__, initial)
else:
return '{}[{}]'.format(self.__class__.__name__,
list(self._resources.items()))
def all_status(self, target):
"""
        Check that all resources have the target status.
"""
return all(self._resources[resource].status.tag == target
for resource in self._resources)
def start(self):
"""
Start all resources sequentially and log errors.
"""
# Trigger start all resources
for resource in self._resources.values():
try:
self.logger.debug('Starting {}'.format(resource))
resource.start()
if resource.cfg.async_start is False:
resource.wait(resource.STATUS.STARTED)
self.logger.debug('Started {}'.format(resource))
except Exception as exc:
msg = 'While starting resource [{}]{}{}'.format(
resource.cfg.name, os.linesep,
format_trace(inspect.trace(), exc))
self.logger.error(msg)
self.start_exceptions[resource] = msg
# Environment start failure. Won't start the rest.
break
# Wait resources status to be STARTED.
for resource in self._resources.values():
if resource in self.start_exceptions:
break
if resource.cfg.async_start is False:
continue
else:
resource.wait(resource.STATUS.STARTED)
def stop(self, reversed=False):
"""
Stop all resources in reverse order and log exceptions.
"""
resources = list(self._resources.values())
if reversed is True:
resources = resources[::-1]
# Stop all resources
for resource in resources:
if (resource.status.tag is None) or (
resource.status.tag == resource.STATUS.STOPPED):
# Skip resources not even triggered to start.
continue
try:
self.logger.debug('Stopping {}'.format(resource))
resource.stop()
self.logger.debug('Stopped {}'.format(resource))
except Exception as exc:
msg = 'While stopping resource [{}]{}{}'.format(
resource.cfg.name, os.linesep,
format_trace(inspect.trace(), exc))
self.stop_exceptions[resource] = msg
# Wait resources status to be STOPPED.
for resource in resources:
if resource in self.stop_exceptions:
continue
elif resource.status.tag is None:
# Skip resources not even triggered to start.
continue
else:
resource.wait(resource.STATUS.STOPPED)
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
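# Hedged usage sketch (not part of the original module): the add/start/stop
# cycle of an Environment, driven either explicitly or through the context
# manager defined above. `some_runnable` and `server` are illustrative names
# for a Runnable parent and a Resource instance.
#
#   env = Environment(parent=some_runnable)
#   env.add(server, uid='server')
#   with env:                     # __enter__ -> start(), __exit__ -> stop()
#       assert env.all_status(server.STATUS.STARTED)
#       env.server                # resources are reachable by uid attribute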
class StatusTransitionException(Exception):
"""To be raised on illegal state transition attempt."""
pass
class EntityStatus(object):
"""
Represents current status of an
:py:class:`Entity <testplan.common.entity.base.Entity>` object.
    TODO: Utilise metadata to store information.
"""
NONE = None
PAUSING = 'PAUSING'
PAUSED = 'PAUSED'
RESUMING = 'RESUMING'
def __init__(self):
"""TODO."""
self._current = self.NONE
self._metadata = OrderedDict()
self._transitions = self.transitions()
@property
def tag(self):
"""Current status value."""
return self._current
@property
def metadata(self):
"""TODO."""
return self._metadata
def change(self, new):
"""Transition to new state."""
current = self._current
try:
if current == new or new in self._transitions[current]:
self._current = new
else:
msg = 'On status change from {} to {}'.format(current, new)
raise StatusTransitionException(msg)
except KeyError as exc:
msg = 'On status change from {} to {} - {}'.format(
current, new, exc)
raise StatusTransitionException(msg)
def update_metadata(self, **metadata):
"""TODO."""
self._metadata.update(metadata)
def clear_metadata(self):
"""TODO."""
self._metadata = OrderedDict()
    def transitions(self):
        """
Returns all legal transitions of the status of the
:py:class:`Entity <testplan.common.entity.base.Entity>`.
"""
return {
self.PAUSING: {self.PAUSED},
self.PAUSED: {self.RESUMING}
}
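# Added commentary (not original code): change() only permits the edges listed
# in transitions(); re-asserting the current state is a no-op, PAUSING -> PAUSED
# and PAUSED -> RESUMING are allowed, and any other move raises
# StatusTransitionException.
#
#   status = EntityStatus()
#   status.change(EntityStatus.PAUSING)   # raises StatusTransitionException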
class EntityConfig(Config):
"""
Configuration object for
:py:class:`Entity <testplan.common.entity.base.Entity>` object.
All classes that inherit
:py:class:`Entity <testplan.common.entity.base.Entity>` can define a
configuration that inherits this ones schema.
"""
@classmethod
def get_options(cls):
"""Config options for base Entity class."""
return {
ConfigOption(
'runpath', default=None,
block_propagation=False): Or(None, str, lambda x: callable(x)),
ConfigOption('initial_context', default={}): dict,
ConfigOption('path_cleanup', default=False): bool,
ConfigOption('status_wait_timeout', default=3600): int,
ConfigOption('abort_wait_timeout', default=30): int,
# active_loop_sleep impacts cpu usage in interactive mode
ConfigOption('active_loop_sleep', default=0.005): float
}
class Entity(object):
"""
Base class for :py:class:`Entity <testplan.common.entity.base.Entity>`
and :py:class:`Resource <testplan.common.entity.base.Resource>` objects
providing common functionality like runpath creation, abort policy
and common attributes.
:param runpath: Path to be used for temp/output files by entity.
    :type runpath: ``str``, ``NoneType`` or a callable that returns ``str``
:param initial_context: Initial key: value pair context information.
:type initial_context: ``dict``
    :param path_cleanup: Remove previously created runpath dirs/files.
:type path_cleanup: ``bool``
:param status_wait_timeout: Timeout for wait status events.
:type status_wait_timeout: ``int``
:param abort_wait_timeout: Timeout for entity abort.
:type abort_wait_timeout: ``int``
:param active_loop_sleep: Sleep time on busy waiting loops.
:type active_loop_sleep: ``float``
"""
CONFIG = EntityConfig
STATUS = EntityStatus
def __init__(self, **options):
self._cfg = self.__class__.CONFIG(**options)
self._status = self.__class__.STATUS()
self._wait_handlers = {}
self._runpath = None
self._scratch = None
self._parent = None
self._uid = None
self._should_abort = False
self._aborted = False
def __str__(self):
return '{}[{}]'.format(self.__class__.__name__, self.uid())
@property
def cfg(self):
"""Configuration object."""
return self._cfg
@property
def status(self):
"""Status object."""
return self._status
@property
def aborted(self):
"""Returns if entity was aborted."""
return self._aborted
@property
def active(self):
"""Entity not aborting/aborted."""
return self._should_abort is False and self._aborted is False
@property
def runpath(self):
"""Path to be used for temp/output files by entity."""
return self._runpath
@property
def scratch(self):
"""Path to be used for temp files by entity."""
return self._scratch
@property
def parent(self):
"""
Returns parent :py:class:`Entity <testplan.common.entity.base.Entity>`.
"""
return self._parent
@parent.setter
def parent(self, value):
"""Reference to parent object."""
self._parent = value
@property
def logger(self):
"""Entity logger object."""
return get_logger()
def pause(self):
"""Pause entity execution."""
self.status.change(self.STATUS.PAUSING)
self.pausing()
def resume(self):
"""Resume entity execution."""
self.status.change(self.STATUS.RESUMING)
self.resuming()
def abort(self):
"""
Default abort policy. First abort all dependencies and then itself.
"""
self._should_abort = True
for dep in self.abort_dependencies():
self._abort_entity(dep)
self.aborting()
self._aborted = True
def abort_dependencies(self):
"""Default empty generator."""
return
yield
def _abort_entity(self, entity, wait_timeout=None):
"""Method to abort an entity and log exceptions."""
timeout = wait_timeout or self.cfg.abort_wait_timeout
try:
self.logger.debug('Aborting {}'.format(entity))
entity.abort()
self.logger.debug('Aborted {}'.format(entity))
except Exception as exc:
self.logger.error(format_trace(inspect.trace(), exc))
self.logger.error('Exception on aborting {} - {}'.format(
                entity, exc))
else:
if wait(lambda: entity.aborted is True, timeout) is False:
self.logger.error('Timeout on waiting to abort {}.'.format(
                    entity))
def aborting(self):
"""
Aborting logic for self.
"""
self.logger.debug('Abort logic not implemented for {}[{}]'.format(
self.__class__.__name__, self.uid()))
def pausing(self):
raise NotImplementedError()
def resuming(self):
raise NotImplementedError()
def wait(self, target_status, timeout=None):
"""Wait until objects status becomes target status."""
timeout = timeout or self.cfg.status_wait_timeout
if target_status in self._wait_handlers:
self._wait_handlers[target_status](timeout=timeout)
else:
wait(lambda: self.status.tag == target_status, timeout=timeout)
def uid(self):
"""Unique identifier of self."""
if not self._uid:
self._uid = uuid.uuid4()
return self._uid
def generate_runpath(self):
"""
Returns runpath directory based on parent object and configuration.
"""
if self.parent and self.parent.runpath:
return os.path.join(self.parent.runpath, self.uid())
runpath = self.cfg.runpath
if runpath:
return self.cfg.runpath(self) if callable(runpath) else runpath
else:
return default_runpath(self)
def make_runpath_dirs(self):
"""
Creates runpath related directories.
"""
self._runpath = self.generate_runpath()
self._scratch = os.path.join(self._runpath, 'scratch')
if self.runpath is None:
raise RuntimeError('{} runpath cannot be None'.format(
self.__class__.__name__
))
self.logger.debug('{} has {} runpath and pid {}'.format(
self.__class__.__name__, self.runpath, os.getpid()))
if self.cfg.path_cleanup is False:
makedirs(self._runpath)
makedirs(self._scratch)
else:
makeemptydirs(self._runpath)
makeemptydirs(self._scratch)
class RunnableStatus(EntityStatus):
"""
Status of a
:py:class:`Runnable <testplan.common.entity.base.Runnable>` entity.
"""
EXECUTING = 'EXECUTING'
RUNNING = 'RUNNING'
FINISHED = 'FINISHED'
PAUSING = 'PAUSING'
PAUSED = 'PAUSED'
    def transitions(self):
        """
Defines the status transitions of a
:py:class:`Runnable <testplan.common.entity.base.Runnable>` entity.
"""
transitions = super(RunnableStatus, self).transitions()
overrides = {
self.NONE: {self.RUNNING},
self.RUNNING: {self.FINISHED, self.EXECUTING, self.PAUSING},
self.EXECUTING: {self.RUNNING},
self.PAUSING: {self.PAUSED},
self.PAUSED: {self.RESUMING},
self.RESUMING: {self.RUNNING},
self.FINISHED: {self.RUNNING}
}
transitions.update(overrides)
return transitions
class RunnableIHandlerConfig(Config):
"""
Configuration object for
:py:class:`RunnableIHandler <testplan.common.entity.base.RunnableIHandler>` object.
"""
@classmethod
def get_options(cls):
return {'target': object,
ConfigOption('http_handler', default=None): object,
ConfigOption('http_handler_startup_timeout', default=10): int,
ConfigOption('max_operations', default=5):
And(Use(int), lambda n: n > 0)}
class RunnableIRunner(object):
EMPTY_DICT = dict()
EMPTY_TUPLE = tuple()
def __init__(self, runnable):
self._runnable = runnable
@staticmethod
def set_run_status(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self._runnable.status.change(Runnable.STATUS.RUNNING)
for result in func(self, *args, **kwargs):
yield result
self._runnable.status.change(Runnable.STATUS.FINISHED)
return wrapper
def run(self):
yield self._runnable.run, tuple(), dict()
class RunnableIHandler(Entity):
"""
Interactive base handler for a runnable object.
:param target: Target runnable object.
:type target: Subclass of
:py:class:`~testplan.common.entity.base.Runnable`.
:param http_handler: Optional HTTP requests handler.
:type http_handler: ``Object``.
:param http_handler_startup_timeout: Timeout value on starting the handler.
:type http_handler_startup_timeout: ``int``
:param max_operations: Max simultaneous operations.
:type max_operations: ``int`` greater than 0.
Also inherits all
:py:class:`~testplan.common.entity.base.Entity` options.
"""
CONFIG = RunnableIHandlerConfig
STATUS = RunnableStatus
def __init__(self, **options):
super(RunnableIHandler, self).__init__(**options)
self._cfg.parent = self.target.cfg
self._queue = [] # Stores uids
self._operations = {} # Ops uid - > (method, args, kwargs)
self._results = {} # Ops uid -> result
self._next_uid = 0
self._http_handler = self._setup_http_handler()
def _setup_http_handler(self):
http_handler = self.cfg.http_handler(ihandler=self)\
if self.cfg.http_handler else None
        if http_handler is not None:
            http_handler.cfg.parent = self.cfg
return http_handler
@property
def http_handler_info(self):
"""Connection information for http handler."""
return self._http_handler.ip, self._http_handler.port
@property
def target(self):
return self._cfg.target
def abort_dependencies(self):
yield self.target
def add_operation(self, operation, *args, **kwargs):
if len(list(self._operations.keys())) >= self.cfg.max_operations:
raise RuntimeError('Max operations ({}) reached.'.format(
self.cfg.max_operations))
uid = self._next_uid
self._next_uid = (self._next_uid + 1) % self.cfg.max_operations
self._operations[uid] = (operation, args, kwargs)
self._queue.append(uid)
return uid
def _get_result(self, uid):
result = self._results[uid]
del self._results[uid]
return result
def _wait_result(self, uid):
while self.active and self.target.active:
try:
result = self._get_result(uid)
except KeyError:
time.sleep(self.cfg.active_loop_sleep)
else:
return result
def _start_http_handler(self):
thread = threading.Thread(target=self._http_handler.run)
thread.daemon = True
thread.start()
wait(lambda: self._http_handler.port is not None,
self.cfg.http_handler_startup_timeout,
raise_on_timeout=True )
self.logger.test_info('{} listening on: {}:{}'.format(
self._http_handler.__class__.__name__,
self._http_handler.ip, self._http_handler.port))
def __call__(self, *args, **kwargs):
self.status.change(RunnableStatus.RUNNING)
self.logger.test_info('Starting {} for {}'.format(
self.__class__.__name__, self.target))
if self._http_handler is not None:
self._start_http_handler()
while self.active and self.target.active:
if self.status.tag == RunnableStatus.RUNNING:
try:
uid = self._queue.pop(0)
operation, args, kwargs = self._operations[uid]
except IndexError:
time.sleep(self.cfg.active_loop_sleep)
else:
try:
try:
owner = '{}.{}, '.format(
self, operation.im_class.__name__)
except AttributeError:
owner = ''
self.logger.debug(
'Performing operation:{}{}'.format(
owner, operation.__name__))
start_time = time.time()
result = operation(*args, **kwargs)
self._results[uid] = result
self.logger.debug(
'Finished operation {}{} - {}s'.format(
owner, operation.__name__,
round(time.time() - start_time, 5)))
except Exception as exc:
self.logger.test_info(
format_trace(inspect.trace(), exc))
self._results[uid] = exc
finally:
del self._operations[uid]
self.status.change(RunnableStatus.FINISHED)
def pausing(self):
"""Set pausing status."""
self.status.change(RunnableStatus.PAUSED)
def resuming(self):
"""Set resuming status."""
self.status.change(RunnableStatus.RUNNING)
def aborting(self):
"""
Aborting logic for self.
"""
pass
class RunnableConfig(EntityConfig):
"""
Configuration object for
:py:class:`~testplan.common.entity.base.Runnable` entity.
"""
@classmethod
def get_options(cls):
"""Runnable specific config options."""
return {
# Interactive needs to have blocked propagation.
# IHandlers explicitly enable interactive mode of runnables.
ConfigOption('interactive', default=False): bool,
ConfigOption(
'interactive_block',
default=hasattr(sys.modules['__main__'], '__file__')): bool,
ConfigOption('interactive_handler', default=RunnableIHandler):
object,
ConfigOption('interactive_runner', default=RunnableIRunner):
object
}
class RunnableResult(object):
"""
Result object of a
:py:class:`~testplan.common.entity.base.Runnable` entity.
"""
def __init__(self):
self.step_results = OrderedDict()
def __repr__(self):
return '{}[{}]'.format(self.__class__.__name__, self.__dict__)
class Runnable(Entity):
"""
An object that defines steps, a run method to execute the steps and
provides results with the
:py:class:`~testplan.common.entity.base.RunnableResult`
object.
It contains an
:py:class:`~testplan.common.entity.base.Environment`
object of
:py:class:`~testplan.common.entity.base.Resource` objects
that can be started/stopped and utilized by the steps defined.
:param interactive: Enable interactive execution mode.
:type interactive: ``bool``
    :param interactive_block: Block on run() when in interactive mode.
    :type interactive_block: ``bool``
:param interactive_handler: Handler of interactive mode of the object.
:type interactive_handler: Subclass of
:py:class:`~testplan.common.entity.base.RunnableIHandler`
:param interactive_runner: Interactive runner set for the runnable.
:type interactive_runner: Subclass of
:py:class:`~testplan.common.entity.base.RunnableIRunner`
Also inherits all
:py:class:`~testplan.common.entity.base.Entity` options.
"""
CONFIG = RunnableConfig
STATUS = RunnableStatus
RESULT = RunnableResult
ENVIRONMENT = Environment
def __init__(self, **options):
super(Runnable, self).__init__(**options)
self._environment = self.__class__.ENVIRONMENT(parent=self)
self._result = self.__class__.RESULT()
self._steps = deque()
self._ihandler = None
@property
def result(self):
"""
Returns a
:py:class:`~testplan.common.entity.base.RunnableResult`
"""
return self._result
@property
def resources(self):
"""
Returns the
:py:class:`Environment <testplan.common.entity.base.Environment>`
of :py:class:`Resources <testplan.common.entity.base.Resource>`.
"""
return self._environment
@property
    def interactive(self):
        """
        Interactive handler of the runnable; ``None`` unless interactive
        mode has been started via :py:meth:`run`.
        """
return self._ihandler
# Shortcut for interactive handler
i = interactive
def _add_step(self, step, *args, **kwargs):
self._steps.append((step, args, kwargs))
def pre_step_call(self, step):
"""Callable to be invoked before each step."""
pass
def skip_step(self, step):
"""Callable to determine if step should be skipped."""
return False
    def post_step_call(self, step):
        """Callable to be invoked after each step."""
pass
def _run(self):
self.status.change(RunnableStatus.RUNNING)
while self.active:
if self.status.tag == RunnableStatus.RUNNING:
try:
func, args, kwargs = self._steps.popleft()
self.pre_step_call(func)
if self.skip_step(func) is False:
self.logger.debug('Executing step of {} - {}'.format(
self, func.__name__))
start_time = time.time()
self._execute_step(func, *args, **kwargs)
self.logger.debug(
'Finished step of {}, {} - {}s'.format(
self, func.__name__,
round(time.time() - start_time, 5)))
self.post_step_call(func)
except IndexError:
self.status.change(RunnableStatus.FINISHED)
break
time.sleep(self.cfg.active_loop_sleep)
def _run_batch_steps(self):
start_threads, start_procs = self._get_start_info()
self._add_step(self.setup)
self.pre_resource_steps()
self._add_step(self.resources.start)
self.main_batch_steps()
self._add_step(self.resources.stop, reversed=True)
self.post_resource_steps()
self._add_step(self.teardown)
self._run()
self._post_run_checks(start_threads, start_procs)
def _get_start_info(self):
"""
:return: lists of threads and child processes, to be passed to the
_post_run_checks method after the run has finished.
"""
start_threads = threading.enumerate()
current_proc = psutil.Process()
start_children = current_proc.children()
return start_threads, start_children
def _post_run_checks(self, start_threads, start_procs):
"""
Compare the current running threads and processes to those that were
        alive before we were run. Any difference means that threads or
        processes were gained or lost during the run, which may indicate
        insufficient cleanup. Warnings will be logged.
"""
end_threads = threading.enumerate()
if start_threads != end_threads:
new_threads = [
thr for thr in end_threads if thr not in start_threads]
self.logger.warning('New threads are still alive after run: %s',
new_threads)
dead_threads = [
thr for thr in start_threads if thr not in end_threads]
self.logger.warning('Threads have died during run: %s',
dead_threads)
current_proc = psutil.Process()
end_procs = current_proc.children()
if start_procs != end_procs:
new_procs = [
proc for proc in end_procs if proc not in start_procs]
self.logger.warning('New processes are still alive after run: %s',
new_procs)
dead_procs = [
thr for thr in start_procs if thr not in end_procs]
self.logger.warning('Child processes have died during run: %s',
dead_procs)
def _execute_step(self, step, *args, **kwargs):
try:
res = step(*args, **kwargs)
except Exception as exc:
print('Exception on {} {}, step {} - {}'.format(
self.__class__.__name__, self.uid(), step.__name__, exc))
self.logger.error(format_trace(inspect.trace(), exc))
res = exc
finally:
self.result.step_results[step.__name__] = res
self.status.update_metadata(**{str(step): res})
def pre_resource_steps(self):
"""Steps to run before environment started."""
pass
def main_batch_steps(self):
"""Steps to run after environment started."""
pass
def post_resource_steps(self):
"""Steps to run after environment stopped."""
pass
def pausing(self):
for resource in self.resources:
resource.pause()
self.status.change(RunnableStatus.PAUSED)
def resuming(self):
for resource in self.resources:
resource.resume()
self.status.change(RunnableStatus.RUNNING)
def abort_dependencies(self):
"""
Yield all dependencies to be aborted before self abort.
"""
for resource in self.resources:
yield resource
def setup(self):
"""Setup step to be executed first."""
pass
def teardown(self):
"""Teardown step to be executed last."""
pass
def should_run(self):
"""Determines if current object should run."""
return True
def run(self):
"""Executes the defined steps and populates the result object."""
try:
if self.cfg.interactive is True:
if self._ihandler is not None:
raise RuntimeError('{} already has an active {}'.format(
self, self._ihandler))
self.logger.test_info(
'Starting {} in interactive mode'.format(self))
self._ihandler = self.cfg.interactive_handler(target=self)
thread = threading.Thread(target=self._ihandler)
thread.start()
# Check if we are on interactive session.
if self.cfg.interactive_block is True:
while self._ihandler.active:
time.sleep(self.cfg.active_loop_sleep)
return self._ihandler
else:
self._run_batch_steps()
except Exception as exc:
self._result.run = exc
self.logger.error(format_trace(inspect.trace(), exc))
else:
# TODO fix swallow exceptions in self._result.step_results.values()
self._result.run = self.status.tag == RunnableStatus.FINISHED and\
self.run_result() is True
return self._result
def run_result(self):
"""Returns if a run was successful."""
return all(not isinstance(val, Exception) and val is not False
for val in self._result.step_results.values())
def dry_run(self):
"""A testing process that creates result for each step."""
raise NotImplementedError
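# Hedged sketch (not part of the original module): a minimal Runnable subclass.
# Steps queued via _add_step() in main_batch_steps() run between resource start
# and stop, and run() returns the RunnableResult with per-step results. The
# names below are illustrative only.
#
#   class HelloRunnable(Runnable):
#       def main_batch_steps(self):
#           self._add_step(self.say_hello)
#       def say_hello(self):
#           self.logger.debug('hello from %s', self.uid())
#           return True
#
#   result = HelloRunnable().run()
#   print(result.run, result.step_results)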
class FailedAction(object):
"""
Simple Falsey container that can be used for
returning results of certain failed async actions.
    The `error_msg` can later be used to enrich the error messages.
"""
def __init__(self, error_msg):
self.error_msg = error_msg
def __bool__(self):
return False
__nonzero__ = __bool__
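# Added commentary (not original code): FailedAction evaluates as False while
# still carrying a human-readable reason.
#
#   outcome = FailedAction(error_msg='connection refused')
#   if not outcome:
#       print('step failed: {}'.format(outcome.error_msg))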
class ResourceConfig(EntityConfig):
"""
Configuration object for
:py:class:`~testplan.common.entity.base.Resource` entity.
"""
@classmethod
def get_options(cls):
"""Resource specific config options."""
return {
ConfigOption('async_start', default=True): bool
}
class ResourceStatus(EntityStatus):
"""
Status of a
:py:class:`Resource <testplan.common.entity.base.Resource>` entity.
"""
STARTING = 'STARTING'
STARTED = 'STARTED'
STOPPING = 'STOPPING'
STOPPED = 'STOPPED'
    def transitions(self):
        """
Defines the status transitions of a
:py:class:`Resource <testplan.common.entity.base.Resource>` entity.
"""
transitions = super(ResourceStatus, self).transitions()
overrides = {
self.NONE: {self.STARTING},
self.STARTING: {self.STARTED, self.STOPPING},
self.STARTED: {self.PAUSING, self.STOPPING},
self.PAUSING: {self.PAUSED},
self.PAUSED: {self.RESUMING, self.STOPPING},
self.RESUMING: {self.STARTED},
self.STOPPING: {self.STOPPED},
self.STOPPED: {self.STARTING}
}
transitions.update(overrides)
return transitions
class Resource(Entity):
"""
An object that can be started/stopped and expose its context
object of key/value pair information.
A Resource is usually part of an
:py:class:`~testplan.common.entity.base.Environment`
object of a
:py:class:`~testplan.common.entity.base.Runnable` object.
:param async_start: Resource can start asynchronously.
:type async_start: ``bool``
Also inherits all
:py:class:`~testplan.common.entity.base.Entity` options.
"""
CONFIG = ResourceConfig
STATUS = ResourceStatus
def __init__(self, **options):
super(Resource, self).__init__(**options)
self._context = None
self._wait_handlers.update(
{self.STATUS.STARTED: self._wait_started,
self.STATUS.STOPPED: self._wait_stopped})
@property
def context(self):
"""Key/value pair information of a Resource."""
return self._context
@context.setter
def context(self, context):
"""Set the Resource context."""
self._context = context
def start(self):
"""
Triggers the start logic of a Resource by executing
:py:meth:`Resource.starting <testplan.common.entity.base.Resource.starting>`
method.
"""
self.status.change(self.STATUS.STARTING)
self.starting()
def stop(self):
"""
Triggers the stop logic of a Resource by executing
:py:meth:`Resource.stopping <testplan.common.entity.base.Resource.stopping>`
method.
"""
self.status.change(self.STATUS.STOPPING)
if self.active:
self.stopping()
def _wait_started(self, timeout=None):
self.status.change(self.STATUS.STARTED)
def _wait_stopped(self, timeout=None):
self.status.change(self.STATUS.STOPPED)
def starting(self):
"""
Start logic for Resource that also sets the status to *STARTED*.
"""
raise NotImplementedError()
def stopping(self):
"""
Stop logic for Resource that also sets the status to *STOPPED*.
"""
raise NotImplementedError()
def pausing(self):
"""Pause the resource."""
self.status.change(self.status.PAUSED)
def resuming(self):
"""Resume the resource."""
self.status.change(self.status.STARTED)
def restart(self, timeout=None):
"""Stop and start the resource."""
self.stop()
self._wait_stopped(timeout=timeout)
self.start()
self._wait_started(timeout=timeout)
def __enter__(self):
self.start()
self.wait(self.STATUS.STARTED)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
self.wait(self.STATUS.STOPPED)
@property
def is_alive(self):
"""
Called to periodically poll the resource health. Default implementation
assumes the resource is always healthy.
"""
return True
def pending_work(self):
"""Resource has pending work."""
return False
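# Hedged sketch (not part of the original module): a minimal Resource subclass.
# Concrete resources implement starting()/stopping() and advance their own
# status, which is what wait(STATUS.STARTED) and the context manager rely on.
# DummyResource is an illustrative name only.
#
#   class DummyResource(Resource):
#       def starting(self):
#           self.status.change(self.STATUS.STARTED)
#       def stopping(self):
#           self.status.change(self.STATUS.STOPPED)
#
#   with DummyResource() as res:
#       assert res.status.tag == res.STATUS.STARTED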
class RunnableManagerConfig(EntityConfig):
"""
Configuration object for
:py:class:`RunnableManager <testplan.common.entity.base.RunnableManager>`
entity.
"""
@classmethod
def get_options(cls):
"""RunnableManager specific config options."""
return {
ConfigOption('parse_cmdline', default=True): bool,
ConfigOption('port', default=None):
Or(None,
And(Use(int),
lambda n: n > 0)),
ConfigOption('abort_signals', default=[
signal.SIGINT, signal.SIGTERM]): [int]
}
class RunnableManager(Entity):
"""
Executes a
:py:class:`Runnable <testplan.common.entity.base.Runnable>` entity
in a separate thread and handles the abort signals.
    :param parse_cmdline: Parse command line arguments.
:type parse_cmdline: ``bool``
    :param port: Port for the interactive mode handler (TODO).
    :type port: ``int`` or ``NoneType``
:param abort_signals: Signals to catch and trigger abort.
:type abort_signals: ``list`` of signals
Also inherits all
:py:class:`~testplan.common.entity.base.Entity` options.
"""
CONFIG = RunnableManagerConfig
def __init__(self, **options):
super(RunnableManager, self).__init__(**options)
if self._cfg.parse_cmdline is True:
options = self._enrich_options(options)
self._runnable = self._initialize_runnable(**options)
def _enrich_options(self, options):
return options
def __getattr__(self, item):
try:
return self.__getattribute__(item)
except AttributeError:
if '_runnable' in self.__dict__:
return getattr(self._runnable, item)
raise
@property
def runpath(self):
"""Expose the runnable runpath."""
return self._runnable.runpath
@property
def cfg(self):
"""Expose the runnable configuration object."""
return self._runnable.cfg
@property
def status(self):
"""Expose the runnable status."""
return self._runnable.status
@property
def active(self):
"""Expose the runnable active attribute."""
return self._runnable.active
def run(self):
"""
Executes target runnable defined in configuration in a separate thread.
:return: Runnable result object.
:rtype: :py:class:`RunnableResult <testplan.common.entity.base.RunnableResult>`
"""
for sig in self._cfg.abort_signals:
signal.signal(sig, self._handle_abort)
execute_as_thread(self._runnable.run, daemon=True, join=True,
break_join=lambda: self.aborted is True)
if self._runnable.interactive is not None:
return self._runnable.interactive
if isinstance(self._runnable.result, Exception):
raise self._runnable.result
return self._runnable.result
def _initialize_runnable(self, **options):
runnable_class = self._cfg.runnable
runnable_config = dict(**options)
return runnable_class(**runnable_config)
def _handle_abort(self, signum, frame):
for sig in self._cfg.abort_signals:
signal.signal(sig, signal.SIG_IGN)
self.logger.debug('Signal handler called for signal {} from {}'.format(
signum, threading.current_thread()))
self.abort()
def pausing(self):
"""Pause the runnable execution."""
self._runnable.pause()
def resuming(self):
"""Resume the runnable execution."""
self._runnable.resume()
def abort_dependencies(self):
"""Dependencies to be aborted first."""
yield self._runnable
    def aborting(self):
        """Suppress the 'not implemented' debug log from the parent class."""
pass
|
AudioProcessor.py
|
from CommonData import *
import matplotlib.pyplot as plt
import sys
import threading
import time
class AudioProcessor:
    # Class-level parameters of AudioProcessor; treat these as constants
CHUNK = 1024
DEFAULT_ARRAY = np.zeros( CHUNK )
def __init__(self):
# Initialize AudioData() in self._data
self._data = AudioData()
self._loop_active = True
self._thread = None
return
    # This function returns a numpy array whose values are derived from the given input array (np.array)
def _processData(self, data):
#data = -data
return data
    # This is the main processing loop that AudioProcessor runs
def _doProcessLoop(self):
        # Precompute CHUNK * 2 for the buffer-length checks below
CHUNK2 = 2 * AudioProcessor.CHUNK
# Set default data for plotting
x = np.arange(0, AudioProcessor.CHUNK, 1)
data_in = AudioProcessor.DEFAULT_ARRAY
data_out = AudioProcessor.DEFAULT_ARRAY
# Enable plot interaction
plt.ion()
# Start two figures (one for input data and one for processed/output data)
fig1 = plt.figure()
fig2 = plt.figure()
        # Add a subplot to get the axes object for both figures
ax1 = fig1.add_subplot(1, 1, 1)
ax2 = fig2.add_subplot(1, 1, 1)
# Plot the input/output data graph with the default values (blue for input and red for output),
# retrieve the line objects as well
line1, = ax1.plot(x, data_in, 'b--')
line2, = ax2.plot(x, data_out, 'r--')
        # Set the y-axis range to min = -4000 and max = 4000
        ax1.set_ylim(-4000, 4000)
        ax2.set_ylim(-4000, 4000)
        # Main loop. To break it gracefully (without Ctrl-C), an outside caller sets the _loop_active flag to False
while (self._loop_active):
# Get the input data from CommonData.py
data_in = self._data.getDataIn()
if (data_in is None or len(data_in) != CHUNK2):
data_in = AudioProcessor.DEFAULT_ARRAY
# Get the output data (processed data) by passing data_in through self._processData
data_out = self._processData(data_in)
if (data_out is None or len(data_out) != CHUNK2):
data_out = AudioProcessor.DEFAULT_ARRAY
            # Immediately send the output data to CommonData.py
self._data.setDataOut(data_out)
            # Downsample both arrays to length CHUNK by keeping only the
            # even-indexed samples
data_in = data_in[::2]
data_out = data_out[::2]
# Set the y data of the graphs to its respective arrays
line1.set_ydata(data_in)
line2.set_ydata(data_out)
            # Draw each figure's canvas
            fig1.canvas.draw()
            fig2.canvas.draw()
            # Flush the GUI events so the figures stay responsive
            fig1.canvas.flush_events()
            fig2.canvas.flush_events()
            # Sleep briefly so the loop doesn't hog CPU cycles
time.sleep(0.01)
# Close the plots
plt.close(fig1)
plt.close(fig2)
return
    # This function serves the same purpose as AudioStream's startStreamLoop(self)
def startProcessLoop(self):
if (self._thread is not None):
if (self._thread.is_alive()):
return False
self._thread = None
self._loop_active = True
self._thread = threading.Thread(target=self._doProcessLoop)
self._thread.start()
return True
    # This function serves the same purpose as AudioStream's haltStreamLoop(self)
def haltProcessLoop(self):
if (self._thread is None):
return False
elif (not self._thread.is_alive()):
self._thread = None
return False
self._loop_active = False
self._thread.join(timeout = 1)
self._thread = None
        return True
def _test():
audio_processor = AudioProcessor()
audio_processor.startProcessLoop()
input("blocked; press any key to exit")
audio_processor.haltProcessLoop()
return
if __name__ == "__main__":
_test()
input("blocked; press any key to continue")
_test()
|
util.py
|
# Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import subprocess
import os
import sys
import logging
import time
import settings
import re
import uuid
import requests_unixsocket
import requests
from threading import Thread
from threading import Event
# We use an integer for time and record microseconds.
SECONDSTOMICROSECONDS = 1000000
def all_subclasses(obj):
"""Used to introspect all descendents of a class. Used because metaclasses
are a PITA when doing multiple inheritance"""
sc_recr = []
for sc_obj in obj.__subclasses__():
sc_recr.append(sc_obj)
sc_recr.extend(all_subclasses(sc_obj))
return sc_recr
def time_str(dt):
return time.strftime("%Y-%m-%dT%H:%M:%S", dt.timetuple())
def sizeof_fmt(num):
# http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size/1094933#1094933
for x in ["bytes", "KB", "MB", "GB", "TB", "EB", "ZB", "YB"]:
if num < 1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
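# Examples (illustrative values):
#
#     >>> sizeof_fmt(123456)
#     '120.6KB'
#     >>> sizeof_fmt(3 * 1024 ** 3)
#     '3.0GB'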
def sizeof_fmt_detailed(num):
for x in ["", "kB", "MB", "GB", "TB", "EB", "ZB", "YB"]:
if num < 1024.0 * 1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return int(num)
def target_label_split(label):
"""
    Split a target label into a tuple of its parts: (fsname, target type, index)
"""
a = label.rsplit("-", 1)
if len(a) == 1:
# MGS
return (None, a[0][0:3], None)
return (a[0], a[1][0:3], int(a[1][3:], 16))
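# Examples (illustrative labels):
#
#     >>> target_label_split("testfs-OST0001")
#     ('testfs', 'OST', 1)
#     >>> target_label_split("MGS")
#     (None, 'MGS', None)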
class timeit(object):
def __init__(self, logger):
self.logger = logger
def __call__(self, method):
from functools import wraps
@wraps(method)
def timed(*args, **kw):
if self.logger.level <= logging.DEBUG:
ts = time.time()
result = method(*args, **kw)
te = time.time()
print_args = False
if print_args:
self.logger.debug(
"Ran %r (%s, %r) in %2.2fs"
% (method.__name__, ", ".join(["%s" % (a,) for a in args]), kw, te - ts)
)
else:
self.logger.debug("Ran %r in %2.2fs" % (method.__name__, te - ts))
return result
else:
return method(*args, **kw)
return timed
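# Usage sketch (illustrative; assumes a module-level logger):
#
#     log = logging.getLogger(__name__)
#
#     @timeit(log)
#     def expensive_call():
#         ...
#
# The elapsed time is only logged when the logger's level is set to DEBUG
# (or NOTSET); otherwise the decorated method runs untimed.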
class dbperf(object):
enabled = False
logger = logging.getLogger("dbperf")
def __init__(self, label=""):
# Avoid importing this at module scope in order
# to co-habit with chroma_settings()
from django.db import connection
self.connection = connection
self.label = label
self.logger.disabled = not self.enabled
if self.enabled and not len(self.logger.handlers):
self.logger.setLevel(logging.DEBUG)
self.logger.addHandler(logging.FileHandler("dbperf.log"))
def __enter__(self):
if settings.DEBUG:
self.t_initial = time.time()
self.q_initial = len(self.connection.queries)
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.enabled:
return
self.t_final = time.time()
self.q_final = len(self.connection.queries)
t = self.t_final - self.t_initial
q = self.q_final - self.q_initial
if q:
logfile = open("%s.log" % self.label, "w")
for query in self.connection.queries[self.q_initial :]:
logfile.write("(%s) %s\n" % (query["time"], query["sql"]))
logfile.close()
if q:
avg_t = int((t / q) * 1000)
else:
avg_t = 0
self.logger.debug("%s: %d queries in %.2fs (avg %dms)" % (self.label, q, t, avg_t))
self.q = q
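# Usage sketch (illustrative): dbperf is a context manager; query counts are
# only gathered when both ``dbperf.enabled`` and ``settings.DEBUG`` are set.
#
#     dbperf.enabled = True
#     with dbperf("expensive_view"):
#         run_orm_queries()  # hypothetical callable that issues Django queries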
def site_dir():
def _search_path(path):
if os.path.exists(os.path.join(path, "settings.py")):
return path
else:
if path == "/":
raise RuntimeError("Can't find settings.py")
else:
return _search_path(os.path.dirname(path))
return _search_path(os.path.dirname(__file__))
def chroma_settings():
"""
Walk back up parent directories until settings.py is found.
Insert that directory as the first entry in sys.path.
Import the settings module, then return it to the caller.
"""
sys.path.insert(0, site_dir())
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
import settings
return settings
class CommandError(Exception):
def __init__(self, cmd, rc, stdout, stderr):
self.cmd = cmd
self.rc = rc
self.stdout = stdout
self.stderr = stderr
def __str__(self):
return """Command failed: %s
return code %s
stdout: %s
stderr: %s""" % (
self.cmd,
self.rc,
self.stdout,
self.stderr,
)
class CommandLine(object):
def try_shell(self, cmdline, mystdout=subprocess.PIPE, mystderr=subprocess.PIPE, stdin_text=None, shell=False):
rc, out, err = self.shell(cmdline, mystdout, mystderr, stdin_text, shell=shell)
if rc != 0:
raise CommandError(cmdline, rc, out, err)
else:
return rc, out, err
def shell(self, cmdline, mystdout=subprocess.PIPE, mystderr=subprocess.PIPE, stdin_text=None, shell=False):
if stdin_text is not None:
stdin = subprocess.PIPE
else:
stdin = None
p = subprocess.Popen(cmdline, stdout=mystdout, stderr=mystderr, stdin=stdin, shell=shell)
if stdin_text is not None:
p.stdin.write(stdin_text)
out, err = p.communicate()
rc = p.wait()
return rc, out, err
def normalize_nids(nid_list):
"""Cope with the Lustre and users sometimes calling tcp0 'tcp' to allow
direct comparisons between NIDs"""
return [normalize_nid(n) for n in nid_list]
def normalize_nid(string):
"""Cope with the Lustre and users sometimes calling tcp0 'tcp' to allow
direct comparisons between NIDs"""
if not re.search(r"\d+$", string):
string += "0"
    # remove _ from nids (e.g. @tcp_0 -> @tcp0)
i = string.find("_")
if i > -1:
string = string[:i] + string[i + 1 :]
return string
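# Examples (illustrative NIDs):
#
#     >>> normalize_nid("10.0.0.1@tcp")
#     '10.0.0.1@tcp0'
#     >>> normalize_nid("10.0.0.1@tcp_0")
#     '10.0.0.1@tcp0'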
def runningInDocker():
with open("/proc/self/cgroup", "r") as procfile:
for line in procfile:
fields = line.strip().split("/")
if fields[1] == "docker":
return True
return False
def post_data_to_tcp_or_socket(post_data):
if runningInDocker():
return requests.post("http://{}:{}".format(settings.PROXY_HOST, settings.ACTION_RUNNER_PORT), json=post_data)
SOCKET_PATH = "http+unix://%2Fvar%2Frun%2Fiml-action-runner.sock/"
return requests_unixsocket.post(SOCKET_PATH, json=post_data)
def start_action_local_with_tcp_or_socket(command, args, request_id):
post_data = {"LOCAL": {"type": "ACTION_START", "action": command, "args": args, "id": str(request_id)}}
return post_data_to_tcp_or_socket(post_data)
def cancel_action_local_with_tcp_or_socket(request_id):
post_data = {"LOCAL": {"type": "ACTION_CANCEL", "id": str(request_id)}}
return post_data_to_tcp_or_socket(post_data)
def start_action_with_tcp_or_socket(host, command, args, request_id):
post_data = {"REMOTE": (host, {"type": "ACTION_START", "action": command, "args": args, "id": str(request_id)})}
return post_data_to_tcp_or_socket(post_data)
def cancel_action_with_tcp_or_socket(host, request_id):
post_data = {"REMOTE": (host, {"type": "ACTION_CANCEL", "id": str(request_id)})}
return post_data_to_tcp_or_socket(post_data)
class RustAgentCancellation(Exception):
pass
def invoke_rust_local_action(command, args={}, cancel_event=Event()):
"""
Talks to the iml-action-runner service
"""
request_id = uuid.uuid4()
trigger = Event()
class ActionResult:
ok = None
error = None
def start_action(ActionResult, trigger):
try:
ActionResult.ok = start_action_local_with_tcp_or_socket(command, args, request_id).content
except Exception as e:
ActionResult.error = e
finally:
trigger.set()
t = Thread(target=start_action, args=(ActionResult, trigger))
t.start()
# Wait for action completion, waking up every second to
# check cancel_event
while True:
if cancel_event.is_set():
cancel_action_local_with_tcp_or_socket(request_id).content
raise RustAgentCancellation()
else:
trigger.wait(timeout=1.0)
if trigger.is_set():
break
if ActionResult.error is not None:
raise ActionResult.error
else:
return ActionResult.ok
def invoke_rust_agent(host, command, args={}, cancel_event=Event()):
"""
Talks to the iml-action-runner service
"""
request_id = uuid.uuid4()
trigger = Event()
class ActionResult:
ok = None
error = None
def start_action(ActionResult, trigger):
try:
ActionResult.ok = start_action_with_tcp_or_socket(host, command, args, request_id).content
except Exception as e:
ActionResult.error = e
finally:
trigger.set()
t = Thread(target=start_action, args=(ActionResult, trigger))
t.start()
# Wait for action completion, waking up every second to
# check cancel_event
while True:
if cancel_event.is_set():
cancel_action_with_tcp_or_socket(host, request_id).content
raise RustAgentCancellation()
else:
trigger.wait(timeout=1.0)
if trigger.is_set():
break
if ActionResult.error is not None:
raise ActionResult.error
else:
return ActionResult.ok
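# Usage sketch (illustrative; the host and action names are made up):
#
#     cancel = Event()
#     try:
#         raw = invoke_rust_agent("oss1.example.com", "start_unit",
#                                 {"unit": "lustre"}, cancel_event=cancel)
#     except RustAgentCancellation:
#         pass  # another thread called cancel.set() before the action finished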
|
main.py
|
# coding=utf-8
# 18:34 24/09/19 Project starts.
# 16:31 29/09/19 Entire project restructured.
# 23:40 11/10/19 First version (1.0) finished.
# 20:41 14/10/19 Version (1.0) debugged.
# 16:46 26/10/19 Project successfully modularized.
# 23:57 26/10/19 Version (1.1) finished and debugged.
# Generic imports
import os
import sys
import threading
from webbrowser import open as wopen
# Imported files
import egg
import size
import auto
# Tkinter imports
from tkinter import *
from tkinter import filedialog
from tkinter import messagebox
from tkinter import ttk
class Main:
def __init__(self):
self.raiz = Tk()
# --------NORMAL VARIABLES--------------
self.raiz.geometry("660x525")
self.raiz.title("!bin - Keeping things simple!")
self.raiz.resizable(False, False)
self.raiz.iconbitmap(self.resource_path("assets/bin_small_cont.ico"))
self.hOffset = 115
self.vOffset = -95
self.dirEntryVar = StringVar()
self.manualDir = ""
self.checkVal = IntVar()
self.checkVal.set(1)
self.sizeVar = StringVar()
self.sizeVar.set("0.0 MB")
self.videoListLen = 0
# ----------INTERFACE INSTANCES----------------
self.frame1 = Frame(
self.raiz,
width=660,
height=620,
)
self.container2 = ttk.LabelFrame(
self.frame1,
width=600,
height=300,
text="Results"
)
self.frame2 = Frame(
self.frame1,
width=378,
height=242,
)
self.container1 = ttk.LabelFrame(
self.frame1,
width=470,
height=140,
text="Scan videos"
)
self.sizeLabel = ttk.Label(
self.frame1,
text="Memory occupied by videos:",
font=("Calibri", 10)
)
self.songDirLabel = ttk.Label(
self.frame1,
text="Custom 'Songs' folder:",
font=("Calibri", 11)
)
self.sizeLabelDyn = ttk.Label(
self.frame1,
textvariable=self.sizeVar,
font=("Calibri", 11),
)
self.authorLabel = ttk.Label(
self.frame1,
text="Axyss - 2019 ©",
font=("Calibri", 11)
)
self.checkBoxLabel = Label(
self.frame1,
text="Use default 'Songs' folder",
font=("Calibri", 11)
)
self.checkBox1 = ttk.Checkbutton(
self.frame1,
takefocus=False,
cursor="hand2",
variable=self.checkVal,
command=lambda: self.check_switch(),
onvalue=1,
offvalue=0
)
self.dirEntryWidget = ttk.Entry(
self.frame1,
width=50,
textvariable=self.dirEntryVar,
state="disabled"
)
self.browseButton = ttk.Button(
self.frame1,
text="Browse...",
width=13,
command=lambda: self.browse_window(),
state="disabled"
)
self.progressBar = ttk.Progressbar(
self.frame1,
orient="horizontal",
length=128,
mode="determinate",
maximum=99999
        )  # Defined here because it must be rendered before the findVideosButton
self.findVideosButton = ttk.Button(
self.frame1,
text="Find videos",
width=20,
command=lambda: self.find_thread()
)
self.videoList = Listbox(
self.frame2,
width=72,
height=15,
borderwidth=0,
highlightthickness=1,
relief="solid",
highlightbackground="#A4A4A4",
)
self.yscrollVideo = ttk.Scrollbar(
self.frame2,
command=self.videoList.yview
)
self.xscrollVideo = ttk.Scrollbar(
self.frame2,
command=self.videoList.xview,
orient="horizontal"
)
self.videoList.config(yscrollcommand=self.yscrollVideo.set)
self.videoList.config(xscrollcommand=self.xscrollVideo.set)
self.deleteButton = ttk.Button(
self.frame1,
text="Delete videos",
width=15,
command=lambda: self.delete_thread()
)
# ---------------ICON SET-UP---------------
self.aminoBut = Button(self.frame1)
self.aminoIco = PhotoImage(file=self.resource_path("assets/amino_ico.png"))
self.aminoBut.config(
image=self.aminoIco,
border=0,
cursor="hand2",
relief="sunken",
takefocus=False,
command=lambda: wopen(
"https://aminoapps.com/c/osu-amino-2/join/"
)
)
self.twitterBut = Button(self.frame1)
self.twitterIco = PhotoImage(file=self.resource_path("assets/twitter_ico.png"))
self.twitterBut.config(
image=self.twitterIco,
border=0,
cursor="hand2",
relief="sunken",
takefocus=False,
command=lambda: wopen(
"https://twitter.com/Axyss_"
)
)
self.githubBut = Button(self.frame1)
self.githubIco = PhotoImage(file=self.resource_path("assets/github_ico.png"))
self.githubBut.config(
image=self.githubIco,
border=0,
cursor="hand2",
relief="sunken",
takefocus=False,
command=lambda: wopen(
"https://github.com/Axyss"
)
)
self.binBut = Button(self.frame1)
self.binIco = PhotoImage(file=self.resource_path("assets/bin_ico.png"))
self.binBut.config(
image=self.binIco,
border=0,
relief="sunken",
takefocus=False,
command=lambda: self.egg_run()
)
# ------------------------------MAIN METHODS------------------------------
def start(self):
self.reset() # Removes all info from previous executions
if (auto.auto_obj.choose_dir(self.checkVal.get(), self.dirEntryVar.get())) == "error":
return None # Stops the function in case some part of choose_dir() returns error
self.findVideosButton.config(state="disabled") # Disables the find videos button
self.deleteButton.config(state="disabled") # Disables the delete button
auto.auto_obj.check_dirs()
auto.auto_obj.filter_videos()
self.gen_listbox(auto.auto_obj.filtered_video_list) # Generates the listbox with the file names
self.sizeVar.set(size.size_obj.obtain_size(auto.auto_obj.filtered_video_list)) # Updates the file size label
self.findVideosButton.config(state="enabled") # Enables the find videos button
self.deleteButton.config(state="enabled") # Enables the Delete videos button
# Shows the appropriate window
if len(auto.auto_obj.filtered_video_list) == 0:
messagebox.showinfo("Information", "Congrats, you have no videos :)")
else:
messagebox.showinfo("Information", "Scanning completed!")
def browse_window(self):
"""Manages the custom window option"""
self.manualDir = filedialog.askdirectory(initialdir="/")
if self.manualDir != "":
os.chdir(self.manualDir)
print("Entering", self.manualDir)
self.dirEntryVar.set(self.manualDir)
def reset(self):
"""Calls all reset methods from any other
classes and resets the own class variables"""
size.size_obj.reset()
self.sizeVar.set("0.0 MB")
auto.auto_obj.reset()
self.videoList.delete(0, END)
def delete(self):
"""Deletes filtered beatmaps"""
if len(auto.auto_obj.filtered_video_list) == 0:
messagebox.showerror("Error", "First run a scan.")
return None
else:
decision = messagebox.askquestion(
"Warn",
"Are you sure you want to delete?\nThis action cannot be undone."+
"\n\nTip: make a backup of your 'Songs' folder first.",
icon='warning'
)
if decision == "yes":
for i in auto.auto_obj.filtered_video_list:
os.remove(i)
# print (f"File {i} removed.") # Debug
self.reset()
messagebox.showinfo(
"Information",
"All beatmap videos were successfully deleted."
)
else:
return None
    def increase_progress(self, video_list):
        """Increases the progressbar value based on the length of the video list"""
        # How much to increase the progressbar on each iteration
        self.videoListLen = 100000 / len(video_list)
        self.progressBar.step(self.videoListLen)
def gen_listbox(self, video_list):
"""Draw the parameter given in the listbox"""
for item in video_list:
pos1 = item.rfind("/") # Finds the first slash
sub_item = item[0:pos1] # Creates a subString from 0 to the first slash
pos2 = sub_item.rfind("/") # Finds the second slash using the last subString
final_item = item[pos2:] # Creates a subString from second slash to the end
self.videoList.insert(END, " " + final_item) # Sets the beatmap name in the listbox
self.increase_progress(video_list)
def check_switch(self):
"""Alternates the state of the checkbutton, entry and browse button"""
if self.checkVal.get() == 0:
self.dirEntryWidget.config(state="normal")
self.browseButton.config(state="normal")
else:
self.dirEntryWidget.config(state="disabled")
self.browseButton.config(state="disabled")
def find_thread(self):
"""Starts the function start() in a different thread."""
        threading.Thread(target=self.start).start()
def delete_thread(self):
"""Starts the function delete() in a different thread"""
        threading.Thread(target=self.delete).start()
def egg_run(self):
if egg.egg_obj.egg():
del self.binIco
del self.binBut
Label(self.frame1,
text="I'll be back\n -Binny",
font=("COMIC SANS MS", 15)
).place(
x=25,
y=55
)
def resource_path(self, relative_path):
"""Folder getter in case onefile mode is used in Pyinstaller"""
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
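    # Note (illustrative): resource_path() is what keeps asset lookups working
    # both from source and from a frozen onefile bundle, e.g.
    #
    #     PhotoImage(file=self.resource_path("assets/github_ico.png"))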
# -------------------------OSU!BIN GRAPHICAL DESIGN---------------------------
def render(self):
"""Renders the graphical interface"""
# Frames
self.frame1.pack()
self.frame2.place(x=55, y=300 + self.vOffset)
# Containers
self.container1.place(x=45 + self.hOffset, y=120 + self.vOffset)
self.container2.place(x=30, y=272 + self.vOffset)
# Labels
self.sizeLabel.place(x=10, y=595 + self.vOffset)
self.sizeLabelDyn.place(x=170, y=593 + self.vOffset)
self.authorLabel.place(x=550, y=594 + self.vOffset)
self.checkBoxLabel.place(x=110 + self.hOffset, y=148 + self.vOffset)
self.songDirLabel.place(x=60 + self.hOffset, y=190 + self.vOffset)
# Buttons
self.findVideosButton.place(x=368 + self.hOffset, y=148 + self.vOffset)
self.deleteButton.place(x=515, y=400 + self.vOffset)
self.browseButton.place(x=410 + self.hOffset, y=218 + self.vOffset)
# Icon buttons
self.binBut.place(x=13, y=25)
self.twitterBut.place(x=590, y=434)
self.githubBut.place(x=550, y=434)
# Scrollbars
self.yscrollVideo.grid(row=0, column=1, sticky="ns")
self.xscrollVideo.grid(row=1, column=0, sticky="ew")
# Misc
self.checkBox1.place(x=80 + self.hOffset, y=150 + self.vOffset)
self.dirEntryWidget.place(x=80 + self.hOffset, y=220 + self.vOffset)
self.videoList.grid(row=0, column=0)
self.progressBar.place(x=484, y=64)
# Unused
# self.aminoBut.place(x=508,y=432)
self.raiz.mainloop()
if __name__ == "__main__":
windowo = Main()
if os.name != "nt": # If the user does not use a Windows system, then the warn down below will be shown up.
messagebox.showinfo("Warn",
"Oh oh, looks like you are not using Windows,\n" +
"automatic folder detection may not work for you.\n" +
"Use the: 'custom Songs folder' option if this happens.",
icon='warning'
)
windowo.render()
|
app.py
|
# encoding: utf-8
'''
A REST API for Salt
===================
.. versionadded:: 2014.7.0
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends:
- CherryPy Python module. Version 3.2.3 is currently recommended when
SSL is enabled, since this version worked the best with SSL in
internal testing. Versions 3.2.3 - 4.x can be used if SSL is not enabled.
Be aware that there is a known
`SSL error <https://github.com/cherrypy/cherrypy/issues/1298>`_
introduced in version 3.2.5. The issue was reportedly resolved with
CherryPy milestone 3.3, but the patch was committed for version 3.6.1.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log_access_file
Path to a file to write HTTP access logs.
        .. versionadded:: 2016.11.0
log_error_file
Path to a file to write HTTP error logs.
        .. versionadded:: 2016.11.0
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
app : ``index.html``
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`client interfaces <netapi-clients>`
documentation, but in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
wheel modules on the Master. Wheel modules do not have a direct CLI
equivalent but they typically manage Master-side resources such as state
files, pillar files, the Salt config files, and the :py:mod:`key wheel module
<salt.wheel.key>` exposes similar functionality as the ``salt-key`` CLI
command.
Most clients have variants like synchronous or asynchronous execution as well as
others like batch execution. See the :ref:`full list of client interfaces
<netapi-clients>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy", true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
and cannot express complex data structures -- and that is often required for
some Salt commands, such as starting a state run that uses Pillar data. Salt's
CLI tool can reformat strings passed in at the CLI into complex data
structures, and that behavior also works via salt-api, but that can be brittle
and since salt-api can accept JSON it is best just to send JSON.
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
from __future__ import absolute_import
import collections
import itertools
import functools
import logging
import json
import os
import signal
import tarfile
import time
from multiprocessing import Process, Pipe
# Import third-party libs
# pylint: disable=import-error
import cherrypy # pylint: disable=3rd-party-module-not-gated
import yaml
import salt.ext.six as six
# pylint: enable=import-error
# Import Salt libs
import salt
import salt.auth
import salt.utils
import salt.utils.event
# Import salt-api libs
import salt.netapi
logger = logging.getLogger(__name__)
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def html_override_tool():
'''
Bypass the normal handler and serve HTML for all URLs
The ``app_path`` setting must be non-empty and the request must ask for
``text/html`` in the ``Accept`` header.
'''
apiopts = cherrypy.config['apiopts']
request = cherrypy.request
url_blacklist = (
apiopts.get('app_path', '/app'),
apiopts.get('static_path', '/static'),
)
if 'app' not in cherrypy.config['apiopts']:
return
if request.path_info.startswith(url_blacklist):
return
if request.headers.get('Accept') == '*/*':
return
try:
wants_html = cherrypy.lib.cptools.accept('text/html')
except cherrypy.HTTPError:
return
else:
if wants_html != 'text/html':
return
raise cherrypy.InternalRedirect(apiopts.get('app_path', '/app'))
def salt_token_tool():
'''
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
'''
x_auth = cherrypy.request.headers.get('X-Auth-Token', None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie['session_id'] = x_auth
def salt_api_acl_tool(username, request):
'''
    .. versionadded:: 2016.3.0
    Verifies user requests against the API whitelist (user/IP pairs)
    in order to provide whitelisting for the API, similar to the
    master, but at the API level.
    .. code-block:: yaml
rest_cherrypy:
api_acl:
users:
'*':
- 1.1.1.1
- 1.1.1.2
foo:
- 8.8.4.4
bar:
- '*'
:param username: Username to check against the API.
:type username: str
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
'''
failure_str = ("[api_acl] Authentication failed for "
"user {0} from IP {1}")
    success_str = ("[api_acl] Authentication successful for "
                   "user {0} from IP {1}")
pass_str = ("[api_acl] Authentication not checked for "
"user {0} from IP {1}")
acl = None
# Salt Configuration
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
# Cherrypy Config.
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
# ACL Config.
acl = cherrypy_conf.get('api_acl', None)
ip = request.remote.ip
if acl:
users = acl.get('users', {})
if users:
if username in users:
if ip in users[username] or '*' in users[username]:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
elif username not in users and '*' in users:
if ip in users['*'] or '*' in users['*']:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(pass_str.format(username, ip))
return True
def salt_ip_verify_tool():
'''
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
'''
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get('authorized_ips', None)
if auth_ip_list:
logger.debug("Found IP list: {0}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
logger.debug("Request from IP: {0}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {0}".format(rem_ip))
raise cherrypy.HTTPError(403, 'Bad IP')
def salt_auth_tool():
'''
Redirect all unauthenticated requests to the login page
'''
# Redirect to the login page if the session hasn't been authed
if 'token' not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers['Cache-Control'] = 'private'
def cors_handler(*args, **kwargs):
'''
Check a CORS preflight request and return a valid response
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
ac_method = req_head.get('Access-Control-Request-Method', None)
allowed_methods = ['GET', 'POST']
allowed_headers = ['X-Auth-Token', 'Content-Type']
if ac_method and ac_method in allowed_methods:
resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
resp_head['Connection'] = 'keep-alive'
resp_head['Access-Control-Max-Age'] = '1400'
return {}
def cors_tool():
'''
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
resp_head['Access-Control-Allow-Credentials'] = 'true'
# If this is a non-simple CORS preflight request swap out the handler.
if cherrypy.request.method == 'OPTIONS':
cherrypy.serving.request.handler = cors_handler
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
('application/json', json.dumps),
('application/x-yaml', functools.partial(
yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
'''
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
'''
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError):
raise cherrypy.HTTPError(401)
except salt.exceptions.SaltInvocationError:
raise cherrypy.HTTPError(400)
except (salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError) as exc:
raise cherrypy.HTTPError(503, exc.strerror)
except (cherrypy.TimeoutError, salt.exceptions.SaltClientTimeout):
raise cherrypy.HTTPError(504)
except cherrypy.CherryPyException:
raise
except Exception as exc:
import traceback
logger.debug("Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True)
cherrypy.response.status = 500
ret = {
'status': cherrypy.response.status,
            'return': '{0}'.format(traceback.format_exc())
if cherrypy.config['debug']
else "An unexpected error occurred"}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers['Content-Type'] = best
out = cherrypy.response.processors[best]
try:
response = out(ret)
if six.PY3:
response = salt.utils.to_bytes(response)
return response
except Exception:
msg = 'Could not serialize the return data from Salt.'
logger.debug(msg, exc_info=True)
raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
request.handler = hypermedia_handler
def process_request_body(fn):
'''
A decorator to skip a processor function if process_request_body is False
'''
@functools.wraps(fn)
def wrapped(*args, **kwargs): # pylint: disable=C0111
if cherrypy.request.process_request_body is not False:
fn(*args, **kwargs)
return wrapped
def urlencoded_processor(entity):
'''
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
'''
if six.PY3:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = six.StringIO()
entity.fp.read(fp_out=contents)
contents.seek(0)
body_str = contents.read()
body_bytes = salt.utils.to_bytes(body_str)
body_bytes = six.BytesIO(body_bytes)
body_bytes.seek(0)
# Patch fp
entity.fp = body_bytes
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ''
@process_request_body
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = six.StringIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = contents.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
'''
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = six.StringIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = contents.read()
try:
cherrypy.serving.request.unserialized_data = yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid YAML document')
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
'''
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = six.StringIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = contents.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and isinstance(data, collections.Mapping):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
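# Illustrative sketch (not from the Salt docs): after hypermedia_in and
# lowdata_fmt have run, a urlencoded body such as
#
#     client=local&tgt=*&fun=test.arg&arg=one&arg=two
#
# ends up on the request as a single-chunk lowstate list roughly like
#
#     cherrypy.request.lowstate = [
#         {'client': 'local', 'tgt': '*', 'fun': 'test.arg', 'arg': ['one', 'two']}
#     ]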
cherrypy.tools.html_override = cherrypy.Tool('on_start_resource',
html_override_tool, priority=53)
cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource',
salt_token_tool, priority=55)
cherrypy.tools.cors_tool = cherrypy.Tool('before_request_body',
cors_tool, priority=50)
cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body',
salt_auth_tool, priority=60)
cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body',
hypermedia_in)
cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler',
lowdata_fmt, priority=40)
cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler',
hypermedia_out)
cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler',
salt_ip_verify_tool)
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
# if the lowstate loaded isn't a list, lets notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if cherrypy.session.get('user'):
chunk['__current_eauth_user'] = cherrypy.session.get('user')
if cherrypy.session.get('groups'):
chunk['__current_eauth_groups'] = cherrypy.session.get('groups')
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
def GET(self):
'''
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: http
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
'''
import inspect
return {
'return': "Welcome",
'clients': salt.netapi.CLIENTS,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-H "Content-type: application/json" \\
-d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping"}]
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
class Minions(LowDataAdapter):
'''
Convenience URLs for working with minions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: http
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
}
def POST(self, **kwargs):
'''
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-d '[{"tgt": "*", "fun": "status.diskusage"}]'
.. code-block:: http
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Type: application/json
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: http
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
'''
job_data = list(self.exec_lowstate(client='local_async',
token=cherrypy.session.get('token')))
cherrypy.response.status = 202
return {
'return': job_data,
'_links': {
'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, jid=None, timeout=''):
'''
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: http
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: http
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
'''
lowstate = [{
'client': 'runner',
'fun': 'jobs.list_job' if jid else 'jobs.list_jobs',
'jid': jid,
}]
cherrypy.request.lowstate = lowstate
job_ret_info = list(self.exec_lowstate(
token=cherrypy.session.get('token')))
ret = {}
if jid:
ret['info'] = [job_ret_info[0]]
minion_ret = {}
returns = job_ret_info[0].get('Result')
for minion in returns:
if u'return' in returns[minion]:
minion_ret[minion] = returns[minion].get(u'return')
else:
minion_ret[minion] = returns[minion].get('return')
ret['return'] = [minion_ret]
else:
ret['return'] = [job_ret_info[0]]
return ret
class Keys(LowDataAdapter):
'''
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:mod:`key wheel
module <salt.wheel.key>` functions.
'''
@cherrypy.config(**{'tools.salt_token.on': True})
def GET(self, mid=None):
'''
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: http
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: http
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
'''
if mid:
lowstate = [{
'client': 'wheel',
'fun': 'key.finger',
'match': mid,
}]
else:
lowstate = [{
'client': 'wheel',
'fun': 'key.list_all',
}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get('token'))
return {'return': next(result, {}).get('data', {}).get('return', {})}
@cherrypy.config(**{'tools.hypermedia_out.on': False, 'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
Easily generate keys for a minion and auto-accept the new key
Accepts all the same parameters as the :py:func:`key.gen_accept
<salt.wheel.key.gen_accept>` function.
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: http
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
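The returned tarball can also be consumed from Python; a minimal sketch
(an illustration only, assuming the third-party ``requests`` library):

.. code-block:: python

    import io
    import tarfile

    import requests

    resp = requests.post('https://localhost:8000/keys', verify=False, data={
        'mid': 'jerry', 'username': 'kickstart',
        'password': 'kickstart', 'eauth': 'pam'})
    with tarfile.open(fileobj=io.BytesIO(resp.content)) as tar:
        tar.extractall('/etc/salt/pki/minion')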
'''
lowstate = cherrypy.request.lowstate
lowstate[0].update({
'client': 'wheel',
'fun': 'key.gen_accept',
})
if 'mid' in lowstate[0]:
lowstate[0]['id_'] = lowstate[0].pop('mid')
result = self.exec_lowstate()
ret = next(result, {}).get('data', {}).get('return', {})
pub_key = ret.get('pub', '')
pub_key_file = tarfile.TarInfo('minion.pub')
pub_key_file.size = len(pub_key)
priv_key = ret.get('priv', '')
priv_key_file = tarfile.TarInfo('minion.pem')
priv_key_file.size = len(priv_key)
fileobj = six.StringIO()
tarball = tarfile.open(fileobj=fileobj, mode='w')
tarball.addfile(pub_key_file, six.StringIO(pub_key))
tarball.addfile(priv_key_file, six.StringIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(lowstate[0]['id_'])
headers['Content-Type'] = 'application/x-tar'
headers['Content-Length'] = fileobj.len
headers['Cache-Control'] = 'no-cache'
fileobj.seek(0)
return fileobj
class Login(LowDataAdapter):
'''
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
'''
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
'''
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: http
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: text/html
'''
cherrypy.response.headers['WWW-Authenticate'] = 'Session'
return {
'status': cherrypy.response.status,
'return': "Please log in",
}
def POST(self, **kwargs):
'''
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-c ~/cookies.txt \\
-H "Accept: application/json" \\
-H "Content-type: application/json" \\
-d '{
"username": "saltuser",
"password": "saltuser",
"eauth": "auto"
}'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/json
Accept: application/json
{"username": "saltuser", "password": "saltuser", "eauth": "auto"}
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
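The same exchange from Python might look like the following (a minimal
sketch using the third-party ``requests`` library; a ``requests.Session``
keeps the session cookie for later calls):

.. code-block:: python

    import requests

    session = requests.Session()
    resp = session.post('http://localhost:8000/login', json={
        'username': 'saltuser', 'password': 'saltuser', 'eauth': 'auto'})
    token = resp.json()['return'][0]['token']
    # The token can also be sent explicitly via the X-Auth-Token header:
    session.headers['X-Auth-Token'] = token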
'''
if not self.api._is_master_running():
raise salt.exceptions.SaltDaemonNotRunning(
'Salt Master is not available.')
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
username = creds.get('username', None)
# Validate against the whitelist.
if not salt_api_acl_tool(username, cherrypy.request):
raise cherrypy.HTTPError(401)
# Mint token.
token = self.auth.mk_token(creds)
if 'token' not in token:
raise cherrypy.HTTPError(401,
'Could not authenticate using provided credentials')
cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
cherrypy.session['token'] = token['token']
cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
cherrypy.session['user'] = token['name']
if 'groups' in token:
cherrypy.session['groups'] = token['groups']
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
if token['eauth'] == 'django' and '^model' in eauth:
perms = token['auth_list']
else:
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token['name'], [])
perms.extend(eauth.get('*', []))
if 'groups' in token and token['groups']:
user_groups = set(token['groups'])
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
for group in user_groups & eauth_groups:
perms.extend(eauth['{0}%'.format(group)])
if not perms:
logger.debug("Eauth permission list not found.")
except Exception:
logger.debug("Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
perms = None
return {'return': [{
'token': cherrypy.session.id,
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms or {},
}]}
class Logout(LowDataAdapter):
'''
Class to remove or invalidate sessions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
'tools.lowdata_fmt.on': False,
})
def POST(self):
'''
Destroy the currently active session and expire the session cookie
'''
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
salt-api does not enforce authorization, Salt's eauth system does that.
Local/Runner/WheelClient all accept ``username``/``password``/``eauth``
**or** ``token`` kwargs that are then checked by the eauth system. The
session mechanism in ``rest_cherrypy`` simply pairs a session with a Salt
eauth token and then passes the ``token`` kwarg in automatically.
If you already have a Salt eauth token, perhaps generated by the
:py:func:`mk_token <salt.runners.auth.mk_token>` function in the Auth
Runner module, then there is no reason to use sessions.
This endpoint accepts either a ``username``, ``password``, ``eauth`` trio,
**or** a ``token`` kwarg and does not make use of sessions at all.
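For illustration, a token-based call from Python (a sketch using the
third-party ``requests`` library) needs no prior /login call:

.. code-block:: python

    import requests

    resp = requests.post('http://localhost:8000/run', json=[{
        'client': 'local', 'tgt': '*', 'fun': 'test.ping',
        'token': '<salt eauth token here>'}])
    print(resp.json()['return'])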
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.sessions.on': False,
})
def POST(self, **kwargs):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>` Other than that this URL is identical to the
:py:meth:`root URL (/) <LowDataAdapter.POST>`.
.. http:post:: /run
An array of :term:`lowstate` data describing Salt commands must be
sent in the request body.
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}]'
**Or** using a Salt Eauth token:
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"token": "<salt eauth token here>"
}]'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping", "username": "saltdev", "password": "saltdev", "eauth": "auto"}]
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
The /run endpoint can also be used to issue commands using the salt-ssh
subsystem.
When using salt-ssh, eauth credentials should not be supplied. Instead,
authentication should be handled by the SSH layer itself. The use of
the salt-ssh client does not require a salt master to be running.
Instead, only a roster file must be present in the salt configuration
directory.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d fun='test.ping'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=ssh&tgt=*&fun=test.ping
**Example SSH response:**
.. code-block:: http
return:
- silver:
fun: test.ping
fun_args: []
id: silver
jid: '20141203103525666185'
retcode: 0
return: true
success: true
'''
return {
'return': list(self.exec_lowstate()),
}
class Events(object):
'''
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
'''
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
'''
if auth_token is None:
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_session.get('token', auth_token)
# The eauth system does not currently support perms for the event
# stream, so we're just checking if the token exists not if the token
# allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: http
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE compliant clients
should ignore unknown fields. This addition allows non-compliant
clients to only watch for certain tags without having to deserialize the
JSON object each time.
.. code-block:: http
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.info('Listening ...') };
source.onerror = function(err) { console.error(err) };
source.onmessage = function(message) {
var saltEvent = JSON.parse(message.data);
console.log(saltEvent.tag, saltEvent.data);
};
Note, the SSE stream is completely asynchronous and Salt is very fast.
If a job is created using a regular POST request, it is possible that
the job return will be available on the SSE stream before the response
for the POST request arrives. It is important to take that
asynchronicity into account when designing an application. Below are
some general guidelines.
* Subscribe to the SSE stream _before_ creating any events.
* Process SSE events directly as they arrive and don't wait for any
other process to "complete" first (like an ajax request).
* Keep a buffer of events if the event stream must be used for
synchronous lookups.
* Be cautious in writing Salt's event stream directly to the DOM. It is
very busy and can quickly overwhelm the memory allocated to a
browser tab.
A full, working proof-of-concept JavaScript application is available
:blob:`adjacent to this file <salt/netapi/rest_cherrypy/index.html>`.
It can be viewed by pointing a browser at the ``/app`` endpoint in a
running ``rest_cherrypy`` instance.
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
cookies = cherrypy.request.cookie
auth_token = token or salt_token or (
cookies['session_id'].value if 'session_id' in cookies else None)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
yield u'retry: {0}\n'.format(400)
while True:
data = next(stream)
yield u'tag: {0}\n'.format(data.get('tag', ''))
yield u'data: {0}\n\n'.format(json.dumps(data))
return listen()
class WebsocketEndpoint(object):
'''
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
'tools.websocket.on': True,
'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:** ::
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: http
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: http
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
var source = new Websocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print ws.recv()
ws.close()
The examples above show how to establish a websocket connection to Salt
and activate real-time updates from Salt's event stream by signaling
``websocket client ready``.
'''
# Pulling the session token from a URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
SaltInfo = event_processor.SaltInfo(handler)
def signal_handler(signal, frame):
os._exit(0)
signal.signal(signal.SIGTERM, signal_handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send('data: {0}\n\n'.format(
json.dumps(data)), False)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{0}"
.format(data))
time.sleep(0.1)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle async push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook(object):
'''
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
This URL requires authentication; however, not all external services
can be configured to authenticate. For this reason, authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
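For illustration only (using the third-party ``requests`` library, which is
not part of this API), posting to that path from Python would look like:

.. code-block:: python

    import requests

    requests.post('https://localhost:8000/hook/mycompany/myapp/mydata',
                  headers={'X-Auth-Token': '6d1b722e'}, verify=False,
                  json={'revision': 'aa22a3c4b2e7', 'result': True})

The Reactor then sees the payload under the ``post`` key of the event data,
as in the example event further below.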
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
.. seealso:: :ref:`events`, :ref:`reactor`
'''
exposed = True
tag_base = ['salt', 'netapi', 'hook']
_cp_config = dict(LowDataAdapter._cp_config, **{
# Don't do any lowdata processing on the POST data
'tools.lowdata_fmt.on': True,
# Auth can be overridden in __init__().
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_token.on'] = False
self._cp_config['tools.salt_auth.on'] = False
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook \\
-H 'Content-type: application/json' \\
-d '{"foo": "Foo!", "bar": "Bar!"}'
.. code-block:: http
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/json
{"foo": "Foo!", "bar": "Bar!"}
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: yaml
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
revision: {{ build.revision }}
{% endif %}
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
raw_body = getattr(cherrypy.serving.request, 'raw_body', '')
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret}
class Stats(object):
'''
Expose statistics on the running CherryPy server
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self):
'''
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
'''
if hasattr(logging, 'statistics'):
# Late import
try:
from cherrypy.lib import cpstats
except ImportError:
logger.error('Import of cherrypy.lib.cpstats failed. Possible '
'upstream bug here: https://github.com/cherrypy/cherrypy/issues/1444')
return {}
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
'''
Class to serve HTML5 apps
'''
exposed = True
def GET(self, *args):
'''
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http:get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
'''
apiopts = cherrypy.config['apiopts']
default_index = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'index.html'))
return cherrypy.lib.static.serve_file(
apiopts.get('app', default_index))
class API(object):
'''
Collect configuration and URL map for building the CherryPy app
'''
url_map = {
'index': LowDataAdapter,
'login': Login,
'logout': Logout,
'minions': Minions,
'run': Run,
'jobs': Jobs,
'keys': Keys,
'events': Events,
'stats': Stats,
}
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
for url, cls in six.iteritems(self.url_map):
setattr(self, url, cls())
def _update_url_map(self):
'''
Assemble any dynamic or configurable URLs
'''
if HAS_WEBSOCKETS:
self.url_map.update({
'ws': WebsocketEndpoint,
})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update({
self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
})
# Enable the single-page JS app URL.
self.url_map.update({
self.apiopts.get('app_path', 'app').lstrip('/'): App,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
'''
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
'''
conf = {
'global': {
'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100),
'server.socket_queue_size': self.apiopts.get('queue_size', 30),
'engine.timeout_monitor.on': self.apiopts.get(
'expire_responses', True),
'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False),
'log.access_file': self.apiopts.get('log_access_file', ''),
'log.error_file': self.apiopts.get('log_error_file', ''),
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': True,
'tools.gzip.on': True,
'tools.cpstats.on': self.apiopts.get('collect_stats', False),
'tools.html_override.on': True,
'tools.cors_tool.on': True,
},
}
if 'favicon' in self.apiopts:
conf['/favicon.ico'] = {
'tools.staticfile.on': True,
'tools.staticfile.filename': self.apiopts['favicon'],
}
if self.apiopts.get('debug', False) is False:
conf['global']['environment'] = 'production'
# Serve static media if the directory has been set in the configuration
if 'static' in self.apiopts:
conf[self.apiopts.get('static_path', '/static')] = {
'tools.staticdir.on': True,
'tools.staticdir.dir': self.apiopts['static'],
}
# Add to global config
cherrypy.config.update(conf['global'])
return conf
def get_app(opts):
'''
Returns a WSGI app and a configuration dictionary
'''
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
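# Illustrative usage sketch (an assumption, not how salt-api itself wires this
# module up): the pieces returned by get_app() map onto CherryPy like so:
#
#   root, apiopts, cpyopts = get_app(master_opts)
#   cherrypy.quickstart(root, '/', cpyopts)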
|
TestLooperServer.py
|
import collections
import json
import logging
import threading
import traceback
import base64
import time
import random
import socket
import test_looper.data_model.TestDefinition as TestDefinition
import test_looper.core.SimpleServer as SimpleServer
import test_looper.core.socket_util as socket_util
import test_looper.core.algebraic as algebraic
import test_looper.core.algebraic_to_json as algebraic_to_json
CLEANUP_TASK_FREQUENCY = 30
TerminalInputMsg = algebraic.Alternative("TerminalInputMsg")
TerminalInputMsg.KeyboardInput = {"bytes": str}
TerminalInputMsg.Resize = {"cols": int, "rows": int}
ServerToClientMsg = algebraic.Alternative("ServerToClientMsg")
ServerToClientMsg.IdentifyCurrentState = {}
ServerToClientMsg.TerminalInput = {'deploymentId': str, 'msg': TerminalInputMsg}
ServerToClientMsg.TestAssignment = {'testId': str, 'testDefinition': TestDefinition.TestDefinition }
ServerToClientMsg.CancelTest = {'testId': str}
ServerToClientMsg.AcknowledgeFinishedTest = {'testId': str}
ServerToClientMsg.DeploymentAssignment = {'deploymentId': str, 'testDefinition': TestDefinition.TestDefinition }
ServerToClientMsg.ShutdownDeployment = {'deploymentId': str}
ServerToClientMsg.GrantOrDenyPermissionToHitGitRepo = {'requestUniqueId': str, "allowed": bool}
ClientToServerMsg = algebraic.Alternative("ClientToServerMsg")
WorkerState = algebraic.Alternative("WorkerState")
WorkerState.Waiting = {}
WorkerState.WorkingOnDeployment = {'deploymentId': str, 'logs_so_far': str}
WorkerState.WorkingOnTest = {'testId': str, 'logs_so_far': str, 'artifacts': algebraic.List(str)}
WorkerState.TestFinished = {'testId': str, 'success': bool, 'testSuccesses': algebraic.Dict(str,(bool, bool)), 'artifacts': algebraic.List(str)} #testSuccess: name->(success,hasLogs)
ClientToServerMsg.CurrentState = {'machineId': str, 'state': WorkerState}
ClientToServerMsg.WaitingHeartbeat = {}
ClientToServerMsg.TestHeartbeat = {'testId': str}
ClientToServerMsg.ArtifactUploaded = {'testId': str, 'artifact': str}
ClientToServerMsg.TestLogOutput = {'testId': str, 'log': str}
ClientToServerMsg.DeploymentHeartbeat = {'deploymentId': str}
ClientToServerMsg.DeploymentExited = {'deploymentId': str}
ClientToServerMsg.DeploymentTerminalOutput = {'deploymentId': str, 'data': str}
ClientToServerMsg.TestFinished = {'testId': str, 'success': bool, 'testSuccesses': algebraic.Dict(str,(bool, bool)), 'artifacts': algebraic.List(str)} #testSuccess: name->(success,hasLogs)
ClientToServerMsg.RequestPermissionToHitGitRepo = {'requestUniqueId': str, 'curTestOrDeployId': str}
ClientToServerMsg.GitRepoPullCompleted = {'requestUniqueId': str}
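# Illustrative sketch of how these messages travel over the socket (see
# Session.send and Session.readString below): an alternative is built with
# keyword arguments and round-tripped through algebraic_to_json, e.g.
#
#   msg = ClientToServerMsg.TestHeartbeat(testId="someTestId")
#   wire = json.dumps(algebraic_to_json.Encoder().to_json(msg))
#   back = algebraic_to_json.Encoder().from_json(json.loads(wire), ClientToServerMsg)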
SOCKET_CLEANUP_TIMEOUT = 360
class Session(object):
def __init__(self, server, testManager, machine_management, socket, address):
self.server = server
self.socket = socket
self.address = address
self.testManager = testManager
self.machine_management = machine_management
self.currentTestId = None
self.currentDeploymentId = None
self.socketLock = threading.Lock()
self.machineId = None
self.lastMessageTimestamp = time.time()
logging.info("Incoming Server Connection initialized.")
def stillLooksAlive(self):
"""Close socket if no traffic in a long time. Returns whether to keep polling..."""
try:
if time.time() - self.lastMessageTimestamp > SOCKET_CLEANUP_TIMEOUT:
logging.info("Clearing out socket for machine %s as we have not heard from it in %s seconds.", self.machineId, SOCKET_CLEANUP_TIMEOUT)
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
return False
return True
except:
logging.error("Exception clearing old socket: %s", traceback.format_exc())
return False
def __call__(self):
try:
self.send(ServerToClientMsg.IdentifyCurrentState())
while not self.server.shouldStop():
msg = algebraic_to_json.Encoder().from_json(
json.loads(self.readString()),
ClientToServerMsg
)
self.lastMessageTimestamp = time.time()
self.processMsg(msg)
except socket_util.SocketException as e:
logging.info("Socket error: %s", e.message)
except:
logging.error("Exception: %s", traceback.format_exc())
finally:
self.socket.close()
def send(self, msg):
self.writeString(json.dumps(algebraic_to_json.Encoder().to_json(msg)))
def processMsg(self, msg):
if msg.matches.CurrentState:
self.machineId = msg.machineId
logging.info("WorkerChannel initialized with machineId=%s", self.machineId)
self.testManager.machineInitialized(msg.machineId, time.time())
if msg.state.matches.WorkingOnDeployment:
deploymentId = msg.state.deploymentId
if not self.testManager.handleDeploymentConnectionReinitialized(deploymentId, time.time(), msg.state.logs_so_far):
self.send(ServerToClientMsg.ShutdownDeployment(deploymentId))
else:
self.currentDeploymentId = msg.state.deploymentId
def onMessage(msg):
if self.currentDeploymentId == deploymentId:
self.send(ServerToClientMsg.TerminalInput(deploymentId=deploymentId,msg=msg))
self.testManager.subscribeToClientMessages(deploymentId, onMessage)
elif msg.state.matches.WorkingOnTest:
if not self.testManager.handleTestConnectionReinitialized(msg.state.testId, time.time(), msg.state.logs_so_far, msg.state.artifacts):
self.send(ServerToClientMsg.CancelTest(msg.state.testId))
else:
self.currentTestId = msg.state.testId
elif msg.state.matches.TestFinished:
self.testManager.recordTestResults(msg.state.success, msg.state.testId, msg.state.testSuccesses, msg.state.artifacts, time.time())
self.send(ServerToClientMsg.AcknowledgeFinishedTest(msg.state.testId))
elif msg.matches.RequestPermissionToHitGitRepo:
if self.currentDeploymentId != msg.curTestOrDeployId and self.currentTestId != msg.curTestOrDeployId:
allowed = False
logging.warn("Denying git repo hit for unknown test/deploy id %s", msg.curTestOrDeployId)
else:
try:
allowed = self.testManager.tryToAllocateGitRepoLock(msg.requestUniqueId, self.currentDeploymentId or self.currentTestId)
except:
logging.error("Allocating git repo lock failed!\n:%s", traceback.format_exc())
allowed = False
self.send(ServerToClientMsg.GrantOrDenyPermissionToHitGitRepo(requestUniqueId=msg.requestUniqueId, allowed=allowed))
elif msg.matches.GitRepoPullCompleted:
self.testManager.gitRepoLockReleased(msg.requestUniqueId)
elif msg.matches.WaitingHeartbeat:
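# An idle worker heartbeats periodically; besides refreshing its liveness
# timestamp, the heartbeat is where the server hands out any pending
# deployment or test assignment (see the sends below).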
if self.machineId is None:
return
self.testManager.machineHeartbeat(self.machineId, time.time())
if self.currentDeploymentId is None and self.currentTestId is None:
deploymentId, testDefinition = self.testManager.startNewDeployment(self.machineId, time.time())
if deploymentId is not None:
self.currentDeploymentId = deploymentId
self.send(
ServerToClientMsg.DeploymentAssignment(
deploymentId=deploymentId,
testDefinition=testDefinition
)
)
def onMessage(msg):
if self.currentDeploymentId == deploymentId:
self.send(ServerToClientMsg.TerminalInput(deploymentId=deploymentId,msg=msg))
self.testManager.subscribeToClientMessages(deploymentId, onMessage)
else:
t0 = time.time()
testId, testDefinition = self.testManager.startNewTest(self.machineId, time.time())
if testId is not None:
self.currentTestId = testId
self.send(
ServerToClientMsg.TestAssignment(
testId=testId,
testDefinition=testDefinition
)
)
logging.info("Allocated new test %s to machine %s in %s seconds.", testId, self.machineId, time.time() - t0)
elif msg.matches.ArtifactUploaded:
if msg.testId == self.currentTestId:
self.testManager.recordTestArtifactUploaded(self.currentTestId, msg.artifact, time.time(), isCumulative=False)
elif msg.matches.TestHeartbeat or msg.matches.TestLogOutput:
if msg.matches.TestHeartbeat:
log = None
else:
log = msg.log
if msg.testId == self.currentTestId:
if not self.testManager.testHeartbeat(msg.testId, time.time(), log):
logging.info("Server canceling test %s on machine %s", msg.testId, self.machineId)
self.send(ServerToClientMsg.CancelTest(testId=msg.testId))
self.currentTestId = None
elif msg.matches.DeploymentExited:
if msg.deploymentId == self.currentDeploymentId:
self.testManager.shutdownDeployment(msg.deploymentId, time.time())
self.send(ServerToClientMsg.ShutdownDeployment(msg.deploymentId))
self.currentDeploymentId = None
elif msg.matches.DeploymentHeartbeat or msg.matches.DeploymentTerminalOutput:
log = msg.data if msg.matches.DeploymentTerminalOutput else None
if msg.deploymentId == self.currentDeploymentId:
if not self.testManager.handleMessageFromDeployment(msg.deploymentId, time.time(), log):
self.send(ServerToClientMsg.ShutdownDeployment(msg.deploymentId))
self.currentDeploymentId = None
elif msg.matches.TestFinished:
self.testManager.recordTestResults(msg.success, msg.testId, msg.testSuccesses, msg.artifacts, time.time())
self.currentTestId = None
self.send(ServerToClientMsg.AcknowledgeFinishedTest(msg.testId))
def readString(self):
return socket_util.readString(self.socket)
def writeString(self, s):
with self.socketLock:
return socket_util.writeString(self.socket, s)
class TestLooperServer(SimpleServer.SimpleServer):
#if we modify this protocol version, the loopers should reboot and pull a new copy of the code
protocolVersion = '2.2.6'
def __init__(self, server_ports, testManager, httpServer, machine_management):
"""
Initialize a TestLooperServer
"""
if httpServer.certs is not None:
cert_and_keyfile = (httpServer.certs.cert, httpServer.certs.key)
else:
cert_and_keyfile = None
SimpleServer.SimpleServer.__init__(self, server_ports.server_worker_port, cert_and_key_paths = cert_and_keyfile)
self.port_ = server_ports.server_worker_port
self.testManager = testManager
self.httpServer = httpServer
self.machine_management = machine_management
self.workerThread = threading.Thread(target=self.executeManagerWork)
self.workerThread.daemon=True
self.sessions = []
def executeManagerWork(self):
try:
lastSweep = None
while not self.shouldStop():
task = self.testManager.performBackgroundWork(time.time())
if lastSweep is None or time.time() - lastSweep > CLEANUP_TASK_FREQUENCY:
lastSweep = time.time()
try:
self.testManager.performCleanupTasks(time.time())
except:
logging.critical("Test manager failed during cleanup:\n%s", traceback.format_exc())
if task:
logging.info("Performed %s", task)
if task is None:
time.sleep(.1)
except:
logging.critical("Manager worker thread exiting:\n%s", traceback.format_exc())
finally:
logging.info("Manager worker thread exited")
def port(self):
return self.port_
def initialize(self):
logging.info("Initializing TestManager.")
self.testManager.markRepoListDirty(time.time())
#start something to touch all the objects we can reach in the
#background
touchAllThread = threading.Thread(
target=self.testManager.touchAllTestsAndRuns,
args=(time.time(),)
)
touchAllThread.daemon=True
touchAllThread.start()
try:
self.testManager.pruneDeadWorkers(time.time())
except:
logging.error("Server had an exception during initialization:\n%s", traceback.format_exc())
try:
self.testManager.checkAllTestPriorities(time.time(), resetUnbootable=False)
except:
logging.error("Server had an exception during initialization:\n%s", traceback.format_exc())
logging.info("DONE Initializing TestManager.")
def runListenLoop(self):
logging.info("Starting TestLooperServer listen loop")
self.httpServer.start()
logging.info("HTTP server started")
try:
self.initialize()
logging.info("TestLooper initialized")
self.workerThread.start()
super(TestLooperServer, self).runListenLoop()
finally:
self.httpServer.stop()
logging.info("Listen loop stopped")
def stop(self):
super(TestLooperServer, self).stop()
logging.info("waiting for worker thread...")
self.workerThread.join()
logging.info("successfully stopped TestLooperServer")
def _onConnect(self, socket, address):
logging.debug("Accepting connection from %s", address)
newSession = Session(
self,
self.testManager,
self.machine_management,
socket,
address
)
self.sessions.append(newSession)
self.sessions = [
x for x in self.sessions if x.stillLooksAlive()
]
logging.info("Creating new session with %s sessions alive", len(self.sessions))
threading.Thread(target=newSession).start()
|
grab_api.py
|
# coding: utf-8
import six
from tests.util import build_grab, temp_file
from tests.util import BaseGrabTestCase
from tests.util import reset_request_counter
from grab import GrabMisuseError, GrabError
class GrabApiTestCase(BaseGrabTestCase):
def setUp(self):
self.server.reset()
def test_incorrect_option_name(self):
grab = build_grab()
self.assertRaises(GrabMisuseError, grab.setup,
save_the_word=True)
def test_clone(self):
grab = build_grab()
self.server.response['get.data'] = 'Moon'
grab.go(self.server.get_url())
self.assertTrue(b'Moon' in grab.doc.body)
self.server.response['post.data'] = 'Foo'
grab2 = grab.clone(method='post', post='')
grab2.go(self.server.get_url())
self.assertTrue(b'Foo' in grab2.doc.body)
def test_empty_clone(self):
grab = build_grab()
grab.clone()
def test_adopt(self):
grab = build_grab()
self.server.response['get.data'] = 'Moon'
grab.go(self.server.get_url())
grab2 = build_grab()
self.assertEqual(grab2.config['url'], None)
grab2.adopt(grab)
self.assertTrue(b'Moon' in grab2.doc.body)
self.assertEqual(grab2.config['url'], self.server.get_url())
def test_empty_adopt(self):
grab = build_grab()
grab2 = build_grab()
grab2.adopt(grab)
def test_default_content_for_fake_response(self):
content = b'<strong>test</strong>'
grab = build_grab(document_body=content)
self.assertEqual(grab.doc.body, content)
def test_inheritance(self):
from grab import Grab
class SimpleExtension(object):
data = {'counter': 0}
@classmethod
def get_data(cls):
return cls.data
class CustomGrab(Grab, SimpleExtension):
pass
SimpleExtension.get_data()['counter'] = 0
CustomGrab()
# self.assertEqual(SimpleExtension.get_data()['counter'], 1)
class VeryCustomGrab(CustomGrab):
pass
SimpleExtension.get_data()['counter'] = 0
VeryCustomGrab()
# self.assertEqual(SimpleExtension.get_data()['counter'], 1)
def test_request_counter(self):
import threading
reset_request_counter()
grab = build_grab()
grab.go(self.server.get_url())
self.assertEqual(grab.request_counter, 1)
grab.go(self.server.get_url())
self.assertEqual(grab.request_counter, 2)
def func():
grab = build_grab()
grab.go(self.server.get_url())
# Make 10 requests in concurrent threads
threads = []
for _ in six.moves.range(10):
thread = threading.Thread(target=func)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
grab.go(self.server.get_url())
self.assertEqual(grab.request_counter, 13)
def test_download(self):
with temp_file() as save_file:
grab = build_grab()
self.server.response['get.data'] = 'FOO'
length = grab.download(self.server.get_url(), save_file)
self.assertEqual(3, length)
def test_make_url_absolute(self):
grab = build_grab()
self.server.response['get.data'] = '<base href="http://foo/bar/">'
grab.go(self.server.get_url())
absolute_url = grab.make_url_absolute('/foobar', resolve_base=True)
self.assertEqual(absolute_url, 'http://foo/foobar')
grab = build_grab()
absolute_url = grab.make_url_absolute('/foobar')
self.assertEqual(absolute_url, '/foobar')
def test_error_request(self):
grab = build_grab()
grab.setup(post={'foo': 'bar'})
self.assertRaises(GrabError, grab.go,
url='Could-not-resolve-host-address')
self.assertEqual(grab.config['post'], None)
self.assertEqual(grab.config['multipart_post'], None)
self.assertEqual(grab.config['method'], None)
self.assertEqual(grab.config['body_storage_filename'], None)
def test_setup_document(self):
data = b'''
<h1>test</h1>
'''
grab = build_grab(data)
self.assertTrue(b'test' in grab.doc.body)
def test_setup_document_invalid_input(self):
data = u'''
<h1>test</h1>
'''
self.assertRaises(GrabMisuseError, build_grab, data)
|
conduct.py
|
from hatano.util import Conf
from hatano.iam import IamRole
from hatano.lmbda import Lambda
from hatano.apigateway import RestApi
from hatano.acm import Cert
from hatano.route53 import Route53
from hatano.s3 import S3
from hatano.cloudfront import CloudFront
from hatano.errors import HatanoError
import os
import threading
import time
class Conductor:
def __init__(self, args):
self.args = args
c = Conf()
if not c.exists():
raise HatanoError("No config file found")
self.conf = c.read()
self.stage = args.stage
self.project = self.conf["project"]
stages = self.conf.get("stage", {})
self.stg_conf = stages.get(self.stage, {})
self.functions = []
if args.function:
self.functions = args.function
else:
self.functions = self.conf.get("function", {}).keys()
self.api = None
self.s3_path = self.stg_conf.get("bucket")
self.s3 = None
self.certified = False
self.domain = ""
self.cdnlink = ""
if self.s3_path:
self.s3 = S3(self.s3_path, self.project, self.stage)
if not self.stg_conf:
raise HatanoError(f"Stage {self.stage} not defined")
def update(self):
if self.args.bucket:
self.deploy_s3()
self.update_funcs()
self.finish()
def update_funcs(self):
self.create_api()
new = False
threads = []
for fname in self.functions:
print(f"Updating function {fname}")
try:
self.update_func(fname)
except:
print(f"Function {fname} doesn't exist. Creating...")
t = threading.Thread(target=self.deploy_func, args=(fname,))
threads.append(t)
t.start()
#self.deploy_func(fname)
new = True
for t in threads:
t.join()
if new:
self.deploy_api()
def update_func(self, name):
fn = self.conf["function"][name]
fn["name"] = name
if "env" not in fn:
fn["env"] = {}
if self.s3:
fn["env"]["DEFAULT_BUCKET"] = self.s3.name()
if self.cdnlink:
fn["env"]["CDN_LINK"] = self.cdnlink
lmb = Lambda(self.stage, fn)
lmb.update_function()
def deploy(self):
self.deploy_s3()
self.create_api()
self.deploy_funcs()
self.deploy_api()
self.deploy_domain()
self.finish()
def deploy_s3(self):
if self.s3:
try:
print(f"Creating s3 bucket {self.s3.name()}")
self.s3.create()
except Exception as e:
print(f"Failed: {e}")
try:
print(f"Uploading contents of {self.s3_path} to s3 bucket {self.s3.name()}")
self.s3.upload_all()
except Exception as e:
print(f"Failed: {e}")
public_dir = "public"
public_path = os.path.join(self.s3_path, public_dir)
if os.path.isdir(public_path):
cf = CloudFront(self.project, self.stage, path=f"/{public_dir}")
dist = cf.create_distribution_s3(self.s3.name())
#print(dist)
self.cdnlink = dist['DomainName']
access_id = dist['DistributionConfig']['Origins']['Items'][0]['S3OriginConfig']['OriginAccessIdentity']
time.sleep(10)
self.s3.put_policy(access_id)
#self.s3.put_cors()
def create_api(self):
# Create REST API
print(f"Creating REST API for {self.project}")
self.api = RestApi(self.project)
def deploy_api(self):
# Deploy the API
print(f"Deploying API stage {self.stage}")
self.api.deploy(self.stage)
def deploy_funcs(self):
# Create each function and link it to an endpoint
threads = []
for fname in self.functions:
print(f"Deploying function {fname}")
t = threading.Thread(target=self.deploy_func, args=(fname,))
threads.append(t)
t.start()
#self.deploy_func(fname)
for t in threads:
t.join()
def deploy_func(self, name):
if not self.api:
self.api = RestApi(self.stage)
fn = self.conf["function"][name]
fn["name"] = name
fn["runtime"] = self.conf["runtime"]
if self.s3:
if "env" not in fn:
fn["env"] = {}
fn["env"]["DEFAULT_BUCKET"] = self.s3.name()
fullname = f"{self.project}-{name}-{self.stage}"
http_method = fn.get("method", "")
http_path = fn.get("path")
# Create iam role
print(f" - Creating IAM role ({name})")
iam = IamRole(self.stage, fn)
role = iam.lambda_role()
iam.put_custom_policy()
role_arn = role['Role']['Arn']
# Create lambda
print(f" - Creating lambda ({name})")
lmb = Lambda(self.stage, fn, role_arn=role_arn)
func = lmb.create_function()
func_arn = func['FunctionArn']
lmb.add_permission("apigateway", "InvokeFunction")
# Create resource and endpoint
print(f" - Linking endpoint to lambda ({name})")
resource = self.api.create_resource(http_path)
resource.link_endpoint(http_method, func_arn)
def deploy_domain(self):
domain = self.stg_conf.get("domain")
cert = self.stg_conf.get("cert")
self.domain = domain
# Create domain name
if domain and cert:
print(f"Creating custom domain name {domain}")
cert = Cert(cert)
try:
r = self.api.create_domain(domain, cert.arn)
except Exception as e:
print("Error creating domain", e)
return
cloudfront = r['distributionDomainName']
r53 = Route53()
print("Creating cname record")
try:
r53.add_cname_record(domain, cloudfront)
except Exception as e:
print("Error adding cname record", e)
return
try:
self.api.create_base_path_mapping(domain, "", self.stage)
except Exception as e:
print("Error creating base path mapping", e)
return
self.certified = True
def finish(self):
# Output
print("-"*20)
print("Project:", self.project)
print("Stage:", self.stage)
perm_url = f"{self.api.url}/{self.stage}"
if self.certified:
print(f"https://{self.domain} ({perm_url})")
else:
print(perm_url)
|
core.py
|
import threading
import subprocess
import logging
import time
import os
import traceback
import errno
from collections import namedtuple, deque
from itertools import chain
import psutil
import conf
import client
import utils
import rpc.ttypes as ttypes
from profile import SystemProfiler as _SystemProfiler
logger = logging.getLogger(__name__)
__all__ = ['Profiler', 'ResourceMgr', 'ProcessMgr']
_RunningProc = namedtuple("RunningProc", "processCmd pthread cpus")
#
# _ResourceManager
#
class _ResourceManager(object):
"""
The ResourceManager keeps track of the bookable resources on the
machine. Currently this is just cores, but it may include memory and
GPUs in the future.
"""
def __init__(self):
self.__slots = deque(xrange(Profiler.physicalCpus))
self.__slots_all = tuple(self.__slots)
self.__lock = threading.RLock()
logger.info("Intializing resource manager with %d physical cores.",
Profiler.physicalCpus)
def checkout(self, numCores):
if numCores < 1:
raise ttypes.RndException(1, "Cannot reserve 0 slots")
result = []
with self.__lock:
open_slots = self.__slots
logger.info("Open slots: %s", list(open_slots))
if numCores > len(open_slots):
raise ttypes.RndException(1, "No more open slots")
result = [open_slots.pop() for _ in xrange(numCores)]
logger.info("Checked out CPUS: %s", result)
return result
def checkin(self, cores):
with self.__lock:
self.__slots.extend(cores)
avail, total = len(self.__slots), Profiler.physicalCpus
logger.info("Checked in CPUS: %s; Now available: %d / %d", cores, avail, total)
def getSlots(self):
return list(xrange(Profiler.physicalCpus))
def getOpenSlots(self):
with self.__lock:
return list(self.__slots)
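# Illustrative usage sketch: the module-level ResourceMgr instance (exported
# in __all__ above) hands out physical core ids and takes them back when a
# task finishes:
#
#   cores = ResourceMgr.checkout(2)
#   try:
#       pass  # run the task pinned to `cores`
#   finally:
#       ResourceMgr.checkin(cores)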
#
# _ProcessManager
#
class _ProcessManager(object):
"""
The ProcessManager keeps track of the running tasks. Each task
is executed in a separate ProcessThread.
"""
SAMPLE_INTERVAL_SEC = 10
def __init__(self):
self.__threads = {}
self.__lock = threading.RLock()
self.__timer = None
self.__isReboot = threading.Event()
self.__isShutdown = threading.Event()
self.__sampler = threading.Thread(target=self._processSampler)
self.__sampler.daemon = True
self.__sampler.start()
self.sendPing(True)
@property
def isReboot(self):
return self.__isReboot.is_set()
def runProcess(self, processCmd, wait=-1):
"""
Takes a RunTaskCommand object, reserves resources,
and starts the process. Default mode is to return None.
Optionally, a wait time may be specified in float
seconds, to wait until the job has fully started,
before returning. If wait > -1, return a RunningTask object
"""
cpus = ResourceMgr.checkout(processCmd.cores)
pthread = _ProcessThread(processCmd, cpus)
with self.__lock:
self.__threads[processCmd.procId] = _RunningProc(processCmd, pthread, cpus)
pthread.start()
logger.info("process thread started")
if wait == -1:
return
task = pthread.getRunningTask(wait)
return task
def processFinished(self, processResult, cpus=None):
"""
Callback for when a process has finished running.
Receives the RunTaskResult object.
Deallocates the resources.
"""
with self.__lock:
if cpus is None:
cpus = self.__threads[processResult.procId].cpus
ResourceMgr.checkin(cpus)
try:
del self.__threads[processResult.procId]
except Exception, e:
logger.warn("Process %s not found: %s", processResult.procId, e)
def sendPing(self, isReboot=False, repeat=True):
"""
Ping into the server with current task and resource states.
If repeat is True, schedules another ping at an interval defined
by the rndaemon config.
"""
if self.__isShutdown.is_set():
repeat = False
# TODO: What is the purpose of the isReboot flag?
# Using the internal flag to determine if we are in a
# reboot state.
isReboot = self.__isReboot.is_set()
tasks = self.getRunningTasks()
Profiler.sendPing(tasks, isReboot)
# TODO: Maybe there needs to be a separate thread for this check
# but for now it is part of the ping loop.
if isReboot and not tasks:
logger.info("Task queue is empty and daemon is scheduled for reboot")
try:
Profiler.reboot()
except ttypes.RndException, e:
# on next loop, the server will see that the system
# is no longer in isReboot state
logger.warn(e.why)
self.__isReboot.clear()
else:
# just in case
return
if repeat:
self.__timer = threading.Timer(conf.NETWORK_PING_INTERVAL, self.sendPing)
self.__timer.daemon = True
self.__timer.start()
def killRunningTask(self, procId, reason):
"""
Kill a currently running task by its procId.
"""
logger.info("kill requested for procId %s, %s", procId, reason)
with self.__lock:
try:
pthread = self.__threads[procId].pthread
except KeyError:
err = "Process %s not found" % procId
logger.warn(err)
# TODO: Raise a proper exception type? or
# fail quietly?
raise ttypes.RndException(1, err)
_, not_killed = pthread.killProcess(reason=reason)
if not_killed:
err = "Failed to kill the following pids for prodId %s: %s" % \
(procId, ','.join(not_killed))
logger.warn(err)
raise ttypes.RndException(1, err)
def getRunningTasks(self):
""" Get a list of all running task objects """
with self.__lock:
tasks = [t.pthread.getRunningTask() for t in self.__threads.itervalues()]
return tasks
def shutdown(self):
"""
Gracefully shut down all running tasks so they can report back in
"""
logger.debug("Shutdown requested for process manager.")
self.__isShutdown.set()
with self.__lock:
threads = [proc.pthread for proc in self.__threads.itervalues()]
for t in threads:
t.shutdown()
logger.debug("Asked %d tasks to quit and report. Waiting for them to complete", len(threads))
for t in threads:
if not t.wait(10):
logger.warn("Thread failed to close down after waiting 10 seconds: %r", t)
self.__threads.clear()
del threads
logger.debug("Done waiting on task shutdown")
def reboot(self, now=False):
"""
reboot (bool now=False)
Reboot the system as soon as it becomes idle. That is,
when no tasks are running.
If now == True, reboot immediately, regardless of any
in-progress render tasks.
"""
# TODO: For now, assuming that even if they aren't root,
# that they may have permission to reboot. This means a
# reboot(now=False) will not raise an exception to the caller.
#
# if os.geteuid() != 0:
# err = "rndaemon not running as user with permission to reboot system"
# raise ttypes.RndException(1, err)
self.__isReboot.set()
if now:
logger.info("*SYSTEM GOING DOWN FOR IMMEDIATE REBOOT*")
# stop all of the tasks
self.shutdown()
with self.__lock:
if self.__timer:
self.__timer.cancel()
# The reboot could happen from the ping if the task
# queue is empty.
self.sendPing(repeat=False)
# Otherwise, the reboot will happen here, regardless
# of whether there are active tasks running.
Profiler.reboot()
else:
logger.info("*Reboot scheduled at next idle event*")
def _processSampler(self):
"""
Loop that updates metrics on every running process
at intervals.
"""
while not self.__isShutdown.is_set():
with self.__lock:
pthreads = [t.pthread for t in self.__threads.itervalues()]
for pthread in pthreads:
pthread.updateMetrics()
time.sleep(self.SAMPLE_INTERVAL_SEC)
#
# RunningTask
#
class RunningTask(ttypes.RunningTask):
"""
Subclass of ttypes.RunningTask that adjusts the
__repr__ to only print a reduced amount of the last
log line string.
"""
def __repr__(self):
D = self.__dict__.copy()
# elide the log string if its too big
lastLog = D.get('lastLog')
if lastLog and len(lastLog) > 50:
D['lastLog'] = '%s...' % lastLog[:47]
L = ('%s=%r' % (key, value) for key, value in D.iteritems())
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
#
# _ProcessThread
#
class _ProcessThread(threading.Thread):
"""
The _ProcessThread wraps a running task.
"""
_DO_DISK_IO = hasattr(psutil.Process, "io_counters")
def __init__(self, rtc, cpus=None):
threading.Thread.__init__(self)
self.daemon = True
self.__logfp = None
self.__cpus = cpus or set()
self.__rtc = rtc
self.__pptr = None
self.__logfp = None
self.__pid = -1
self.__killThread = None
self.__wasKilled = threading.Event()
self.__hasStarted = threading.Event()
self.__isShutdown = threading.Event()
self.__progress = 0.0
self.__lastLog = ""
self.__killReason = ""
self.__metrics = {
'rssMb': 0,
'maxRssMb': 0,
'cpuPercent': 0,
'diskIO': ttypes.DiskIO(-1,-1,-1,-1),
}
def __repr__(self):
return "<%s: (procId: %s, pid: %d)>" % (
self.__class__.__name__,
self.__rtc.procId,
self.__pid)
def shutdown(self):
"""
Instruct the process to shutdown gracefully.
Returns the same output as killProcess()
"""
logger.debug("Shutdown request received. Killing %r", self)
self.__isShutdown.set()
self.killProcess(block=False, reason="rndaemon shutdown request received")
def wait(self, timeout=None):
"""
Waits for the process to finish.
By default, blocks indefinitely. Specify a
timeout in float seconds to wait. If the timeout
value is exceeded, returns False.
Returns True if the task ended.
"""
self.join(timeout)
return not self.isAlive()
def getRunningTask(self, wait=-1):
"""
getRunningTask(float wait=-1) -> RunningTask
Returns a RunningTask instance representing
the current state of the task.
If wait > 0, then wait that many seconds for
the process to start. This is useful if you are
creating the process and then checking its running
task right away. Some information may not be
available until after the thread has gotten the
process running.
"""
if wait > 0:
self.__hasStarted.wait(wait)
rt = RunningTask()
rtc = self.__rtc
rt.jobId = rtc.jobId
rt.procId = rtc.procId
rt.taskId = rtc.taskId
rt.layerId = rtc.layerId
rt.pid = self.__pid
metrics = self.__metrics
rt.rssMb = metrics['rssMb']
rt.cpuPercent = metrics['cpuPercent']
if self._DO_DISK_IO:
rt.diskIO = metrics['diskIO']
rt.progress = self.__progress
rt.lastLog = self.__lastLog or None
return rt
def run(self):
"""
Run method called implicitly by start()
Fires up the process to do the actual task.
Logs output, and records resource metrics.
"""
rtc = self.__rtc
retcode = 1
try:
uid = self.__rtc.uid
cpus = self.__cpus
logger.info("Opening log file: %s", rtc.logFile)
self.__logfp = utils.ProcessLog(self.__rtc.logFile, uid=uid, buffering=1)
self.__logfp.writeLogHeader(rtc)
env = os.environ.copy()
env.update(rtc.env)
parser = None
if rtc.taskTypes:
parser = utils.ProcessLogParser.fromTaskTypes(rtc.taskTypes)
if not parser.progress:
parser = None
opts = {
'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT,
'uid': uid,
'cpus': cpus,
'env': env,
}
cmd, opts = Profiler.getSubprocessOpts(rtc.command, **opts)
logger.info("Running command: %s", rtc.command)
self.__logfp.write("[%s] Running process" % time.strftime("%Y-%m-%d %H:%M:%S"))
self.__logfp.flush()
p = subprocess.Popen(cmd, **opts)
self.__pptr = p
self.__pid = p.pid
self.__hasStarted.set()
logger.info("PID: %d", p.pid)
self.updateMetrics()
writeLog = self.__logfp.write
r_pipe = self.__pptr.stdout
for line in iter(r_pipe.readline, ""):
writeLog(line)
self.__lastLog = line
if parser:
prog = parser.parseProgress(line)
if prog is not None:
self.__progress = prog
if self.__isShutdown.is_set():
break
self.__logfp.write("[%s] Process finished" % time.strftime("%Y-%m-%d %H:%M:%S"))
self.__logfp.flush()
try:
retcode = p.wait()
except OSError, e:
if e.errno != errno.ECHILD:
if not self.__isShutdown.is_set():
raise
r_pipe.close()
logger.debug("Return code: %s", retcode)
except Exception, e:
if self.__isShutdown.is_set():
logger.debug("Thread detected shutdown request. Leaving gracefully.")
else:
logger.warn("Failed to execute command: %s", e)
logger.debug(traceback.format_exc())
finally:
self.__completed(retcode)
def updateMetrics(self):
"""
updateMetrics()
Resample information about the currently running
process tree, and update member attributes.
e.g. rss, cpu percent, and disk IO counters
"""
# logger.debug("updateMetrics(): %r", self)
rss_bytes = 0
cpu_perc = 0
do_disk_io = self._DO_DISK_IO
if do_disk_io:
disk_io = [0,0,0,0]
try:
root_pid = self.__pid
p = psutil.Process(root_pid)
for proc in chain([p], p.children(True)):
this_pid = proc.pid
if proc.status() == psutil.STATUS_ZOMBIE:
continue
try:
rss_bytes += proc.memory_info().rss
except psutil.Error, e:
logger.debug("Error while getting memory data for pid %r: %s", this_pid, e)
try:
cpu_perc += proc.cpu_percent(None)
except psutil.Error, e:
logger.debug("Error while getting cpu data for pid %r: %s", this_pid, e)
if do_disk_io:
try:
counters = proc.io_counters()
except psutil.Error, e:
logger.debug("Error while getting disk io data for pid %r: %s", this_pid, e)
else:
for i, val in enumerate(counters):
disk_io[i] += val
except psutil.NoSuchProcess, e:
return
cpu_perc_int = int(round(cpu_perc))
rssMb = rss_bytes / 1024 / 1024
metrics = self.__metrics
maxRss = max(rssMb, metrics['maxRssMb'])
disk_io_t = ttypes.DiskIO(*disk_io) if do_disk_io else None
metrics.update({
'rssMb': rssMb,
'maxRssMb': maxRss,
'cpuPercent': cpu_perc_int,
'diskIO': disk_io_t,
})
logger.debug("metrics: %r", metrics)
def killProcess(self, block=True, reason=''):
"""
killProcess(bool block=True, reason='') -> (list killed_pids, list not_killed)
Stop the entire process tree
Returns a tuple of two lists. The first list contains
the pids from the process tree that were successfully
stopped. The second list contains pids that were not
able to be stopped successfully.
By default the call blocks until the attempt to kill
has completed. Set block=False to issue the kill async.
If the reason for killing the process is passed as a string,
it will be added to the log footer.
"""
self.__killReason = reason
if block:
return self.__killProcess()
# guards against repeat calls to kill while one async
# call is already running
if self.__killThread and self.__killThread.isAlive():
return
t = threading.Thread(target=self.__killProcess)
t.start()
self.__killThread = t
return
def __killProcess(self):
pid = self.__pid
if pid == -1:
return [], []
try:
p = psutil.Process(pid)
except psutil.NoSuchProcess:
return [], []
children = p.children(recursive=True)
self.__wasKilled.set()
# kill the top parent
self.__killOneProcess(p)
# make sure each process in the tree is really dead
killed = []
not_killed = []
for child in children:
success = self.__killOneProcess(child)
if success:
killed.append(child.pid)
else:
not_killed.append(child.pid)
return killed, not_killed
def __killOneProcess(self, p):
"""
__killOneProcess(psutil.Process p) -> bool
Try and nicely stop a Process first, then kill it.
Return True if process was killed.
"""
try:
try:
p.wait(0.001)
except psutil.TimeoutExpired:
pass
if not p.is_running():
return True
pid = p.pid
logger.info("Asking nicely for pid %d (%s) to stop", pid, p.name)
p.terminate()
try:
p.wait(5)
except psutil.TimeoutExpired:
pass
if not p.is_running():
return True
logger.info("Killing pid %d (%s)", pid, p.name)
p.kill()
try:
p.wait(1)
except psutil.TimeoutExpired:
pass
if p.is_running():
logger.warn("Failed to properly kill pid %d (taskId: %s)", pid, self.__rtc.taskId)
return False
except psutil.NoSuchProcess:
pass
return True
def __completed(self, retcode):
logger.debug("Process completed: %r, (IsShutdown: %r)", self, self.__isShutdown.is_set())
result = ttypes.RunTaskResult()
result.maxRssMb = self.__metrics['maxRssMb']
result.procId = self.__rtc.procId
result.taskId = self.__rtc.taskId
result.jobId = self.__rtc.jobId
if self.__isShutdown.is_set():
result.exitStatus = 1
result.exitSignal = 86
logger.info("Task closing gracefully from shutdown request")
elif self.__wasKilled.is_set():
result.exitStatus = 1
result.exitSignal = retcode if retcode < 0 else -9
elif retcode < 0:
result.exitStatus = 1
result.exitSignal = retcode
else:
result.exitStatus = retcode
result.exitSignal = 0
logger.info("Process result %s", result)
if not conf.NETWORK_DISABLED:
while True:
try:
service, transport = client.getPlowConnection()
service.taskComplete(result)
transport.close()
break
except Exception, e:
logger.warn("Error talking to plow server, %s, sleeping for 30 seconds", e)
time.sleep(30)
ProcessMgr.processFinished(result, self.__cpus)
if self.__logfp is not None:
attrs = {
'DiskIO': self.__metrics['diskIO'],
'Cpus': len(self.__cpus),
}
if self.__killReason:
attrs['Reason Killed'] = self.__killReason
self.__logfp.writeLogFooterAndClose(result, attrs)
self.__logfp = None
#
# Singleton Instances
#
Profiler = _SystemProfiler()
ResourceMgr = _ResourceManager()
ProcessMgr = _ProcessManager()
|
clean_met.py
|
"""
this is a simple script to clean the MET dataset by removing all images that are already grayscale, as well as any that cannot be read
"""
import argparse
import os
import multiprocessing as mp
import queue
from PIL import Image, UnidentifiedImageError
import numpy as np
import tqdm
def single(root, filename):
""" performs on a single file """
try:
filepath = os.path.join(root, filename)
image = Image.open(filepath)
arr = np.array(image)
if len(arr.shape) != 3:
os.remove(filepath)
except UnidentifiedImageError:
os.remove(filepath)
def clean_folder(root):
""" cleans an image folder removing any corrupt or non RGB images """
filenames = os.listdir(root)
proc_queue = queue.Queue(512)
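# the bounded queue acts as a throttle: at most 512 worker processes are alive
# at once, because the loop below joins the oldest process whenever the queue is full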
for filename in tqdm.tqdm(filenames):
while proc_queue.full():
proc_queue.get().join()
proc = mp.Process(target=single, args=(root, filename))
proc.start()
proc_queue.put(proc)
while not proc_queue.empty():
proc_queue.get().join()
def main():
""" runs the main program """
parser = argparse.ArgumentParser()
parser.add_argument("root")
args = parser.parse_args()
clean_folder(args.root)
if __name__ == "__main__":
main()
|
start_pipelined.py
|
"""
Copyright (c) 2018-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import logging
import threading
import subprocess
from enum import Enum
from collections import namedtuple
from concurrent.futures import Future
from magma.pipelined.rule_mappers import RuleIDToNumMapper
from magma.pipelined.app.base import MagmaController, ControllerType
from magma.pipelined.tests.app.exceptions import ServiceRunningError,\
BadConfigError
from ryu.base.app_manager import AppManager
from ryu.lib import hub
class TestSetup(object):
"""
The TestSetup class variables
apps: [Controller]: ryu apps to instantiate
references: [Controller]: futures to get references of
instantiated apps
config: dict: config for ryu app
mconfig: dict: mconfig for ryu app
service_manager: ServiceManager: service manager for ryu app
integ_test: bool: set true when running tests in
integ setting
"""
def __init__(self, apps, references, config, mconfig, loop,
service_manager, integ_test=False, rpc_stubs=None):
self.apps = apps
self.references = references
self.config = config
self.mconfig = mconfig
self.service_manager = service_manager
self.loop = loop
self.integ_test = integ_test
if rpc_stubs is None:
rpc_stubs = {}
self.rpc_stubs = rpc_stubs
Controller = namedtuple('Controller', ['name', 'app_future'])
class PipelinedController(Enum):
InOut = Controller(
'magma.pipelined.app.inout', 'inout'
)
Arp = Controller(
'magma.pipelined.app.arp', 'arpd'
)
Enforcement = Controller(
'magma.pipelined.app.enforcement', 'enforcement'
)
Enforcement_stats = Controller(
'magma.pipelined.app.enforcement_stats', 'enforcement_stats'
)
Testing = Controller(
'magma.pipelined.app.testing', 'testing'
)
Meter = Controller(
'magma.pipelined.app.meter', 'meter'
)
MeterStats = Controller(
'magma.pipelined.app.meter_stats', 'meter_stats'
)
AccessControl = Controller(
'magma.pipelined.app.access_control', 'access_control'
)
Subscriber = Controller(
'magma.pipelined.app.subscriber', 'subscriber'
)
UEMac = Controller(
'magma.pipelined.app.ue_mac', 'ue_mac'
)
PacketTracer = Controller(
'magma.pipelined.app.packet_tracer', 'packet_tracer'
)
def assert_pipelined_not_running():
"""
As Ryu applications shouldn't be started if the magma@pipelined service is
running, we need to verify whether pipelined is active. If the service is
running, throws a ServiceRunningError exception.
This can be done using the command:
systemctl is-active magma@pipelined
If the service is inactive, this returns exit code 3 and the message "inactive"
"""
try:
output = subprocess.check_output(
["systemctl", "is-active", "magma@pipelined"]
)
except subprocess.CalledProcessError as e:
if "inactive" not in str(e.output, 'utf-8'):
raise ServiceRunningError(
"Pipelined is running, 'systemctl is-active magma@pipelined'" +
"caused an error code %d, exception - %s"
% (e.returncode, str(e.output, 'utf-8').strip())
)
else:
raise ServiceRunningError(
"Pipelined is running, 'systemctl is-active magma@pipelined'" +
"output - %s" % str(output, 'utf-8').strip()
)
class StartThread(object):
"""
Starts ryu applications
Uses ryu hub and ryu app_manager to launch ryu applications. By using
futures get references to the instantiated apps. This allows unittests to
call methods from pipelined apps.
"""
_Event = namedtuple('_Event', ['func', 'future'])
def __init__(self, test_setup, launch_successful_future):
""" If verification fails throw an exception, don't start ryu apps """
if test_setup.integ_test is False:
hub.patch(thread=True)
assert_pipelined_not_running()
self._test_setup = test_setup
self.keep_running = True
self.done = False
self.event_queue = hub.Queue()
thread = threading.Thread(
target=self.start_ryu_apps, args=(launch_successful_future,))
thread.daemon = True
thread.start()
def start_ryu_apps(self, launch_successful_future):
"""
Starts up ryu applications, all the configuration is parsed from the
test_setup config provided in the unit test.
If the apps throw an exception on launch, the error is passed to the
launch_successful_future, which prevents waiting indefinitely.
"""
self.reset_static_vars()
hub.spawn(self._process_queue)
app_lists = [a.value.name for a in self._test_setup.apps]
app_futures = {
controller.value.app_future: future
for (controller, future) in self._test_setup.references.items()
}
manager = AppManager.get_instance()
manager.load_apps(app_lists)
contexts = manager.create_contexts()
contexts['sids_by_ip'] = {} # shared by both metering apps
contexts['rule_id_mapper'] = RuleIDToNumMapper()
contexts['session_rule_version_mapper'] = \
self._test_setup.service_manager.session_rule_version_mapper
contexts['app_futures'] = app_futures
contexts['config'] = self._test_setup.config
contexts['mconfig'] = self._test_setup.mconfig
contexts['loop'] = self._test_setup.loop
contexts['rpc_stubs'] = self._test_setup.rpc_stubs
contexts['service_manager'] = self._test_setup.service_manager
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s %(levelname)s %(name)s] %(message)s')
services = []
try:
services.extend(manager.instantiate_apps(**contexts))
except Exception as e:
launch_successful_future.set_result(
"Ryu apps launch exception: {}".format(e))
raise
launch_successful_future.set_result("Setup successful")
self.run(manager)
def _process_queue(self):
"""
Run a queue to process external events that need to be run in the Ryu
greenthread
"""
while self.keep_running:
try:
event = self.event_queue.get(block=False)
val = event.func()
event.future.set_result(val)
except hub.QueueEmpty:
pass
finally:
hub.sleep(0.1)
def run_in_greenthread(self, func):
"""
When not monkey patching (i.e. when running a gRPC server), you cannot
call directly into a Ryu app. To do this, there needs to be a boundary
between futures and hub.Queues. When this function is called, a lambda
is passed which is sent into a queue to be run by the Ryu greenthread.
"""
ev = self._Event(func=func, future=Future())
self.event_queue.put(ev)
return ev.future.result()
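# For example (hypothetical controller/method names), a unittest thread could do:
#   stats = start_thread.run_in_greenthread(lambda: enforcement_controller.get_stats())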
def run(self, manager):
""" Keep running until signalled from test file """
while self.keep_running:
hub.sleep(1)
manager.close()
self.done = True
def reset_static_vars(self):
""" Reset static vars for running nosetests """
AppManager._instance = AppManager()
MagmaController.TABLES = {}
|
loader.py
|
from multiprocessing import Process
from downstream_farmer.shell import main
import argparse
import time
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--number', help='Number of farmers to launch',
type=int, default=1)
args = parser.parse_args()
n = args.number
p = dict()
for i in range(0, n):
p[i] = Process(target=main, args=[['--forcenew']])
p[i].start()
time.sleep(1)
for i in range(0, n):
p[i].join()
|
envs.py
|
from ssbm_gym.ssbm_env import BaseEnv, isDying
from copy import deepcopy
from ssbm_gym import embed
opponent_pid = 1
def make_env(frame_limit, options):
def _init():
env = GoHighEnv(frame_limit=frame_limit, options=options)
return env
return _init
def GoHighEnvVec(num_envs, frame_limit=1e9, options={}):
return SubprocVecEnv([make_env(frame_limit=frame_limit, options=options) for _ in range(num_envs)])
class GoHighEnv(BaseEnv):
def __init__(self, **kwargs):
BaseEnv.__init__(self, **kwargs)
self._embed_obs = MinimalEmbedGame()
# self._embed_obs = embed.EmbedGame()
@property
def action_space(self):
""" [self.action_space] sets self._action_space to be MinimalActionSpace() from ssbm_gym.spaces then returns it """
if self._action_space is not None:
return self._action_space
else:
from ssbm_gym.spaces import MinimalActionSpace
self._action_space = MinimalActionSpace()
return self._action_space
@property
def observation_space(self):
""" currently, observation calls MinimalEmbedGame() """
if self._observation_space is not None:
return self._observation_space
else:
self._observation_space = self._embed_obs
return self._embed_obs
def embed_obs(self, obs):
return self._embed_obs(obs)
def compute_reward(self):
""" [env.compute_reward] """
r = 0.0
if self.prev_obs is not None:
# This is necessary because the character might be dying during multiple frames
if not isDying(self.prev_obs.players[self.pid]) and \
isDying(self.obs.players[self.pid]):
r -= 1.0
if not isDying(self.prev_obs.players[opponent_pid]) and \
isDying(self.obs.players[opponent_pid]):
r += 1.0
# We give a reward of -0.01 for every percent taken. The max() ensures that no reward is given when a character dies
r += -0.01 * max(0, self.obs.players[self.pid].percent - self.prev_obs.players[self.pid].percent) + 0.01 * max(0, self.obs.players[opponent_pid].percent - self.prev_obs.players[opponent_pid].percent)
# r += self.obs.players[0].y / 50 / 60
return r
def step(self, action):
""" [step action] performs [action] then returns the embedded observation, reward, whether_is_terminal, and a dict of frames """
if self.obs is not None:
self.prev_obs = deepcopy(self.obs)
obs = self.api.step([self.action_space.from_index(action)])
self.obs = obs
reward = self.compute_reward()
done = self.is_terminal()
infos = dict({'frame': self.obs.frame})
return self.embed_obs(self.obs), reward, done, infos
class MinimalEmbedPlayer():
def __init__(self):
self.n = 9
def __call__(self, player_state):
percent = player_state.percent/100.0
facing = player_state.facing
x = player_state.x/10.0
y = player_state.y/10.0
invulnerable = 1.0 if player_state.invulnerable else 0
hitlag_frames_left = player_state.hitlag_frames_left/10.0
hitstun_frames_left = player_state.hitstun_frames_left/10.0
shield_size = player_state.shield_size/100.0
in_air = 1.0 if player_state.in_air else 0.0
return [
percent,
facing,
x, y,
invulnerable,
hitlag_frames_left,
hitstun_frames_left,
shield_size,
in_air
]
class MinimalEmbedGame():
def __init__(self):
self.embed_player = MinimalEmbedPlayer()
self.n = self.embed_player.n
def __call__(self, game_state):
player0 = self.embed_player(game_state.players[0])
player1 = self.embed_player(game_state.players[1])
return player0 + player1 # concatenates lists
import multiprocessing
import cloudpickle
import pickle
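# CloudpickleWrapper lets the env factory callable be shipped to a subprocess:
# cloudpickle can serialize closures/lambdas that the plain pickle used by
# multiprocessing cannot.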
class CloudpickleWrapper(object):
def __init__(self, var):
self.var = var
def __getstate__(self):
return cloudpickle.dumps(self.var)
def __setstate__(self, obs):
self.var = pickle.loads(obs)
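# _worker runs inside each child process: it builds its own env from the pickled
# factory, then services (cmd, data) tuples sent by the parent over the pipe
# until it receives 'close' or the pipe breaks.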
def _worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.var()
while True:
try:
cmd, data = remote.recv()
if cmd == 'step':
observation, reward, done, info = env.step(data)
if done:
# save final observation where user can get it, then reset
info['terminal_observation'] = observation
observation = env.reset()
remote.send((observation, reward, done, info))
elif cmd == 'reset':
observation = env.reset()
remote.send(observation)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
elif cmd == 'env_method':
method = getattr(env, data[0])
remote.send(method(*data[1], **data[2]))
elif cmd == 'get_attr':
remote.send(getattr(env, data))
elif cmd == 'set_attr':
remote.send(setattr(env, data[0], data[1]))
else:
raise NotImplementedError
except EOFError:
break
class SubprocVecEnv():
def __init__(self, env_fns, start_method=None):
self.num_envs = len(env_fns)
self.waiting = False
self.closed = False
if start_method is None:
# Fork is not a thread safe method (see issue #217)
# but is more user friendly (does not require to wrap the code in
# a `if __name__ == "__main__":`)
forkserver_available = 'forkserver' in multiprocessing.get_all_start_methods()
start_method = 'forkserver' if forkserver_available else 'spawn'
ctx = multiprocessing.get_context(start_method)
self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(self.num_envs)])
self.processes = []
for work_remote, remote, env_fn in zip(self.work_remotes, self.remotes, env_fns):
args = (work_remote, remote, CloudpickleWrapper(env_fn))
# daemon=True: if the main process crashes, we should not cause things to hang
process = ctx.Process(target=_worker, args=args, daemon=True)
process.start()
self.processes.append(process)
work_remote.close()
self.remotes[0].send(('get_spaces', None))
self.observation_space, self.action_space = self.remotes[0].recv()
def step(self, actions):
self.step_async(actions)
return self.step_wait()
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return obs, rews, dones, infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
return obs
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for process in self.processes:
process.join()
self.closed = True
def get_attr(self, attr_name, indices=None):
"""Return attribute from vectorized environment (see base class)."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(('get_attr', attr_name))
return [remote.recv() for remote in target_remotes]
def set_attr(self, attr_name, value, indices=None):
"""Set attribute inside vectorized environments (see base class)."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(('set_attr', (attr_name, value)))
for remote in target_remotes:
remote.recv()
def env_method(self, method_name, *method_args, indices=None, **method_kwargs):
"""Call instance methods of vectorized environments."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(('env_method', (method_name, method_args, method_kwargs)))
return [remote.recv() for remote in target_remotes]
def _get_target_remotes(self, indices):
"""
Get the connection object needed to communicate with the wanted
envs that are in subprocesses.
:param indices: (None,int,Iterable) refers to indices of envs.
:return: ([multiprocessing.Connection]) Connection object to communicate between processes.
"""
indices = self._get_indices(indices)
return [self.remotes[i] for i in indices]
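# Example usage (the contents of the options dict are specific to ssbm_gym and
# omitted here; shown only as a sketch):
#   envs = GoHighEnvVec(2, frame_limit=3600, options={})
#   obs = envs.reset()
#   obs, rewards, dones, infos = envs.step([0, 0])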
|
ThreadingDoc.py
|
#!/usr/bin/env python
"""
Simple how-to example showing the main methods & functions
of the threading module.
"""
__author__ = "Rafael García Cuéllar"
__email__ = "r.gc@hotmail.es"
__copyright__ = "Copyright (c) 2018 Rafael García Cuéllar"
__license__ = "MIT"
import threading
import os,sys,inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
from Utilities import getMetaDetails
# This module defines the following functions:
def doTarget():
total = 0
for i in range(1_000):
for j in range(1_000):
total += i * (j + j)
print("Total: %s" % str(total))
if __name__ == '__main__':
# Thread-Local Data
print('############################## T H R E A D L O C A L D A T A ###############################')
mydata = threading.local()
mydata.x = 1; mydata.y = 3
print(f'Local Thread Data (x): {mydata.x}')
print('\n################################## T H R E A D O B J E C T S #################################')
t1 = threading.Thread(group=None, target=doTarget, daemon=None, name="Thread 1") # pass the callable, not its return value
t1.setDaemon(False)
t1.start() # t1.run()
print('Is alive? %s' % t1.is_alive())
print('Is Daemon? %s' % t1.isDaemon())
getMetaDetails()
t1.join(timeout=None)
t1._stop()
print('\n#################################### L O C K O B J E C T S ##################################')
lock = threading.Lock()
print('Thread status lock: %s' % lock.acquire())
print('Thread status lock: %s' % lock.acquire(False))
print('Thread status lock: %s' % lock.release())
print('\n################################## R L O C K O B J E C T S ##################################')
rlock = threading.RLock()
print('Thread status rlock: %s' % rlock.acquire())
print('Thread status rlock: %s' % rlock.acquire(False)) # Does not block; always True because RLock is re-entrant for the owning thread
print('Thread status rlock: %s' % rlock.release())
print('\n############################# C O N D I T I O N O B J E C T S ##############################')
condition = threading.Condition(lock=None)
print('Condition Blocking: %s ' % condition.acquire(blocking=True))
# Producer-Consumer Example
# condition.wait()
# condition.wait_for(predicate, timeout=None)
# condition.notify()
# condition.notify_all()
print('\n###################################### S E M A P H O R E ######################################')
semaphore = threading.Semaphore(value=10)
boundedSemaphore = threading.BoundedSemaphore(value=5)
print('Semaphore status semaphore: %s' % semaphore.acquire())
print('Semaphore status semaphore: %s' % semaphore.acquire(False))
print('Semaphore status semaphore: %s' % semaphore.release())
print('\n################################### E V E N T O B J E C T S ###################################')
event = threading.Event()
print('Internal Flag status: %s' % event.is_set())
event.set()
print('Internal Flag status: %s' % event.is_set())
event.clear()
print('Internal Flag status: %s' % event.is_set())
# event.wait(timeout=None)
print('\n################################### T I M E R O B J E C T S ###################################')
t = threading.Timer(30.0, doTarget, args=None, kwargs=None)
# t.start()
t.cancel() # stop the timer's action if it's still waiting
print('\n################################ B A R R I E R O B J E C T S ##################################')
barrier = threading.Barrier(2, action=None, timeout=5)
barrier.reset()
print('The number of threads required to pass the barrier.: %s' % str(barrier.parties))
print('The number of threads currently waiting in the barrier: %s' % str(barrier.n_waiting))
print('Barrier broken state: %s' % str(barrier.broken))
barrier.abort()
print('Barrier broken state: %s' % str(barrier.broken))
# def server():
# start_server()
# b.wait()
# while True:
# connection = accept_connection()
# process_server_connection(connection)
# def client():
# b.wait()
# while True:
# connection = make_connection()
# process_client_connection(connection)
""" TIP:
Using locks, conditions, and semaphores in the with statement
"""
|
lambda-handler.py
|
import os
import subprocess
import logging
import base64
from threading import Thread
logging.getLogger().setLevel(logging.INFO)
class ReturningThread(Thread):
"""A wrapper around the Thread class to actually return the threaded function
return value when calling join()"""
def __init__(self, target=None, args=()):
Thread.__init__(self, target=target, args=args)
self._return = None
def run(self):
if self._target is not None:
self._return = self._target(*self._args, **self._kwargs)
def join(self, *args):
Thread.join(self, *args)
return self._return
def buff_and_print(stream, stream_name):
"""Buffer and log every line of the given stream"""
buff = []
for l in iter(lambda: stream.readline(), b""):
line = l.decode("utf-8")
logging.info("%s: %s", stream_name, line.rstrip())
buff.append(line)
return "".join(buff)
def handler(event, context):
"""An AWS Lambda handler that runs the provided command with bash and returns the standard output"""
# input parameters
logging.debug("event: %s", event)
src_cmd = base64.b64decode(event["cmd"]).decode("utf-8")
logging.info("src_cmd: %s", src_cmd)
# execute the command as bash and return the std outputs
parsed_cmd = ["/bin/bash", "-c", src_cmd]
process = subprocess.Popen(
parsed_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
# we need to spin up a thread to avoid deadlock when reading through output pipes
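# (reading stdout to completion before touching stderr can deadlock: the child
# blocks once the stderr pipe buffer fills while we are still blocked on stdout)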
stderr_thread = ReturningThread(
target=buff_and_print, args=(process.stderr, "stderr")
)
stderr_thread.start()
stdout = buff_and_print(process.stdout, "stdout")
stderr = stderr_thread.join()
# multiplex stdout and stderr into the result field
res = stdout if stdout != "" else stderr
return {"result": res, "parsed_cmd": parsed_cmd}
|
spincoater.py
|
import odrive # odrive documentation https://docs.odriverobotics.com/
from odrive.enums import * # control/state enumerations
import serial
import time
import numpy as np
import os
import yaml
import threading
from frgpascal.hardware.helpers import get_port
from frgpascal.hardware.gantry import Gantry
from frgpascal.hardware.switchbox import SingleSwitch
MODULE_DIR = os.path.dirname(__file__)
CALIBRATION_DIR = os.path.join(MODULE_DIR, "calibrations")
with open(os.path.join(MODULE_DIR, "hardwareconstants.yaml"), "r") as f:
constants = yaml.load(f, Loader=yaml.Loader)
# spincoater_serial_number = constants["spincoater"]["serialid"]
# print(constants["spincoater"])
class SpinCoater:
def __init__(self, gantry: Gantry, switch: SingleSwitch):
"""Initialize the spincoater control object
Args:
gantry (Gantry): PASCAL Gantry control object
switch (SingleSwitch): switchbox channel used to toggle the vacuum solenoid
"""
# constants
# if port is None:
# self.port = get_port(
# constants["spincoater"]["device_identifiers"]
# ) # find port to connect to this device.
# else:
# self.port = port
# self.ARDUINOTIMEOUT = constants["spincoater"]["pollingrate"]
self.switch = switch
self.COMMUNICATION_INTERVAL = constants["spincoater"]["communication_interval"]
self.TIMEOUT = 30
self.ACCELERATIONRANGE = (
constants["spincoater"]["acceleration_min"],
constants["spincoater"]["acceleration_max"],
) # rpm/s
self.SPEEDRANGE = (
constants["spincoater"]["rpm_min"],
constants["spincoater"]["rpm_max"],
) # rpm
self.__rpm = 0 # nominal current rpm. does not take ramping into account
self.__HOMEPOSITION = 0.5 # home coordinate for spincoater chuck, in radial (0-1) coordinates. somewhat arbitrary, but avoid 0 because it wraps around to 1, makes some math annoying
self.__TWISTDELTA = -0.05 # turn to make when twisting off sample from chuck.
self._locked = False # true when chuck is holding at home position
self.gantry = gantry
self.__calibrated = False
# logging
self.__logging_active = False
self.__logdata = {"time": [], "rpm": []}
self.LOGGINGINTERVAL = constants["spincoater"]["logging_interval"]
self.VACUUM_DISENGAGEMENT_TIME = constants["spincoater"][
"vacuum_disengagement_time"
]
# give a little extra z clearance, crashing into the foil around the spincoater is annoying!
self.p0 = np.asarray(constants["spincoater"]["p0"]) + [0, 0, 5]
self.connect()
self._current_rps = 0
def connect(self, **kwargs):
# connect to odrive BLDC controller
print("Connecting to odrive")
# this is admittedly hacky. Connect, reboot (which disconnects), then connect again. Reboot necessary when communication line is broken
self.odrv0 = odrive.find_any()
# try:
# self.odrv0 = odrive.find_any(timeout=10)
# except:
# raise ValueError("Could not find odrive! confirm that 24V PSU is on")
# try:
# self.odrv0.reboot() # reboot the odrive, communication sometimes gets broken when we disconnect/reconnect
# self.odrv0._destroy()
# except:
# pass # this always throws an "object lost" error...which is what we want
# try:
# self.odrv0 = odrive.find_any(timeout=10)
# except:
# raise ValueError("Could not find odrive! confirm that 24V PSU is on")
print("\tFound motor, now calibrating. This takes 10-20 seconds.")
# input("\tPress enter once shroud is out of the way: ")
self.axis = self.odrv0.axis0
self.axis.requested_state = (
AXIS_STATE_FULL_CALIBRATION_SEQUENCE # calibrate the encoder
)
time.sleep(5) # wait for calibration to initiate
while self.axis.current_state != 1:
time.sleep(1) # wait for calibration to complete
print("\tDone calibrating odrive!")
self.axis.requested_state = (
AXIS_STATE_CLOSED_LOOP_CONTROL # normal control mode
)
# odrive defaults
self.axis.motor.config.current_lim = 30 # Amps NOT SAME AS POWER SUPPLY CURRENT
self.axis.controller.config.circular_setpoints = True # position = 0-1 radial
self.axis.trap_traj.config.vel_limit = (
0.5 # for position moves to lock position
)
self.axis.trap_traj.config.accel_limit = 0.5
self.axis.trap_traj.config.decel_limit = 0.5
self.lock()
self.idle()
# start libfibre timer watchdog
self.__connected = True
self._libfibre_watchdog = threading.Thread(target=self.__libfibre_timer_worker)
self._libfibre_watchdog.start()
def disconnect(self):
self.__connected = False
self._libfibre_watchdog.join()
try:
self.odrv0._destroy()
except:
pass # this always throws an "object lost" error...which is what we want
# position calibration methods
def calibrate(self):
"""Prompt user to manually position the gantry over the spincoater using the Gantry GUI. This position will be recorded and used for future pick/place operations to the spincoater chuck"""
# self.gantry.moveto(z=self.gantry.OT2_ZLIM, zhop=False)
# self.gantry.moveto(x=self.gantry.OT2_XLIM, y=self.gantry.OT2_YLIM, zhop=False)
# self.gantry.moveto(x=self.p0[0], y=self.p0[1], avoid_ot2=False, zhop=False)
self.gantry.moveto(*self.p0)
self.gantry.gui()
self.coordinates = self.gantry.position
# self.gantry.moverel(z=10, zhop=False)
self.__calibrated = True
with open(
os.path.join(CALIBRATION_DIR, f"spincoater_calibration.yaml"), "w"
) as f:
yaml.dump(self.coordinates, f)
def _load_calibration(self):
with open(
os.path.join(CALIBRATION_DIR, f"spincoater_calibration.yaml"), "r"
) as f:
self.coordinates = np.array(yaml.load(f, Loader=yaml.FullLoader))
self.__calibrated = True
def __call__(self):
"""Calling the spincoater object will return its gantry coordinates. For consistency with the callable nature of gridded hardware (storage, hotplate, etc)
Raises:
Exception: If spincoater position is not calibrated, an error will be thrown.
Returns:
tuple: (x,y,z) coordinates for gantry to pick/place sample on spincoater chuck.
"""
if self.__calibrated == False:
raise Exception(f"Need to calibrate spincoater position before use!")
return self.coordinates
# vacuum control methods
def vacuum_on(self):
"""Turn on vacuum solenoid, pull vacuum"""
self.switch.on()
def vacuum_off(self):
"""Turn off vacuum solenoid, do not pull vacuum"""
self.switch.off()
# odrive BLDC motor control methods
def set_rpm(self, rpm: int, acceleration: float = 1000):
"""sends commands to arduino to set a target speed with a target acceleration
Args:
rpm (int): target angular velocity, in rpm
acceleration (float, optional): target angular acceleration, in rpm/second. Defaults to 1000.
"""
rps = int(rpm / 60) # convert rpm to rps for odrive
acceleration = int(acceleration / 60) # convert rpm/s to rps/s for odrive
self.axis.controller.config.vel_ramp_rate = acceleration
time.sleep(self.COMMUNICATION_INTERVAL)
self.axis.controller.input_vel = rps
time.sleep(self.COMMUNICATION_INTERVAL)
# if acceleration == 0:
# acceleration = self.ACCELERATIONRANGE[1] # default to max acceleration
if self.axis.current_state != AXIS_STATE_CLOSED_LOOP_CONTROL:
self.axis.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL
self.axis.controller.config.control_mode = CONTROL_MODE_VELOCITY_CONTROL
self.axis.controller.config.input_mode = INPUT_MODE_VEL_RAMP
self._current_rps = rps
self._locked = False
def lock(self):
"""
routine to lock rotor in registered position for sample transfer
"""
if self._locked:
return
if self.axis.current_state != AXIS_STATE_CLOSED_LOOP_CONTROL:
self.axis.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL
self.axis.controller.config.input_mode = INPUT_MODE_TRAP_TRAJ
# self.axis.controller.config.input_mode = INPUT_MODE_POS_FILTER
self.axis.controller.config.control_mode = CONTROL_MODE_POSITION_CONTROL
time.sleep(self.COMMUNICATION_INTERVAL)
self.axis.controller.input_pos = self.__HOMEPOSITION
time.sleep(self.COMMUNICATION_INTERVAL)
t0 = time.time()
while (
np.abs(self.__HOMEPOSITION - self.axis.encoder.pos_circular) > 0.05
): # tolerance as a fraction of a revolution; 0.05 ~= 18 degrees
time.sleep(0.1)
if time.time() - t0 > self.TIMEOUT:
print("resetting")
self.reset()
t0 = time.time()
self._locked = True
def reset(self):
try:
self.disconnect()
except:
pass
self.connect()
def twist_off(self):
"""
routine to slightly rotate the chuck from home position.
intended to help remove a stuck substrate from the o-ring, which can get sticky if
perovskite solution drips onto the o-ring.
"""
if not self._locked:
raise Exception(
"Cannot twist off the sample, the chuck is not currently locked!"
)
target_position = self.__HOMEPOSITION + self.__TWISTDELTA
self.axis.controller.input_pos = target_position
t0 = time.time()
while (np.abs(target_position - self.axis.encoder.pos_circular)) > 0.025:
time.sleep(0.1)
if time.time() - t0 > self.TIMEOUT:
print("resetting")
self.reset()
t0 = time.time()
def stop(self):
"""
stop rotation and locks the rotor in position
"""
if self._locked:
return
self.set_rpm(0, 1000)
t0 = time.time()
min_stopped_time = 2
while True:
if self.axis.encoder.vel_estimate > 0:
t0 = time.time()
if time.time() - t0 > min_stopped_time:
break
time.sleep(0.1)
self.lock()
self.idle()
def idle(self):
if self.axis.current_state != AXIS_STATE_IDLE:
self.axis.requested_state = AXIS_STATE_IDLE
self._locked = False
def _lookup_error(self):
for err in dir(odrive.enums):
if self.axis.error == getattr(odrive.enums, err):
print(err)
break
# logging code
def __logging_worker(self):
t0 = time.time()
self.__logdata = {"time": [], "rpm": []}
while self.__logging_active:
self.__logdata["time"].append(time.time() - t0)
self.__logdata["rpm"].append(
self.axis.encoder.vel_estimate * 60
) # rps from odrive -> rpm
time.sleep(self.LOGGINGINTERVAL)
def start_logging(self):
if self.__logging_active:
raise ValueError("Logging is already active!")
self.__logging_active = True
self.__logging_thread = threading.Thread(target=self.__logging_worker)
self.__logging_thread.start()
def finish_logging(self):
if not self.__logging_active:
raise ValueError("Logging is already stopped!")
self.__logging_active = False
self.__logging_thread.join()
return self.__logdata
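# Typical usage during a spin-coating step (values are illustrative only):
#   sc.start_logging(); sc.set_rpm(3000, acceleration=2000); time.sleep(30)
#   sc.stop(); log = sc.finish_logging()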
def __libfibre_timer_worker(self):
"""To prevent libfibre timers from accumulating, inducing global interpreter lock (GIL)
Note:
`odrv0._libfibre.timer_map` is a dictionary that adds a new `<TimerHandle>` entry once per second.
As these entries accumulate, the terminal eventually slows down. I assume these are all involved
in some background process within `libfibre` that accumulate into GIL. When i clear this dictionary
by `odrv0._libfibre.timer_map = {}`, in a second or two (assuming this is the interval of the
libfibre background process) the terminal speed goes back to normal. From what I can tell this does
not affect operation of the odrive.
We also clear errors to allow recovery if we're stuck
"""
while self.__connected:
time.sleep(1)
if not self.__logging_active and len(self.odrv0._libfibre.timer_map) > 60:
try:
latest_idx = max(list(self.odrv0._libfibre.timer_map.keys()))
self.odrv0._libfibre.timer_map = {
0: self.odrv0._libfibre.timer_map[latest_idx]
}
if self.axis.error > 0:
self.axis.clear_errors()
except:
print(
"Spincoater unable to flush - probably disconnected, will try again later"
)
def __del__(self):
self.disconnect()
|
sh.py
|
"""
http://amoffat.github.io/sh/
"""
# ===============================================================================
# Copyright (C) 2011-2017 by Andrew Moffat
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ===============================================================================
__version__ = "1.12.14"
__project_url__ = "https://github.com/amoffat/sh"
import platform
if "windows" in platform.system().lower(): # pragma: no cover
raise ImportError("sh %s is currently only supported on linux and osx. \
please install pbs 0.110 (http://pypi.python.org/pypi/pbs) for windows \
support." % __version__)
import sys
IS_PY3 = sys.version_info[0] == 3
MINOR_VER = sys.version_info[1]
IS_PY26 = sys.version_info[0] == 2 and MINOR_VER == 6
import traceback
import os
import re
import time
import getpass
from types import ModuleType, GeneratorType
from functools import partial
import inspect
import tempfile
import stat
import glob as glob_module
import ast
from contextlib import contextmanager
import pwd
import errno
from io import UnsupportedOperation, open as fdopen
from locale import getpreferredencoding
DEFAULT_ENCODING = getpreferredencoding() or "UTF-8"
# normally i would hate this idea of using a global to signify whether we are
# running tests, because it breaks the assumption that what is running in the
# tests is what will run live, but we ONLY use this in a place that has no
# serious side-effects that could change anything. as long as we do that, it
# should be ok
RUNNING_TESTS = bool(int(os.environ.get("SH_TESTS_RUNNING", "0")))
FORCE_USE_SELECT = bool(int(os.environ.get("SH_TESTS_USE_SELECT", "0")))
if IS_PY3:
from io import StringIO
ioStringIO = StringIO
from io import BytesIO as cStringIO
iocStringIO = cStringIO
from queue import Queue, Empty
# for some reason, python 3.1 removed the builtin "callable", wtf
if not hasattr(__builtins__, "callable"):
def callable(ob):
return hasattr(ob, "__call__")
else:
from StringIO import StringIO
from cStringIO import OutputType as cStringIO
from io import StringIO as ioStringIO
from io import BytesIO as iocStringIO
from Queue import Queue, Empty
IS_OSX = platform.system() == "Darwin"
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
SH_LOGGER_NAME = __name__
import errno
import pty
import termios
import signal
import gc
import select
import threading
import tty
import fcntl
import struct
import resource
from collections import deque
import logging
import weakref
# a re-entrant lock for pushd. this way, multiple threads that happen to use
# pushd will all see the current working directory for the duration of the
# with-context
PUSHD_LOCK = threading.RLock()
if hasattr(inspect, "getfullargspec"):
def get_num_args(fn):
return len(inspect.getfullargspec(fn).args)
else:
def get_num_args(fn):
return len(inspect.getargspec(fn).args)
if IS_PY3:
raw_input = input
unicode = str
basestring = str
long = int
_unicode_methods = set(dir(unicode()))
HAS_POLL = hasattr(select, "poll")
POLLER_EVENT_READ = 1
POLLER_EVENT_WRITE = 2
POLLER_EVENT_HUP = 4
POLLER_EVENT_ERROR = 8
# here we use a poller interface that transparently selects the most
# capable poller (out of either select.select or select.poll). this was added
# by zhangyafeikimi when he discovered that if the fds created internally by sh
# numbered > 1024, select.select failed (a limitation of select.select). this
# can happen if your script opens a lot of files
if HAS_POLL and not FORCE_USE_SELECT:
class Poller(object):
def __init__(self):
self._poll = select.poll()
# file descriptor <-> file object bidirectional maps
self.fd_lookup = {}
self.fo_lookup = {}
def __nonzero__(self):
return len(self.fd_lookup) != 0
def __len__(self):
return len(self.fd_lookup)
def _set_fileobject(self, f):
if hasattr(f, "fileno"):
fd = f.fileno()
self.fd_lookup[fd] = f
self.fo_lookup[f] = fd
else:
self.fd_lookup[f] = f
self.fo_lookup[f] = f
def _remove_fileobject(self, f):
if hasattr(f, "fileno"):
fd = f.fileno()
del self.fd_lookup[fd]
del self.fo_lookup[f]
else:
del self.fd_lookup[f]
del self.fo_lookup[f]
def _get_file_descriptor(self, f):
return self.fo_lookup.get(f)
def _get_file_object(self, fd):
return self.fd_lookup.get(fd)
def _register(self, f, events):
# f can be a file descriptor or file object
self._set_fileobject(f)
fd = self._get_file_descriptor(f)
self._poll.register(fd, events)
def register_read(self, f):
self._register(f, select.POLLIN | select.POLLPRI)
def register_write(self, f):
self._register(f, select.POLLOUT)
def register_error(self, f):
self._register(f, select.POLLERR | select.POLLHUP | select.POLLNVAL)
def unregister(self, f):
fd = self._get_file_descriptor(f)
self._poll.unregister(fd)
self._remove_fileobject(f)
def poll(self, timeout):
if timeout is not None:
# convert from seconds to milliseconds
timeout *= 1000
changes = self._poll.poll(timeout)
results = []
for fd, events in changes:
f = self._get_file_object(fd)
if events & (select.POLLIN | select.POLLPRI):
results.append((f, POLLER_EVENT_READ))
elif events & (select.POLLOUT):
results.append((f, POLLER_EVENT_WRITE))
elif events & (select.POLLHUP):
results.append((f, POLLER_EVENT_HUP))
elif events & (select.POLLERR | select.POLLNVAL):
results.append((f, POLLER_EVENT_ERROR))
return results
else:
class Poller(object):
def __init__(self):
self.rlist = []
self.wlist = []
self.xlist = []
def __nonzero__(self):
return len(self.rlist) + len(self.wlist) + len(self.xlist) != 0
def __len__(self):
return len(self.rlist) + len(self.wlist) + len(self.xlist)
def _register(self, f, l):
if f not in l:
l.append(f)
def _unregister(self, f, l):
if f in l:
l.remove(f)
def register_read(self, f):
self._register(f, self.rlist)
def register_write(self, f):
self._register(f, self.wlist)
def register_error(self, f):
self._register(f, self.xlist)
def unregister(self, f):
self._unregister(f, self.rlist)
self._unregister(f, self.wlist)
self._unregister(f, self.xlist)
def poll(self, timeout):
_in, _out, _err = select.select(self.rlist, self.wlist, self.xlist, timeout)
results = []
for f in _in:
results.append((f, POLLER_EVENT_READ))
for f in _out:
results.append((f, POLLER_EVENT_WRITE))
for f in _err:
results.append((f, POLLER_EVENT_ERROR))
return results
def encode_to_py3bytes_or_py2str(s):
""" takes anything and attempts to return a py2 string or py3 bytes. this
is typically used when creating command + arguments to be executed via
os.exec* """
fallback_encoding = "utf8"
if IS_PY3:
# if we're already bytes, do nothing
if isinstance(s, bytes):
pass
else:
s = str(s)
try:
s = bytes(s, DEFAULT_ENCODING)
except UnicodeEncodeError:
s = bytes(s, fallback_encoding)
else:
# attempt to convert the thing to unicode from the system's encoding
try:
s = unicode(s, DEFAULT_ENCODING)
# if the thing is already unicode, or it's a number, it can't be
# coerced to unicode with an encoding argument, but if we leave out
# the encoding argument, it will convert it to a string, then to unicode
except TypeError:
s = unicode(s)
# now that we have guaranteed unicode, encode to our system encoding,
# but attempt to fall back to something
try:
s = s.encode(DEFAULT_ENCODING)
except:
s = s.encode(fallback_encoding, "replace")
return s
def _indent_text(text, num=4):
lines = []
for line in text.split("\n"):
line = (" " * num) + line
lines.append(line)
return "\n".join(lines)
class ForkException(Exception):
def __init__(self, orig_exc):
tmpl = """
Original exception:
===================
%s
"""
msg = tmpl % _indent_text(orig_exc)
Exception.__init__(self, msg)
class ErrorReturnCodeMeta(type):
""" a metaclass which provides the ability for an ErrorReturnCode (or
derived) instance, imported from one sh module, to be considered the
subclass of ErrorReturnCode from another module. this is mostly necessary
in the tests, where we do assertRaises, but the ErrorReturnCode that the
program we're testing throws may not be the same class that we pass to
assertRaises
"""
def __subclasscheck__(self, o):
other_bases = set([b.__name__ for b in o.__bases__])
return self.__name__ in other_bases or o.__name__ == self.__name__
class ErrorReturnCode(Exception):
__metaclass__ = ErrorReturnCodeMeta
""" base class for all exceptions as a result of a command's exit status
being deemed an error. this base class is dynamically subclassed into
derived classes with the format: ErrorReturnCode_NNN where NNN is the exit
code number. the reason for this is it reduces boiler plate code when
testing error return codes:
try:
some_cmd()
except ErrorReturnCode_12:
print("couldn't do X")
vs:
try:
some_cmd()
except ErrorReturnCode as e:
if e.exit_code == 12:
print("couldn't do X")
it's not much of a savings, but i believe it makes the code easier to read """
truncate_cap = 750
def __init__(self, full_cmd, stdout, stderr, truncate=True):
self.full_cmd = full_cmd
self.stdout = stdout
self.stderr = stderr
exc_stdout = self.stdout
if truncate:
exc_stdout = exc_stdout[:self.truncate_cap]
out_delta = len(self.stdout) - len(exc_stdout)
if out_delta:
exc_stdout += ("... (%d more, please see e.stdout)" % out_delta).encode()
exc_stderr = self.stderr
if truncate:
exc_stderr = exc_stderr[:self.truncate_cap]
err_delta = len(self.stderr) - len(exc_stderr)
if err_delta:
exc_stderr += ("... (%d more, please see e.stderr)" % err_delta).encode()
msg_tmpl = unicode("\n\n RAN: {cmd}\n\n STDOUT:\n{stdout}\n\n STDERR:\n{stderr}")
msg = msg_tmpl.format(
cmd=self.full_cmd,
stdout=exc_stdout.decode(DEFAULT_ENCODING, "replace"),
stderr=exc_stderr.decode(DEFAULT_ENCODING, "replace")
)
super(ErrorReturnCode, self).__init__(msg)
class SignalException(ErrorReturnCode): pass
class TimeoutException(Exception):
""" the exception thrown when a command is killed because a specified
timeout (via _timeout) was hit """
def __init__(self, exit_code):
self.exit_code = exit_code
super(Exception, self).__init__()
SIGNALS_THAT_SHOULD_THROW_EXCEPTION = set((
signal.SIGABRT,
signal.SIGBUS,
signal.SIGFPE,
signal.SIGILL,
signal.SIGINT,
signal.SIGKILL,
signal.SIGPIPE,
signal.SIGQUIT,
signal.SIGSEGV,
signal.SIGTERM,
signal.SIGSYS,
))
# we subclass AttributeError because:
# https://github.com/ipython/ipython/issues/2577
# https://github.com/amoffat/sh/issues/97#issuecomment-10610629
class CommandNotFound(AttributeError): pass
rc_exc_regex = re.compile("(ErrorReturnCode|SignalException)_((\d+)|SIG[a-zA-Z]+)")
rc_exc_cache = {}
SIGNAL_MAPPING = {}
for k, v in signal.__dict__.items():
if re.match(r"SIG[a-zA-Z]+", k):
SIGNAL_MAPPING[v] = k
def get_exc_from_name(name):
""" takes an exception name, like:
ErrorReturnCode_1
SignalException_9
SignalException_SIGHUP
and returns the corresponding exception. this is primarily used for
importing exceptions from sh into user code, for instance, to capture those
exceptions """
exc = None
try:
return rc_exc_cache[name]
except KeyError:
m = rc_exc_regex.match(name)
if m:
base = m.group(1)
rc_or_sig_name = m.group(2)
if base == "SignalException":
try:
rc = -int(rc_or_sig_name)
except ValueError:
rc = -getattr(signal, rc_or_sig_name)
else:
rc = int(rc_or_sig_name)
exc = get_rc_exc(rc)
return exc
def get_rc_exc(rc):
""" takes a exit code or negative signal number and produces an exception
that corresponds to that return code. positive return codes yield
ErrorReturnCode exception, negative return codes yield SignalException
we also cache the generated exception so that only one signal of that type
exists, preserving identity """
try:
return rc_exc_cache[rc]
except KeyError:
pass
if rc > 0:
name = "ErrorReturnCode_%d" % rc
base = ErrorReturnCode
else:
signame = SIGNAL_MAPPING[abs(rc)]
name = "SignalException_" + signame
base = SignalException
exc = ErrorReturnCodeMeta(name, (base,), {"exit_code": rc})
rc_exc_cache[rc] = exc
return exc
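# e.g. get_rc_exc(1) is ErrorReturnCode_1 and get_rc_exc(-9) is
# SignalException_SIGKILL; repeated calls return the same cached class object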
# we monkey patch glob. i'm normally generally against monkey patching, but i
# decided to do this really un-intrusive patch because we need a way to detect
# if a list that we pass into an sh command was generated from glob. the reason
# being that glob returns an empty list if a pattern is not found, and so
# commands will treat the empty list as no arguments, which can be a problem,
# ie:
#
# ls(glob("*.ojfawe"))
#
# ^ will show the contents of your current directory, because it's essentially
# running ls([]) which, as a process, is just "ls" with no arguments.
#
# so we subclass list and monkey patch the glob function. nobody should be the
# wiser, but we'll have results that we can make some determinations on
_old_glob = glob_module.glob
class GlobResults(list):
def __init__(self, path, results):
self.path = path
list.__init__(self, results)
def glob(path, *args, **kwargs):
expanded = GlobResults(path, _old_glob(path, *args, **kwargs))
return expanded
glob_module.glob = glob
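# sketch (comment only) of what the patch above buys us. with the patched
# glob, an empty result still remembers its pattern, so the command receives
# the literal pattern and fails loudly instead of silently listing the current
# directory. assumes this module is importable as `sh`:
#
#   from sh import glob, ls
#   results = glob("*.ojfawe")    # GlobResults([]) with .path == "*.ojfawe"
#   ls(results)                   # runs `ls "*.ojfawe"`, which errors out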
def which(program, paths=None):
""" takes a program name or full path, plus an optional collection of search
paths, and returns the full path of the requested executable. if paths is
specified, it is the entire list of search paths, and the PATH env is not
used at all. otherwise, PATH env is used to look for the program """
def is_exe(fpath):
return (os.path.exists(fpath) and
os.access(fpath, os.X_OK) and
os.path.isfile(os.path.realpath(fpath)))
found_path = None
fpath, fname = os.path.split(program)
# if there's a path component, then we've specified a path to the program,
# and we should just test if that program is executable. if it is, return
if fpath:
program = os.path.abspath(os.path.expanduser(program))
if is_exe(program):
found_path = program
# otherwise, we've just passed in the program name, and we need to search
# the paths to find where it actually lives
else:
paths_to_search = []
if isinstance(paths, (tuple, list)):
paths_to_search.extend(paths)
else:
env_paths = os.environ.get("PATH", "").split(os.pathsep)
paths_to_search.extend(env_paths)
for path in paths_to_search:
exe_file = os.path.join(path, program)
if is_exe(exe_file):
found_path = exe_file
break
return found_path
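# sketch (comment only) of the helper above. paths are hypothetical:
#
#   which("ls")                          # -> "/bin/ls" on a typical system
#   which("ls", paths=["/opt/custom"])   # searches only /opt/custom, not PATH
#   which("./local-script.sh")           # has a path component, so it is
#                                        # checked directly for executability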
def resolve_command_path(program):
path = which(program)
if not path:
# our actual command might have a dash in it, but we can't call
# that from python (we have to use underscores), so we'll check
# if a dash version of our underscore command exists and use that
# if it does
if "_" in program:
path = which(program.replace("_", "-"))
if not path:
return None
return path
def resolve_command(name, baked_args=None):
path = resolve_command_path(name)
cmd = None
if path:
cmd = Command(path)
if baked_args:
cmd = cmd.bake(**baked_args)
return cmd
class Logger(object):
""" provides a memory-inexpensive logger. a gotcha about python's builtin
logger is that logger objects are never garbage collected. if you create a
thousand loggers with unique names, they'll sit there in memory until your
script is done. with sh, it's easy to create loggers with unique names if
we want our loggers to include our command arguments. for example, these
are all unique loggers:
ls -l
ls -l /tmp
ls /tmp
so instead of creating unique loggers, and without sacrificing logging
output, we use this class, which maintains as part of its state, the logging
"context", which will be the very unique name. this allows us to get a
logger with a very general name, eg: "command", and have a unique name
appended to it via the context, eg: "ls -l /tmp" """
def __init__(self, name, context=None):
self.name = name
self.log = logging.getLogger("%s.%s" % (SH_LOGGER_NAME, name))
self.set_context(context)
def _format_msg(self, msg, *args):
if self.context:
msg = "%s: %s" % (self.context, msg)
return msg % args
def set_context(self, context):
if context:
context = context.replace("%", "%%")
self.context = context or ""
def get_child(self, name, context):
new_name = self.name + "." + name
new_context = self.context + "." + context
l = Logger(new_name, new_context)
return l
def info(self, msg, *args):
self.log.info(self._format_msg(msg, *args))
def debug(self, msg, *args):
self.log.debug(self._format_msg(msg, *args))
def error(self, msg, *args):
self.log.error(self._format_msg(msg, *args))
def exception(self, msg, *args):
self.log.exception(self._format_msg(msg, *args))
def default_logger_str(cmd, call_args, pid=None):
if pid:
s = "<Command %r, pid %d>" % (cmd, pid)
else:
s = "<Command %r>" % cmd
return s
class RunningCommand(object):
""" this represents an executing Command object. it is returned as the
result of __call__() being executed on a Command instance. this creates a
reference to a OProc instance, which is a low-level wrapper around the
process that was exec'd
this is the class that gets manipulated the most by user code, and so it
implements various convenience methods and logical mechanisms for the
underlying process. for example, if a user tries to access a
backgrounded-process's stdout/err, the RunningCommand object is smart enough
to know to wait() on the process to finish first. and when the process
finishes, RunningCommand is smart enough to translate exit codes to
exceptions. """
    # these are attributes that we allow to pass through to OProc
_OProc_attr_whitelist = set((
"signal",
"terminate",
"kill",
"kill_group",
"signal_group",
"pid",
"sid",
"pgid",
"ctty",
"input_thread_exc",
"output_thread_exc",
"bg_thread_exc",
))
def __init__(self, cmd, call_args, stdin, stdout, stderr):
"""
cmd is an array, where each element is encoded as bytes (PY3) or str
(PY2)
"""
# self.ran is used for auditing what actually ran. for example, in
        # exceptions, or if you just want to know what was run after the
# command ran
#
        # here we're making a consistent unicode string out of our cmd.
        # we're also assuming (correctly, i think) that the command and its
        # arguments are in the encoding we pass into _encoding, which falls back to
# the system's encoding
enc = call_args["encoding"]
self.ran = " ".join([arg.decode(enc, "ignore") for arg in cmd])
self.call_args = call_args
self.cmd = cmd
self.process = None
self._process_completed = False
should_wait = True
spawn_process = True
# this is used to track if we've already raised StopIteration, and if we
# have, raise it immediately again if the user tries to call next() on
# us. https://github.com/amoffat/sh/issues/273
self._stopped_iteration = False
# with contexts shouldn't run at all yet, they prepend
# to every command in the context
if call_args["with"]:
spawn_process = False
get_prepend_stack().append(self)
if call_args["piped"] or call_args["iter"] or call_args["iter_noblock"]:
should_wait = False
# we're running in the background, return self and let us lazily
# evaluate
if call_args["bg"]:
should_wait = False
# redirection
if call_args["err_to_out"]:
stderr = OProc.STDOUT
done_callback = call_args["done"]
if done_callback:
call_args["done"] = partial(done_callback, self)
# set up which stream should write to the pipe
# TODO, make pipe None by default and limit the size of the Queue
# in oproc.OProc
pipe = OProc.STDOUT
if call_args["iter"] == "out" or call_args["iter"] is True:
pipe = OProc.STDOUT
elif call_args["iter"] == "err":
pipe = OProc.STDERR
if call_args["iter_noblock"] == "out" or call_args["iter_noblock"] is True:
pipe = OProc.STDOUT
elif call_args["iter_noblock"] == "err":
pipe = OProc.STDERR
# there's currently only one case where we wouldn't spawn a child
# process, and that's if we're using a with-context with our command
self._spawned_and_waited = False
if spawn_process:
log_str_factory = call_args["log_msg"] or default_logger_str
logger_str = log_str_factory(self.ran, call_args)
self.log = Logger("command", logger_str)
self.log.info("starting process")
if should_wait:
self._spawned_and_waited = True
# this lock is needed because of a race condition where a background
# thread, created in the OProc constructor, may try to access
# self.process, but it has not been assigned yet
process_assign_lock = threading.Lock()
with process_assign_lock:
self.process = OProc(self, self.log, cmd, stdin, stdout, stderr,
self.call_args, pipe, process_assign_lock)
logger_str = log_str_factory(self.ran, call_args, self.process.pid)
self.log.set_context(logger_str)
self.log.info("process started")
if should_wait:
self.wait()
def wait(self):
""" waits for the running command to finish. this is called on all
running commands, eventually, except for ones that run in the background
"""
if not self._process_completed:
self._process_completed = True
exit_code = self.process.wait()
if self.process.timed_out:
# if we timed out, our exit code represents a signal, which is
# negative, so let's make it positive to store in our
# TimeoutException
raise TimeoutException(-exit_code)
else:
self.handle_command_exit_code(exit_code)
# if an iterable command is using an instance of OProc for its stdin,
# wait on it. the process is probably set to "piped", which means it
# won't be waited on, which means exceptions won't propagate up to the
# main thread. this allows them to bubble up
if self.process._stdin_process:
self.process._stdin_process.command.wait()
self.log.info("process completed")
return self
def handle_command_exit_code(self, code):
""" here we determine if we had an exception, or an error code that we
weren't expecting to see. if we did, we create and raise an exception
"""
ca = self.call_args
exc_class = get_exc_exit_code_would_raise(code, ca["ok_code"],
ca["piped"])
if exc_class:
exc = exc_class(self.ran, self.process.stdout, self.process.stderr,
ca["truncate_exc"])
raise exc
@property
def stdout(self):
self.wait()
return self.process.stdout
@property
def stderr(self):
self.wait()
return self.process.stderr
@property
def exit_code(self):
self.wait()
return self.process.exit_code
def __len__(self):
return len(str(self))
def __enter__(self):
""" we don't actually do anything here because anything that should have
been done would have been done in the Command.__call__ call.
        essentially all that has to happen is for the command to be pushed
        onto the prepend stack. """
pass
def __iter__(self):
return self
def next(self):
""" allow us to iterate over the output of our command """
if self._stopped_iteration:
raise StopIteration()
# we do this because if get blocks, we can't catch a KeyboardInterrupt
# so the slight timeout allows for that.
while True:
try:
chunk = self.process._pipe_queue.get(True, 0.001)
except Empty:
if self.call_args["iter_noblock"]:
return errno.EWOULDBLOCK
else:
if chunk is None:
self.wait()
self._stopped_iteration = True
raise StopIteration()
try:
return chunk.decode(self.call_args["encoding"],
self.call_args["decode_errors"])
except UnicodeDecodeError:
return chunk
# python 3
__next__ = next
def __exit__(self, typ, value, traceback):
if self.call_args["with"] and get_prepend_stack():
get_prepend_stack().pop()
def __str__(self):
""" in python3, should return unicode. in python2, should return a
string of bytes """
if IS_PY3:
return self.__unicode__()
else:
return unicode(self).encode(self.call_args["encoding"])
def __unicode__(self):
""" a magic method defined for python2. calling unicode() on a
RunningCommand object will call this """
if self.process and self.stdout:
return self.stdout.decode(self.call_args["encoding"],
self.call_args["decode_errors"])
elif IS_PY3:
return ""
else:
return unicode("")
def __eq__(self, other):
return unicode(self) == unicode(other)
__hash__ = None # Avoid DeprecationWarning in Python < 3
def __contains__(self, item):
return item in str(self)
def __getattr__(self, p):
# let these three attributes pass through to the OProc object
if p in self._OProc_attr_whitelist:
if self.process:
return getattr(self.process, p)
else:
raise AttributeError
# see if strings have what we're looking for. we're looking at the
# method names explicitly because we don't want to evaluate self unless
# we absolutely have to, the reason being, in python2, hasattr swallows
# exceptions, and if we try to run hasattr on a command that failed and
# is being run with _iter=True, the command will be evaluated, throw an
# exception, but hasattr will discard it
if p in _unicode_methods:
return getattr(unicode(self), p)
raise AttributeError
def __repr__(self):
""" in python3, should return unicode. in python2, should return a
string of bytes """
try:
return str(self)
except UnicodeDecodeError:
if self.process:
if self.stdout:
return repr(self.stdout)
return repr("")
def __long__(self):
return long(str(self).strip())
def __float__(self):
return float(str(self).strip())
def __int__(self):
return int(str(self).strip())
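# sketch (comment only) of how user code typically touches a RunningCommand.
# the commands and files are examples, not part of this module:
#
#   import sh
#   p = sh.sleep(3, _bg=True)    # returns immediately with a RunningCommand
#   p.wait()                     # blocks; exit codes become exceptions here
#
#   for line in sh.tail("-f", "/var/log/syslog", _iter=True):
#       print(line, end="")      # lazily pulls chunks off _pipe_queue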
def output_redirect_is_filename(out):
return isinstance(out, basestring)
def get_prepend_stack():
tl = Command.thread_local
if not hasattr(tl, "_prepend_stack"):
tl._prepend_stack = []
return tl._prepend_stack
def special_kwarg_validator(kwargs, invalid_list):
s1 = set(kwargs.keys())
invalid_args = []
for args in invalid_list:
if callable(args):
fn = args
ret = fn(kwargs)
invalid_args.extend(ret)
else:
args, error_msg = args
if s1.issuperset(args):
invalid_args.append((args, error_msg))
return invalid_args
def get_fileno(ob):
    # in py2, this will return None. in py3, it will return a method that
# raises when called
fileno_meth = getattr(ob, "fileno", None)
fileno = None
if fileno_meth:
# py3 StringIO objects will report a fileno, but calling it will raise
# an exception
try:
fileno = fileno_meth()
except UnsupportedOperation:
pass
elif isinstance(ob, (int, long)) and ob >= 0:
fileno = ob
return fileno
def ob_is_tty(ob):
""" checks if an object (like a file-like object) is a tty. """
fileno = get_fileno(ob)
is_tty = False
if fileno:
is_tty = os.isatty(fileno)
return is_tty
def ob_is_pipe(ob):
fileno = get_fileno(ob)
is_pipe = False
if fileno:
fd_stat = os.fstat(fileno)
is_pipe = stat.S_ISFIFO(fd_stat.st_mode)
return is_pipe
def tty_in_validator(kwargs):
pairs = (("tty_in", "in"), ("tty_out", "out"))
invalid = []
for tty, std in pairs:
if tty in kwargs and ob_is_tty(kwargs.get(std, None)):
args = (tty, std)
error = "`_%s` is a TTY already, so so it doesn't make sense \
to set up a TTY with `_%s`" % (std, tty)
invalid.append((args, error))
return invalid
def bufsize_validator(kwargs):
""" a validator to prevent a user from saying that they want custom
buffering when they're using an in/out object that will be os.dup'd to the
process, and has its own buffering. an example is a pipe or a tty. it
doesn't make sense to tell them to have a custom buffering, since the os
controls this. """
invalid = []
in_ob = kwargs.get("in", None)
out_ob = kwargs.get("out", None)
in_buf = kwargs.get("in_bufsize", None)
out_buf = kwargs.get("out_bufsize", None)
in_no_buf = ob_is_tty(in_ob) or ob_is_pipe(in_ob)
out_no_buf = ob_is_tty(out_ob) or ob_is_pipe(out_ob)
err = "Can't specify an {target} bufsize if the {target} target is a pipe or TTY"
if in_no_buf and in_buf is not None:
invalid.append((("in", "in_bufsize"), err.format(target="in")))
if out_no_buf and out_buf is not None:
invalid.append((("out", "out_bufsize"), err.format(target="out")))
return invalid
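# sketch (comment only): conflicting special kwargs are rejected up front by
# these validators when a command is called, e.g.
#
#   sh.ls(_fg=True, _bg=True)
#   # TypeError: Invalid special arguments:
#   #   ('fg', 'bg'): Command can't be run in the foreground and background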
class Command(object):
""" represents an un-run system program, like "ls" or "cd". because it
represents the program itself (and not a running instance of it), it should
hold very little state. in fact, the only state it does hold is baked
arguments.
when a Command object is called, the result that is returned is a
RunningCommand object, which represents the Command put into an execution
state. """
thread_local = threading.local()
_call_args = {
"fg": False, # run command in foreground
# run a command in the background. commands run in the background
# ignore SIGHUP and do not automatically exit when the parent process
# ends
"bg": False,
# automatically report exceptions for background commands
"bg_exc": True,
"with": False, # prepend the command to every command after it
"in": None,
"out": None, # redirect STDOUT
"err": None, # redirect STDERR
"err_to_out": None, # redirect STDERR to STDOUT
# stdin buffer size
# 1 for line, 0 for unbuffered, any other number for that amount
"in_bufsize": 0,
# stdout buffer size, same values as above
"out_bufsize": 1,
"err_bufsize": 1,
# this is how big the output buffers will be for stdout and stderr.
# this is essentially how much output they will store from the process.
# we use a deque, so if it overflows past this amount, the first items
# get pushed off as each new item gets added.
#
# NOTICE
# this is not a *BYTE* size, this is a *CHUNK* size...meaning, that if
# you're buffering out/err at 1024 bytes, the internal buffer size will
# be "internal_bufsize" CHUNKS of 1024 bytes
"internal_bufsize": 3 * 1024 ** 2,
"env": None,
"piped": None,
"iter": None,
"iter_noblock": None,
"ok_code": 0,
"cwd": None,
# the separator delimiting between a long-argument's name and its value
# setting this to None will cause name and value to be two separate
# arguments, like for short options
# for example, --arg=derp, '=' is the long_sep
"long_sep": "=",
# the prefix used for long arguments
"long_prefix": "--",
# this is for programs that expect their input to be from a terminal.
# ssh is one of those programs
"tty_in": False,
"tty_out": True,
"encoding": DEFAULT_ENCODING,
"decode_errors": "strict",
# how long the process should run before it is auto-killed
"timeout": None,
"timeout_signal": signal.SIGKILL,
# TODO write some docs on "long-running processes"
# these control whether or not stdout/err will get aggregated together
# as the process runs. this has memory usage implications, so sometimes
# with long-running processes with a lot of data, it makes sense to
# set these to true
"no_out": False,
"no_err": False,
"no_pipe": False,
# if any redirection is used for stdout or stderr, internal buffering
# of that data is not stored. this forces it to be stored, as if
# the output is being T'd to both the redirected destination and our
# internal buffers
"tee": None,
# will be called when a process terminates regardless of exception
"done": None,
# a tuple (rows, columns) of the desired size of both the stdout and
# stdin ttys, if ttys are being used
"tty_size": (20, 80),
# whether or not our exceptions should be truncated
"truncate_exc": True,
# a function to call after the child forks but before the process execs
"preexec_fn": None,
# UID to set after forking. Requires root privileges. Not supported on
# Windows.
"uid": None,
# put the forked process in its own process session?
"new_session": True,
# pre-process args passed into __call__. only really useful when used
# in .bake()
"arg_preprocess": None,
# a callable that produces a log message from an argument tuple of the
# command and the args
"log_msg": None,
}
# this is a collection of validators to make sure the special kwargs make
# sense
_kwarg_validators = (
(("fg", "bg"), "Command can't be run in the foreground and background"),
(("fg", "err_to_out"), "Can't redirect STDERR in foreground mode"),
(("err", "err_to_out"), "Stderr is already being redirected"),
(("piped", "iter"), "You cannot iterate when this command is being piped"),
(("piped", "no_pipe"), "Using a pipe doesn't make sense if you've \
disabled the pipe"),
(("no_out", "iter"), "You cannot iterate over output if there is no \
output"),
tty_in_validator,
bufsize_validator,
)
def __init__(self, path, search_paths=None):
found = which(path, search_paths)
self._path = encode_to_py3bytes_or_py2str("")
# is the command baked (aka, partially applied)?
self._partial = False
self._partial_baked_args = []
self._partial_call_args = {}
# bugfix for functools.wraps. issue #121
self.__name__ = str(self)
if not found:
raise CommandNotFound(path)
# the reason why we set the values early in the constructor, and again
# here, is for people who have tools that inspect the stack on
# exception. if CommandNotFound is raised, we need self._path and the
# other attributes to be set correctly, so repr() works when they're
# inspecting the stack. issue #304
self._path = encode_to_py3bytes_or_py2str(found)
self.__name__ = str(self)
def __getattribute__(self, name):
# convenience
getattr = partial(object.__getattribute__, self)
val = None
if name.startswith("_"):
val = getattr(name)
elif name == "bake":
val = getattr("bake")
# here we have a way of getting past shadowed subcommands. for example,
# if "git bake" was a thing, we wouldn't be able to do `git.bake()`
# because `.bake()` is already a method. so we allow `git.bake_()`
elif name.endswith("_"):
name = name[:-1]
if val is None:
val = getattr("bake")(name)
return val
@staticmethod
def _extract_call_args(kwargs):
""" takes kwargs that were passed to a command's __call__ and extracts
        out the special keyword arguments. we return a tuple of special keyword
        args, and kwargs that will go to the exec'd command """
kwargs = kwargs.copy()
call_args = {}
for parg, default in Command._call_args.items():
key = "_" + parg
if key in kwargs:
call_args[parg] = kwargs[key]
del kwargs[key]
invalid_kwargs = special_kwarg_validator(call_args,
Command._kwarg_validators)
if invalid_kwargs:
exc_msg = []
for args, error_msg in invalid_kwargs:
exc_msg.append(" %r: %s" % (args, error_msg))
exc_msg = "\n".join(exc_msg)
raise TypeError("Invalid special arguments:\n\n%s\n" % exc_msg)
return call_args, kwargs
# TODO needs documentation
def bake(self, *args, **kwargs):
fn = type(self)(self._path)
fn._partial = True
call_args, kwargs = self._extract_call_args(kwargs)
pruned_call_args = call_args
for k, v in Command._call_args.items():
try:
if pruned_call_args[k] == v:
del pruned_call_args[k]
except KeyError:
continue
fn._partial_call_args.update(self._partial_call_args)
fn._partial_call_args.update(pruned_call_args)
fn._partial_baked_args.extend(self._partial_baked_args)
sep = pruned_call_args.get("long_sep", self._call_args["long_sep"])
prefix = pruned_call_args.get("long_prefix",
self._call_args["long_prefix"])
fn._partial_baked_args.extend(compile_args(args, kwargs, sep, prefix))
return fn
def __str__(self):
""" in python3, should return unicode. in python2, should return a
string of bytes """
if IS_PY3:
return self.__unicode__()
else:
return self.__unicode__().encode(DEFAULT_ENCODING)
def __eq__(self, other):
return str(self) == str(other)
__hash__ = None # Avoid DeprecationWarning in Python < 3
def __repr__(self):
""" in python3, should return unicode. in python2, should return a
string of bytes """
return "<Command %r>" % str(self)
def __unicode__(self):
""" a magic method defined for python2. calling unicode() on a
self will call this """
baked_args = " ".join(item.decode(DEFAULT_ENCODING) for item in self._partial_baked_args)
if baked_args:
baked_args = " " + baked_args
return self._path.decode(DEFAULT_ENCODING) + baked_args
def __enter__(self):
self(_with=True)
def __exit__(self, typ, value, traceback):
get_prepend_stack().pop()
def __call__(self, *args, **kwargs):
kwargs = kwargs.copy()
args = list(args)
# this will hold our final command, including arguments, that will be
# execd
cmd = []
# this will hold a complete mapping of all our special keyword arguments
# and their values
call_args = Command._call_args.copy()
# aggregate any 'with' contexts
for prepend in get_prepend_stack():
pcall_args = prepend.call_args.copy()
# don't pass the 'with' call arg
pcall_args.pop("with", None)
call_args.update(pcall_args)
cmd.extend(prepend.cmd)
cmd.append(self._path)
# do we have an argument pre-processor? if so, run it. we need to do
# this early, so that args, kwargs are accurate
preprocessor = self._partial_call_args.get("arg_preprocess", None)
if preprocessor:
args, kwargs = preprocessor(args, kwargs)
# here we extract the special kwargs and override any
# special kwargs from the possibly baked command
extracted_call_args, kwargs = self._extract_call_args(kwargs)
call_args.update(self._partial_call_args)
call_args.update(extracted_call_args)
# handle a None. this is added back only to not break the api in the
# 1.* version. TODO remove this in 2.0, as "ok_code", if specified,
# should always be a definitive value or list of values, and None is
# ambiguous
if call_args["ok_code"] is None:
call_args["ok_code"] = 0
if not getattr(call_args["ok_code"], "__iter__", None):
call_args["ok_code"] = [call_args["ok_code"]]
# check if we're piping via composition
stdin = call_args["in"]
if args:
first_arg = args.pop(0)
if isinstance(first_arg, RunningCommand):
if first_arg.call_args["piped"]:
stdin = first_arg.process
else:
stdin = first_arg.process._pipe_queue
else:
args.insert(0, first_arg)
processed_args = compile_args(args, kwargs, call_args["long_sep"],
call_args["long_prefix"])
# makes sure our arguments are broken up correctly
split_args = self._partial_baked_args + processed_args
final_args = split_args
cmd.extend(final_args)
# if we're running in foreground mode, we need to completely bypass
# launching a RunningCommand and OProc and just do a spawn
if call_args["fg"]:
if call_args["env"] is None:
launch = lambda: os.spawnv(os.P_WAIT, cmd[0], cmd)
else:
launch = lambda: os.spawnve(os.P_WAIT, cmd[0], cmd, call_args["env"])
exit_code = launch()
exc_class = get_exc_exit_code_would_raise(exit_code,
call_args["ok_code"], call_args["piped"])
if exc_class:
if IS_PY3:
ran = " ".join([arg.decode(DEFAULT_ENCODING, "ignore") for arg in cmd])
else:
ran = " ".join(cmd)
exc = exc_class(ran, b"", b"", call_args["truncate_exc"])
raise exc
return None
# stdout redirection
stdout = call_args["out"]
if output_redirect_is_filename(stdout):
stdout = open(str(stdout), "wb")
# stderr redirection
stderr = call_args["err"]
if output_redirect_is_filename(stderr):
stderr = open(str(stderr), "wb")
return RunningCommand(cmd, call_args, stdin, stdout, stderr)
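# sketch (comment only) of the Command lifecycle described above: resolve the
# program, optionally bake some arguments, then call it to get a
# RunningCommand. the path, host, and port are hypothetical:
#
#   from sh import Command
#   ssh = Command("/usr/bin/ssh")
#   myserver = ssh.bake("-p", 1393, "me@myserver")   # partially applied
#   print(myserver("uptime"))      # runs: ssh -p 1393 me@myserver uptime
#   myserver("uptime", _fg=True)   # foreground mode skips OProc entirely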
def compile_args(args, kwargs, sep, prefix):
""" takes args and kwargs, as they were passed into the command instance
being executed with __call__, and compose them into a flat list that
will eventually be fed into exec. example:
with this call:
sh.ls("-l", "/tmp", color="never")
this function receives
args = ['-l', '/tmp']
kwargs = {'color': 'never'}
and produces
['-l', '/tmp', '--color=never']
"""
processed_args = []
encode = encode_to_py3bytes_or_py2str
# aggregate positional args
for arg in args:
if isinstance(arg, (list, tuple)):
if isinstance(arg, GlobResults) and not arg:
arg = [arg.path]
for sub_arg in arg:
processed_args.append(encode(sub_arg))
elif isinstance(arg, dict):
processed_args += aggregate_keywords(arg, sep, prefix, raw=True)
else:
processed_args.append(encode(arg))
# aggregate the keyword arguments
processed_args += aggregate_keywords(kwargs, sep, prefix)
return processed_args
def aggregate_keywords(keywords, sep, prefix, raw=False):
""" take our keyword arguments, and a separator, and compose the list of
flat long (and short) arguments. example
{'color': 'never', 't': True, 'something': True} with sep '='
becomes
['--color=never', '-t', '--something']
the `raw` argument indicates whether or not we should leave the argument
name alone, or whether we should replace "_" with "-". if we pass in a
dictionary, like this:
sh.command({"some_option": 12})
then `raw` gets set to True, because we want to leave the key as-is, to
produce:
['--some_option=12']
but if we just use a command's kwargs, `raw` is False, which means this:
sh.command(some_option=12)
becomes:
['--some-option=12']
    essentially, using kwargs is a convenience, but it lacks the ability to
put a '-' in the name, so we do the replacement of '_' to '-' for you.
but when you really don't want that to happen, you should use a
dictionary instead with the exact names you want
"""
processed = []
encode = encode_to_py3bytes_or_py2str
for k, v in keywords.items():
# we're passing a short arg as a kwarg, example:
# cut(d="\t")
if len(k) == 1:
if v is not False:
processed.append(encode("-" + k))
if v is not True:
processed.append(encode(v))
# we're doing a long arg
else:
if not raw:
k = k.replace("_", "-")
if v is True:
processed.append(encode("--" + k))
elif v is False:
pass
elif sep is None or sep == " ":
processed.append(encode(prefix + k))
processed.append(encode(v))
else:
arg = encode("%s%s%s%s" % (prefix, k, sep, v))
processed.append(arg)
return processed
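# sketch (comment only) of the keyword compilation above. output order follows
# dict iteration order, and values come back encoded (bytes on py3):
#
#   aggregate_keywords({"color": "never", "t": True}, "=", "--")
#   # -> [b'--color=never', b'-t']
#   aggregate_keywords({"some_option": 12}, "=", "--", raw=True)
#   # -> [b'--some_option=12']     underscores are left alone when raw=True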
def _start_daemon_thread(fn, name, exc_queue, *args):
def wrap(*args, **kwargs):
try:
fn(*args, **kwargs)
except Exception as e:
exc_queue.put(e)
raise
thrd = threading.Thread(target=wrap, name=name, args=args)
thrd.daemon = True
thrd.start()
return thrd
def setwinsize(fd, rows_cols):
""" set the terminal size of a tty file descriptor. borrowed logic
from pexpect.py """
rows, cols = rows_cols
TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
s = struct.pack('HHHH', rows, cols, 0, 0)
fcntl.ioctl(fd, TIOCSWINSZ, s)
def construct_streamreader_callback(process, handler):
""" here we're constructing a closure for our streamreader callback. this
is used in the case that we pass a callback into _out or _err, meaning we
    want our callback to handle each bit of output
we construct the closure based on how many arguments it takes. the reason
for this is to make it as easy as possible for people to use, without
limiting them. a new user will assume the callback takes 1 argument (the
data). as they get more advanced, they may want to terminate the process,
or pass some stdin back, and will realize that they can pass a callback of
more args """
# implied arg refers to the "self" that methods will pass in. we need to
# account for this implied arg when figuring out what function the user
# passed in based on number of args
implied_arg = 0
partial_args = 0
handler_to_inspect = handler
if isinstance(handler, partial):
partial_args = len(handler.args)
handler_to_inspect = handler.func
if inspect.ismethod(handler_to_inspect):
implied_arg = 1
num_args = get_num_args(handler_to_inspect)
else:
if inspect.isfunction(handler_to_inspect):
num_args = get_num_args(handler_to_inspect)
# is an object instance with __call__ method
else:
implied_arg = 1
num_args = get_num_args(handler_to_inspect.__call__)
net_args = num_args - implied_arg - partial_args
handler_args = ()
# just the chunk
if net_args == 1:
handler_args = ()
# chunk, stdin
if net_args == 2:
handler_args = (process.stdin,)
# chunk, stdin, process
elif net_args == 3:
# notice we're only storing a weakref, to prevent cyclic references
# (where the process holds a streamreader, and a streamreader holds a
        # handler-closure with a reference to the process)
handler_args = (process.stdin, weakref.ref(process))
def fn(chunk):
# this is pretty ugly, but we're evaluating the process at call-time,
# because it's a weakref
args = handler_args
if len(args) == 2:
args = (handler_args[0], handler_args[1]())
return handler(chunk, *args)
return fn
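# sketch (comment only) of the three callback shapes this closure supports,
# as they would be passed via _out. the names and command are examples:
#
#   def just_data(chunk): ...
#   def data_and_stdin(chunk, stdin): ...
#   def data_stdin_process(chunk, stdin, process):
#       if b"error" in chunk:
#           process.kill()
#
#   sh.tail("-f", "/var/log/syslog", _out=data_stdin_process, _bg=True)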
def get_exc_exit_code_would_raise(exit_code, ok_codes, sigpipe_ok):
exc = None
success = exit_code in ok_codes
bad_sig = -exit_code in SIGNALS_THAT_SHOULD_THROW_EXCEPTION
# if this is a piped command, SIGPIPE must be ignored by us and not raise an
# exception, since it's perfectly normal for the consumer of a process's
# pipe to terminate early
if sigpipe_ok and -exit_code == signal.SIGPIPE:
bad_sig = False
success = True
if not success or bad_sig:
exc = get_rc_exc(exit_code)
return exc
def handle_process_exit_code(exit_code):
""" this should only ever be called once for each child process """
# if we exited from a signal, let our exit code reflect that
if os.WIFSIGNALED(exit_code):
exit_code = -os.WTERMSIG(exit_code)
# otherwise just give us a normal exit code
elif os.WIFEXITED(exit_code):
exit_code = os.WEXITSTATUS(exit_code)
else:
raise RuntimeError("Unknown child exit status!")
return exit_code
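# sketch (comment only), assuming the usual unix wait-status encoding:
#
#   handle_process_exit_code(0x0100)   # ->  1   normal exit with code 1
#   handle_process_exit_code(0x0009)   # -> -9   killed by SIGKILL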
def no_interrupt(syscall, *args, **kwargs):
""" a helper for making system calls immune to EINTR """
ret = None
while True:
try:
ret = syscall(*args, **kwargs)
except OSError as e:
if e.errno == errno.EINTR:
continue
else:
raise
else:
break
return ret
class OProc(object):
""" this class is instantiated by RunningCommand for a command to be exec'd.
it handles all the nasty business involved with correctly setting up the
    input/output to the child process. it gets its name from subprocess.Popen
(process open) but we're calling ours OProc (open process) """
_default_window_size = (24, 80)
# used in redirecting
STDOUT = -1
STDERR = -2
def __init__(self, command, parent_log, cmd, stdin, stdout, stderr,
call_args, pipe, process_assign_lock):
"""
cmd is the full string that will be exec'd. it includes the program
name and all its arguments
stdin, stdout, stderr are what the child will use for standard
input/output/err
call_args is a mapping of all the special keyword arguments to apply
to the child process
"""
self.command = command
self.call_args = call_args
# convenience
ca = self.call_args
if ca["uid"] is not None:
if os.getuid() != 0:
raise RuntimeError("UID setting requires root privileges")
target_uid = ca["uid"]
pwrec = pwd.getpwuid(ca["uid"])
target_gid = pwrec.pw_gid
# I had issues with getting 'Input/Output error reading stdin' from dd,
# until I set _tty_out=False
if ca["piped"]:
ca["tty_out"] = False
self._stdin_process = None
# if the objects that we are passing to the OProc happen to be a
# file-like object that is a tty, for example `sys.stdin`, then, later
# on in this constructor, we're going to skip out on setting up pipes
# and pseudoterminals for those endpoints
stdin_is_tty_or_pipe = ob_is_tty(stdin) or ob_is_pipe(stdin)
stdout_is_tty_or_pipe = ob_is_tty(stdout) or ob_is_pipe(stdout)
stderr_is_tty_or_pipe = ob_is_tty(stderr) or ob_is_pipe(stderr)
tee_out = ca["tee"] in (True, "out")
tee_err = ca["tee"] == "err"
# if we're passing in a custom stdout/out/err value, we obviously have
# to force not using single_tty
custom_in_out_err = stdin or stdout or stderr
single_tty = (ca["tty_in"] and ca["tty_out"]) \
and not custom_in_out_err
# this logic is a little convoluted, but basically this top-level
# if/else is for consolidating input and output TTYs into a single
# TTY. this is the only way some secure programs like ssh will
        # output correctly (that is, if stdout and stdin are both the same TTY)
if single_tty:
self._stdin_read_fd, self._stdin_write_fd = pty.openpty()
self._stdout_read_fd = os.dup(self._stdin_read_fd)
self._stdout_write_fd = os.dup(self._stdin_write_fd)
self._stderr_read_fd = os.dup(self._stdin_read_fd)
self._stderr_write_fd = os.dup(self._stdin_write_fd)
# do not consolidate stdin and stdout. this is the most common use-
# case
else:
# this check here is because we may be doing piping and so our stdin
# might be an instance of OProc
if isinstance(stdin, OProc) and stdin.call_args["piped"]:
self._stdin_write_fd = stdin._pipe_fd
self._stdin_read_fd = None
self._stdin_process = stdin
elif stdin_is_tty_or_pipe:
self._stdin_write_fd = os.dup(get_fileno(stdin))
self._stdin_read_fd = None
elif ca["tty_in"]:
self._stdin_read_fd, self._stdin_write_fd = pty.openpty()
# tty_in=False is the default
else:
self._stdin_write_fd, self._stdin_read_fd = os.pipe()
if stdout_is_tty_or_pipe and not tee_out:
self._stdout_write_fd = os.dup(get_fileno(stdout))
self._stdout_read_fd = None
# tty_out=True is the default
elif ca["tty_out"]:
self._stdout_read_fd, self._stdout_write_fd = pty.openpty()
else:
self._stdout_read_fd, self._stdout_write_fd = os.pipe()
# unless STDERR is going to STDOUT, it ALWAYS needs to be a pipe,
# and never a PTY. the reason for this is not totally clear to me,
# but it has to do with the fact that if STDERR isn't set as the
# CTTY (because STDOUT is), the STDERR buffer won't always flush
# by the time the process exits, and the data will be lost.
# i've only seen this on OSX.
if stderr is OProc.STDOUT:
# if stderr is going to stdout, but stdout is a tty or a pipe,
# we should not specify a read_fd, because stdout is dup'd
# directly to the stdout fd (no pipe), and so stderr won't have
# a slave end of a pipe either to dup
if stdout_is_tty_or_pipe and not tee_out:
self._stderr_read_fd = None
else:
self._stderr_read_fd = os.dup(self._stdout_read_fd)
self._stderr_write_fd = os.dup(self._stdout_write_fd)
elif stderr_is_tty_or_pipe and not tee_err:
self._stderr_write_fd = os.dup(get_fileno(stderr))
self._stderr_read_fd = None
else:
self._stderr_read_fd, self._stderr_write_fd = os.pipe()
piped = ca["piped"]
self._pipe_fd = None
if piped:
fd_to_use = self._stdout_read_fd
if piped == "err":
fd_to_use = self._stderr_read_fd
self._pipe_fd = os.dup(fd_to_use)
new_session = ca["new_session"]
needs_ctty = ca["tty_in"] and new_session
self.ctty = None
if needs_ctty:
self.ctty = os.ttyname(self._stdin_write_fd)
# this is a hack, but what we're doing here is intentionally throwing an
# OSError exception if our child processes's directory doesn't exist,
# but we're doing it BEFORE we fork. the reason for before the fork is
# error handling. i'm currently too lazy to implement what
        # subprocess.py did and set up an error pipe to handle exceptions that
# happen in the child between fork and exec. it has only been seen in
# the wild for a missing cwd, so we'll handle it here.
cwd = ca["cwd"]
if cwd is not None and not os.path.exists(cwd):
os.chdir(cwd)
gc_enabled = gc.isenabled()
if gc_enabled:
gc.disable()
# for synchronizing
session_pipe_read, session_pipe_write = os.pipe()
exc_pipe_read, exc_pipe_write = os.pipe()
        # this pipe is for synchronizing with the child that the parent has
# closed its in/out/err fds. this is a bug on OSX (but not linux),
# where we can lose output sometimes, due to a race, if we do
# os.close(self._stdout_write_fd) in the parent after the child starts
# writing.
if IS_OSX:
close_pipe_read, close_pipe_write = os.pipe()
# session id, group id, process id
self.sid = None
self.pgid = None
self.pid = os.fork()
# child
if self.pid == 0: # pragma: no cover
if IS_OSX:
os.read(close_pipe_read, 1)
os.close(close_pipe_read)
os.close(close_pipe_write)
try:
# ignoring SIGHUP lets us persist even after the parent process
# exits. only ignore if we're backgrounded
if ca["bg"] is True:
signal.signal(signal.SIGHUP, signal.SIG_IGN)
# python ignores SIGPIPE by default. we must make sure to put
# this behavior back to the default for spawned processes,
# otherwise SIGPIPE won't kill piped processes, which is what we
# need, so that we can check the error code of the killed
# process to see that SIGPIPE killed it
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# put our forked process in a new session? this will relinquish
# any control of our inherited CTTY and also make our parent
# process init
if new_session:
os.setsid()
# if we're not going in a new session, we should go in a new
# process group. this way, our process, and any children it
# spawns, are alone, contained entirely in one group. if we
# didn't do this, and didn't use a new session, then our exec'd
# process *could* exist in the same group as our python process,
# depending on how we launch the process (from a shell, or some
# other way)
else:
os.setpgrp()
sid = os.getsid(0)
pgid = os.getpgid(0)
payload = ("%d,%d" % (sid, pgid)).encode(DEFAULT_ENCODING)
os.write(session_pipe_write, payload)
if ca["tty_out"] and not stdout_is_tty_or_pipe and not single_tty:
# set raw mode, so there isn't any weird translation of
# newlines to \r\n and other oddities. we're not outputting
# to a terminal anyways
#
# we HAVE to do this here, and not in the parent process,
# because we have to guarantee that this is set before the
# child process is run, and we can't do it twice.
tty.setraw(self._stdout_write_fd)
# if the parent-side fd for stdin exists, close it. the case
# where it may not exist is if we're using piping
if self._stdin_read_fd:
os.close(self._stdin_read_fd)
if self._stdout_read_fd:
os.close(self._stdout_read_fd)
if self._stderr_read_fd:
os.close(self._stderr_read_fd)
os.close(session_pipe_read)
os.close(exc_pipe_read)
if cwd:
os.chdir(cwd)
os.dup2(self._stdin_write_fd, 0)
os.dup2(self._stdout_write_fd, 1)
os.dup2(self._stderr_write_fd, 2)
# set our controlling terminal, but only if we're using a tty
# for stdin. it doesn't make sense to have a ctty otherwise
if needs_ctty:
tmp_fd = os.open(os.ttyname(0), os.O_RDWR)
os.close(tmp_fd)
if ca["tty_out"] and not stdout_is_tty_or_pipe:
setwinsize(1, ca["tty_size"])
if ca["uid"] is not None:
os.setgid(target_gid)
os.setuid(target_uid)
preexec_fn = ca["preexec_fn"]
if callable(preexec_fn):
preexec_fn()
# don't inherit file descriptors
max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
os.closerange(3, max_fd)
# actually execute the process
if ca["env"] is None:
os.execv(cmd[0], cmd)
else:
os.execve(cmd[0], cmd, ca["env"])
# we must ensure that we carefully exit the child process on
# exception, otherwise the parent process code will be executed
# twice on exception https://github.com/amoffat/sh/issues/202
#
# if your parent process experiences an exit code 255, it is most
# likely that an exception occurred between the fork of the child
# and the exec. this should be reported.
except:
# some helpful debugging
try:
tb = traceback.format_exc().encode("utf8", "ignore")
os.write(exc_pipe_write, tb)
finally:
os._exit(255)
# parent
else:
if gc_enabled:
gc.enable()
os.close(self._stdin_write_fd)
os.close(self._stdout_write_fd)
os.close(self._stderr_write_fd)
# tell our child process that we've closed our write_fds, so it is
# ok to proceed towards exec. see the comment where this pipe is
# opened, for why this is necessary
if IS_OSX:
os.close(close_pipe_read)
os.write(close_pipe_write, str(1).encode(DEFAULT_ENCODING))
os.close(close_pipe_write)
os.close(exc_pipe_write)
fork_exc = os.read(exc_pipe_read, 1024 ** 2)
os.close(exc_pipe_read)
if fork_exc:
fork_exc = fork_exc.decode(DEFAULT_ENCODING)
raise ForkException(fork_exc)
os.close(session_pipe_write)
sid, pgid = os.read(session_pipe_read,
1024).decode(DEFAULT_ENCODING).split(",")
os.close(session_pipe_read)
self.sid = int(sid)
self.pgid = int(pgid)
# used to determine what exception to raise. if our process was
# killed via a timeout counter, we'll raise something different than
# a SIGKILL exception
self.timed_out = False
self.started = time.time()
self.cmd = cmd
# exit code should only be manipulated from within self._wait_lock
# to prevent race conditions
self.exit_code = None
self.stdin = stdin or Queue()
# _pipe_queue is used internally to hand off stdout from one process
# to another. by default, all stdout from a process gets dumped
# into this pipe queue, to be consumed in real time (hence the
# thread-safe Queue), or at a potentially later time
self._pipe_queue = Queue()
# this is used to prevent a race condition when we're waiting for
# a process to end, and the OProc's internal threads are also checking
# for the processes's end
self._wait_lock = threading.Lock()
# these are for aggregating the stdout and stderr. we use a deque
# because we don't want to overflow
self._stdout = deque(maxlen=ca["internal_bufsize"])
self._stderr = deque(maxlen=ca["internal_bufsize"])
if ca["tty_in"] and not stdin_is_tty_or_pipe:
setwinsize(self._stdin_read_fd, ca["tty_size"])
self.log = parent_log.get_child("process", repr(self))
self.log.debug("started process")
# disable echoing, but only if it's a tty that we created ourselves
if ca["tty_in"] and not stdin_is_tty_or_pipe:
attr = termios.tcgetattr(self._stdin_read_fd)
attr[3] &= ~termios.ECHO
termios.tcsetattr(self._stdin_read_fd, termios.TCSANOW, attr)
# we're only going to create a stdin thread iff we have potential
# for stdin to come in. this would be through a stdout callback or
# through an object we've passed in for stdin
potentially_has_input = callable(stdout) or stdin
# this represents the connection from a Queue object (or whatever
# we're using to feed STDIN) to the process's STDIN fd
self._stdin_stream = None
if self._stdin_read_fd and potentially_has_input:
log = self.log.get_child("streamwriter", "stdin")
self._stdin_stream = StreamWriter(log, self._stdin_read_fd,
self.stdin, ca["in_bufsize"], ca["encoding"],
ca["tty_in"])
stdout_pipe = None
if pipe is OProc.STDOUT and not ca["no_pipe"]:
stdout_pipe = self._pipe_queue
# this represents the connection from a process's STDOUT fd to
# wherever it has to go, sometimes a pipe Queue (that we will use
# to pipe data to other processes), and also an internal deque
# that we use to aggregate all the output
save_stdout = not ca["no_out"] and \
(tee_out or stdout is None)
pipe_out = ca["piped"] in ("out", True)
pipe_err = ca["piped"] in ("err",)
# if we're piping directly into another process's filedescriptor, we
# bypass reading from the stdout stream altogether, because we've
# already hooked up this processes's stdout fd to the other
# processes's stdin fd
self._stdout_stream = None
if not pipe_out and self._stdout_read_fd:
if callable(stdout):
stdout = construct_streamreader_callback(self, stdout)
self._stdout_stream = \
StreamReader(
self.log.get_child("streamreader", "stdout"),
self._stdout_read_fd, stdout, self._stdout,
ca["out_bufsize"], ca["encoding"],
ca["decode_errors"], stdout_pipe,
save_data=save_stdout)
elif self._stdout_read_fd:
os.close(self._stdout_read_fd)
# if stderr is going to one place (because it's grouped with stdout,
# or we're dealing with a single tty), then we don't actually need a
# stream reader for stderr, because we've already set one up for
# stdout above
self._stderr_stream = None
if stderr is not OProc.STDOUT and not single_tty and not pipe_err \
and self._stderr_read_fd:
stderr_pipe = None
if pipe is OProc.STDERR and not ca["no_pipe"]:
stderr_pipe = self._pipe_queue
save_stderr = not ca["no_err"] and \
(ca["tee"] in ("err",) or stderr is None)
if callable(stderr):
stderr = construct_streamreader_callback(self, stderr)
self._stderr_stream = StreamReader(Logger("streamreader"),
self._stderr_read_fd, stderr, self._stderr,
ca["err_bufsize"], ca["encoding"], ca["decode_errors"],
stderr_pipe, save_data=save_stderr)
elif self._stderr_read_fd:
os.close(self._stderr_read_fd)
def timeout_fn():
self.timed_out = True
self.signal(ca["timeout_signal"])
self._timeout_event = None
self._timeout_timer = None
if ca["timeout"]:
self._timeout_event = threading.Event()
self._timeout_timer = threading.Timer(ca["timeout"],
self._timeout_event.set)
self._timeout_timer.start()
# this is for cases where we know that the RunningCommand that was
# launched was not .wait()ed on to complete. in those unique cases,
# we allow the thread that processes output to report exceptions in
# that thread. it's important that we only allow reporting of the
# exception, and nothing else (like the additional stuff that
# RunningCommand.wait() does), because we want the exception to be
# re-raised in the future, if we DO call .wait()
handle_exit_code = None
if not self.command._spawned_and_waited and ca["bg_exc"]:
def fn(exit_code):
with process_assign_lock:
return self.command.handle_command_exit_code(exit_code)
handle_exit_code = fn
self._quit_threads = threading.Event()
thread_name = "background thread for pid %d" % self.pid
self._bg_thread_exc_queue = Queue(1)
self._background_thread = _start_daemon_thread(background_thread,
thread_name, self._bg_thread_exc_queue, timeout_fn,
self._timeout_event, handle_exit_code, self.is_alive,
self._quit_threads)
# start the main io threads. stdin thread is not needed if we are
# connecting from another process's stdout pipe
self._input_thread = None
self._input_thread_exc_queue = Queue(1)
if self._stdin_stream:
close_before_term = not needs_ctty
thread_name = "STDIN thread for pid %d" % self.pid
self._input_thread = _start_daemon_thread(input_thread,
thread_name, self._input_thread_exc_queue, self.log,
self._stdin_stream, self.is_alive, self._quit_threads,
close_before_term)
# this event is for cases where the subprocess that we launch
# launches its OWN subprocess and dups the stdout/stderr fds to that
# new subprocess. in that case, stdout and stderr will never EOF,
# so our output_thread will never finish and will hang. this event
# prevents that hanging
self._stop_output_event = threading.Event()
self._output_thread_exc_queue = Queue(1)
thread_name = "STDOUT/ERR thread for pid %d" % self.pid
self._output_thread = _start_daemon_thread(output_thread,
thread_name, self._output_thread_exc_queue, self.log,
self._stdout_stream, self._stderr_stream,
self._timeout_event, self.is_alive, self._quit_threads,
self._stop_output_event)
def __repr__(self):
return "<Process %d %r>" % (self.pid, self.cmd[:500])
# these next 3 properties are primary for tests
@property
def output_thread_exc(self):
exc = None
try:
exc = self._output_thread_exc_queue.get(False)
except Empty:
pass
return exc
@property
def input_thread_exc(self):
exc = None
try:
exc = self._input_thread_exc_queue.get(False)
except Empty:
pass
return exc
@property
def bg_thread_exc(self):
exc = None
try:
exc = self._bg_thread_exc_queue.get(False)
except Empty:
pass
return exc
def change_in_bufsize(self, buf):
self._stdin_stream.stream_bufferer.change_buffering(buf)
def change_out_bufsize(self, buf):
self._stdout_stream.stream_bufferer.change_buffering(buf)
def change_err_bufsize(self, buf):
self._stderr_stream.stream_bufferer.change_buffering(buf)
@property
def stdout(self):
return "".encode(self.call_args["encoding"]).join(self._stdout)
@property
def stderr(self):
return "".encode(self.call_args["encoding"]).join(self._stderr)
def get_pgid(self):
""" return the CURRENT group id of the process. this differs from
        self.pgid in that this reflects the current state of the process, where
self.pgid is the group id at launch """
return os.getpgid(self.pid)
def get_sid(self):
""" return the CURRENT session id of the process. this differs from
        self.sid in that this reflects the current state of the process, where
self.sid is the session id at launch """
return os.getsid(self.pid)
def signal_group(self, sig):
self.log.debug("sending signal %d to group", sig)
os.killpg(self.get_pgid(), sig)
def signal(self, sig):
self.log.debug("sending signal %d", sig)
os.kill(self.pid, sig)
def kill_group(self):
self.log.debug("killing group")
self.signal_group(signal.SIGKILL)
def kill(self):
self.log.debug("killing")
self.signal(signal.SIGKILL)
def terminate(self):
self.log.debug("terminating")
self.signal(signal.SIGTERM)
def is_alive(self):
""" polls if our child process has completed, without blocking. this
method has side-effects, such as setting our exit_code, if we happen to
see our child exit while this is running """
if self.exit_code is not None:
return False, self.exit_code
# what we're doing here essentially is making sure that the main thread
# (or another thread), isn't calling .wait() on the process. because
# .wait() calls os.waitpid(self.pid, 0), we can't do an os.waitpid
# here...because if we did, and the process exited while in this
# thread, the main thread's os.waitpid(self.pid, 0) would raise OSError
# (because the process ended in another thread).
#
# so essentially what we're doing is, using this lock, checking if
# we're calling .wait(), and if we are, let .wait() get the exit code
# and handle the status, otherwise let us do it.
acquired = self._wait_lock.acquire(False)
if not acquired:
if self.exit_code is not None:
return False, self.exit_code
return True, self.exit_code
try:
# WNOHANG is just that...we're calling waitpid without hanging...
# essentially polling the process. the return result is (0, 0) if
# there's no process status, so we check that pid == self.pid below
# in order to determine how to proceed
pid, exit_code = no_interrupt(os.waitpid, self.pid, os.WNOHANG)
if pid == self.pid:
self.exit_code = handle_process_exit_code(exit_code)
self._process_just_ended()
return False, self.exit_code
# no child process
except OSError:
return False, self.exit_code
else:
return True, self.exit_code
finally:
self._wait_lock.release()
def _process_just_ended(self):
if self._timeout_timer:
self._timeout_timer.cancel()
done_callback = self.call_args["done"]
if done_callback:
success = self.exit_code in self.call_args["ok_code"]
done_callback(success, self.exit_code)
# this can only be closed at the end of the process, because it might be
# the CTTY, and closing it prematurely will send a SIGHUP. we also
# don't want to close it if there's a self._stdin_stream, because that
# is in charge of closing it also
if self._stdin_read_fd and not self._stdin_stream:
os.close(self._stdin_read_fd)
def wait(self):
""" waits for the process to complete, handles the exit code """
self.log.debug("acquiring wait lock to wait for completion")
# using the lock in a with-context blocks, which is what we want if
# we're running wait()
with self._wait_lock:
self.log.debug("got wait lock")
witnessed_end = False
if self.exit_code is None:
self.log.debug("exit code not set, waiting on pid")
pid, exit_code = no_interrupt(os.waitpid, self.pid, 0) # blocks
self.exit_code = handle_process_exit_code(exit_code)
witnessed_end = True
else:
self.log.debug("exit code already set (%d), no need to wait",
self.exit_code)
self._quit_threads.set()
# we may not have a thread for stdin, if the pipe has been connected
# via _piped="direct"
if self._input_thread:
self._input_thread.join()
# wait, then signal to our output thread that the child process is
# done, and we should have finished reading all the stdout/stderr
# data that we can by now
timer = threading.Timer(2.0, self._stop_output_event.set)
timer.start()
# wait for our stdout and stderr streamreaders to finish reading and
# aggregating the process output
self._output_thread.join()
timer.cancel()
self._background_thread.join()
if witnessed_end:
self._process_just_ended()
return self.exit_code
def input_thread(log, stdin, is_alive, quit, close_before_term):
""" this is run in a separate thread. it writes into our process's
    stdin (a streamwriter) and waits for the process to end AND for everything
    that can be written to be written """
done = False
closed = False
alive = True
poller = Poller()
poller.register_write(stdin)
while poller and alive:
changed = poller.poll(1)
for fd, events in changed:
if events & (POLLER_EVENT_WRITE | POLLER_EVENT_HUP):
log.debug("%r ready for more input", stdin)
done = stdin.write()
if done:
poller.unregister(stdin)
if close_before_term:
stdin.close()
closed = True
alive, _ = is_alive()
while alive:
quit.wait(1)
alive, _ = is_alive()
if not closed:
stdin.close()
def event_wait(ev, timeout=None):
triggered = ev.wait(timeout)
if IS_PY26:
triggered = ev.is_set()
return triggered
def background_thread(timeout_fn, timeout_event, handle_exit_code, is_alive,
quit):
""" handles the timeout logic """
# if there's a timeout event, loop
if timeout_event:
while not quit.is_set():
timed_out = event_wait(timeout_event, 0.1)
if timed_out:
timeout_fn()
break
# handle_exit_code will be a function ONLY if our command was NOT waited on
# as part of its spawning. in other words, it's probably a background
# command
#
# this reports the exit code exception in our thread. it's purely for the
# user's awareness, and cannot be caught or used in any way, so it's ok to
# suppress this during the tests
if handle_exit_code and not RUNNING_TESTS: # pragma: no cover
alive = True
while alive:
quit.wait(1)
alive, exit_code = is_alive()
handle_exit_code(exit_code)
def output_thread(log, stdout, stderr, timeout_event, is_alive, quit,
stop_output_event):
""" this function is run in a separate thread. it reads from the
process's stdout stream (a streamreader), and waits for it to claim that
its done """
poller = Poller()
if stdout is not None:
poller.register_read(stdout)
if stderr is not None:
poller.register_read(stderr)
# this is our poll loop for polling stdout or stderr that is ready to
# be read and processed. if one of those streamreaders indicate that it
# is done altogether being read from, we remove it from our list of
# things to poll. when no more things are left to poll, we leave this
# loop and clean up
while poller:
changed = no_interrupt(poller.poll, 0.1)
for f, events in changed:
if events & (POLLER_EVENT_READ | POLLER_EVENT_HUP):
log.debug("%r ready to be read from", f)
done = f.read()
if done:
poller.unregister(f)
elif events & POLLER_EVENT_ERROR:
# for some reason, we have to just ignore streams that have had an
# error. i'm not exactly sure why, but don't remove this until we
# figure that out, and create a test for it
pass
if timeout_event and timeout_event.is_set():
break
if stop_output_event.is_set():
break
# we need to wait until the process is guaranteed dead before closing our
# outputs, otherwise SIGPIPE
alive, _ = is_alive()
while alive:
quit.wait(1)
alive, _ = is_alive()
if stdout:
stdout.close()
if stderr:
stderr.close()
class DoneReadingForever(Exception): pass
class NotYetReadyToRead(Exception): pass
def determine_how_to_read_input(input_obj):
""" given some kind of input object, return a function that knows how to
read chunks of that input object.
each reader function should return a chunk and raise a DoneReadingForever
exception, or return None, when there's no more data to read
NOTE: the function returned does not need to care much about the requested
buffering type (eg, unbuffered vs newline-buffered). the StreamBufferer
will take care of that. these functions just need to return a
reasonably-sized chunk of data. """
get_chunk = None
if isinstance(input_obj, Queue):
log_msg = "queue"
get_chunk = get_queue_chunk_reader(input_obj)
elif callable(input_obj):
log_msg = "callable"
get_chunk = get_callable_chunk_reader(input_obj)
# also handles stringio
elif hasattr(input_obj, "read"):
log_msg = "file descriptor"
get_chunk = get_file_chunk_reader(input_obj)
elif isinstance(input_obj, basestring):
log_msg = "string"
get_chunk = get_iter_string_reader(input_obj)
elif isinstance(input_obj, bytes):
log_msg = "bytes"
get_chunk = get_iter_string_reader(input_obj)
elif isinstance(input_obj, GeneratorType):
log_msg = "generator"
get_chunk = get_iter_chunk_reader(iter(input_obj))
else:
try:
it = iter(input_obj)
except TypeError:
raise Exception("unknown input object")
else:
log_msg = "general iterable"
get_chunk = get_iter_chunk_reader(it)
return get_chunk, log_msg
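# Illustrative sketch (not part of the original module): how a caller drives
# the reader protocol returned above. get_chunk raises NotYetReadyToRead when
# nothing is available yet and DoneReadingForever when the input is exhausted.
# The helper name below is hypothetical.
def _drain_input_example(input_obj):  # pragma: no cover
    get_chunk, _ = determine_how_to_read_input(input_obj)
    chunks = []
    while True:
        try:
            chunk = get_chunk()
        except NotYetReadyToRead:
            continue  # nothing available yet; a real caller polls again later
        except DoneReadingForever:
            break  # input exhausted, stop asking
        if chunk is not None:
            chunks.append(chunk)
    return chunks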
def get_queue_chunk_reader(stdin):
def fn():
try:
chunk = stdin.get(True, 0.1)
except Empty:
raise NotYetReadyToRead
if chunk is None:
raise DoneReadingForever
return chunk
return fn
def get_callable_chunk_reader(stdin):
def fn():
try:
data = stdin()
except DoneReadingForever:
raise
if not data:
raise DoneReadingForever
return data
return fn
def get_iter_string_reader(stdin):
""" return an iterator that returns a chunk of a string every time it is
called. notice that even though bufsize_type might be line buffered, we're
not doing any line buffering here. that's because our StreamBufferer
handles all buffering. we just need to return a reasonable-sized chunk. """
bufsize = 1024
iter_str = (stdin[i:i + bufsize] for i in range(0, len(stdin), bufsize))
return get_iter_chunk_reader(iter_str)
def get_iter_chunk_reader(stdin):
def fn():
try:
if IS_PY3:
chunk = stdin.__next__()
else:
chunk = stdin.next()
return chunk
except StopIteration:
raise DoneReadingForever
return fn
def get_file_chunk_reader(stdin):
bufsize = 1024
def fn():
# python 3.* includes a fileno on stringios, but accessing it throws an
# exception. that exception is how we'll know we can't do a poll on
# stdin
is_real_file = True
if IS_PY3:
try:
stdin.fileno()
except UnsupportedOperation:
is_real_file = False
# this poll is for files that may not yet be ready to read. we test
# for fileno because StringIO/BytesIO cannot be used in a poll
if is_real_file and hasattr(stdin, "fileno"):
poller = Poller()
poller.register_read(stdin)
changed = poller.poll(0.1)
ready = False
for fd, events in changed:
if events & (POLLER_EVENT_READ | POLLER_EVENT_HUP):
ready = True
if not ready:
raise NotYetReadyToRead
chunk = stdin.read(bufsize)
if not chunk:
raise DoneReadingForever
else:
return chunk
return fn
def bufsize_type_to_bufsize(bf_type):
""" for a given bufsize type, return the actual bufsize we will read.
notice that although 1 means "newline-buffered", we're reading a chunk size
of 1024. this is because we have to read something. we let a
StreamBufferer instance handle splitting our chunk on newlines """
# newlines
if bf_type == 1:
bufsize = 1024
# unbuffered
elif bf_type == 0:
bufsize = 1
# or buffered by specific amount
else:
bufsize = bf_type
return bufsize
class StreamWriter(object):
""" StreamWriter reads from some input (the stdin param) and writes to a fd
(the stream param). the stdin may be a Queue, a callable, something with
the "read" method, a string, or an iterable """
def __init__(self, log, stream, stdin, bufsize_type, encoding, tty_in):
self.stream = stream
self.stdin = stdin
self.log = log
self.encoding = encoding
self.tty_in = tty_in
self.stream_bufferer = StreamBufferer(bufsize_type, self.encoding)
self.get_chunk, log_msg = determine_how_to_read_input(stdin)
self.log.debug("parsed stdin as a %s", log_msg)
def fileno(self):
""" defining this allows us to do poll on an instance of this
class """
return self.stream
def write(self):
""" attempt to get a chunk of data to write to our child process's
        stdin, then write it. the return value answers the question "are we
done writing forever?" """
# get_chunk may sometimes return bytes, and sometimes return strings
# because of the nature of the different types of STDIN objects we
# support
try:
chunk = self.get_chunk()
if chunk is None:
raise DoneReadingForever
except DoneReadingForever:
self.log.debug("done reading")
if self.tty_in:
# EOF time
try:
char = termios.tcgetattr(self.stream)[6][termios.VEOF]
except:
char = chr(4).encode()
                # normally, one EOF should be enough to signal to a program
# that is read()ing, to return 0 and be on your way. however,
# some programs are misbehaved, like python3.1 and python3.2.
# they don't stop reading sometimes after read() returns 0.
# this can be demonstrated with the following program:
#
# import sys
# sys.stdout.write(sys.stdin.read())
#
# then type 'a' followed by ctrl-d 3 times. in python
# 2.6,2.7,3.3,3.4,3.5,3.6, it only takes 2 ctrl-d to terminate.
# however, in python 3.1 and 3.2, it takes all 3.
#
# so here we send an extra EOF along, just in case. i don't
# believe it can hurt anything
os.write(self.stream, char)
os.write(self.stream, char)
return True
except NotYetReadyToRead:
self.log.debug("received no data")
return False
# if we're not bytes, make us bytes
if IS_PY3 and hasattr(chunk, "encode"):
chunk = chunk.encode(self.encoding)
for proc_chunk in self.stream_bufferer.process(chunk):
self.log.debug("got chunk size %d: %r", len(proc_chunk),
proc_chunk[:30])
self.log.debug("writing chunk to process")
try:
os.write(self.stream, proc_chunk)
except OSError:
self.log.debug("OSError writing stdin chunk")
return True
def close(self):
self.log.debug("closing, but flushing first")
chunk = self.stream_bufferer.flush()
self.log.debug("got chunk size %d to flush: %r", len(chunk), chunk[:30])
try:
if chunk:
os.write(self.stream, chunk)
except OSError:
pass
os.close(self.stream)
def determine_how_to_feed_output(handler, encoding, decode_errors):
if callable(handler):
process, finish = get_callback_chunk_consumer(handler, encoding,
decode_errors)
# in py3, this is used for bytes
elif isinstance(handler, (cStringIO, iocStringIO)):
process, finish = get_cstringio_chunk_consumer(handler)
# in py3, this is used for unicode
elif isinstance(handler, (StringIO, ioStringIO)):
process, finish = get_stringio_chunk_consumer(handler, encoding,
decode_errors)
elif hasattr(handler, "write"):
process, finish = get_file_chunk_consumer(handler)
else:
try:
handler = int(handler)
except (ValueError, TypeError):
process = lambda chunk: False
finish = lambda: None
else:
process, finish = get_fd_chunk_consumer(handler)
return process, finish
def get_fd_chunk_consumer(handler):
handler = fdopen(handler, "w", closefd=False)
return get_file_chunk_consumer(handler)
def get_file_chunk_consumer(handler):
encode = lambda chunk: chunk
if getattr(handler, "encoding", None):
encode = lambda chunk: chunk.decode(handler.encoding)
flush = lambda: None
if hasattr(handler, "flush"):
flush = handler.flush
def process(chunk):
handler.write(encode(chunk))
# we should flush on an fd. chunk is already the correctly-buffered
# size, so we don't need the fd buffering as well
flush()
return False
def finish():
flush()
return process, finish
def get_callback_chunk_consumer(handler, encoding, decode_errors):
def process(chunk):
# try to use the encoding first, if that doesn't work, send
# the bytes, because it might be binary
try:
chunk = chunk.decode(encoding, decode_errors)
except UnicodeDecodeError:
pass
return handler(chunk)
def finish():
pass
return process, finish
def get_cstringio_chunk_consumer(handler):
def process(chunk):
handler.write(chunk)
return False
def finish():
pass
return process, finish
def get_stringio_chunk_consumer(handler, encoding, decode_errors):
def process(chunk):
handler.write(chunk.decode(encoding, decode_errors))
return False
def finish():
pass
return process, finish
class StreamReader(object):
""" reads from some output (the stream) and sends what it just read to the
handler. """
def __init__(self, log, stream, handler, buffer, bufsize_type, encoding,
decode_errors, pipe_queue=None, save_data=True):
self.stream = stream
self.buffer = buffer
self.save_data = save_data
self.encoding = encoding
self.decode_errors = decode_errors
self.pipe_queue = None
if pipe_queue:
self.pipe_queue = weakref.ref(pipe_queue)
self.log = log
self.stream_bufferer = StreamBufferer(bufsize_type, self.encoding,
self.decode_errors)
self.bufsize = bufsize_type_to_bufsize(bufsize_type)
self.process_chunk, self.finish_chunk_processor = \
determine_how_to_feed_output(handler, encoding, decode_errors)
self.should_quit = False
def fileno(self):
""" defining this allows us to do poll on an instance of this
class """
return self.stream
def close(self):
chunk = self.stream_bufferer.flush()
self.log.debug("got chunk size %d to flush: %r", len(chunk), chunk[:30])
if chunk:
self.write_chunk(chunk)
self.finish_chunk_processor()
if self.pipe_queue and self.save_data:
self.pipe_queue().put(None)
os.close(self.stream)
def write_chunk(self, chunk):
# in PY3, the chunk coming in will be bytes, so keep that in mind
if not self.should_quit:
self.should_quit = self.process_chunk(chunk)
if self.save_data:
self.buffer.append(chunk)
if self.pipe_queue:
self.log.debug("putting chunk onto pipe: %r", chunk[:30])
self.pipe_queue().put(chunk)
def read(self):
# if we're PY3, we're reading bytes, otherwise we're reading
# str
try:
chunk = no_interrupt(os.read, self.stream, self.bufsize)
except OSError as e:
self.log.debug("got errno %d, done reading", e.errno)
return True
if not chunk:
self.log.debug("got no chunk, done reading")
return True
self.log.debug("got chunk size %d: %r", len(chunk), chunk[:30])
for chunk in self.stream_bufferer.process(chunk):
self.write_chunk(chunk)
class StreamBufferer(object):
""" this is used for feeding in chunks of stdout/stderr, and breaking it up
into chunks that will actually be put into the internal buffers. for
example, if you have two processes, one being piped to the other, and you
    want that first process to feed lines of data (instead of the chunks
however they come in), OProc will use an instance of this class to chop up
the data and feed it as lines to be sent down the pipe """
def __init__(self, buffer_type, encoding=DEFAULT_ENCODING,
decode_errors="strict"):
# 0 for unbuffered, 1 for line, everything else for that amount
self.type = buffer_type
self.buffer = []
self.n_buffer_count = 0
self.encoding = encoding
self.decode_errors = decode_errors
# this is for if we change buffering types. if we change from line
        # buffered to unbuffered, it's very possible that our self.buffer list
# has data that was being saved up (while we searched for a newline).
# we need to use that up, so we don't lose it
self._use_up_buffer_first = False
# the buffering lock is used because we might change the buffering
# types from a different thread. for example, if we have a stdout
# callback, we might use it to change the way stdin buffers. so we
# lock
self._buffering_lock = threading.RLock()
self.log = Logger("stream_bufferer")
def change_buffering(self, new_type):
# TODO, when we stop supporting 2.6, make this a with context
self.log.debug("acquiring buffering lock for changing buffering")
self._buffering_lock.acquire()
self.log.debug("got buffering lock for changing buffering")
try:
if new_type == 0:
self._use_up_buffer_first = True
self.type = new_type
finally:
self._buffering_lock.release()
self.log.debug("released buffering lock for changing buffering")
def process(self, chunk):
# MAKE SURE THAT THE INPUT IS PY3 BYTES
# THE OUTPUT IS ALWAYS PY3 BYTES
# TODO, when we stop supporting 2.6, make this a with context
self.log.debug("acquiring buffering lock to process chunk (buffering: %d)", self.type)
self._buffering_lock.acquire()
self.log.debug("got buffering lock to process chunk (buffering: %d)", self.type)
try:
# unbuffered
if self.type == 0:
if self._use_up_buffer_first:
self._use_up_buffer_first = False
to_write = self.buffer
self.buffer = []
to_write.append(chunk)
return to_write
return [chunk]
# line buffered
elif self.type == 1:
total_to_write = []
nl = "\n".encode(self.encoding)
while True:
newline = chunk.find(nl)
if newline == -1:
break
chunk_to_write = chunk[:newline + 1]
if self.buffer:
chunk_to_write = b"".join(self.buffer) + chunk_to_write
self.buffer = []
self.n_buffer_count = 0
chunk = chunk[newline + 1:]
total_to_write.append(chunk_to_write)
if chunk:
self.buffer.append(chunk)
self.n_buffer_count += len(chunk)
return total_to_write
# N size buffered
else:
total_to_write = []
while True:
overage = self.n_buffer_count + len(chunk) - self.type
if overage >= 0:
ret = "".encode(self.encoding).join(self.buffer) + chunk
chunk_to_write = ret[:self.type]
chunk = ret[self.type:]
total_to_write.append(chunk_to_write)
self.buffer = []
self.n_buffer_count = 0
else:
self.buffer.append(chunk)
self.n_buffer_count += len(chunk)
break
return total_to_write
finally:
self._buffering_lock.release()
self.log.debug("released buffering lock for processing chunk (buffering: %d)", self.type)
def flush(self):
self.log.debug("acquiring buffering lock for flushing buffer")
self._buffering_lock.acquire()
self.log.debug("got buffering lock for flushing buffer")
try:
ret = "".encode(self.encoding).join(self.buffer)
self.buffer = []
return ret
finally:
self._buffering_lock.release()
self.log.debug("released buffering lock for flushing buffer")
def with_lock(lock):
def wrapped(fn):
fn = contextmanager(fn)
@contextmanager
def wrapped2(*args, **kwargs):
with lock:
with fn(*args, **kwargs):
yield
return wrapped2
return wrapped
@with_lock(PUSHD_LOCK)
def pushd(path):
""" pushd changes the actual working directory for the duration of the
context, unlike the _cwd arg this will work with other built-ins such as
sh.glob correctly """
orig_path = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(orig_path)
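# Illustrative sketch (not part of the original module): pushd changes the
# real working directory for the duration of the block, so relative paths and
# sh.glob resolve against the pushed path. The helper name is hypothetical.
def _pushd_example():  # pragma: no cover
    import tempfile
    target = tempfile.gettempdir()
    before = os.getcwd()
    with pushd(target):
        inside = os.getcwd()  # now inside `target` (or its resolved real path)
    after = os.getcwd()       # restored to `before` on exit
    return before, inside, after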
@contextmanager
def args(**kwargs):
""" allows us to temporarily override all the special keyword parameters in
a with context """
kwargs_str = ",".join(["%s=%r" % (k, v) for k, v in kwargs.items()])
raise DeprecationWarning("""
sh.args() has been deprecated because it was never thread safe. use the
following instead:
sh2 = sh({kwargs})
sh2.your_command()
or
sh2 = sh({kwargs})
from sh2 import your_command
your_command()
""".format(kwargs=kwargs_str))
class Environment(dict):
""" this allows lookups to names that aren't found in the global scope to be
searched for as a program name. for example, if "ls" isn't found in this
module's scope, we consider it a system program and try to find it.
we use a dict instead of just a regular object as the base class because the
exec() statement used in the run_repl requires the "globals" argument to be a
dictionary """
# this is a list of all of the names that the sh module exports that will
# not resolve to functions. we don't want to accidentally shadow real
# commands with functions/imports that we define in sh.py. for example,
# "import time" may override the time system program
whitelist = set([
"Command",
"RunningCommand",
"CommandNotFound",
"DEFAULT_ENCODING",
"DoneReadingForever",
"ErrorReturnCode",
"NotYetReadyToRead",
"SignalException",
"ForkException",
"TimeoutException",
"__project_url__",
"__version__",
"__file__",
"args",
"pushd",
"glob",
"contrib",
])
def __init__(self, globs, baked_args={}):
""" baked_args are defaults for the 'sh' execution context. for
example:
tmp = sh(_out=StringIO())
'out' would end up in here as an entry in the baked_args dict """
self.globs = globs
self.baked_args = baked_args
self.disable_whitelist = False
def __getitem__(self, k):
# if we first import "_disable_whitelist" from sh, we can import
# anything defined in the global scope of sh.py. this is useful for our
# tests
if k == "_disable_whitelist":
self.disable_whitelist = True
return None
# we're trying to import something real (maybe), see if it's in our
# global scope
if k in self.whitelist or self.disable_whitelist:
return self.globs[k]
# somebody tried to be funny and do "from sh import *"
if k == "__all__":
raise RuntimeError("Cannot import * from sh. \
Please import sh or import programs individually.")
# check if we're naming a dynamically generated ReturnCode exception
exc = get_exc_from_name(k)
if exc:
return exc
# https://github.com/ipython/ipython/issues/2577
# https://github.com/amoffat/sh/issues/97#issuecomment-10610629
if k.startswith("__") and k.endswith("__"):
raise AttributeError
# is it a custom builtin?
builtin = getattr(self, "b_" + k, None)
if builtin:
return builtin
# is it a command?
cmd = resolve_command(k, self.baked_args)
if cmd:
return cmd
# how about an environment variable?
        # this check must come after testing if it's a command, because on some
        # systems, there are environment variables that can conflict with
# command names.
# https://github.com/amoffat/sh/issues/238
try:
return os.environ[k]
except KeyError:
pass
# nothing found, raise an exception
raise CommandNotFound(k)
# methods that begin with "b_" are custom builtins and will override any
# program that exists in our path. this is useful for things like
# common shell builtins that people are used to, but which aren't actually
# full-fledged system binaries
def b_cd(self, path=None):
if path:
os.chdir(path)
else:
os.chdir(os.path.expanduser('~'))
def b_which(self, program, paths=None):
return which(program, paths)
class Contrib(ModuleType): # pragma: no cover
@classmethod
def __call__(cls, name):
def wrapper1(fn):
@property
def cmd_getter(self):
cmd = resolve_command(name)
if not cmd:
raise CommandNotFound(name)
new_cmd = fn(cmd)
return new_cmd
setattr(cls, name, cmd_getter)
return fn
return wrapper1
mod_name = __name__ + ".contrib"
contrib = Contrib(mod_name)
sys.modules[mod_name] = contrib
@contrib("git")
def git(orig): # pragma: no cover
""" most git commands play nicer without a TTY """
cmd = orig.bake(_tty_out=False)
return cmd
@contrib("sudo")
def sudo(orig): # pragma: no cover
""" a nicer version of sudo that uses getpass to ask for a password, or
allows the first argument to be a string password """
prompt = "[sudo] password for %s: " % getpass.getuser()
def stdin():
pw = getpass.getpass(prompt=prompt) + "\n"
yield pw
def process(args, kwargs):
password = kwargs.pop("password", None)
if password is None:
pass_getter = stdin()
else:
pass_getter = password.rstrip("\n") + "\n"
kwargs["_in"] = pass_getter
return args, kwargs
cmd = orig.bake("-S", _arg_preprocess=process)
return cmd
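# Illustrative sketch (not part of the original module): how the contrib
# commands above are typically used. Both calls assume git and sudo exist on
# the PATH; the password value is a hypothetical placeholder.
def _contrib_usage_example():  # pragma: no cover
    contrib.git("status")                      # baked with _tty_out=False
    contrib.sudo("whoami", password="s3cret")  # fed to `sudo -S` on stdin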
def run_repl(env): # pragma: no cover
banner = "\n>> sh v{version}\n>> https://github.com/amoffat/sh\n"
print(banner.format(version=__version__))
while True:
try:
line = raw_input("sh> ")
except (ValueError, EOFError):
break
try:
exec(compile(line, "<dummy>", "single"), env, env)
except SystemExit:
break
except:
print(traceback.format_exc())
# cleans up our last line
print("")
# this is a thin wrapper around THIS module (we patch sys.modules[__name__]).
# this is in the case that the user does a "from sh import whatever"
# in other words, they only want to import certain programs, not the whole
# system PATH worth of commands. in this case, we just proxy the
# import lookup to our Environment class
class SelfWrapper(ModuleType):
def __init__(self, self_module, baked_args={}):
# this is super ugly to have to copy attributes like this,
# but it seems to be the only way to make reload() behave
# nicely. if i make these attributes dynamic lookups in
# __getattr__, reload sometimes chokes in weird ways...
for attr in ["__builtins__", "__doc__", "__file__", "__name__", "__package__"]:
setattr(self, attr, getattr(self_module, attr, None))
# python 3.2 (2.7 and 3.3 work fine) breaks on osx (not ubuntu)
# if we set this to None. and 3.3 needs a value for __path__
self.__path__ = []
self.__self_module = self_module
self.__env = Environment(globals(), baked_args=baked_args)
def __getattr__(self, name):
return self.__env[name]
def __call__(self, **kwargs):
""" returns a new SelfWrapper object, where all commands spawned from it
have the baked_args kwargs set on them by default """
baked_args = self.__env.baked_args.copy()
baked_args.update(kwargs)
new_mod = self.__class__(self.__self_module, baked_args)
# inspect the line in the parent frame that calls and assigns the new sh
# variable, and get the name of the new variable we're assigning to.
        # this is very brittle and pretty much a sin. but it works 99% of
        # the time and the tests pass
#
# the reason we need to do this is because we need to remove the old
# cached module from sys.modules. if we don't, it gets re-used, and any
# old baked params get used, which is not what we want
parent = inspect.stack()[1]
code = parent[4][0].strip()
parsed = ast.parse(code)
module_name = parsed.body[0].targets[0].id
if module_name == __name__:
raise RuntimeError("Cannot use the name 'sh' as an execution context")
sys.modules.pop(module_name, None)
return new_mod
def in_importlib(frame):
""" helper for checking if a filename is in importlib guts """
return frame.f_code.co_filename == "<frozen importlib._bootstrap>"
def register_importer():
""" registers our fancy importer that can let us import from a module name,
like:
import sh
tmp = sh()
from tmp import ls
"""
def test(importer):
return importer.__class__.__name__ == ModuleImporterFromVariables.__name__
already_registered = any([True for i in sys.meta_path if test(i)])
if not already_registered:
importer = ModuleImporterFromVariables(
restrict_to=["SelfWrapper"],
)
sys.meta_path.insert(0, importer)
return not already_registered
def fetch_module_from_frame(name, frame):
mod = frame.f_locals.get(name, frame.f_globals.get(name, None))
return mod
class ModuleImporterFromVariables(object):
""" a fancy importer that allows us to import from a variable that was
recently set in either the local or global scope, like this:
sh2 = sh(_timeout=3)
from sh2 import ls
"""
def __init__(self, restrict_to=None):
self.restrict_to = set(restrict_to or set())
def find_module(self, mod_fullname, path=None):
""" mod_fullname doubles as the name of the VARIABLE holding our new sh
context. for example:
derp = sh()
from derp import ls
        here, mod_fullname will be "derp". keep that in mind as we go through
the rest of this function """
parent_frame = inspect.currentframe().f_back
while in_importlib(parent_frame):
parent_frame = parent_frame.f_back
# this line is saying "hey, does mod_fullname exist as a name we've
        # defined previously?" the purpose of this is to ensure that
# mod_fullname is really a thing we've defined. if we haven't defined
# it before, then we "can't" import from it
module = fetch_module_from_frame(mod_fullname, parent_frame)
if not module:
return None
# make sure it's a class we're allowed to import from
if module.__class__.__name__ not in self.restrict_to:
return None
return self
def load_module(self, mod_fullname):
parent_frame = inspect.currentframe().f_back
while in_importlib(parent_frame):
parent_frame = parent_frame.f_back
module = fetch_module_from_frame(mod_fullname, parent_frame)
# we HAVE to include the module in sys.modules, per the import PEP.
        # older versions of python were more lenient about this being set, but
# not in >= python3.3, unfortunately. this requirement necessitates the
# ugly code in SelfWrapper.__call__
sys.modules[mod_fullname] = module
module.__loader__ = self
return module
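# Illustrative sketch (not part of the original module): what the importer
# above enables. A baked execution context is assigned to a variable, and
# commands can then be imported from that variable by name. The names sh2 and
# ls below are hypothetical.
def _execution_context_example():  # pragma: no cover
    import sh
    sh2 = sh(_tty_out=False)  # SelfWrapper.__call__ returns a new module-like object
    from sh2 import ls        # resolved by ModuleImporterFromVariables
    return ls("-la")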
def run_tests(env, locale, args, version, force_select, **extra_env): # pragma: no cover
py_version = "python"
py_version += str(version)
py_bin = which(py_version)
return_code = None
poller = "poll"
if force_select:
poller = "select"
if py_bin:
print("Testing %s, locale %r, poller: %s" % (py_version.capitalize(),
locale, poller))
env["SH_TESTS_USE_SELECT"] = str(int(force_select))
env["LANG"] = locale
for k, v in extra_env.items():
env[k] = str(v)
cmd = [py_bin, "-W", "ignore", os.path.join(THIS_DIR, "test.py")] + args[1:]
launch = lambda: os.spawnve(os.P_WAIT, cmd[0], cmd, env)
return_code = launch()
return return_code
# we're being run as a stand-alone script
if __name__ == "__main__": # pragma: no cover
def parse_args():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-e", "--envs", dest="envs", action="append")
parser.add_option("-l", "--locales", dest="constrain_locales", action="append")
options, args = parser.parse_args()
envs = options.envs or []
constrain_locales = options.constrain_locales or []
return args, envs, constrain_locales
    # these are restrictions on which envs (python versions) and locales to
    # run the tests against. if they're empty lists, it means use all available
args, constrain_versions, constrain_locales = parse_args()
action = None
if args:
action = args[0]
if action in ("test", "travis"):
import test
coverage = None
if test.HAS_UNICODE_LITERAL:
import coverage
env = os.environ.copy()
env["SH_TESTS_RUNNING"] = "1"
if coverage:
test.append_module_path(env, coverage)
# if we're testing locally, run all versions of python on the system
if action == "test":
all_versions = ("2.6", "2.7", "3.1", "3.2", "3.3", "3.4", "3.5", "3.6")
# if we're testing on travis, just use the system's default python,
# since travis will spawn a vm per python version in our .travis.yml
# file
elif action == "travis":
v = sys.version_info
sys_ver = "%d.%d" % (v[0], v[1])
all_versions = (sys_ver,)
all_force_select = [True]
if HAS_POLL:
all_force_select.append(False)
all_locales = ("en_US.UTF-8", "C")
i = 0
for locale in all_locales:
if constrain_locales and locale not in constrain_locales:
continue
for version in all_versions:
if constrain_versions and version not in constrain_versions:
continue
for force_select in all_force_select:
env_copy = env.copy()
exit_code = run_tests(env_copy, locale, args, version,
force_select, SH_TEST_RUN_IDX=i)
if exit_code is None:
print("Couldn't find %s, skipping" % version)
elif exit_code != 0:
print("Failed for %s, %s" % (version, locale))
exit(1)
i += 1
ran_versions = ",".join(all_versions)
print("Tested Python versions: %s" % ran_versions)
else:
env = Environment(globals())
run_repl(env)
# we're being imported from somewhere
else:
self = sys.modules[__name__]
sys.modules[__name__] = SelfWrapper(self)
register_importer()
|
__init__.py
|
# Author: Jacob Tsekrekos
# Date: Jun 1, 2018
# File: DeviceInput.__init__.py
# Description: Init for the DeviceInput module
from pynput import keyboard as _keyboard
from pynput import mouse as _mouse
from .Gamepad import get_gamepad as _get_gamepad
from .Gamepad import check_gamepad as _check_gamepad
from .Gamepad import UnpluggedError as _UnpluggedError
from .IndexCodes import ButtonCode, KeyCode, XCode
from threading import Thread as _Thread
from ..utils.logger import Logger
# Gamepad is a stripped down inputs.py (mouse and keyboard handlers were not working)
__logger = Logger(__name__, Logger.ERROR, "input.log")
# todo fix 2d checking (ACCESS)
class UpdateChecker:
def __init__(self):
self.__vals = {}
self.__updated = set()
def __setitem__(self, key, value):
self.__vals[key] = value
self.__updated.add(key)
def __getitem__(self, item):
if self.__updated.__contains__(item):
self.__updated.remove(item)
return self.__vals.get(item, None)
def __str__(self):
return str(self.__vals)
def keys(self):
return [key for key in self.__vals.keys()]
@property
def update_list(self):
return list(self.__updated)
@property
def has_updated(self):
return len(self.__updated) > 0
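# Illustrative sketch (not part of this module's original code): UpdateChecker
# remembers which indices changed since they were last read, and reading an
# index clears its "updated" flag. The helper name below is hypothetical.
def _update_checker_example():
    state = UpdateChecker()
    state["W"] = True
    assert state.has_updated and state.update_list == ["W"]
    _ = state["W"]  # reading "W" clears its updated flag
    assert not state.has_updated
    return state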
keys = UpdateChecker()
mouse = UpdateChecker()
gamepad = UpdateChecker()
def keyboard_handler(callback=None):
"""
:returns an input thread
Call keyboard_handler.start() in order to start listening
"""
if callback is None:
def callback():
pass
# key handler functions
def on_press(key):
keys[KeyCode.code_of(key)] = True
callback()
def on_release(key):
keys[KeyCode.code_of(key)] = False
callback()
def get_input():
with _keyboard.Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
return _Thread(target=get_input, name="Keyboard-Thread", daemon=True)
def mouse_handler(callback=None):
"""
:returns an input thread
Call mouse_handler.start() in order to start listening
"""
if callback is None:
def callback():
pass
def on_move(x, y):
mouse["pos"] = (x, y)
callback()
def on_click(x, y, button, pressed):
mouse[ButtonCode.code_of(button)] = pressed
callback()
def on_scroll(x, y, dx, dy):
mouse["HScroll"] = dx
mouse["VScroll"] = dy
callback()
# Collect events until released
def get_input():
with _mouse.Listener(on_move=on_move, on_click=on_click, on_scroll=on_scroll) as listener:
listener.join()
return _Thread(target=get_input, name="Mouse-Thread", daemon=True)
def gamepad_handler(callback=None):
"""
:returns an input thread
    Call gamepad_handler.start() in order to start listening
*NOTE* IF THERE IS NO CONTROLLER FOUND, the thread will exit
"""
if callback is None:
def callback():
pass
gamepad[XCode.DPAD0] = [0, 0]
gamepad[XCode.DPAD1] = [0, 0]
gamepad[XCode.DPAD2] = [0, 0]
gamepad[XCode.DPAD3] = [0, 0]
# STICKS ARE BUGGED!!
gamepad[XCode.LSTICK] = [0, 0]
gamepad[XCode.RSTICK] = [0, 0]
e = False
if not _check_gamepad():
e = True
__logger.error("No gamepad found, thread exiting.")
def get_input():
if e:
exit(-1)
while True:
events = _get_gamepad()
for event in events:
index = XCode.code_of(event.code)
if event.ev_type == "Sync":
continue
if event.code[-1] == "X":
gamepad[index][0] = event.state
elif event.code[-1] == "Y":
gamepad[index][1] = event.state
else:
gamepad[index] = event.state
callback()
return _Thread(target=get_input, name="Gamepad-Thread", daemon=True)
|
multithreaded.py
|
# multithreaded.py
import threading
import time
from queue import Queue
import requests
def make_request(url):
"""Makes a web request, prints the thread name, URL, and
response code.
"""
resp = requests.get(url)
with print_lock:
print("Thread name: {}".format(threading.current_thread().name))
print("Url: {}".format(url))
print("Response code: {}\n".format(resp.status_code))
def manage_queue():
"""Manages the url_queue and calls the make request function"""
while True:
# Stores the URL and removes it from the queue so no
# other threads will use it.
current_url = url_queue.get()
# Calls the make_request function
make_request(current_url)
# Tells the queue that the processing on the task is complete.
url_queue.task_done()
if __name__ == '__main__':
# Set the number of threads.
number_of_threads = 5
    # Needed to safely print in multi-threaded programs.
# https://stackoverflow.com/questions/40356200/python-printing-in-multiple-threads
print_lock = threading.Lock()
# Initializes the queue that all threads will pull from.
url_queue = Queue()
# The list of URLs that will go into the queue.
urls = ["https://duckduckgo.com/"] * 5
# Start the threads.
for i in range(number_of_threads):
# Send the threads to the function that manages the queue.
t = threading.Thread(target=manage_queue)
# Makes the thread a daemon so it exits when the program finishes.
t.daemon = True
t.start()
start = time.time()
# Puts the URLs in the queue
for current_url in urls:
url_queue.put(current_url)
# Wait until all threads have finished before continuing the program.
url_queue.join()
print("Execution time = {0:.5f}".format(time.time() - start))
|
train.py
|
#!/usr/bin/env python
"""Train models."""
import os
import signal
import torch
import onmt.opts as opts
import onmt.utils.distributed
from onmt.utils.logging import logger
from onmt.train_single import main as single_main
from onmt.utils.parse import ArgumentParser
def main(opt):
ArgumentParser.validate_train_opts(opt)
ArgumentParser.update_model_opts(opt)
ArgumentParser.validate_model_opts(opt)
nb_gpu = len(opt.gpu_ranks)
if opt.world_size > 1:
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for device_id in range(nb_gpu):
procs.append(mp.Process(target=run, args=(
opt, device_id, error_queue, ), daemon=True))
procs[device_id].start()
logger.info(" Starting process pid: %d " % procs[device_id].pid)
error_handler.add_child(procs[device_id].pid)
for p in procs:
p.join()
elif nb_gpu == 1: # case 1 GPU only
single_main(opt, 0)
else: # case only CPU
single_main(opt, -1)
def run(opt, device_id, error_queue):
""" run process """
try:
gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)
if gpu_rank != opt.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
single_main(opt, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
if __name__ == "__main__":
parser = ArgumentParser(description='train.py')
opts.config_opts(parser)
opts.model_opts(parser)
opts.train_opts(parser)
opt = parser.parse_args()
main(opt)
|
run-tests.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import logging
from argparse import ArgumentParser
import os
import re
import shutil
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
import uuid
if sys.version < '3':
import Queue
else:
import queue as Queue
from multiprocessing import Manager
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../dev/"))
from sparktestsupport import SPARK_HOME # noqa (suppress pep8 warnings)
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
from sparktestsupport.modules import all_modules, pyspark_sql # noqa
python_modules = dict((m.name, m) for m in all_modules if m.python_test_goals if m.name != 'root')
def print_red(text):
print('\033[31m' + text + '\033[0m')
SKIPPED_TESTS = Manager().dict()
LOG_FILE = os.path.join(SPARK_HOME, "python/unit-tests.log")
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
# Find out where the assembly jars are located.
# TODO: revisit for Scala 2.13
for scala in ["2.12"]:
build_dir = os.path.join(SPARK_HOME, "assembly", "target", "scala-" + scala)
if os.path.isdir(build_dir):
SPARK_DIST_CLASSPATH = os.path.join(build_dir, "jars", "*")
break
else:
raise Exception("Cannot find assembly build directory, please build Spark first.")
def run_individual_python_test(target_dir, test_name, pyspark_python):
env = dict(os.environ)
env.update({
'SPARK_DIST_CLASSPATH': SPARK_DIST_CLASSPATH,
'SPARK_TESTING': '1',
'SPARK_PREPEND_CLASSES': '1',
'PYSPARK_PYTHON': which(pyspark_python),
'PYSPARK_DRIVER_PYTHON': which(pyspark_python)
})
# Create a unique temp directory under 'target/' for each run. The TMPDIR variable is
# recognized by the tempfile module to override the default system temp directory.
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
while os.path.isdir(tmp_dir):
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
os.mkdir(tmp_dir)
env["TMPDIR"] = tmp_dir
# Also override the JVM's temp directory by setting driver and executor options.
spark_args = [
"--conf", "spark.driver.extraJavaOptions=-Djava.io.tmpdir={0}".format(tmp_dir),
"--conf", "spark.executor.extraJavaOptions=-Djava.io.tmpdir={0}".format(tmp_dir),
"pyspark-shell"
]
env["PYSPARK_SUBMIT_ARGS"] = " ".join(spark_args)
LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
start_time = time.time()
try:
per_test_output = tempfile.TemporaryFile()
retcode = subprocess.Popen(
[os.path.join(SPARK_HOME, "bin/pyspark")] + test_name.split(),
stderr=per_test_output, stdout=per_test_output, env=env).wait()
shutil.rmtree(tmp_dir, ignore_errors=True)
except:
LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(1)
duration = time.time() - start_time
# Exit on the first failure.
if retcode != 0:
try:
with FAILURE_REPORTING_LOCK:
with open(LOG_FILE, 'ab') as log_file:
per_test_output.seek(0)
log_file.writelines(per_test_output)
per_test_output.seek(0)
for line in per_test_output:
decoded_line = line.decode("utf-8", "replace")
if not re.match('[0-9]+', decoded_line):
print(decoded_line, end='')
per_test_output.close()
except:
LOGGER.exception("Got an exception while trying to print failed test output")
finally:
print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
else:
skipped_counts = 0
try:
per_test_output.seek(0)
            # This expects skipped test output from unittest when the verbosity
            # level is 2 (or the --verbose option is enabled).
decoded_lines = map(lambda line: line.decode("utf-8", "replace"), iter(per_test_output))
skipped_tests = list(filter(
lambda line: re.search(r'test_.* \(pyspark\..*\) ... (skip|SKIP)', line),
decoded_lines))
skipped_counts = len(skipped_tests)
if skipped_counts > 0:
key = (pyspark_python, test_name)
SKIPPED_TESTS[key] = skipped_tests
per_test_output.close()
except:
import traceback
print_red("\nGot an exception while trying to store "
"skipped test output:\n%s" % traceback.format_exc())
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
if skipped_counts != 0:
LOGGER.info(
"Finished test(%s): %s (%is) ... %s tests were skipped", pyspark_python, test_name,
duration, skipped_counts)
else:
LOGGER.info(
"Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
python_execs = [x for x in ["python2.7", "python3.6", "pypy"] if which(x)]
if "python2.7" not in python_execs:
LOGGER.warning("Not testing against `python2.7` because it could not be found; falling"
" back to `python` instead")
python_execs.insert(0, "python")
return python_execs
def parse_opts():
parser = ArgumentParser(
prog="run-tests"
)
parser.add_argument(
"--python-executables", type=str, default=','.join(get_default_python_executables()),
help="A comma-separated list of Python executables to test against (default: %(default)s)"
)
parser.add_argument(
"--modules", type=str,
default=",".join(sorted(python_modules.keys())),
help="A comma-separated list of Python modules to test (default: %(default)s)"
)
parser.add_argument(
"-p", "--parallelism", type=int, default=4,
help="The number of suites to test in parallel (default %(default)d)"
)
parser.add_argument(
"--verbose", action="store_true",
help="Enable additional debug logging"
)
group = parser.add_argument_group("Developer Options")
group.add_argument(
"--testnames", type=str,
default=None,
help=(
"A comma-separated list of specific modules, classes and functions of doctest "
"or unittest to test. "
"For example, 'pyspark.sql.foo' to run the module as unittests or doctests, "
"'pyspark.sql.tests FooTests' to run the specific class of unittests, "
"'pyspark.sql.tests FooTests.test_foo' to run the specific unittest in the class. "
"'--modules' option is ignored if they are given.")
)
args, unknown = parser.parse_known_args()
if unknown:
parser.error("Unsupported arguments: %s" % ' '.join(unknown))
if args.parallelism < 1:
parser.error("Parallelism cannot be less than 1")
return args
def _check_coverage(python_exec):
    # Make sure coverage is installed.
try:
subprocess_check_output(
[python_exec, "-c", "import coverage"],
stderr=open(os.devnull, 'w'))
except:
print_red("Coverage is not installed in Python executable '%s' "
"but 'COVERAGE_PROCESS_START' environment variable is set, "
"exiting." % python_exec)
sys.exit(-1)
def main():
opts = parse_opts()
if opts.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
should_test_modules = opts.testnames is None
logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
if os.path.exists(LOG_FILE):
os.remove(LOG_FILE)
python_execs = opts.python_executables.split(',')
LOGGER.info("Will test against the following Python executables: %s", python_execs)
if should_test_modules:
modules_to_test = []
for module_name in opts.modules.split(','):
if module_name in python_modules:
modules_to_test.append(python_modules[module_name])
else:
print("Error: unrecognized module '%s'. Supported modules: %s" %
(module_name, ", ".join(python_modules)))
sys.exit(-1)
LOGGER.info("Will test the following Python modules: %s", [x.name for x in modules_to_test])
else:
testnames_to_test = opts.testnames.split(',')
LOGGER.info("Will test the following Python tests: %s", testnames_to_test)
task_queue = Queue.PriorityQueue()
for python_exec in python_execs:
# Check if the python executable has coverage installed when 'COVERAGE_PROCESS_START'
# environmental variable is set.
if "COVERAGE_PROCESS_START" in os.environ:
_check_coverage(python_exec)
python_implementation = subprocess_check_output(
[python_exec, "-c", "import platform; print(platform.python_implementation())"],
universal_newlines=True).strip()
LOGGER.debug("%s python_implementation is %s", python_exec, python_implementation)
LOGGER.debug("%s version is: %s", python_exec, subprocess_check_output(
[python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
if should_test_modules:
for module in modules_to_test:
if python_implementation not in module.blacklisted_python_implementations:
for test_goal in module.python_test_goals:
heavy_tests = ['pyspark.streaming.tests', 'pyspark.mllib.tests',
'pyspark.tests', 'pyspark.sql.tests', 'pyspark.ml.tests']
if any(map(lambda prefix: test_goal.startswith(prefix), heavy_tests)):
priority = 0
else:
priority = 100
task_queue.put((priority, (python_exec, test_goal)))
else:
for test_goal in testnames_to_test:
task_queue.put((0, (python_exec, test_goal)))
# Create the target directory before starting tasks to avoid races.
target_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'target'))
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
def process_queue(task_queue):
while True:
try:
(priority, (python_exec, test_goal)) = task_queue.get_nowait()
except Queue.Empty:
break
try:
run_individual_python_test(target_dir, test_goal, python_exec)
finally:
task_queue.task_done()
start_time = time.time()
for _ in range(opts.parallelism):
worker = Thread(target=process_queue, args=(task_queue,))
worker.daemon = True
worker.start()
try:
task_queue.join()
except (KeyboardInterrupt, SystemExit):
print_red("Exiting due to interrupt")
sys.exit(-1)
total_duration = time.time() - start_time
LOGGER.info("Tests passed in %i seconds", total_duration)
for key, lines in sorted(SKIPPED_TESTS.items()):
pyspark_python, test_name = key
LOGGER.info("\nSkipped tests in %s with %s:" % (test_name, pyspark_python))
for line in lines:
LOGGER.info(" %s" % line.rstrip())
if __name__ == "__main__":
main()
|
me212bot_node.py
|
#!/usr/bin/python
# 2.12 Lab 2 me212bot_node: ROS driver running on the pc side to read and send messages to Arduino
# Peter Yu Sept 2016
import rospy
import threading
import serial
import tf.transformations as tfm
from geometry_msgs.msg import Pose, Quaternion
import helper
from me212bot.msg import WheelCmdVel, Odometry
serialComm = serial.Serial('/dev/ttyACM0', 115200, timeout = 5)
## main function (Need to modify)
def main():
rospy.init_node('me212bot', anonymous=True)
odometry_thread = threading.Thread(target = read_odometry_loop)
odometry_thread.start()
## 1. Initialize a subscriber
cmdvel_sub = rospy.Subscriber('/cmdvel', WheelCmdVel, cmdvel_callback)
rospy.spin()
## msg handling function (Need to modify)
def cmdvel_callback(msg):
    ## 2. Send msg.desiredWV_R and msg.desiredWV_L to Arduino.
serialComm.write('%f,%f\n' % (msg.desiredWV_R, msg.desiredWV_L))
return
# read_odometry_loop() is for reading odometry from Arduino and publish to rostopic. (No need to modify)
def read_odometry_loop():
prevtime = rospy.Time.now()
velcmd_pub = rospy.Publisher("/odometry", Odometry, queue_size = 1)
while not rospy.is_shutdown():
# get a line of string that represent current odometry from serial
serialData = serialComm.readline()
        # split the string, e.g. "0.1,0.2,0.1", at commas
splitData = serialData.split(',')
# parse the 3 split strings into 3 floats
try:
x = float(splitData[0])
y = float(splitData[1])
theta = float(splitData[2])
hz = 1.0 / (rospy.Time.now().to_sec() - prevtime.to_sec())
prevtime = rospy.Time.now()
print 'x=', x, ' y=', y, ' theta =', theta, ' hz =', hz
# publish odometry as Pose msg
odom = Odometry()
odom.dthetar = x
odom.dthetal = y
# qtuple = tfm.quaternion_from_euler(0, 0, theta)
# odom.orientation = Quaternion(qtuple[0], qtuple[1], qtuple[2], qtuple[3])
velcmd_pub.publish(odom)
except:
# print out msg if there is an error parsing a serial msg
print 'Cannot parse', splitData
if __name__=='__main__':
main()
|
test_device.py
|
# SPDX-License-Identifier: MIT
import threading
import pytest
import hidabc
class DummyInterface():
def __init__(self):
self._data = []
@property
def name(self): # pragma: no cover
return 'dummy device'
@property
def vid(self): # pragma: no cover
return 0x0000
@property
def pid(self): # pragma: no cover
return 0x0000
def read(self): # pragma: no cover
return self._data
def write(self, data): # pragma: no cover
assert isinstance(data, list)
self._data = data
@pytest.fixture()
def device():
return hidabc.Device(DummyInterface())
def test_lock(device):
def write_ff(device, done):
with device as interface:
interface.write([0xff])
done.set()
done = threading.Event()
task = threading.Thread(target=write_ff, args=[device, done])
with device as interface:
task.start()
interface.write([0x01])
assert interface.read() == [0x01]
done.wait()
task.join()
with device as interface:
assert interface.read() == [0xff]
|
windows.py
|
"""The window manager. Creates a bunch of curses frames with various
functions. The "main" window is reserved for rendering Views that
display data aggregated by the various collectors."""
import time
import curses
import threading
from . import views
MENU_WIDTH = 40
STATUS_WIDTH = 40
def setup_window(window):
window.keypad(1)
window.immedok(True)
class WindowManager(object):
def __init__(self, data):
self.data = data
self.screen = None
self.render_thread = None
self.windows = {}
self.views = {}
self.render_thread = None
def start(self):
self.screen = curses.initscr()
curses.start_color()
curses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.curs_set(0)
curses.noecho()
curses.cbreak()
setup_window(self.screen)
self._create_windows()
self._start_render_thread()
self._main_loop()
def close(self):
curses.curs_set(1)
self.screen.immedok(False)
self.screen.keypad(0)
curses.nocbreak()
curses.echo()
curses.endwin()
def view_class_for_collector(self, collector_doc):
return getattr(views, "{}View".format(collector_doc['type']))
def change_main_view(self, menu_options):
collector_name = self.views['menu'].menu.get_active_in_group('collectors')[0]
collector_doc = [collector
for collector in self.data.config.get('collectors')
if collector['name'] == collector_name][0]
view_class = self.view_class_for_collector(collector_doc)
view = view_class(self.data, self.windows['main'], collector_name)
self.views['main'] = view
self.views['main'].render()
self.change_to_view_menu(view)
def change_to_view_menu(self, view):
self.views['menu'].menu = view.menu
self.views['menu'].render()
def change_to_main_menu(self):
menu_view = views.MenuView(self, self.data, self.windows['menu'])
menu_view.menu.on_change(self.change_main_view)
self.views['menu'] = menu_view
menu_view.render()
def _create_windows(self):
y, x = self.screen.getmaxyx()
self.windows['title'] = curses.newwin(1, x, 0, 0)
self.windows['mini'] = curses.newwin(1, x, y-1, 0)
self.windows['menu'] = curses.newwin(y - 2, MENU_WIDTH, 1, 0)
self.windows['status'] = curses.newwin(y - 2, STATUS_WIDTH, 1, x - STATUS_WIDTH)
self.windows['main'] = curses.newwin(y - 2, x - (MENU_WIDTH + STATUS_WIDTH), 1, MENU_WIDTH)
for window in self.windows.values():
setup_window(window)
self.views['title'] = views.TitleView(self.data, self.windows['title'])
self.views['mini'] = views.MiniView(self.data, self.windows['mini'])
self.views['status'] = views.StatusView(self.data, self.windows['status'])
initial_collector = self.data.config['collectors'][0]
initial_view_class = self.view_class_for_collector(initial_collector)
self.views['main'] = initial_view_class(self.data, self.windows['main'], initial_collector['name'])
self.change_to_main_menu()
self.change_to_view_menu(self.views['main'])
def _redraw_windows(self):
y, x = self.screen.getmaxyx()
self.screen.clear()
curses.resizeterm(y, x)
self.screen.refresh()
def _periodic_render(self):
while True:
for view in self.views.values():
try:
view.render()
except:
pass
time.sleep(1)
def _start_render_thread(self):
self.render_thread = threading.Thread(target=self._periodic_render)
self.render_thread.daemon = True
self.render_thread.start()
def _main_loop(self):
while True:
char = self.screen.getch()
if char == 10:
char = curses.KEY_ENTER
if char >= 0 and char <= 255:
char = chr(char).lower()
if char == 'q':
self.close()
break
if char == 'm':
self.change_to_main_menu()
continue
for view in self.views.values():
view.process_char(char)
|
test_connection_pool.py
|
import os
import pytest
import re
import redis
import time
from unittest import mock
from threading import Thread
from redis.connection import ssl_available, to_bool
from .conftest import skip_if_server_version_lt, _get_client
from .test_pubsub import wait_for_message
class DummyConnection:
description_format = "DummyConnection<>"
def __init__(self, **kwargs):
self.kwargs = kwargs
self.pid = os.getpid()
def connect(self):
pass
def can_read(self):
return False
class TestConnectionPool:
def get_pool(self, connection_kwargs=None, max_connections=None,
connection_class=redis.Connection):
connection_kwargs = connection_kwargs or {}
pool = redis.ConnectionPool(
connection_class=connection_class,
max_connections=max_connections,
**connection_kwargs)
return pool
def test_connection_creation(self):
connection_kwargs = {'foo': 'bar', 'biz': 'baz'}
pool = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=DummyConnection)
connection = pool.get_connection('_')
assert isinstance(connection, DummyConnection)
assert connection.kwargs == connection_kwargs
def test_multiple_connections(self, master_host):
connection_kwargs = {'host': master_host}
pool = self.get_pool(connection_kwargs=connection_kwargs)
c1 = pool.get_connection('_')
c2 = pool.get_connection('_')
assert c1 != c2
def test_max_connections(self, master_host):
connection_kwargs = {'host': master_host}
pool = self.get_pool(max_connections=2,
connection_kwargs=connection_kwargs)
pool.get_connection('_')
pool.get_connection('_')
with pytest.raises(redis.ConnectionError):
pool.get_connection('_')
def test_reuse_previously_released_connection(self, master_host):
connection_kwargs = {'host': master_host}
pool = self.get_pool(connection_kwargs=connection_kwargs)
c1 = pool.get_connection('_')
pool.release(c1)
c2 = pool.get_connection('_')
assert c1 == c2
def test_repr_contains_db_info_tcp(self):
connection_kwargs = {
'host': 'localhost',
'port': 6379,
'db': 1,
'client_name': 'test-client'
}
pool = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=redis.Connection)
expected = ('ConnectionPool<Connection<'
'host=localhost,port=6379,db=1,client_name=test-client>>')
assert repr(pool) == expected
def test_repr_contains_db_info_unix(self):
connection_kwargs = {
'path': '/abc',
'db': 1,
'client_name': 'test-client'
}
pool = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=redis.UnixDomainSocketConnection)
expected = ('ConnectionPool<UnixDomainSocketConnection<'
'path=/abc,db=1,client_name=test-client>>')
assert repr(pool) == expected
class TestBlockingConnectionPool:
def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20):
connection_kwargs = connection_kwargs or {}
pool = redis.BlockingConnectionPool(connection_class=DummyConnection,
max_connections=max_connections,
timeout=timeout,
**connection_kwargs)
return pool
def test_connection_creation(self, master_host):
connection_kwargs = {'foo': 'bar', 'biz': 'baz', 'host': master_host}
pool = self.get_pool(connection_kwargs=connection_kwargs)
connection = pool.get_connection('_')
assert isinstance(connection, DummyConnection)
assert connection.kwargs == connection_kwargs
def test_multiple_connections(self, master_host):
connection_kwargs = {'host': master_host}
pool = self.get_pool(connection_kwargs=connection_kwargs)
c1 = pool.get_connection('_')
c2 = pool.get_connection('_')
assert c1 != c2
def test_connection_pool_blocks_until_timeout(self, master_host):
"When out of connections, block for timeout seconds, then raise"
connection_kwargs = {'host': master_host}
pool = self.get_pool(max_connections=1, timeout=0.1,
connection_kwargs=connection_kwargs)
pool.get_connection('_')
start = time.time()
with pytest.raises(redis.ConnectionError):
pool.get_connection('_')
# we should have waited at least 0.1 seconds
assert time.time() - start >= 0.1
def test_connection_pool_blocks_until_conn_available(self, master_host):
"""
When out of connections, block until another connection is released
to the pool
"""
connection_kwargs = {'host': master_host}
pool = self.get_pool(max_connections=1, timeout=2,
connection_kwargs=connection_kwargs)
c1 = pool.get_connection('_')
def target():
time.sleep(0.1)
pool.release(c1)
start = time.time()
Thread(target=target).start()
pool.get_connection('_')
assert time.time() - start >= 0.1
def test_reuse_previously_released_connection(self, master_host):
connection_kwargs = {'host': master_host}
pool = self.get_pool(connection_kwargs=connection_kwargs)
c1 = pool.get_connection('_')
pool.release(c1)
c2 = pool.get_connection('_')
assert c1 == c2
def test_repr_contains_db_info_tcp(self):
pool = redis.ConnectionPool(
host='localhost',
port=6379,
client_name='test-client'
)
expected = ('ConnectionPool<Connection<'
'host=localhost,port=6379,db=0,client_name=test-client>>')
assert repr(pool) == expected
def test_repr_contains_db_info_unix(self):
pool = redis.ConnectionPool(
connection_class=redis.UnixDomainSocketConnection,
path='abc',
client_name='test-client'
)
expected = ('ConnectionPool<UnixDomainSocketConnection<'
'path=abc,db=0,client_name=test-client>>')
assert repr(pool) == expected
class TestConnectionPoolURLParsing:
def test_hostname(self):
pool = redis.ConnectionPool.from_url('redis://my.host')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'my.host',
}
def test_quoted_hostname(self):
pool = redis.ConnectionPool.from_url('redis://my %2F host %2B%3D+')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'my / host +=+',
}
def test_port(self):
pool = redis.ConnectionPool.from_url('redis://localhost:6380')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6380,
}
@skip_if_server_version_lt("6.0.0")
def test_username(self):
pool = redis.ConnectionPool.from_url('redis://myuser:@localhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'username': 'myuser',
}
@skip_if_server_version_lt("6.0.0")
def test_quoted_username(self):
pool = redis.ConnectionPool.from_url(
'redis://%2Fmyuser%2F%2B name%3D%24+:@localhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'username': '/myuser/+ name=$+',
}
def test_password(self):
pool = redis.ConnectionPool.from_url('redis://:mypassword@localhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'password': 'mypassword',
}
def test_quoted_password(self):
pool = redis.ConnectionPool.from_url(
'redis://:%2Fmypass%2F%2B word%3D%24+@localhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'password': '/mypass/+ word=$+',
}
@skip_if_server_version_lt("6.0.0")
def test_username_and_password(self):
pool = redis.ConnectionPool.from_url('redis://myuser:mypass@localhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'username': 'myuser',
'password': 'mypass',
}
def test_db_as_argument(self):
pool = redis.ConnectionPool.from_url('redis://localhost', db=1)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'db': 1,
}
def test_db_in_path(self):
pool = redis.ConnectionPool.from_url('redis://localhost/2', db=1)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'db': 2,
}
def test_db_in_querystring(self):
pool = redis.ConnectionPool.from_url('redis://localhost/2?db=3',
db=1)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'db': 3,
}
def test_extra_typed_querystring_options(self):
pool = redis.ConnectionPool.from_url(
'redis://localhost/2?socket_timeout=20&socket_connect_timeout=10'
'&socket_keepalive=&retry_on_timeout=Yes&max_connections=10'
)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'db': 2,
'socket_timeout': 20.0,
'socket_connect_timeout': 10.0,
'retry_on_timeout': True,
}
assert pool.max_connections == 10
def test_boolean_parsing(self):
for expected, value in (
(None, None),
(None, ''),
(False, 0), (False, '0'),
(False, 'f'), (False, 'F'), (False, 'False'),
(False, 'n'), (False, 'N'), (False, 'No'),
(True, 1), (True, '1'),
(True, 'y'), (True, 'Y'), (True, 'Yes'),
):
assert expected is to_bool(value)
def test_client_name_in_querystring(self):
pool = redis.ConnectionPool.from_url(
'redis://location?client_name=test-client'
)
assert pool.connection_kwargs['client_name'] == 'test-client'
def test_invalid_extra_typed_querystring_options(self):
with pytest.raises(ValueError):
redis.ConnectionPool.from_url(
'redis://localhost/2?socket_timeout=_&'
'socket_connect_timeout=abc'
)
def test_extra_querystring_options(self):
pool = redis.ConnectionPool.from_url('redis://localhost?a=1&b=2')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'a': '1',
'b': '2'
}
def test_calling_from_subclass_returns_correct_instance(self):
pool = redis.BlockingConnectionPool.from_url('redis://localhost')
assert isinstance(pool, redis.BlockingConnectionPool)
def test_client_creates_connection_pool(self):
r = redis.Redis.from_url('redis://myhost')
assert r.connection_pool.connection_class == redis.Connection
assert r.connection_pool.connection_kwargs == {
'host': 'myhost',
}
def test_invalid_scheme_raises_error(self):
with pytest.raises(ValueError) as cm:
redis.ConnectionPool.from_url('localhost')
assert str(cm.value) == (
'Redis URL must specify one of the following schemes '
'(redis://, rediss://, unix://)'
)
class TestConnectionPoolUnixSocketURLParsing:
def test_defaults(self):
pool = redis.ConnectionPool.from_url('unix:///socket')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
}
@skip_if_server_version_lt("6.0.0")
def test_username(self):
pool = redis.ConnectionPool.from_url('unix://myuser:@/socket')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'username': 'myuser',
}
@skip_if_server_version_lt("6.0.0")
def test_quoted_username(self):
pool = redis.ConnectionPool.from_url(
'unix://%2Fmyuser%2F%2B name%3D%24+:@/socket')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'username': '/myuser/+ name=$+',
}
def test_password(self):
pool = redis.ConnectionPool.from_url('unix://:mypassword@/socket')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'password': 'mypassword',
}
def test_quoted_password(self):
pool = redis.ConnectionPool.from_url(
'unix://:%2Fmypass%2F%2B word%3D%24+@/socket')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'password': '/mypass/+ word=$+',
}
def test_quoted_path(self):
pool = redis.ConnectionPool.from_url(
'unix://:mypassword@/my%2Fpath%2Fto%2F..%2F+_%2B%3D%24ocket')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/my/path/to/../+_+=$ocket',
'password': 'mypassword',
}
def test_db_as_argument(self):
pool = redis.ConnectionPool.from_url('unix:///socket', db=1)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 1,
}
def test_db_in_querystring(self):
pool = redis.ConnectionPool.from_url('unix:///socket?db=2', db=1)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 2,
}
def test_client_name_in_querystring(self):
pool = redis.ConnectionPool.from_url(
'redis://location?client_name=test-client'
)
assert pool.connection_kwargs['client_name'] == 'test-client'
def test_extra_querystring_options(self):
pool = redis.ConnectionPool.from_url('unix:///socket?a=1&b=2')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'a': '1',
'b': '2'
}
@pytest.mark.skipif(not ssl_available, reason="SSL not installed")
class TestSSLConnectionURLParsing:
def test_host(self):
pool = redis.ConnectionPool.from_url('rediss://my.host')
assert pool.connection_class == redis.SSLConnection
assert pool.connection_kwargs == {
'host': 'my.host',
}
def test_cert_reqs_options(self):
import ssl
class DummyConnectionPool(redis.ConnectionPool):
def get_connection(self, *args, **kwargs):
return self.make_connection()
pool = DummyConnectionPool.from_url(
'rediss://?ssl_cert_reqs=none')
assert pool.get_connection('_').cert_reqs == ssl.CERT_NONE
pool = DummyConnectionPool.from_url(
'rediss://?ssl_cert_reqs=optional')
assert pool.get_connection('_').cert_reqs == ssl.CERT_OPTIONAL
pool = DummyConnectionPool.from_url(
'rediss://?ssl_cert_reqs=required')
assert pool.get_connection('_').cert_reqs == ssl.CERT_REQUIRED
pool = DummyConnectionPool.from_url(
'rediss://?ssl_check_hostname=False')
assert pool.get_connection('_').check_hostname is False
pool = DummyConnectionPool.from_url(
'rediss://?ssl_check_hostname=True')
assert pool.get_connection('_').check_hostname is True
class TestConnection:
def test_on_connect_error(self):
"""
An error in Connection.on_connect should disconnect from the server
see for details: https://github.com/andymccurdy/redis-py/issues/368
"""
# this assumes the Redis server being tested against doesn't have
# 9999 databases ;)
bad_connection = redis.Redis(db=9999)
# an error should be raised on connect
with pytest.raises(redis.RedisError):
bad_connection.info()
pool = bad_connection.connection_pool
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_disconnects_socket(self, r):
"""
If Redis raises a LOADING error, the connection should be
disconnected and a BusyLoadingError raised
"""
with pytest.raises(redis.BusyLoadingError):
r.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
assert not r.connection._sock
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_from_pipeline_immediate_command(self, r):
"""
BusyLoadingErrors should raise from Pipelines that execute a
command immediately, like WATCH does.
"""
pipe = r.pipeline()
with pytest.raises(redis.BusyLoadingError):
pipe.immediate_execute_command('DEBUG', 'ERROR',
'LOADING fake message')
pool = r.connection_pool
assert not pipe.connection
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_from_pipeline(self, r):
"""
BusyLoadingErrors should be raised from a pipeline execution
regardless of the raise_on_error flag.
"""
pipe = r.pipeline()
pipe.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
with pytest.raises(redis.BusyLoadingError):
pipe.execute()
pool = r.connection_pool
assert not pipe.connection
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@skip_if_server_version_lt('2.8.8')
def test_read_only_error(self, r):
"READONLY errors get turned in ReadOnlyError exceptions"
with pytest.raises(redis.ReadOnlyError):
r.execute_command('DEBUG', 'ERROR', 'READONLY blah blah')
def test_connect_from_url_tcp(self):
connection = redis.Redis.from_url('redis://localhost')
pool = connection.connection_pool
assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == (
'ConnectionPool',
'Connection',
'host=localhost,port=6379,db=0',
)
def test_connect_from_url_unix(self):
connection = redis.Redis.from_url('unix:///path/to/socket')
pool = connection.connection_pool
assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == (
'ConnectionPool',
'UnixDomainSocketConnection',
'path=/path/to/socket,db=0',
)
def test_connect_no_auth_supplied_when_required(self, r):
"""
AuthenticationError should be raised when the server requires a
password but one isn't supplied.
"""
with pytest.raises(redis.AuthenticationError):
r.execute_command('DEBUG', 'ERROR',
'ERR Client sent AUTH, but no password is set')
def test_connect_invalid_password_supplied(self, r):
"AuthenticationError should be raised when sending the wrong password"
with pytest.raises(redis.AuthenticationError):
r.execute_command('DEBUG', 'ERROR', 'ERR invalid password')
class TestMultiConnectionClient:
@pytest.fixture()
def r(self, request):
return _get_client(redis.Redis,
request,
single_connection_client=False)
def test_multi_connection_command(self, r):
assert not r.connection
assert r.set('a', '123')
assert r.get('a') == b'123'
class TestHealthCheck:
interval = 60
@pytest.fixture()
def r(self, request):
return _get_client(redis.Redis, request,
health_check_interval=self.interval)
def assert_interval_advanced(self, connection):
diff = connection.next_health_check - time.time()
assert self.interval > diff > (self.interval - 1)
def test_health_check_runs(self, r):
r.connection.next_health_check = time.time() - 1
r.connection.check_health()
self.assert_interval_advanced(r.connection)
def test_arbitrary_command_invokes_health_check(self, r):
# invoke a command to make sure the connection is entirely setup
r.get('foo')
r.connection.next_health_check = time.time()
with mock.patch.object(r.connection, 'send_command',
wraps=r.connection.send_command) as m:
r.get('foo')
m.assert_called_with('PING', check_health=False)
self.assert_interval_advanced(r.connection)
def test_arbitrary_command_advances_next_health_check(self, r):
r.get('foo')
next_health_check = r.connection.next_health_check
r.get('foo')
assert next_health_check < r.connection.next_health_check
def test_health_check_not_invoked_within_interval(self, r):
r.get('foo')
with mock.patch.object(r.connection, 'send_command',
wraps=r.connection.send_command) as m:
r.get('foo')
ping_call_spec = (('PING',), {'check_health': False})
assert ping_call_spec not in m.call_args_list
def test_health_check_in_pipeline(self, r):
with r.pipeline(transaction=False) as pipe:
pipe.connection = pipe.connection_pool.get_connection('_')
pipe.connection.next_health_check = 0
with mock.patch.object(pipe.connection, 'send_command',
wraps=pipe.connection.send_command) as m:
responses = pipe.set('foo', 'bar').get('foo').execute()
m.assert_any_call('PING', check_health=False)
assert responses == [True, b'bar']
def test_health_check_in_transaction(self, r):
with r.pipeline(transaction=True) as pipe:
pipe.connection = pipe.connection_pool.get_connection('_')
pipe.connection.next_health_check = 0
with mock.patch.object(pipe.connection, 'send_command',
wraps=pipe.connection.send_command) as m:
responses = pipe.set('foo', 'bar').get('foo').execute()
m.assert_any_call('PING', check_health=False)
assert responses == [True, b'bar']
def test_health_check_in_watched_pipeline(self, r):
r.set('foo', 'bar')
with r.pipeline(transaction=False) as pipe:
pipe.connection = pipe.connection_pool.get_connection('_')
pipe.connection.next_health_check = 0
with mock.patch.object(pipe.connection, 'send_command',
wraps=pipe.connection.send_command) as m:
pipe.watch('foo')
# the health check should be called when watching
m.assert_called_with('PING', check_health=False)
self.assert_interval_advanced(pipe.connection)
assert pipe.get('foo') == b'bar'
# reset the mock to clear the call list and schedule another
# health check
m.reset_mock()
pipe.connection.next_health_check = 0
pipe.multi()
responses = pipe.set('foo', 'not-bar').get('foo').execute()
assert responses == [True, b'not-bar']
m.assert_any_call('PING', check_health=False)
def test_health_check_in_pubsub_before_subscribe(self, r):
"A health check happens before the first [p]subscribe"
p = r.pubsub()
p.connection = p.connection_pool.get_connection('_')
p.connection.next_health_check = 0
with mock.patch.object(p.connection, 'send_command',
wraps=p.connection.send_command) as m:
assert not p.subscribed
p.subscribe('foo')
# the connection is not yet in pubsub mode, so the normal
# ping/pong within connection.send_command should check
# the health of the connection
m.assert_any_call('PING', check_health=False)
self.assert_interval_advanced(p.connection)
subscribe_message = wait_for_message(p)
assert subscribe_message['type'] == 'subscribe'
def test_health_check_in_pubsub_after_subscribed(self, r):
"""
Pubsub can handle a new subscribe when it's time to check the
connection health
"""
p = r.pubsub()
p.connection = p.connection_pool.get_connection('_')
p.connection.next_health_check = 0
with mock.patch.object(p.connection, 'send_command',
wraps=p.connection.send_command) as m:
p.subscribe('foo')
subscribe_message = wait_for_message(p)
assert subscribe_message['type'] == 'subscribe'
self.assert_interval_advanced(p.connection)
# because we weren't subscribed when sending the subscribe
# message to 'foo', the connection's standard check_health ran
# prior to subscribing.
m.assert_any_call('PING', check_health=False)
p.connection.next_health_check = 0
m.reset_mock()
p.subscribe('bar')
            # the second subscribe issues exactly one command (the subscribe)
# and the health check is not invoked
m.assert_called_once_with('SUBSCRIBE', 'bar', check_health=False)
# since no message has been read since the health check was
# reset, it should still be 0
assert p.connection.next_health_check == 0
subscribe_message = wait_for_message(p)
assert subscribe_message['type'] == 'subscribe'
assert wait_for_message(p) is None
# now that the connection is subscribed, the pubsub health
# check should have taken over and include the HEALTH_CHECK_MESSAGE
m.assert_any_call('PING', p.HEALTH_CHECK_MESSAGE,
check_health=False)
self.assert_interval_advanced(p.connection)
def test_health_check_in_pubsub_poll(self, r):
"""
Polling a pubsub connection that's subscribed will regularly
check the connection's health.
"""
p = r.pubsub()
p.connection = p.connection_pool.get_connection('_')
with mock.patch.object(p.connection, 'send_command',
wraps=p.connection.send_command) as m:
p.subscribe('foo')
subscribe_message = wait_for_message(p)
assert subscribe_message['type'] == 'subscribe'
self.assert_interval_advanced(p.connection)
# polling the connection before the health check interval
# doesn't result in another health check
m.reset_mock()
next_health_check = p.connection.next_health_check
assert wait_for_message(p) is None
assert p.connection.next_health_check == next_health_check
m.assert_not_called()
# reset the health check and poll again
# we should not receive a pong message, but the next_health_check
# should be advanced
p.connection.next_health_check = 0
assert wait_for_message(p) is None
m.assert_called_with('PING', p.HEALTH_CHECK_MESSAGE,
check_health=False)
self.assert_interval_advanced(p.connection)
|
main.py
|
import pdb
import time
import os
import subprocess
import re
import random
import json
import numpy as np
import glob
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import socket
import argparse
import threading
import _thread
import signal
from datetime import datetime
parser = argparse.ArgumentParser(description='TCP client')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='select testcase')
args = parser.parse_args()
queue = [6, 33, 4, 43, 15, 47, 18, 42, 35, 40, 34, 20, 9, 29, 19, 22, 3, 5, 38, 7, 41, 39, 46, 17, 24, 28, 26, 45, 16, 14, 50, 48, 36, 27, 32, 8, 10, 49, 2, 12, 23, 1, 37, 31, 44, 21, 30, 11, 13, 25]
queue_dict = {}
arrival_time = 0
for item in queue:
arrival_time += np.random.poisson(30)
queue_dict[item] = arrival_time
queue_timer = time.time()
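# The loop above builds a synthetic arrival schedule: inter-arrival gaps are drawn from a
# Poisson distribution with mean 30 seconds, so queue_dict maps job id -> cumulative arrival
# offset (in seconds) measured from queue_timer. For example (illustrative draws only),
# gaps of 28, 35, 31 would give queue_dict = {6: 28, 33: 63, 4: 94} for the first three jobs.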
job_start = {} #{'49': time1, '15': time2...}
JCT = {}
overhead = {} # initialize so that every job starts with 0s overhead time
for item in queue:
overhead[str(item)] = 0
ovhd_start = {} # initialize this to 0 as well
for item in queue:
ovhd_start[str(item)] = 0
num_mig = {} # initialize migration time to 0
for item in queue:
num_mig[str(item)] = 0
queue_start = {} # initialize this to 0 as well
for item in queue:
queue_start[str(item)] = 0
queue_time = {} # initialize this to 0 as well
for item in queue:
queue_time[str(item)] = 0
K80_epoch_time = {}
for item in queue:
K80_epoch_time[str(item)] = 0
speedup_dict = {}
with open('speedup.json', 'r') as fp:
speedup_dict = json.load(fp)
index = 0
K80_cap = 8
V100_cap = 4
K80_used = 0
V100_used = 0
K80_job = {}
for i in range(8):
K80_job[str(i)] = 'idle'
V100_job = {}
for i in range(4):
V100_job[str(i)] = 'idle'
qualified_job = []
pc_job = []
K80_node = 'c2180'
V100_node = 'd1006'
host_node = 'c0179'
testcase = args.tc
### also, change .h5 file folder in jobs ###
INTERVAL = 30 # make decision every 30s
def send_signal(node, cmd):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 10000 if node == K80_node else 10001
# Connect the socket to the port where the server is listening
server_address = (node, int(port))
print('connecting to {} port {}'.format(*server_address))
sock.connect(server_address)
try:
# Send data
message = cmd.encode('utf-8') #b'save 35' #b'start 35 gpu 6'#b'save 35'
print('sending {!r}'.format(message))
sock.sendall(message)
while True:
data = sock.recv(32)
if 'success' in data.decode('utf-8'):
print('received {!r}'.format(data))
break
else:
print('waiting for success signal')
time.sleep(1)
finally:
#print('closing socket')
sock.close()
def max_speedup_promotion(K80_free, V100_free, V100_job, promote_list, force_demote):
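    # Summary of the policy implemented below (added commentary, derived from the code):
    # - If the vacant K80s can absorb every force-demoted job, pool the currently running
    #   V100 jobs with the promotion candidates, rank them by measured speedup
    #   (speedup_dict), keep the top 4 on the V100s and demote the rest.
    # - Otherwise, if the vacant V100s (including slots freed by forced demotions) can absorb
    #   every promotion candidate, promote them all and demote only the lowest-speedup jobs
    #   from force_demote, at most K80_vacant of them.
    # Hypothetical example: V100 running jobs {'3', '7'}, promote_list = ['12', '15', '20'],
    # no forced demotions, speedups 12 > 3 > 15 > 7 > 20 -> top four = {12, 3, 15, 7}, so
    # '12' and '15' are promoted, '3' and '7' keep their V100 slots, and '20' stays on a K80.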
num_demote = len(force_demote)
num_promote = len(promote_list)
V100_vacant = num_demote + V100_free
K80_vacant = num_promote + K80_free
global speedup_dict
if K80_vacant >= num_demote: # if more vacant K80s than demote jobs, always demote
# selectively promote among active V100 jobs and promote list jobs
V100_qual = list(set(list(V100_job.values())) - set(force_demote))
if 'idle' in V100_qual:
V100_qual.remove('idle')
V100_pool = list(set(V100_qual).union(promote_list))
if len(V100_pool) <= 4: # promote all jobs as well
return promote_list, force_demote
else: # promote the top 4 jobs
pool_dict = {}
for job in V100_pool:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=True)[:4]
promotion_list = list(set(promote_list).intersection(sorted_pool))
demotion_list = list(set(list(V100_job.values())).difference(sorted_pool))
if 'idle' in demotion_list:
demotion_list.remove('idle') # this includes force demotion
return promotion_list, demotion_list
elif V100_vacant >= num_promote: # if more vacant V100s than promote jobs, always promote
        # fewer vacant K80s than demote jobs: select the lowest-speedup jobs among the force-demote list
pool_dict = {} # here the pool only includes force demote jobs
for job in force_demote:
if job in speedup_dict:
pool_dict[job] = speedup_dict[job]
sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=False)[:K80_vacant]
return promote_list, sorted_pool
else:
raise ValueError('Bug with max speedup promotion, condition not considered')
def save_job(node, job): # save_job('c2176', '50')
# first wait for the job to be qualified for checkpointing
while True: # wait for ckpt_qual to be available
global ckpt_qual_dict
if ckpt_qual_dict['job'+job] == 1:
ckpt_qual_dict['job'+job] = 0
break
time.sleep(5)
send_signal(node, 'save ' + job)
global ovhd_start
ovhd_start[job] = time.time()
# after sending checkpoint signal, wait for it to finish
while True:
time.sleep(5)
with open('checkpoint.json', 'r') as fp2:
checkpoint_dict = json.load(fp2)
if checkpoint_dict['job'+job] == 1: # checkpoint has finished
print('checkpointed successfully')
checkpoint_dict['job'+job] = 0 # reset it
json_file = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp2:
fp2.write(json_file)
break
# also check if job has already finished
global finish_dict
if finish_dict['job'+job] == 1:
break
# resume job
def resume_job(node, gpu, job): # resume_job('c2176', '3', '50')
while True:
if os.path.exists('pid.json'):
os.rename('pid.json', 'pid_lock.json')
break
else:
time.sleep(1)
cmd = 'resume ' + job + ' gpu ' + gpu
send_signal(node, cmd)
while True:
if os.path.exists('pid.json'):
break
else:
time.sleep(1)
# start job
def start_job(node, gpu, job):
# first wait for pid.json to show up, rename pid.json to pid_lock.json
# then in jobx.py, modify pid_lock.json, rename it to pid.json
# then wait for pid.json to show up
while True:
if os.path.exists('pid.json'):
os.rename('pid.json', 'pid_lock.json')
break
else:
time.sleep(1)
cmd = 'start ' + job + ' gpu ' + gpu
send_signal(node, cmd)
while True:
if os.path.exists('pid.json'):
break
else:
time.sleep(1)
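# The pid.json / pid_lock.json renames above form a crude file-based handshake with the
# per-job scripts (referred to in the comments as jobx.py, not shown in this file). A
# hypothetical sketch of the worker side, for illustration only -- the real job code may differ:
#
#   while not os.path.exists('pid_lock.json'):   # wait for the scheduler to take the lock
#       time.sleep(1)
#   with open('pid_lock.json', 'r+') as fp:      # record this job's pid
#       d = json.load(fp); d['job' + job_id] = os.getpid()
#       fp.seek(0); json.dump(d, fp); fp.truncate()
#   os.rename('pid_lock.json', 'pid.json')       # hand the file back to the scheduler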
############### first clear finish status of all jobs ####################
pid_dict = {}
with open('pid.json', 'r') as fp:
pid_dict = json.load(fp)
for key in pid_dict:
pid_dict[key] = 0
json_file = json.dumps(pid_dict)
with open('pid.json', 'w') as fp:
fp.write(json_file)
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
for key in checkpoint_dict:
checkpoint_dict[key] = 0
json_file = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file)
ckpt_qual_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
ckpt_qual_dict[job_name] = 0
finish_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
finish_dict[job_name] = 0
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
for key in epoch_waste_dict:
epoch_waste_dict[key] = 0
json_file = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file)
#################### background thread running TCP socket ########################
def thread_function():
# here listen on the socket
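    # Message protocol handled below (added summary): workers send short text messages such as
    # 'job35 ckpt_qual' (the job can now be checkpointed) or 'job35 finish' (the job completed),
    # where the first token is the job name; messages containing 'param' are acknowledged but
    # otherwise ignored. The listener replies b'success' after processing each message.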
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (host_node, 10002)
print('starting up on {} port {}'.format(*server_address))
sock.bind(server_address)
sock.listen(5)
while True:
# Wait for a connection
connection, client_address = sock.accept()
try:
while True:
data = connection.recv(32)
if data:
data_str = data.decode('utf-8')
if 'param' in data_str:
pass
elif 'ckpt_qual' in data_str:
global ckpt_qual_dict
job_name = data_str.split(' ')[0]
ckpt_qual_dict[job_name] = 1
# move overhead profiling here
global ovhd_start
global overhead
global V100_job
job = job_name.replace('job','')
if ovhd_start[job] != 0:
if ckpt_qual_dict[job_name] == 1:
overhead[job] += int(time.time() - ovhd_start[job])
ovhd_start[job] = 0
elif 'finish' in data_str:
global finish_dict
job_name = data_str.split(' ')[0]
finish_dict[job_name] = 1
print('received ' + data_str)
connection.sendall(b'success')
#time.sleep(5)
else:
break
finally:
connection.close()
x = threading.Thread(target=thread_function, daemon=True)
x.start()
###############################################################################
######################################################################
while True:
# termination condition:
# all the jobs have finished
################### check for finished jobs on K80 and V100 ##############################
for gpu, job in K80_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
K80_used -= 1
K80_job[gpu] = 'idle'
print('K80 finished job: ' + job)
JCT[job] = int(time.time() - job_start[job])
for gpu, job in V100_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
V100_used -= 1
V100_job[gpu] = 'idle'
print('V100 finished job: ' + job)
JCT[job] = int(time.time() - job_start[job])
################ submit new jobs to vacant K80 GPUs ############################
# check if there are vacant K80s
## yes: submit jobs from queue
## no: do nothing
new_pool = []
if K80_used < K80_cap:
K80_free = K80_cap - K80_used
for i in range(K80_free):
time_passed = int(time.time() - queue_timer)
if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue
job_new = str(queue[index])
for gpu, job in K80_job.items():
if job == 'idle': # schedule new job here if idle
new_pool.append(job_new)
qualified_job.append(job_new)
K80_job[gpu] = job_new # allocate gpu for it, but don't start yet
index += 1
K80_used += 1
break
# make promotion decisions
V100_free = V100_cap - V100_used
K80_free = K80_cap - K80_used
promote_list = list(set(qualified_job).intersection(list(K80_job.values())).difference(pc_job))
force_demote = list(set(list(V100_job.values())).intersection(pc_job))
if len(promote_list) > 0:
promoted, demoted = max_speedup_promotion(K80_free, V100_free, V100_job, promote_list, force_demote)
if len(promoted) > 0:
print('promoted jobs: ', promoted)
if len(demoted) > 0:
print('demoted jobs: ', demoted)
# stop all promoted jobs on K80
for gpu, job in K80_job.items():
if job in promoted:
if job not in new_pool: # don't do checkpointing for new jobs
save_job(K80_node, job)
K80_job[gpu] = 'idle'
K80_used -= 1
# stop all demoted jobs on V100
for gpu, job in V100_job.items():
if job in demoted:
save_job(V100_node, job)
V100_job[gpu] = 'idle'
V100_used -= 1
# resume promoted jobs on V100, make sure the gpu is idle
for job_new in promoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in V100_job.items():
if job == 'idle': # if gpu idle, schedule new job here
if job_new in new_pool:
start_job(V100_node, gpu, job_new)
job_start[job_new] = time.time()
new_pool.remove(job_new)
else:
resume_job(V100_node, gpu, job_new)
num_mig[job_new] += 1
V100_job[gpu] = job_new
promoted.remove(job_new)
V100_used += 1
break
else: # job has already finished before checkpointing
JCT[job_new] = int(time.time() - job_start[job_new])
promoted.remove(job_new)
# start remaining new jobs on K80, make sure the gpu equals its allocated one
for job_new in new_pool[:]:
for gpu, job in K80_job.items():
                if job == job_new: # this gpu was pre-allocated to the new job above; start it here
start_job(K80_node, gpu, job_new)
job_start[job_new] = time.time()
new_pool.remove(job_new)
break
# resume demoted jobs on K80, make sure the gpu is idle
for job_new in demoted[:]:
if finish_dict['job'+job_new] != 1:
for gpu, job in K80_job.items():
if job == 'idle': # if gpu idle, schedule new job here
resume_job(K80_node, gpu, job_new)
num_mig[job_new] += 1
K80_job[gpu] = job_new
demoted.remove(job_new)
K80_used += 1
break
else: # job has already finished before checkpointing
JCT[job_new] = int(time.time() - job_start[job_new])
demoted.remove(job_new)
# perform a check, make sure all promoted/demoted jobs are scheduled
if len(promoted) > 0 or len(demoted) > 0 or len(new_pool) > 0:
raise ValueError('Bug with promotion scheme, more jobs than free gpus')
############### wait for next iteration
time.sleep(INTERVAL)
################ check if termination condition is met ################
K80_idle_num = sum(value == 'idle' for value in K80_job.values())
V100_idle_num = sum(value == 'idle' for value in V100_job.values())
if K80_idle_num == K80_cap and V100_idle_num == V100_cap and index == len(queue):
print('all jobs are finished!')
break
# get average JCT
average_JCT = np.average(list(JCT.values()))
JCT['average'] = average_JCT
average_overhead = np.average(list(overhead.values()))
overhead['average'] = average_overhead
# after everything is finished
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
print('finished all runs')
JCT_name = testcase + '_JCT.json'
overhead_name = testcase + '_overhead.json'
num_mig_name = testcase + '_num_mig.json'
epoch_waste_name = testcase + '_epoch_waste.json'
ckpt_qual_name = 'ckpt_qual.json'
finish_name = 'finish.json'
with open(JCT_name, 'w') as fp1:
json.dump(JCT, fp1, sort_keys=True, indent=4)
with open(overhead_name, 'w') as fp3:
json.dump(overhead, fp3, sort_keys=True, indent=4)
with open(num_mig_name, 'w') as fp3:
json.dump(num_mig, fp3, sort_keys=True, indent=4)
with open(epoch_waste_name, 'w') as fp3:
json.dump(epoch_waste_dict, fp3, sort_keys=True, indent=4)
with open(ckpt_qual_name, 'w') as fp1:
json.dump(ckpt_qual_dict, fp1, sort_keys=True, indent=4)
with open(finish_name, 'w') as fp1:
json.dump(finish_dict, fp1, sort_keys=True, indent=4)
|
run.py
|
#!/usr/bin/env python3
import logging
import os
from os import path as osp
import sys
import time
from multiprocessing import Process, Queue
import cloudpickle
import easy_tf_log
from a2c import logger
from a2c.a2c.a2c import learn
from a2c.a2c.policies import CnnPolicy, MlpPolicy
from a2c.common import set_global_seeds
from a2c.common.vec_env.subproc_vec_env import SubprocVecEnv
from params import parse_args, PREFS_VAL_FRACTION
from pref_db import PrefDB, PrefBuffer
from pref_interface import PrefInterface
from reward_predictor import RewardPredictorEnsemble
from reward_predictor_core_network import net_cnn, net_moving_dot_features
from utils import VideoRenderer, get_port_range, make_env
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' # filter out INFO messages
def main():
general_params, a2c_params, \
pref_interface_params, rew_pred_training_params = parse_args()
if general_params['debug']:
logging.getLogger().setLevel(logging.DEBUG)
run(general_params,
a2c_params,
pref_interface_params,
rew_pred_training_params)
def run(general_params,
a2c_params,
pref_interface_params,
rew_pred_training_params):
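    # Mode dispatch (added summary of the branches below):
    #   gather_initial_prefs               - run A2C + the preference interface only long enough
    #                                        to collect n_initial_prefs preferences, then save them
    #   pretrain_reward_predictor          - pretrain the reward predictor on saved preferences
    #   train_policy_with_original_rewards - plain A2C using the environment's own rewards
    #   train_policy_with_preferences      - full loop: A2C + preference interface + reward
    #                                        predictor training running together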
seg_pipe = Queue(maxsize=1)
pref_pipe = Queue(maxsize=1)
start_policy_training_flag = Queue(maxsize=1)
if general_params['render_episodes']:
episode_vid_queue, episode_renderer = start_episode_renderer()
else:
episode_vid_queue = episode_renderer = None
if a2c_params['env_id'] in ['MovingDot-v0', 'MovingDotNoFrameskip-v0']:
reward_predictor_network = net_moving_dot_features
elif a2c_params['env_id'] in ['PongNoFrameskip-v4', 'EnduroNoFrameskip-v4']:
reward_predictor_network = net_cnn
else:
raise Exception("Unsure about reward predictor network for {}".format(
a2c_params['env_id']))
def make_reward_predictor(name, cluster_dict):
return RewardPredictorEnsemble(
cluster_job_name=name,
cluster_dict=cluster_dict,
log_dir=general_params['log_dir'],
batchnorm=rew_pred_training_params['batchnorm'],
dropout=rew_pred_training_params['dropout'],
lr=rew_pred_training_params['lr'],
core_network=reward_predictor_network)
save_make_reward_predictor(general_params['log_dir'],
make_reward_predictor)
if general_params['mode'] == 'gather_initial_prefs':
env, a2c_proc = start_policy_training(
cluster_dict=None,
make_reward_predictor=None,
gen_segments=True,
start_policy_training_pipe=start_policy_training_flag,
seg_pipe=seg_pipe,
episode_vid_queue=episode_vid_queue,
log_dir=general_params['log_dir'],
a2c_params=a2c_params)
pi, pi_proc = start_pref_interface(
seg_pipe=seg_pipe,
pref_pipe=pref_pipe,
log_dir=general_params['log_dir'],
**pref_interface_params)
n_train = general_params['max_prefs'] * (1 - PREFS_VAL_FRACTION)
n_val = general_params['max_prefs'] * PREFS_VAL_FRACTION
pref_db_train = PrefDB(maxlen=n_train)
pref_db_val = PrefDB(maxlen=n_val)
pref_buffer = PrefBuffer(db_train=pref_db_train, db_val=pref_db_val)
pref_buffer.start_recv_thread(pref_pipe)
pref_buffer.wait_until_len(general_params['n_initial_prefs'])
pref_db_train, pref_db_val = pref_buffer.get_dbs()
save_prefs(general_params['log_dir'], pref_db_train, pref_db_val)
pi_proc.terminate()
pi.stop_renderer()
a2c_proc.terminate()
pref_buffer.stop_recv_thread()
env.close()
elif general_params['mode'] == 'pretrain_reward_predictor':
cluster_dict = create_cluster_dict(['ps', 'train'])
ps_proc = start_parameter_server(cluster_dict, make_reward_predictor)
rpt_proc = start_reward_predictor_training(
cluster_dict=cluster_dict,
make_reward_predictor=make_reward_predictor,
just_pretrain=True,
pref_pipe=pref_pipe,
start_policy_training_pipe=start_policy_training_flag,
max_prefs=general_params['max_prefs'],
prefs_dir=general_params['prefs_dir'],
load_ckpt_dir=None,
n_initial_prefs=general_params['n_initial_prefs'],
n_initial_epochs=rew_pred_training_params['n_initial_epochs'],
val_interval=rew_pred_training_params['val_interval'],
ckpt_interval=rew_pred_training_params['ckpt_interval'],
log_dir=general_params['log_dir'])
rpt_proc.join()
ps_proc.terminate()
elif general_params['mode'] == 'train_policy_with_original_rewards':
env, a2c_proc = start_policy_training(
cluster_dict=None,
make_reward_predictor=None,
gen_segments=False,
start_policy_training_pipe=start_policy_training_flag,
seg_pipe=seg_pipe,
episode_vid_queue=episode_vid_queue,
log_dir=general_params['log_dir'],
a2c_params=a2c_params)
start_policy_training_flag.put(True)
a2c_proc.join()
env.close()
elif general_params['mode'] == 'train_policy_with_preferences':
cluster_dict = create_cluster_dict(['ps', 'a2c', 'train'])
ps_proc = start_parameter_server(cluster_dict, make_reward_predictor)
env, a2c_proc = start_policy_training(
cluster_dict=cluster_dict,
make_reward_predictor=make_reward_predictor,
gen_segments=True,
start_policy_training_pipe=start_policy_training_flag,
seg_pipe=seg_pipe,
episode_vid_queue=episode_vid_queue,
log_dir=general_params['log_dir'],
a2c_params=a2c_params)
pi, pi_proc = start_pref_interface(
seg_pipe=seg_pipe,
pref_pipe=pref_pipe,
log_dir=general_params['log_dir'],
**pref_interface_params)
rpt_proc = start_reward_predictor_training(
cluster_dict=cluster_dict,
make_reward_predictor=make_reward_predictor,
just_pretrain=False,
pref_pipe=pref_pipe,
start_policy_training_pipe=start_policy_training_flag,
max_prefs=general_params['max_prefs'],
prefs_dir=general_params['prefs_dir'],
load_ckpt_dir=rew_pred_training_params['load_ckpt_dir'],
n_initial_prefs=general_params['n_initial_prefs'],
n_initial_epochs=rew_pred_training_params['n_initial_epochs'],
val_interval=rew_pred_training_params['val_interval'],
ckpt_interval=rew_pred_training_params['ckpt_interval'],
log_dir=general_params['log_dir'])
# We wait for A2C to complete the specified number of policy training
# steps
a2c_proc.join()
rpt_proc.terminate()
pi_proc.terminate()
pi.stop_renderer()
ps_proc.terminate()
env.close()
else:
raise Exception("Unknown mode: {}".format(general_params['mode']))
if episode_renderer:
episode_renderer.stop()
def save_prefs(log_dir, pref_db_train, pref_db_val):
train_path = osp.join(log_dir, 'train.pkl.gz')
pref_db_train.save(train_path)
print("Saved training preferences to '{}'".format(train_path))
val_path = osp.join(log_dir, 'val.pkl.gz')
pref_db_val.save(val_path)
print("Saved validation preferences to '{}'".format(val_path))
def save_make_reward_predictor(log_dir, make_reward_predictor):
save_dir = osp.join(log_dir, 'reward_predictor_checkpoints')
os.makedirs(save_dir, exist_ok=True)
with open(osp.join(save_dir, 'make_reward_predictor.pkl'), 'wb') as fh:
fh.write(cloudpickle.dumps(make_reward_predictor))
def create_cluster_dict(jobs):
n_ports = len(jobs) + 1
ports = get_port_range(start_port=2200,
n_ports=n_ports,
random_stagger=True)
cluster_dict = {}
for part, port in zip(jobs, ports):
cluster_dict[part] = ['localhost:{}'.format(port)]
return cluster_dict
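# create_cluster_dict(['ps', 'train']) yields something like
# {'ps': ['localhost:2200'], 'train': ['localhost:2201']} (actual ports depend on
# get_port_range and its random stagger); the dict is handed to every
# RewardPredictorEnsemble so the separate processes can find each other.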
def configure_a2c_logger(log_dir):
a2c_dir = osp.join(log_dir, 'a2c')
os.makedirs(a2c_dir)
tb = logger.TensorBoardOutputFormat(a2c_dir)
logger.Logger.CURRENT = logger.Logger(dir=a2c_dir, output_formats=[tb])
def make_envs(env_id, n_envs, seed):
def wrap_make_env(env_id, rank):
def _thunk():
return make_env(env_id, seed + rank)
return _thunk
set_global_seeds(seed)
env = SubprocVecEnv(env_id, [wrap_make_env(env_id, i)
for i in range(n_envs)])
return env
def start_parameter_server(cluster_dict, make_reward_predictor):
def f():
make_reward_predictor('ps', cluster_dict)
while True:
time.sleep(1.0)
proc = Process(target=f, daemon=True)
proc.start()
return proc
def start_policy_training(cluster_dict, make_reward_predictor, gen_segments,
start_policy_training_pipe, seg_pipe,
episode_vid_queue, log_dir, a2c_params):
env_id = a2c_params['env_id']
if env_id in ['MovingDotNoFrameskip-v0', 'MovingDot-v0']:
policy_fn = MlpPolicy
elif env_id in ['PongNoFrameskip-v4', 'EnduroNoFrameskip-v4']:
policy_fn = CnnPolicy
else:
msg = "Unsure about policy network for {}".format(a2c_params['env_id'])
raise Exception(msg)
configure_a2c_logger(log_dir)
# Done here because daemonic processes can't have children
env = make_envs(a2c_params['env_id'],
a2c_params['n_envs'],
a2c_params['seed'])
del a2c_params['env_id'], a2c_params['n_envs']
ckpt_dir = osp.join(log_dir, 'policy_checkpoints')
os.makedirs(ckpt_dir)
def f():
if make_reward_predictor:
reward_predictor = make_reward_predictor('a2c', cluster_dict)
else:
reward_predictor = None
misc_logs_dir = osp.join(log_dir, 'a2c_misc')
easy_tf_log.set_dir(misc_logs_dir)
learn(
policy=policy_fn,
env=env,
seg_pipe=seg_pipe,
start_policy_training_pipe=start_policy_training_pipe,
episode_vid_queue=episode_vid_queue,
reward_predictor=reward_predictor,
ckpt_save_dir=ckpt_dir,
gen_segments=gen_segments,
**a2c_params)
proc = Process(target=f, daemon=True)
proc.start()
return env, proc
def start_pref_interface(seg_pipe, pref_pipe, max_segs, synthetic_prefs,
log_dir):
def f():
# The preference interface needs to get input from stdin. stdin is
# automatically closed at the beginning of child processes in Python,
# so this is a bit of a hack, but it seems to be fine.
sys.stdin = os.fdopen(0)
pi.run(seg_pipe=seg_pipe, pref_pipe=pref_pipe)
    # Needs to be done in the main process because it does GUI setup work
prefs_log_dir = osp.join(log_dir, 'pref_interface')
pi = PrefInterface(synthetic_prefs=synthetic_prefs,
max_segs=max_segs,
log_dir=prefs_log_dir)
proc = Process(target=f, daemon=True)
proc.start()
return pi, proc
def start_reward_predictor_training(cluster_dict,
make_reward_predictor,
just_pretrain,
pref_pipe,
start_policy_training_pipe,
max_prefs,
n_initial_prefs,
n_initial_epochs,
prefs_dir,
load_ckpt_dir,
val_interval,
ckpt_interval,
log_dir):
def f():
rew_pred = make_reward_predictor('train', cluster_dict)
rew_pred.init_network(load_ckpt_dir)
if prefs_dir is not None:
train_path = osp.join(prefs_dir, 'train.pkl.gz')
pref_db_train = PrefDB.load(train_path)
print("Loaded training preferences from '{}'".format(train_path))
n_prefs, n_segs = len(pref_db_train), len(pref_db_train.segments)
print("({} preferences, {} segments)".format(n_prefs, n_segs))
val_path = osp.join(prefs_dir, 'val.pkl.gz')
pref_db_val = PrefDB.load(val_path)
print("Loaded validation preferences from '{}'".format(val_path))
n_prefs, n_segs = len(pref_db_val), len(pref_db_val.segments)
print("({} preferences, {} segments)".format(n_prefs, n_segs))
else:
n_train = max_prefs * (1 - PREFS_VAL_FRACTION)
n_val = max_prefs * PREFS_VAL_FRACTION
pref_db_train = PrefDB(maxlen=n_train)
pref_db_val = PrefDB(maxlen=n_val)
pref_buffer = PrefBuffer(db_train=pref_db_train,
db_val=pref_db_val)
pref_buffer.start_recv_thread(pref_pipe)
if prefs_dir is None:
pref_buffer.wait_until_len(n_initial_prefs)
save_prefs(log_dir, pref_db_train, pref_db_val)
if not load_ckpt_dir:
print("Pretraining reward predictor for {} epochs".format(
n_initial_epochs))
pref_db_train, pref_db_val = pref_buffer.get_dbs()
for i in range(n_initial_epochs):
# Note that we deliberately don't update the preferences
# databases during pretraining to keep the number of
                # preferences fairly small so that pretraining doesn't take too
# long.
print("Reward predictor training epoch {}".format(i))
rew_pred.train(pref_db_train, pref_db_val, val_interval)
if i and i % ckpt_interval == 0:
rew_pred.save()
print("Reward predictor pretraining done")
rew_pred.save()
if just_pretrain:
return
start_policy_training_pipe.put(True)
i = 0
while True:
pref_db_train, pref_db_val = pref_buffer.get_dbs()
save_prefs(log_dir, pref_db_train, pref_db_val)
rew_pred.train(pref_db_train, pref_db_val, val_interval)
if i and i % ckpt_interval == 0:
                rew_pred.save()
            i += 1  # advance the counter so the periodic checkpoint above can actually trigger
proc = Process(target=f, daemon=True)
proc.start()
return proc
def start_episode_renderer():
episode_vid_queue = Queue()
renderer = VideoRenderer(
episode_vid_queue,
playback_speed=2,
zoom=2,
mode=VideoRenderer.play_through_mode)
return episode_vid_queue, renderer
if __name__ == '__main__':
main()
|
client.py
|
import py_env
import os
from typing import *
import queue
import socket
import threading
import sys
import argparse
import selectors
from py_env.utils import log
class Client:
def __init__(self, host_addr : Tuple[str, int], code_config : 'CodeConfig'):
self.sock = socket.socket()
self.sock.connect(host_addr)
self.sock.setblocking(False)
self.stdin_queue = queue.Queue()
self.stdout_queue = queue.Queue()
self.stderr_queue = queue.Queue()
self.stdin_proxy = py_env.proxy_io.IOProxy(input_queue=None, output_queue=self.stdin_queue)
self.stdout_proxy = py_env.proxy_io.IOProxy(input_queue=self.stdout_queue, output_queue=None)
self.stderr_proxy = py_env.proxy_io.IOProxy(input_queue=self.stderr_queue, output_queue=None)
self.command_handlers = {
py_env.protocol.TaskDone: self.for_task_done
}
self.socket_splitter = py_env.proxy_io.SocketSplitter(protocol=py_env.protocol.JsonProtocol(),
sock=self.sock,
sock_write_source=self.stdin_queue,
sock_stdin_dest=None,
sock_stdout_dest=self.stdout_queue,
sock_stderr_dest=self.stderr_queue,
command_handlers=self.command_handlers,
on_broken_pipe=self.done)
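        # Wiring (added commentary): local stdin lines are pushed onto stdin_queue and written
        # to the socket by SocketSplitter; bytes coming back from the host are demultiplexed
        # into stdout_queue / stderr_queue, and protocol objects (e.g. TaskDone) are dispatched
        # through command_handlers.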
self.running = True
self.code_config = code_config
def stdin_collector(self):
selector = selectors.DefaultSelector()
selector.register(sys.stdin.fileno(), selectors.EVENT_READ)
while self.running:
r = selector.select(timeout=0.01)
if len(r) > 0:
_in = sys.stdin.readline()
self.stdin_proxy.write("stdin", _in)
def stdout_collector(self):
try:
while not self.stdout_proxy.empty():
print(self.stdout_proxy.read(), end='')
except EOFError:
pass
def stderr_collector(self):
try:
while not self.stderr_proxy.empty():
print(self.stderr_proxy.read(), end='', file=sys.stderr)
except EOFError:
pass
def load_code(self, cfg : 'CodeConfig'):
if cfg.code == None:
obj = py_env.protocol.LoadCodeByPath(
path=cfg.code_path,
pwd=cfg.pwd,
environ=cfg.environ,
argv=cfg.argv
)
else:
obj = py_env.protocol.LoadCode(
code=cfg.code,
pwd=cfg.pwd,
environ=cfg.environ,
argv=cfg.argv
)
self.stdin_queue.put(obj)
def run(self):
threading.Thread(target=self.socket_splitter.run).start()
threading.Thread(target=self.stdin_collector).start()
threading.Thread(target=self.stdout_collector).start()
threading.Thread(target=self.stderr_collector).start()
self.load_code(self.code_config)
def done(self):
self.socket_splitter.done = True
self.running = False
def for_task_done(self, obj : py_env.protocol.TaskDone):
self.done()
class CodeConfig:
def __init__(self, code_path, pwd=None, environ=None, argv=None, code=None):
self.code_path = code_path
self.pwd = os.getcwd() if pwd is None else pwd
self.environ = dict(os.environ)
if environ:
self.environ.update(environ)
self.argv = [code_path] if argv is None else argv
self.code = code
def main(argv):
parser = argparse.ArgumentParser(add_help=True)
path_arg = parser.add_argument_group('required named arguments')
path_arg.add_argument('-f', action='store', default=None, help='The path to your code. ')
    optionals = parser.add_argument_group('optional arguments')
optionals.add_argument('-ip', action='store', dest='ip', default='127.0.0.1', help='IP address of the pyenv host. ')
optionals.add_argument('-port', action='store', default='8964', help='Port of the pyenv host. ', type=int)
optionals.add_argument('-wd', action='store', default=os.getcwd(), help='The user working directory. '
'The host will switch to the directory to find `f`, '
'and execute your code. ')
optionals.add_argument('-env', action='store', default=None, help='Extra environment variables for your script. ')
optionals.add_argument('-c', action='store', default=None, help='Python script, executed with pyenv imported. '
'This will override `f` argument. ')
optionals.add_argument('rest', nargs=argparse.REMAINDER)
argv = parser.parse_args(argv[1:])
client = Client((argv.ip, argv.port),
CodeConfig(argv.f, pwd=argv.wd, environ=argv.env, argv=[argv.f] + argv.rest, code=argv.c))
client.run()
if __name__ == '__main__':
main(sys.argv)
|
conftest.py
|
import faulthandler
import io
import logging.handlers
import multiprocessing as mp
import os
import signal
import sys
import threading
from collections import namedtuple
from os import getenv
from pathlib import Path
from random import randint
from zipfile import ZipFile
import pytest
TEST_DATA = "data"
TEST_PYBIO_ZIPFOLDER = "unet2d"
TEST_PYBIO_DUMMY = "dummy"
NNModel = namedtuple("NNModel", ["model", "state"])
@pytest.fixture
def data_path():
conf_path = Path(__file__).parent
return conf_path / TEST_DATA
def read_bytes(filename):
with open(filename, "rb") as file:
return file.read()
@pytest.fixture
def srv_port():
return getenv("TEST_TIKTORCH_PORT", randint(5500, 8000))
@pytest.fixture
def pub_port():
return getenv("TEST_TIKTORCH_PUB_PORT", randint(8000, 9999))
@pytest.fixture(scope="session", autouse=True)
def register_faulthandler():
if not sys.platform.startswith("win"):
faulthandler.register(signal.SIGUSR1, file=sys.stderr, all_threads=True, chain=False)
class QueueListener(logging.handlers.QueueListener):
def start(self):
# Redefine to provide meaningful thread name
self._thread = t = threading.Thread(target=self._monitor, name="QueueListener")
t.daemon = True
t.start()
@pytest.fixture(scope="module")
def log_queue():
q = mp.Queue()
logger = logging.getLogger()
listener = QueueListener(q, *logger.handlers)
listener.start()
yield q
listener.stop()
@pytest.fixture(scope="session")
def assert_threads_cleanup():
yield
running_threads = [str(t) for t in threading.enumerate() if t != threading.current_thread() and not t.daemon]
if len(running_threads):
pytest.fail("Threads still running:\n\t%s" % "\n\t".join(running_threads))
@pytest.fixture
def pybio_model_bytes(data_path):
zip_folder = data_path / TEST_PYBIO_ZIPFOLDER
data = io.BytesIO()
with ZipFile(data, mode="w") as zip_model:
for f_path in zip_folder.iterdir():
if str(f_path.name).startswith("__"):
continue
with f_path.open(mode="rb") as f:
zip_model.writestr(f_path.name, f.read())
return data
@pytest.fixture
def pybio_model_zipfile(pybio_model_bytes):
with ZipFile(pybio_model_bytes, mode="r") as zf:
yield zf
@pytest.fixture
def pybio_dummy_model_bytes(data_path):
pybio_net_dir = Path(data_path) / TEST_PYBIO_DUMMY
data = io.BytesIO()
with ZipFile(data, mode="w") as zip_model:
for f_path in pybio_net_dir.iterdir():
if str(f_path.name).startswith("__"):
continue
with f_path.open(mode="rb") as f:
zip_model.writestr(f_path.name, f.read())
return data
@pytest.fixture
def cache_path(tmp_path):
return Path(getenv("PYBIO_CACHE_PATH", tmp_path))
|
test_general.py
|
"""Collection of tests for unified general functions."""
# global
import time
import einops
import jax.numpy as jnp
import pytest
from hypothesis import given, strategies as st
import numpy as np
from numbers import Number
from collections.abc import Sequence
import torch.multiprocessing as multiprocessing
# local
import threading
import ivy
import ivy.functional.backends.jax
import ivy.functional.backends.tensorflow
import ivy.functional.backends.torch
import ivy.functional.backends.mxnet
import ivy_tests.test_ivy.helpers as helpers
import ivy.functional.backends.numpy as ivy_np
# Helpers #
# --------#
def _get_shape_of_list(lst, shape=()):
if not lst:
return []
if not isinstance(lst, Sequence):
return shape
if isinstance(lst[0], Sequence):
length = len(lst[0])
if not all(len(item) == length for item in lst):
msg = "not all lists have the same length"
raise ValueError(msg)
shape += (len(lst),)
shape = _get_shape_of_list(lst[0], shape)
return shape
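# Illustrative behaviour of the helper above:
#   _get_shape_of_list([[1, 2], [3, 4]])  -> (2, 2)
#   _get_shape_of_list([1, 2, 3])         -> (3,)
#   _get_shape_of_list([])                -> []
# Ragged inputs such as [[1, 2], [3]] raise ValueError.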
# Tests #
# ------#
# set_framework
@given(fw_str=st.sampled_from(["numpy", "jax", "torch", "mxnet"]))
def test_set_framework(fw_str, device, call):
ivy.set_backend(fw_str)
ivy.unset_backend()
# use_framework
def test_use_within_use_framework(device, call):
with ivy.functional.backends.numpy.use:
pass
with ivy.functional.backends.jax.use:
pass
with ivy.functional.backends.tensorflow.use:
pass
with ivy.functional.backends.torch.use:
pass
with ivy.functional.backends.mxnet.use:
pass
@given(allow_duplicates=st.booleans())
def test_match_kwargs(allow_duplicates):
def func_a(a, b, c=2):
pass
def func_b(a, d, e=5):
return None
class ClassA:
def __init__(self, c, f, g=3):
pass
kwargs = {"a": 0, "b": 1, "c": 2, "d": 3, "e": 4, "f": 5, "g": 6}
kwfa, kwfb, kwca = ivy.match_kwargs(
kwargs, func_a, func_b, ClassA, allow_duplicates=allow_duplicates
)
if allow_duplicates:
assert kwfa == {"a": 0, "b": 1, "c": 2}
assert kwfb == {"a": 0, "d": 3, "e": 4}
assert kwca == {"c": 2, "f": 5, "g": 6}
else:
assert kwfa == {"a": 0, "b": 1, "c": 2}
assert kwfb == {"d": 3, "e": 4}
assert kwca == {"f": 5, "g": 6}
def test_get_referrers_recursive(device, call):
class SomeClass:
def __init__(self):
self.x = [1, 2]
self.y = [self.x]
some_obj = SomeClass()
refs = ivy.get_referrers_recursive(some_obj.x)
ref_keys = refs.keys()
assert len(ref_keys) == 3
assert "repr" in ref_keys
assert refs["repr"] == "[1,2]"
y_id = str(id(some_obj.y))
y_refs = refs[y_id]
assert y_refs["repr"] == "[[1,2]]"
some_obj_dict_id = str(id(some_obj.__dict__))
assert y_refs[some_obj_dict_id] == "tracked"
dict_refs = refs[some_obj_dict_id]
assert dict_refs["repr"] == "{'x':[1,2],'y':[[1,2]]}"
some_obj_id = str(id(some_obj))
some_obj_refs = dict_refs[some_obj_id]
assert some_obj_refs["repr"] == str(some_obj).replace(" ", "")
assert len(some_obj_refs) == 1
# copy array
@given(dtype_and_x=helpers.dtype_and_values(ivy_np.valid_dtypes))
def test_copy_array(dtype_and_x, device, call, fw):
dtype, x = dtype_and_x
if fw == "torch" and dtype in ["uint16", "uint32", "uint64"]:
return
if call in [helpers.mx_call] and dtype == "int16":
# mxnet does not support int16
return
# smoke test
x = ivy.array(x, dtype=dtype, device=device)
ret = ivy.copy_array(x)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
helpers.assert_all_close(ivy.to_numpy(ret), ivy.to_numpy(x))
assert id(x) != id(ret)
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support numpy conversion
return
# array_equal
@given(x0_n_x1_n_res=helpers.dtype_and_values(ivy_np.valid_dtypes, n_arrays=2))
def test_array_equal(x0_n_x1_n_res, device, call, fw):
dtype0, x0 = x0_n_x1_n_res[0][0], x0_n_x1_n_res[1][0]
dtype1, x1 = x0_n_x1_n_res[0][1], x0_n_x1_n_res[1][1]
if fw == "torch" and (
dtype0 in ["uint16", "uint32", "uint64"]
or dtype1 in ["uint16", "uint32", "uint64"]
):
# torch does not support those dtypes
return
if call in [helpers.mx_call] and (
dtype0 in ["int16", "bool"] or dtype1 in ["int16", "bool"]
):
# mxnet does not support int16, and does not support
# bool for broadcast_equal method used
return
# smoke test
x0 = ivy.array(x0, dtype=dtype0, device=device)
x1 = ivy.array(x1, dtype=dtype1, device=device)
res = ivy.array_equal(x0, x1)
# type test
assert ivy.is_ivy_array(x0)
assert ivy.is_ivy_array(x1)
assert isinstance(res, bool) or ivy.is_ivy_array(res)
# value test
assert res == np.array_equal(np.array(x0, dtype=dtype0), np.array(x1, dtype=dtype1))
# arrays_equal
@given(x0_n_x1_n_res=helpers.dtype_and_values(ivy_np.valid_dtypes, n_arrays=3))
def test_arrays_equal(x0_n_x1_n_res, device, call, fw):
dtype0, x0 = x0_n_x1_n_res[0][0], x0_n_x1_n_res[1][0]
dtype1, x1 = x0_n_x1_n_res[0][1], x0_n_x1_n_res[1][1]
dtype2, x2 = x0_n_x1_n_res[0][2], x0_n_x1_n_res[1][2]
if fw == "torch" and (
dtype0 in ["uint16", "uint32", "uint64"]
or dtype1 in ["uint16", "uint32", "uint64"]
or dtype2 in ["uint16", "uint32", "uint64"]
):
# torch does not support those dtypes
return
if call in [helpers.mx_call] and (
dtype0 in ["int16", "bool"] or dtype1 in ["int16", "bool"]
):
# mxnet does not support int16, and does not support bool
# for broadcast_equal method used
return
# smoke test
x0 = ivy.array(x0, dtype=dtype0, device=device)
x1 = ivy.array(x1, dtype=dtype1, device=device)
x2 = ivy.array(x2, dtype=dtype2, device=device)
res = ivy.arrays_equal([x0, x1, x2])
# type test
assert ivy.is_ivy_array(x0)
assert ivy.is_ivy_array(x1)
assert ivy.is_ivy_array(x2)
assert isinstance(res, bool) or ivy.is_ivy_array(res)
# value test
true_res = (
np.array_equal(ivy.to_numpy(x0), ivy.to_numpy(x1))
and np.array_equal(ivy.to_numpy(x0), ivy.to_numpy(x2))
and np.array_equal(ivy.to_numpy(x1), ivy.to_numpy(x2))
)
assert res == true_res
# to_numpy
@given(x0_n_x1_n_res=helpers.dtype_and_values(ivy_np.valid_dtypes))
def test_to_numpy(x0_n_x1_n_res, device, call, fw):
dtype, object_in = x0_n_x1_n_res
if fw == "torch" and (dtype in ["uint16", "uint32", "uint64"]):
# torch does not support those dtypes
return
if call in [helpers.mx_call] and dtype == "int16":
# mxnet does not support int16
return
if call in [helpers.tf_graph_call]:
# to_numpy() requires eager execution
return
# smoke test
ret = ivy.to_numpy(ivy.array(object_in, dtype=dtype, device=device))
# type test
assert isinstance(ret, np.ndarray)
# cardinality test
assert ret.shape == np.array(object_in).shape
# value test
helpers.assert_all_close(ret, np.array(object_in).astype(dtype))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support numpy conversion
return
# to_scalar
@given(
object_in=st.sampled_from([[0.0], [[[1]]], [True], [[1.0]]]),
dtype=st.sampled_from(ivy_np.valid_dtypes),
)
def test_to_scalar(object_in, dtype, device, call, fw):
if fw == "torch" and (dtype in ["uint16", "uint32", "uint64"]):
# torch does not support those dtypes
return
if call in [helpers.mx_call] and dtype == "int16":
# mxnet does not support int16
return
if call in [helpers.tf_graph_call]:
# to_scalar() requires eager execution
return
# smoke test
ret = ivy.to_scalar(ivy.array(object_in, dtype=dtype, device=device))
true_val = ivy.to_numpy(ivy.array(object_in, dtype=dtype)).item()
# type test
assert isinstance(ret, type(true_val))
# value test
assert ivy.to_scalar(ivy.array(object_in, dtype=dtype, device=device)) == true_val
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support scalar conversion
return
# to_list
@given(x0_n_x1_n_res=helpers.dtype_and_values(ivy_np.valid_dtypes))
def test_to_list(x0_n_x1_n_res, device, call, fw):
dtype, object_in = x0_n_x1_n_res
if call in [helpers.tf_graph_call]:
# to_list() requires eager execution
return
# smoke test
arr = ivy.array(object_in, dtype=dtype, device=device)
ret = ivy.to_list(arr)
    # type test (result won't be a list if input is 0 dimensional)
if arr.ndim != 0:
assert isinstance(ret, list)
# cardinality test
assert _get_shape_of_list(ret) == _get_shape_of_list(object_in)
# value test
assert np.allclose(
np.nan_to_num(
np.asarray(ivy.to_list(ivy.array(object_in, dtype=dtype, device=device))),
posinf=np.inf,
neginf=-np.inf,
),
np.nan_to_num(np.array(object_in).astype(dtype), posinf=np.inf, neginf=-np.inf),
)
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support list conversion
return
# shape
@given(
x0_n_x1_n_res=helpers.dtype_and_values(ivy_np.valid_dtypes),
as_tensor=st.booleans(),
tensor_fn=st.sampled_from([ivy.array, helpers.var_fn]),
)
def test_shape(x0_n_x1_n_res, as_tensor, tensor_fn, device, call, fw):
dtype, object_in = x0_n_x1_n_res
if fw == "torch" and (
dtype in ["uint16", "uint32", "uint64"]
or (dtype not in ivy_np.valid_float_dtypes and tensor_fn == helpers.var_fn)
):
# torch does not support those dtypes
return
ret = ivy.shape(tensor_fn(object_in, dtype=dtype, device=device), as_tensor)
# type test
if as_tensor:
assert ivy.is_ivy_array(ret)
else:
assert isinstance(ret, tuple)
ret = ivy.array(ret)
# cardinality test
assert ret.shape[0] == len(np.asarray(object_in).shape)
# value test
assert np.array_equal(
ivy.to_numpy(ret), np.asarray(np.asarray(object_in).shape, np.int32)
)
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support Union
return
# get_num_dims
@given(
x0_n_x1_n_res=helpers.dtype_and_values(ivy_np.valid_dtypes),
as_tensor=st.booleans(),
tensor_fn=st.sampled_from([ivy.array, helpers.var_fn]),
)
def test_get_num_dims(x0_n_x1_n_res, as_tensor, tensor_fn, device, call, fw):
dtype, object_in = x0_n_x1_n_res
if fw == "torch" and (
dtype in ["uint16", "uint32", "uint64"]
or (dtype not in ivy_np.valid_float_dtypes and tensor_fn == helpers.var_fn)
):
# torch does not support those dtypes
return
ret = ivy.get_num_dims(tensor_fn(object_in, dtype=dtype, device=device), as_tensor)
# type test
if as_tensor:
assert ivy.is_ivy_array(ret)
else:
assert isinstance(ret, int)
ret = ivy.array(ret)
# cardinality test
assert list(ret.shape) == []
# value test
assert np.array_equal(
ivy.to_numpy(ret), np.asarray(len(np.asarray(object_in).shape), np.int32)
)
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support Union
return
# clip_vector_norm
@pytest.mark.parametrize(
"x_max_norm_n_p_val_clipped",
[
(-0.5, 0.4, 2.0, -0.4),
([1.7], 1.5, 3.0, [1.5]),
(
[[0.8, 2.2], [1.5, 0.2]],
4.0,
1.0,
[[0.6808511, 1.8723406], [1.2765958, 0.17021278]],
),
(
[[0.8, 2.2], [1.5, 0.2]],
2.5,
2.0,
[[0.71749604, 1.9731141], [1.345305, 0.17937401]],
),
],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("with_out", [True, False])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_clip_vector_norm(
x_max_norm_n_p_val_clipped, dtype, with_out, tensor_fn, device, call
):
# smoke test
if call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_max_norm_n_p_val_clipped[0], dtype=dtype, device=device)
max_norm = x_max_norm_n_p_val_clipped[1]
p_val = x_max_norm_n_p_val_clipped[2]
clipped = x_max_norm_n_p_val_clipped[3]
if with_out:
out = ivy.zeros(x.shape if len(x.shape) else (1,))
ret = ivy.clip_vector_norm(x, max_norm, p_val, out=out)
else:
ret = ivy.clip_vector_norm(x, max_norm, p_val)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == (x.shape if len(x.shape) else (1,))
# value test
assert np.allclose(
call(ivy.clip_vector_norm, x, max_norm, p_val), np.array(clipped)
)
if with_out:
if not ivy.current_backend_str() in ["tensorflow", "jax"]:
# these backends do not support native inplace updates
assert ret is out
assert ret.data is out.data
# compilation test
if call is helpers.torch_call:
# pytorch jit cannot compile global variables, in this case MIN_DENOMINATOR
return
# floormod
# @given(
# xy=helpers.dtype_and_values(ivy_np.valid_numeric_dtypes, n_arrays=2),
# as_variable=st.booleans(),
# with_out=st.booleans(),
# num_positional_args=st.integers(1, 2),
# native_array=st.booleans(),
# container=st.booleans(),
# instance_method=st.booleans(),
# )
# def test_floormod(
# xy,
# as_variable,
# with_out,
# num_positional_args,
# native_array,
# container,
# instance_method,
# device,
# call,
# fw,
# ):
# # smoke test
# dtype = xy[0]
# x = xy[1][0]
# divisor = np.abs(xy[1][1])
# if 0 in divisor:
# return
# if fw == "torch" and any(d in ["uint16", "uint32", "uint64"] for d in dtype):
# return
# helpers.test_array_function(
# dtype,
# as_variable,
# with_out,
# num_positional_args,
# native_array,
# container,
# instance_method,
# fw,
# "floormod",
# x=np.asarray(x, dtype=dtype[0]),
# y=np.asarray(divisor, dtype=dtype[1]),
# )
# unstack
@pytest.mark.parametrize(
"x_n_axis", [(1, -1), ([[0.0, 1.0, 2.0]], 0), ([[0.0, 1.0, 2.0]], 1)]
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_unstack(x_n_axis, dtype, tensor_fn, device, call):
# smoke test
x, axis = x_n_axis
if (
isinstance(x, Number)
and tensor_fn == helpers.var_fn
and call is helpers.mx_call
):
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype=dtype, device=device)
ret = ivy.unstack(x, axis)
# type test
assert isinstance(ret, list)
# cardinality test
axis_val = (
axis % len(x.shape)
if (axis is not None and len(x.shape) != 0)
else len(x.shape) - 1
)
if x.shape == ():
expected_shape = ()
else:
expected_shape = list(x.shape)
expected_shape.pop(axis_val)
assert ret[0].shape == tuple(expected_shape)
# value test
assert np.allclose(
call(ivy.unstack, x, axis),
np.asarray(ivy.functional.backends.numpy.unstack(ivy.to_numpy(x), axis)),
)
# fourier_encode
@pytest.mark.parametrize(
"x_n_mf_n_nb_n_gt",
[
(
[2.0],
4.0,
4,
[
[
2.0000000e00,
1.7484555e-07,
9.9805772e-01,
-5.2196848e-01,
3.4969111e-07,
1.0000000e00,
-6.2295943e-02,
-8.5296476e-01,
1.0000000e00,
]
],
),
(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
[2.0, 4.0],
4,
[
[
[
1.0000000e00,
-8.7422777e-08,
-8.7422777e-08,
-8.7422777e-08,
-8.7422777e-08,
-1.0000000e00,
-1.0000000e00,
-1.0000000e00,
-1.0000000e00,
],
[
2.0000000e00,
1.7484555e-07,
9.9805772e-01,
-5.2196848e-01,
-6.0398321e-07,
1.0000000e00,
-6.2295943e-02,
-8.5296476e-01,
1.0000000e00,
],
],
[
[
3.0000000e00,
-2.3849761e-08,
-2.3849761e-08,
-2.3849761e-08,
-2.3849761e-08,
-1.0000000e00,
-1.0000000e00,
-1.0000000e00,
-1.0000000e00,
],
[
4.0000000e00,
3.4969111e-07,
-1.2434989e-01,
8.9044148e-01,
-1.2079664e-06,
1.0000000e00,
-9.9223840e-01,
4.5509776e-01,
1.0000000e00,
],
],
[
[
5.0000000e00,
-6.7553248e-07,
-6.7553248e-07,
-6.7553248e-07,
-6.7553248e-07,
-1.0000000e00,
-1.0000000e00,
-1.0000000e00,
-1.0000000e00,
],
[
6.0000000e00,
4.7699523e-08,
-9.8256493e-01,
-9.9706185e-01,
-3.7192983e-06,
1.0000000e00,
1.8591987e-01,
7.6601014e-02,
1.0000000e00,
],
],
],
),
],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_fourier_encode(x_n_mf_n_nb_n_gt, dtype, tensor_fn, device, call):
# smoke test
x, max_freq, num_bands, ground_truth = x_n_mf_n_nb_n_gt
if (
isinstance(x, Number)
and tensor_fn == helpers.var_fn
and call is helpers.mx_call
):
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype=dtype, device=device)
if isinstance(max_freq, list):
max_freq = tensor_fn(max_freq, dtype=dtype, device=device)
ret = ivy.fourier_encode(x, max_freq, num_bands)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
x_shape = [1] if x.shape == () else list(x.shape)
expected_shape = x_shape + [1 + 2 * num_bands]
assert list(ret.shape) == expected_shape
# value test
assert np.allclose(
call(ivy.fourier_encode, x, max_freq, num_bands),
np.array(ground_truth),
atol=1e-5,
)
# indices_where
@pytest.mark.parametrize("x", [[True], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_indices_where(x, dtype, tensor_fn, device, call):
# smoke test
if (
isinstance(x, Number)
and tensor_fn == helpers.var_fn
and call is helpers.mx_call
):
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype=dtype, device=device)
ret = ivy.indices_where(x)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert len(ret.shape) == 2
assert ret.shape[-1] == len(x.shape)
# value test
assert np.allclose(
call(ivy.indices_where, x),
np.asarray(ivy.functional.backends.numpy.indices_where(ivy.to_numpy(x))),
)
# one_hot
@pytest.mark.parametrize(
"ind_n_depth", [([0], 1), ([0, 1, 2], 3), ([[1, 3], [0, 0], [8, 4], [7, 9]], 10)]
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_one_hot(ind_n_depth, dtype, tensor_fn, device, call):
# smoke test
ind, depth = ind_n_depth
if (
isinstance(ind, Number)
and tensor_fn == helpers.var_fn
and call is helpers.mx_call
):
# mxnet does not support 0-dimensional variables
pytest.skip()
ind = ivy.array(ind, dtype="int32", device=device)
ret = ivy.one_hot(ind, depth, device=device)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == ind.shape + (depth,)
# value test
assert np.allclose(
call(ivy.one_hot, ind, depth, device=device),
np.asarray(
ivy.functional.backends.numpy.one_hot(
ivy.to_numpy(ind), depth, device=device
)
),
)
# cumsum
@pytest.mark.parametrize(
"x_n_axis",
[
([[0.0, 1.0, 2.0]], -1),
([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]], 0),
([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]], 1),
],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("with_out", [True, False])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_cumsum(x_n_axis, dtype, with_out, tensor_fn, device, call):
# smoke test
x, axis = x_n_axis
x = ivy.array(x, dtype=dtype, device=device)
if with_out:
if ivy.exists(axis):
out = ivy.zeros(x.shape)
ret = ivy.cumsum(x, axis, out=out)
else:
out = ivy.zeros(ivy.reshape(x, (-1,)).shape)
ret = ivy.cumsum(x, axis, out=out)
else:
ret = ivy.cumsum(x, axis)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(
call(ivy.cumsum, x, axis),
np.asarray(ivy.functional.backends.numpy.cumsum(ivy.to_numpy(x), axis)),
)
# out test
if with_out:
if not ivy.current_backend_str() in ["tensorflow", "jax"]:
# these backends do not support native inplace updates
assert ret is out
assert ret.data is out.data
# cumprod
@pytest.mark.parametrize(
"x_n_axis",
[
([[0.0, 1.0, 2.0]], -1),
([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]], 0),
([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]], 1),
],
)
@pytest.mark.parametrize("exclusive", [True, False])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("with_out", [True, False])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_cumprod(x_n_axis, exclusive, dtype, with_out, tensor_fn, device, call):
# smoke test
x, axis = x_n_axis
x = ivy.array(x, dtype=dtype, device=device)
if with_out:
if ivy.exists(axis):
out = ivy.zeros(x.shape)
ret = ivy.cumprod(x, axis, exclusive=exclusive, out=out)
else:
out = ivy.zeros(ivy.reshape(x, (-1,)).shape)
ret = ivy.cumprod(x, axis, exclusive=exclusive, out=out)
else:
ret = ivy.cumprod(x, axis, exclusive)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(
call(ivy.cumprod, x, axis, exclusive),
np.asarray(
ivy.functional.backends.numpy.cumprod(ivy.to_numpy(x), axis, exclusive)
),
)
# out test
if with_out:
if not ivy.current_backend_str() in ["tensorflow", "jax"]:
# these backends do not support native inplace updates
assert ret is out
assert ret.data is out.data
# scatter_flat
@pytest.mark.parametrize(
"inds_n_upd_n_size_n_tnsr_n_wdup",
[
([0, 4, 1, 2], [1, 2, 3, 4], 8, None, False),
([0, 4, 1, 2, 0], [1, 2, 3, 4, 5], 8, None, True),
([0, 4, 1, 2, 0], [1, 2, 3, 4, 5], None, [11, 10, 9, 8, 7, 6], True),
],
)
@pytest.mark.parametrize("red", ["sum", "min", "max", "replace"])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_flat(
inds_n_upd_n_size_n_tnsr_n_wdup, red, dtype, tensor_fn, device, call
):
# smoke test
if red in ("sum", "min", "max") and call is helpers.mx_call:
# mxnet does not support sum, min or max reduction for scattering
pytest.skip()
inds, upd, size, tensor, with_duplicates = inds_n_upd_n_size_n_tnsr_n_wdup
if ivy.exists(tensor) and call is helpers.mx_call:
# mxnet does not support scattering into pre-existing tensors
pytest.skip()
inds = ivy.array(inds, dtype="int32", device=device)
upd = tensor_fn(upd, dtype=dtype, device=device)
if tensor:
# pytorch variables do not support in-place updates
tensor = (
ivy.array(tensor, dtype=dtype, device=device)
if ivy.current_backend_str() == "torch"
else tensor_fn(tensor, dtype=dtype, device=device)
)
ret = ivy.scatter_flat(inds, upd, size, tensor, red, device=device)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
if size:
assert ret.shape == (size,)
else:
assert ret.shape == tensor.shape
# value test
if red == "replace" and with_duplicates:
# replace with duplicates gives non-deterministic outputs
return
assert np.allclose(
call(ivy.scatter_flat, inds, upd, size, tensor, red, device=device),
np.asarray(
ivy.functional.backends.numpy.scatter_flat(
ivy.to_numpy(inds),
ivy.to_numpy(upd),
size,
ivy.to_numpy(tensor) if ivy.exists(tensor) else tensor,
red,
device=device,
)
),
)
# scatter_nd
@pytest.mark.parametrize(
"inds_n_upd_n_shape_tnsr_n_wdup",
[
([[4], [3], [1], [7]], [9, 10, 11, 12], [8], None, False),
([[0, 1, 2]], [1], [3, 3, 3], None, False),
(
[[0], [2]],
[
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
],
[4, 4, 4],
None,
False,
),
(
[[0, 1, 2]],
[1],
None,
[
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[4, 5, 6], [7, 8, 9], [1, 2, 3]],
[[7, 8, 9], [1, 2, 3], [4, 5, 6]],
],
False,
),
],
)
@pytest.mark.parametrize("red", ["sum", "min", "max", "replace"])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_nd(
inds_n_upd_n_shape_tnsr_n_wdup, red, dtype, tensor_fn, device, call
):
# smoke test
if red in ("sum", "min", "max") and call is helpers.mx_call:
# mxnet does not support sum, min or max reduction for scattering
pytest.skip()
inds, upd, shape, tensor, with_duplicates = inds_n_upd_n_shape_tnsr_n_wdup
if ivy.exists(tensor) and call is helpers.mx_call:
# mxnet does not support scattering into pre-existing tensors
pytest.skip()
inds = ivy.array(inds, dtype="int32", device=device)
upd = tensor_fn(upd, dtype=dtype, device=device)
if tensor:
# pytorch variables do not support in-place updates
tensor = (
ivy.array(tensor, dtype=dtype, device=device)
if ivy.current_backend_str() == "torch"
else tensor_fn(tensor, dtype=dtype, device=device)
)
ret = ivy.scatter_nd(inds, upd, shape, tensor, red, device=device)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
if shape:
assert tuple(ret.shape) == tuple(shape)
else:
assert tuple(ret.shape) == tuple(tensor.shape)
# value test
if red == "replace" and with_duplicates:
# replace with duplicates gives non-deterministic outputs
return
ret = call(ivy.scatter_nd, inds, upd, shape, tensor, red, device=device)
true = np.asarray(
ivy.functional.backends.numpy.scatter_nd(
ivy.to_numpy(inds),
ivy.to_numpy(upd),
shape,
ivy.to_numpy(tensor) if ivy.exists(tensor) else tensor,
red,
device=device,
)
)
assert np.allclose(ret, true)
# gather
@pytest.mark.parametrize(
"prms_n_inds_n_axis",
[
([9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [0, 4, 7], 0),
([[1, 2], [3, 4]], [[0, 0], [1, 0]], 1),
],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("with_out", [True, False])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_gather(prms_n_inds_n_axis, dtype, with_out, tensor_fn, device, call):
# smoke test
prms, inds, axis = prms_n_inds_n_axis
prms = tensor_fn(prms, dtype=dtype, device=device)
inds = ivy.array(inds, dtype="int32", device=device)
if with_out:
out = ivy.zeros(inds.shape)
ret = ivy.gather(prms, inds, axis, device=device, out=out)
else:
ret = ivy.gather(prms, inds, axis, device=device)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == inds.shape
# value test
assert np.allclose(
call(ivy.gather, prms, inds, axis, device=device),
np.asarray(
ivy.functional.backends.numpy.gather(
ivy.to_numpy(prms), ivy.to_numpy(inds), axis, device=device
)
),
)
# out test
if with_out:
if not ivy.current_backend_str() in ["tensorflow", "jax"]:
# these backends do not support native inplace updates
assert ret is out
assert ret.data is out.data
# gather_nd
@pytest.mark.parametrize(
"prms_n_inds",
[
([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[0, 1], [1, 0]]),
([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[[0, 1]], [[1, 0]]]),
(
[[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]],
[[[0, 1, 0]], [[1, 0, 1]]],
),
],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_gather_nd(prms_n_inds, dtype, tensor_fn, device, call):
# smoke test
prms, inds = prms_n_inds
prms = tensor_fn(prms, dtype=dtype, device=device)
inds = ivy.array(inds, dtype="int32", device=device)
ret = ivy.gather_nd(prms, inds, device=device)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == inds.shape[:-1] + prms.shape[inds.shape[-1] :]
# value test
assert np.allclose(
call(ivy.gather_nd, prms, inds, device=device),
np.asarray(
ivy.functional.backends.numpy.gather_nd(
ivy.to_numpy(prms), ivy.to_numpy(inds), device=device
)
),
)
# exists
@pytest.mark.parametrize("x", [[1.0], None, [[10.0, 9.0, 8.0]]])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_exists(x, dtype, tensor_fn, device, call):
# smoke test
x = tensor_fn(x, dtype=dtype, device=device) if x is not None else None
ret = ivy.exists(x)
# type test
assert isinstance(ret, bool)
# value test
y_true = x is not None
assert ret == y_true
# default
@pytest.mark.parametrize(
"x_n_dv", [([1.0], [2.0]), (None, [2.0]), ([[10.0, 9.0, 8.0]], [2.0])]
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_default(x_n_dv, dtype, tensor_fn, device, call):
x, dv = x_n_dv
# smoke test
x = tensor_fn(x, dtype=dtype, device=device) if x is not None else None
dv = tensor_fn(dv, dtype=dtype, device=device)
ret = ivy.default(x, dv)
# type test
assert ivy.is_ivy_array(ret)
# value test
y_true = ivy.to_numpy(x if x is not None else dv)
assert np.allclose(call(ivy.default, x, dv), y_true)
def test_cache_fn(device, call):
def func():
return ivy.random_uniform()
# return a single cached_fn and then query this
cached_fn = ivy.cache_fn(func)
ret0 = cached_fn()
ret0_again = cached_fn()
ret1 = func()
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# call ivy.cache_fn repeatedly, the new cached functions
# each use the same global dict
ret0 = ivy.cache_fn(func)()
ret0_again = ivy.cache_fn(func)()
ret1 = func()
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
def test_cache_fn_with_args(device, call):
def func(_):
return ivy.random_uniform()
# return a single cached_fn and then query this
cached_fn = ivy.cache_fn(func)
ret0 = cached_fn(0)
ret0_again = cached_fn(0)
ret1 = cached_fn(1)
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# call ivy.cache_fn repeatedly, the new cached functions
# each use the same global dict
ret0 = ivy.cache_fn(func)(0)
ret0_again = ivy.cache_fn(func)(0)
ret1 = ivy.cache_fn(func)(1)
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
def test_framework_setting_with_threading(device, call):
if call is helpers.jnp_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
def thread_fn():
x_ = jnp.array([0.0, 1.0, 2.0])
ivy.set_backend("jax")
for _ in range(2000):
try:
ivy.mean(x_)
except TypeError:
return False
ivy.unset_backend()
return True
# get original framework string and array
fws = ivy.current_backend_str()
x = ivy.array([0.0, 1.0, 2.0])
# start jax loop thread
thread = threading.Thread(target=thread_fn)
thread.start()
time.sleep(0.01)
# start local original framework loop
ivy.set_backend(fws)
for _ in range(2000):
ivy.mean(x)
ivy.unset_backend()
assert not thread.join()
def test_framework_setting_with_multiprocessing(device, call):
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
def worker_fn(out_queue):
ivy.set_backend("numpy")
x_ = np.array([0.0, 1.0, 2.0])
for _ in range(1000):
try:
ivy.mean(x_)
except TypeError:
out_queue.put(False)
return
ivy.unset_backend()
out_queue.put(True)
# get original framework string and array
fws = ivy.current_backend_str()
x = ivy.array([0.0, 1.0, 2.0])
# start numpy loop thread
output_queue = multiprocessing.Queue()
worker = multiprocessing.Process(target=worker_fn, args=(output_queue,))
worker.start()
# start local original framework loop
ivy.set_backend(fws)
for _ in range(1000):
ivy.mean(x)
ivy.unset_backend()
worker.join()
assert output_queue.get_nowait()
def test_explicit_ivy_framework_handles(device, call):
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
# store original framework string and unset
fw_str = ivy.current_backend_str()
ivy.unset_backend()
# set with explicit handle caught
ivy_exp = ivy.get_backend(fw_str)
assert ivy_exp.current_backend_str() == fw_str
# assert backend implemented function is accessible
assert "array" in ivy_exp.__dict__
assert callable(ivy_exp.array)
# assert joint implemented function is also accessible
assert "cache_fn" in ivy_exp.__dict__
assert callable(ivy_exp.cache_fn)
# set global ivy to numpy
ivy.set_backend("numpy")
# assert the explicit handle is still unchanged
assert ivy.current_backend_str() == "numpy"
assert ivy_exp.current_backend_str() == fw_str
# unset global ivy from numpy
ivy.unset_backend()
def test_class_ivy_handles(device, call):
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
class ArrayGen:
def __init__(self, ivyh):
self._ivy = ivyh
def get_array(self):
return self._ivy.array([0.0, 1.0, 2.0], dtype="float32", device=device)
# create instance
ag = ArrayGen(ivy.get_backend())
# create array from array generator
x = ag.get_array()
# verify this is not a numpy array
assert not isinstance(x, np.ndarray)
# change global framework to numpy
ivy.set_backend("numpy")
# create another array from array generator
x = ag.get_array()
# verify this is not still a numpy array
assert not isinstance(x, np.ndarray)
# einops_rearrange
@pytest.mark.parametrize(
"x_n_pattern_n_newx",
[([[0.0, 1.0, 2.0, 3.0]], "b n -> n b", [[0.0], [1.0], [2.0], [3.0]])],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_rearrange(x_n_pattern_n_newx, dtype, tensor_fn, device, call):
# smoke test
x, pattern, new_x = x_n_pattern_n_newx
x = tensor_fn(x, dtype=dtype, device=device)
ret = ivy.einops_rearrange(x, pattern)
true_ret = einops.rearrange(ivy.to_native(x), pattern)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# einops_reduce
@pytest.mark.parametrize(
"x_n_pattern_n_red_n_newx", [([[0.0, 1.0, 2.0, 3.0]], "b n -> b", "mean", [1.5])]
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_reduce(x_n_pattern_n_red_n_newx, dtype, tensor_fn, device, call):
# smoke test
x, pattern, reduction, new_x = x_n_pattern_n_red_n_newx
x = tensor_fn(x, dtype=dtype, device=device)
ret = ivy.einops_reduce(x, pattern, reduction)
true_ret = einops.reduce(ivy.to_native(x), pattern, reduction)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# einops_repeat
@pytest.mark.parametrize(
"x_n_pattern_n_al_n_newx",
[
(
[[0.0, 1.0, 2.0, 3.0]],
"b n -> b n c",
{"c": 2},
[[[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]],
)
],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_repeat(x_n_pattern_n_al_n_newx, dtype, tensor_fn, device, call):
# smoke test
x, pattern, axes_lengths, new_x = x_n_pattern_n_al_n_newx
x = tensor_fn(x, dtype=dtype, device=device)
ret = ivy.einops_repeat(x, pattern, **axes_lengths)
true_ret = einops.repeat(ivy.to_native(x), pattern, **axes_lengths)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# container types
def test_container_types(device, call):
cont_types = ivy.container_types()
assert isinstance(cont_types, list)
for cont_type in cont_types:
assert hasattr(cont_type, "keys")
assert hasattr(cont_type, "values")
assert hasattr(cont_type, "items")
def test_inplace_arrays_supported(device, call):
cur_fw = ivy.current_backend_str()
if cur_fw in ["numpy", "mxnet", "torch"]:
assert ivy.inplace_arrays_supported()
elif cur_fw in ["jax", "tensorflow"]:
assert not ivy.inplace_arrays_supported()
else:
raise Exception("Unrecognized framework")
def test_inplace_variables_supported(device, call):
cur_fw = ivy.current_backend_str()
if cur_fw in ["numpy", "mxnet", "torch", "tensorflow"]:
assert ivy.inplace_variables_supported()
elif cur_fw in ["jax"]:
assert not ivy.inplace_variables_supported()
else:
raise Exception("Unrecognized framework")
@pytest.mark.parametrize("x_n_new", [([0.0, 1.0, 2.0], [2.0, 1.0, 0.0]), (0.0, 1.0)])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_inplace_update(x_n_new, tensor_fn, device, call):
x_orig, new_val = x_n_new
if call is helpers.mx_call and isinstance(x_orig, Number):
# MxNet supports neither 0-dim variables nor 0-dim inplace updates
pytest.skip()
x_orig = tensor_fn(x_orig, dtype="float32", device=device)
new_val = tensor_fn(new_val, dtype="float32", device=device)
if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or (
tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()
):
x = ivy.inplace_update(x_orig, new_val)
assert id(x) == id(x_orig)
assert np.allclose(ivy.to_numpy(x), ivy.to_numpy(new_val))
return
pytest.skip()
@pytest.mark.parametrize("x_n_dec", [([0.0, 1.0, 2.0], [2.0, 1.0, 0.0]), (0.0, 1.0)])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_inplace_decrement(x_n_dec, tensor_fn, device, call):
x_orig, dec = x_n_dec
if call is helpers.mx_call and isinstance(x_orig, Number):
# MxNet supports neither 0-dim variables nor 0-dim inplace updates
pytest.skip()
x_orig = tensor_fn(x_orig, dtype="float32", device=device)
dec = tensor_fn(dec, dtype="float32", device=device)
new_val = x_orig - dec
if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or (
tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()
):
x = ivy.inplace_decrement(x_orig, dec)
assert id(x) == id(x_orig)
assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
return
pytest.skip()
@pytest.mark.parametrize("x_n_inc", [([0.0, 1.0, 2.0], [2.0, 1.0, 0.0]), (0.0, 1.0)])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_inplace_increment(x_n_inc, tensor_fn, device, call):
x_orig, inc = x_n_inc
if call is helpers.mx_call and isinstance(x_orig, Number):
# MxNet supports neither 0-dim variables nor 0-dim inplace updates
pytest.skip()
x_orig = tensor_fn(x_orig, dtype="float32", device=device)
inc = tensor_fn(inc, dtype="float32", device=device)
new_val = x_orig + inc
if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or (
tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()
):
x = ivy.inplace_increment(x_orig, inc)
assert id(x) == id(x_orig)
assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
return
pytest.skip()
# Still to Add #
# ---------------#
# is_ivy_array
# is_array
# is_ivy_container
# all_equal
# to_numpy
# clip_matrix_norm
# unstack
# value_is_nan
# has_nans
# exists
# shape_to_tuple
# try_else_none
# arg_names
# cache_fn
# current_framework_str
# get_min_denominator
# set_min_denominator
# get_min_base
# set_min_base
# stable_divide
# stable_pow
# get_all_arrays_in_memory
# num_arrays_in_memory
# print_all_arrays_in_memory
# set_queue_timeout
# queue_timeout
# tmp_dir
# set_tmp_dir
# supports_inplace
# assert_supports_inplace
|
local_job_service.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import logging
import os
import queue
import shutil
import subprocess
import tempfile
import threading
import time
import traceback
from builtins import object
from typing import TYPE_CHECKING
from typing import List
from typing import Optional
import grpc
from google.protobuf import text_format # type: ignore # not in typeshed
from apache_beam.metrics import monitoring_infos
from apache_beam.portability.api import beam_artifact_api_pb2
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.portability import abstract_job_service
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import fn_api_runner
from apache_beam.utils.thread_pool_executor import UnboundedThreadPoolExecutor
if TYPE_CHECKING:
from google.protobuf import struct_pb2 # pylint: disable=ungrouped-imports
from apache_beam.portability.api import beam_runner_api_pb2
_LOGGER = logging.getLogger(__name__)
def _iter_queue(q):
while True:
yield q.get(block=True)
class LocalJobServicer(abstract_job_service.AbstractJobServiceServicer):
"""Manages one or more pipelines, possibly concurrently.
Experimental: No backward compatibility guaranteed.
Servicer for the Beam Job API.
This JobService uses a basic local implementation of runner to run the job.
This JobService is not capable of managing jobs on remote clusters.
By default, this JobService executes the job in process but still uses GRPC
to communicate pipeline and worker state. It can also be configured to use
inline calls rather than GRPC (for speed) or launch completely separate
subprocesses for the runner and worker(s).
"""
def __init__(self, staging_dir=None):
super(LocalJobServicer, self).__init__()
self._cleanup_staging_dir = staging_dir is None
self._staging_dir = staging_dir or tempfile.mkdtemp()
self._artifact_service = artifact_service.BeamFilesystemArtifactService(
self._staging_dir)
self._artifact_staging_endpoint = None # type: Optional[endpoints_pb2.ApiServiceDescriptor]
def create_beam_job(self,
preparation_id, # type: str
job_name, # type: str
pipeline, # type: beam_runner_api_pb2.Pipeline
options # type: struct_pb2.Struct
):
# type: (...) -> BeamJob
# TODO(angoenka): Pass an appropriate staging_session_token. The token can
# be obtained in PutArtifactResponse from JobService
if not self._artifact_staging_endpoint:
# The front-end didn't try to stage anything, but the worker may
# request what's here so we should at least store an empty manifest.
self._artifact_service.CommitManifest(
beam_artifact_api_pb2.CommitManifestRequest(
staging_session_token=preparation_id,
manifest=beam_artifact_api_pb2.Manifest()))
provision_info = fn_api_runner.ExtendedProvisionInfo(
beam_provision_api_pb2.ProvisionInfo(
job_id=preparation_id,
job_name=job_name,
pipeline_options=options,
retrieval_token=self._artifact_service.retrieval_token(
preparation_id)),
self._staging_dir)
return BeamJob(
preparation_id,
pipeline,
options,
provision_info,
self._artifact_staging_endpoint)
def get_bind_address(self):
"""Return the address used to open the port on the gRPC server.
This is often, but not always, the same as the service address. For
example, to make the service accessible to external machines, override this
to return '[::]' and override `get_service_address()` to return a publicly
accessible host name.
"""
return self.get_service_address()
def get_service_address(self):
"""Return the host name at which this server will be accessible.
In particular, this is provided to the client upon connection as the
artifact staging endpoint.
"""
return 'localhost'
def start_grpc_server(self, port=0):
self._server = grpc.server(UnboundedThreadPoolExecutor())
port = self._server.add_insecure_port(
'%s:%d' % (self.get_bind_address(), port))
beam_job_api_pb2_grpc.add_JobServiceServicer_to_server(self, self._server)
beam_artifact_api_pb2_grpc.add_ArtifactStagingServiceServicer_to_server(
self._artifact_service, self._server)
hostname = self.get_service_address()
self._artifact_staging_endpoint = endpoints_pb2.ApiServiceDescriptor(
url='%s:%d' % (hostname, port))
self._server.start()
_LOGGER.info('Grpc server started at %s on port %d' % (hostname, port))
return port
def stop(self, timeout=1):
self._server.stop(timeout)
if os.path.exists(self._staging_dir) and self._cleanup_staging_dir:
shutil.rmtree(self._staging_dir, ignore_errors=True)
def GetJobMetrics(self, request, context=None):
if request.job_id not in self._jobs:
raise LookupError("Job {} does not exist".format(request.job_id))
result = self._jobs[request.job_id].result
monitoring_info_list = []
for mi in result._monitoring_infos_by_stage.values():
monitoring_info_list.extend(mi)
# Filter out system metrics
user_monitoring_info_list = [
x for x in monitoring_info_list
if monitoring_infos._is_user_monitoring_info(x) or
monitoring_infos._is_user_distribution_monitoring_info(x)
]
return beam_job_api_pb2.GetJobMetricsResponse(
metrics=beam_job_api_pb2.MetricResults(
committed=user_monitoring_info_list))
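# --- Illustrative sketch (not part of the original module) ---
# A hedged example of driving LocalJobServicer directly, based only on the
# methods defined above (start_grpc_server / stop); it assumes an
# apache_beam environment and is not invoked anywhere in this module.
def _example_run_local_job_servicer():
    servicer = LocalJobServicer()
    port = servicer.start_grpc_server(port=0)  # 0 lets gRPC pick a free port
    _LOGGER.info('Example job service listening on localhost:%d', port)
    # A Beam Job API client could now connect and submit pipelines; when
    # finished, stop the server and clean up the temporary staging dir.
    servicer.stop()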
class SubprocessSdkWorker(object):
"""Manages a SDK worker implemented as a subprocess communicating over grpc.
"""
def __init__(
self,
worker_command_line, # type: bytes
control_address,
worker_id=None):
self._worker_command_line = worker_command_line
self._control_address = control_address
self._worker_id = worker_id
def run(self):
logging_server = grpc.server(UnboundedThreadPoolExecutor())
logging_port = logging_server.add_insecure_port('[::]:0')
logging_server.start()
logging_servicer = BeamFnLoggingServicer()
beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
logging_servicer, logging_server)
logging_descriptor = text_format.MessageToString(
endpoints_pb2.ApiServiceDescriptor(url='localhost:%s' % logging_port))
control_descriptor = text_format.MessageToString(
endpoints_pb2.ApiServiceDescriptor(url=self._control_address))
env_dict = dict(
os.environ,
CONTROL_API_SERVICE_DESCRIPTOR=control_descriptor,
LOGGING_API_SERVICE_DESCRIPTOR=logging_descriptor)
# only add worker_id when it is set.
if self._worker_id:
env_dict['WORKER_ID'] = self._worker_id
with fn_api_runner.SUBPROCESS_LOCK:
p = subprocess.Popen(self._worker_command_line, shell=True, env=env_dict)
try:
p.wait()
if p.returncode:
raise RuntimeError(
'Worker subprocess exited with return code %s' % p.returncode)
finally:
if p.poll() is None:
p.kill()
logging_server.stop(0)
class BeamJob(abstract_job_service.AbstractBeamJob):
"""This class handles running and managing a single pipeline.
The current state of the pipeline is available as self.state.
"""
def __init__(self,
job_id, # type: str
pipeline,
options,
provision_info, # type: fn_api_runner.ExtendedProvisionInfo
artifact_staging_endpoint # type: Optional[endpoints_pb2.ApiServiceDescriptor]
):
super(BeamJob, self).__init__(
job_id, provision_info.provision_info.job_name, pipeline, options)
self._provision_info = provision_info
self._artifact_staging_endpoint = artifact_staging_endpoint
self._state_queues = [] # type: List[queue.Queue]
self._log_queues = [] # type: List[queue.Queue]
self.daemon = True
self.result = None
def set_state(self, new_state):
"""Set the latest state as an int enum and notify consumers"""
timestamp = super(BeamJob, self).set_state(new_state)
if timestamp is not None:
# Inform consumers of the new state.
for queue in self._state_queues:
queue.put((new_state, timestamp))
def prepare(self):
pass
def artifact_staging_endpoint(self):
return self._artifact_staging_endpoint
def run(self):
self.set_state(beam_job_api_pb2.JobState.STARTING)
self._run_thread = threading.Thread(target=self._run_job)
self._run_thread.start()
def _run_job(self):
self.set_state(beam_job_api_pb2.JobState.RUNNING)
with JobLogHandler(self._log_queues):
try:
result = fn_api_runner.FnApiRunner(
provision_info=self._provision_info).run_via_runner_api(
self._pipeline_proto)
_LOGGER.info('Successfully completed job.')
self.set_state(beam_job_api_pb2.JobState.DONE)
self.result = result
except: # pylint: disable=bare-except
_LOGGER.exception('Error running pipeline.')
_LOGGER.exception(traceback)
self.set_state(beam_job_api_pb2.JobState.FAILED)
raise
def cancel(self):
if not self.is_terminal_state(self.state):
self.set_state(beam_job_api_pb2.JobState.CANCELLING)
# TODO(robertwb): Actually cancel...
self.set_state(beam_job_api_pb2.JobState.CANCELLED)
def get_state_stream(self):
# Register for any new state changes.
state_queue = queue.Queue()
self._state_queues.append(state_queue)
for state, timestamp in self.with_state_history(_iter_queue(state_queue)):
yield state, timestamp
if self.is_terminal_state(state):
break
def get_message_stream(self):
# Register for any new messages.
log_queue = queue.Queue()
self._log_queues.append(log_queue)
self._state_queues.append(log_queue)
for msg in self.with_state_history(_iter_queue(log_queue)):
if isinstance(msg, tuple):
assert len(msg) == 2 and isinstance(msg[0], int)
current_state = msg[0]
yield msg
if self.is_terminal_state(current_state):
break
else:
yield msg
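# --- Illustrative sketch (not part of the original module) ---
# One way a caller might consume the state stream of a BeamJob instance,
# based on the get_state_stream generator above; constructing and running
# the job itself is omitted, and this helper is never called here.
def _example_watch_job_states(job):
    for state, timestamp in job.get_state_stream():
        _LOGGER.info('Job entered state %s at %s', state, timestamp)
        # get_state_stream stops iterating on its own after a terminal state.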
class BeamFnLoggingServicer(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
def Logging(self, log_bundles, context=None):
for log_bundle in log_bundles:
for log_entry in log_bundle.log_entries:
_LOGGER.info('Worker: %s', str(log_entry).replace('\n', ' '))
return iter([])
class JobLogHandler(logging.Handler):
"""Captures logs to be returned via the Beam Job API.
Enabled via the with statement."""
# Mapping from logging levels to LogEntry levels.
LOG_LEVEL_MAP = {
logging.FATAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.CRITICAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.ERROR: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.WARNING: beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING,
logging.INFO: beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC,
logging.DEBUG: beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG,
}
def __init__(self, log_queues):
super(JobLogHandler, self).__init__()
self._last_id = 0
self._logged_thread = None
self._log_queues = log_queues
def __enter__(self):
# Remember the current thread to demultiplex the logs of concurrently
# running pipelines (as Python log handlers are global).
self._logged_thread = threading.current_thread()
logging.getLogger().addHandler(self)
def __exit__(self, *args):
self._logged_thread = None
self.close()
def _next_id(self):
self._last_id += 1
return str(self._last_id)
def emit(self, record):
if self._logged_thread is threading.current_thread():
msg = beam_job_api_pb2.JobMessage(
message_id=self._next_id(),
time=time.strftime(
'%Y-%m-%d %H:%M:%S.', time.localtime(record.created)),
importance=self.LOG_LEVEL_MAP[record.levelno],
message_text=self.format(record))
# Inform all message consumers.
for queue in self._log_queues:
queue.put(msg)
|
event_worker.py
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing permissions and
# limitations under the License.
import abc
from multiprocessing import Process
import signal
from threading import Timer
from oslo_config import cfg
from oslo_log import log
import six
import storyboard.db.api.base as db_api
from storyboard.notifications.notification_hook import class_mappings
from storyboard.notifications.subscriber import subscribe
from storyboard._i18n import _LI, _LW
from storyboard.plugin.base import PluginBase
CONF = cfg.CONF
LOG = log.getLogger(__name__)
MANAGER = None
IMPORT_OPTS = [
cfg.IntOpt("worker-count",
default="5",
help="The number of workers to spawn and manage.")
]
def run_daemon():
"""Start the daemon manager.
"""
global MANAGER
CONF.register_cli_opts(IMPORT_OPTS)
try:
log.register_options(CONF)
except cfg.ArgsAlreadyParsedError:
pass
log.setup(CONF, 'storyboard')
CONF(project='storyboard')
signal.signal(signal.SIGTERM, terminate)
signal.signal(signal.SIGINT, terminate)
MANAGER = DaemonManager(daemon_method=subscribe,
child_process_count=CONF.worker_count)
MANAGER.start()
def terminate(sig, frame):
# This assumes that all the child processes will terminate gracefully
# on a SIGINT
global MANAGER
MANAGER.stop()
# Raise SIGINT to all child processes.
signal.default_int_handler()
class DaemonManager(object):
"""A Daemon manager to handle multiple subprocesses.
"""
def __init__(self, child_process_count, daemon_method):
"""Create a new daemon manager with N processes running the passed
method. Once start() is called, the daemon method will be spawned N
times and continually checked/restarted until the process is
interrupted either by a system exit or keyboard interrupt.
:param child_process_count: The number of child processes to spawn.
:param daemon_method: The method to run in the child process.
"""
# Number of child procs.
self._child_process_count = child_process_count
# Process management threads.
self._procs = list()
# Save the daemon method
self._daemon_method = daemon_method
# Health check timer
self._timer = PerpetualTimer(1, self._health_check)
def _health_check(self):
processes = list(self._procs)
dead_processes = 0
for process in processes:
if not process.is_alive():
LOG.warning(_LW("Dead Process found [exit code:%d]") %
(process.exitcode,))
dead_processes += 1
self._procs.remove(process)
for i in range(dead_processes):
self._add_process()
def start(self):
"""Start the daemon manager and spawn child processes.
"""
LOG.info(_LI("Spawning %s child processes") %
(self._child_process_count,))
self._timer.start()
for i in range(self._child_process_count):
self._add_process()
def stop(self):
self._timer.cancel()
processes = list(self._procs)
for process in processes:
if process.is_alive():
process.terminate()
process.join()
self._procs.remove(process)
def _add_process(self):
process = Process(target=self._daemon_method)
process.start()
self._procs.append(process)
class PerpetualTimer(object):
"""A timer wrapper class that repeats itself.
"""
def __init__(self, t, handler):
self.t = t
self.handler = handler
self.thread = Timer(self.t, self.handle_function)
def handle_function(self):
self.handler()
self.thread = Timer(self.t, self.handle_function)
self.thread.setDaemon(True)
self.thread.start()
def start(self):
self.thread.start()
def cancel(self):
if self.thread.is_alive():
self.thread.cancel()
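# --- Illustrative sketch (not part of the original module) ---
# A hedged example of using the PerpetualTimer above for a recurring task;
# the one-second interval and the _heartbeat handler are assumptions made
# purely for illustration.
def _example_perpetual_timer():
    import time

    def _heartbeat():
        LOG.info(_LI("heartbeat"))

    timer = PerpetualTimer(1, _heartbeat)
    timer.start()    # the first tick fires after ~1 second, then re-arms
    time.sleep(3.5)
    timer.cancel()   # cancels the currently pending Timer thread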
@six.add_metaclass(abc.ABCMeta)
class WorkerTaskBase(PluginBase):
"""Base class for a worker that listens to API Events."""
def event(self, author_id, method, url, path, query_string, status,
resource, resource_id, sub_resource=None, sub_resource_id=None,
resource_before=None, resource_after=None):
"""Handle an event.
A database session is created, and passed to the abstract method.
"""
session = db_api.get_session(in_request=False)
with session.begin(subtransactions=True):
author = self.resolve_resource_by_name(session, 'user', author_id)
self.handle(session=session,
author=author,
method=method,
url=url,
path=path,
query_string=query_string,
status=status,
resource=resource,
resource_id=resource_id,
sub_resource=sub_resource,
sub_resource_id=sub_resource_id,
resource_before=resource_before,
resource_after=resource_after)
def resolve_resource_by_name(self, session, resource_name, resource_id):
if resource_name not in class_mappings:
return None
klass = class_mappings[resource_name][0]
return db_api.entity_get(klass, resource_id, session=session)
@abc.abstractmethod
def handle(self, session, author, method, url, path, query_string, status,
resource, resource_id, sub_resource=None, sub_resource_id=None,
resource_before=None, resource_after=None):
"""Handle an event.
:param session: An event-specific SQLAlchemy session.
:param author: The author's user record.
:param method: The HTTP Method.
:param url: The Referer header from the request.
:param path: The full HTTP Path requested.
:param query_string: The HTTP query string provided.
:param status: The returned HTTP Status of the response.
:param resource: The resource type.
:param resource_id: The ID of the resource.
:param sub_resource: The subresource type.
:param sub_resource_id: The ID of the subresource.
:param resource_before: The resource state before this event occurred.
:param resource_after: The resource state after this event occurred.
"""
|
abstract.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Includes the base-class for all commands of any service."""
from __future__ import unicode_literals
import ecstasy
import requests
import threading
import sys
try:
from Queue import Queue
except ImportError:
from queue import Queue
import lnk.config
import lnk.errors
class AbstractCommand(object):
"""
Abstract-base-class for all commands of any service.
This class' constructor handles the bulk of configuration every
command needs, such as fetching the service's API url, available
endpoints and default settings. It also gives each command a queue
and lock for threading, as well as a few other things most, if not
all, commands need. The class defines an interface all commands must
have, with some of AbstractCommand's methods, such as fetch(), not
being implemented and throwing a NotImplementedError if called directly.
An AbstractCommand must have knowledge about the service that the class
subclassing it uses (e.g. bit.ly or tinyurl), as well as about the
name of the command (e.g. 'link' or 'stats').
Attributes:
url (str): The URL of the API.
api (str): The URL of the API, joined with its version. Endpoints can
be joined to this string to form a full URL (without
parameters) for a request.
config (dict): The configuration data of a command.
endpoints (dict): The endpoints for a command.
settings (dict): The default settings of a command.
sets (dict|None): If available, the data sets/categories that the
command allows, else None if the command has no
such thing (e.g. the 'link' command).
queue (Queue.Queue): A queue for thread-safe data-passing.
lock (threading.Lock): A lock object for thread-safe actions.
error (Exception): The last exception thrown by a thread started
with the new_thread method. This is useful to
see if a thread threw an exception which would
otherwise not be properly handled (because you
can't catch exceptions from a child-thread in the
main thread).
parameters (dict): Dictionary for the parameters of an HTTP request.
list_item (str): A string, formatted with ecstasy, that should be used
to format a list-item (e.g. for the stats command).
It already includes the necessary markup such that
str.format() can be used on it directly with the
string to be formatted.
"""
def __init__(self, service, command):
with lnk.config.Manager(service) as manager:
self.url = manager['url']
self.api = '{0}/v{1}'.format(self.url, manager['version'])
self.config = manager['commands'][command]
self.endpoints = self.config['endpoints']
self.settings = self.config.get('settings')
self.sets = self.config.get('sets')
self.queue = Queue()
self.lock = threading.Lock()
self.error = None
self.parameters = {}
self.list_item = ecstasy.beautify(' <+> {0}', ecstasy.Color.Red)
def fetch(self, *args):
"""
Abstract method to fetch command-specific data.
Arguments:
args (variadic): Whatever arguments the overriden method takes.
Raises:
NotImplementedError: When called directly.
"""
raise NotImplementedError
def get(self, endpoint, parameters=None):
"""
Base method to perform an HTTP request with the GET method.
Arguments:
endpoint (str): The endpoint at which to request data.
parameters (dict): Additional parameters to pass with the request.
Return:
The requests.Response object resulting from the request.
"""
url = '{0}/{1}'.format(self.api, endpoint)
if not parameters:
parameters = self.parameters
else:
parameters.update(self.parameters)
return requests.get(url, params=parameters, timeout=60)
def post(self, endpoint, authorization=None, data=None):
"""
Base method to perform an HTTP request with the POST method.
Arguments:
endpoint (str): The endpoint at which to send data.
authorization (tuple): Optionally, a (login, password) tuple that
should be used for HTTP authorization.
data (dict): Optionally, data to send with the request.
Return:
The requests.Response object resulting from the request.
"""
url = '{0}/{1}'.format(self.url, endpoint)
return requests.post(url, auth=authorization, data=data, timeout=60)
def new_thread(self, function, *args, **kwargs):
"""
Runs a function in a new thread and returns the thread.
The function is run in a new thread in a way that all positional
and keyword arguments can be forwarded to the function. Additionally,
extra measures are taken to wrap the function into another proxy
function that handles exceptions which would otherwise not be handled.
If the function to be called throws an exception, this is recorded
and the exception is assigned to the 'error' attribute, where it can later
be checked.
Arguments:
function (func): The function to execute.
args (variadic): The positional arguments to pass to the function
when calling it.
kwargs (variadic): The keyword arguments to pass to the function
when calling it.
Returns:
The started (!) thread in which the function is being executed.
"""
def proxy(*args, **kwargs):
"""Proxy function for concurrent exception-handling."""
try:
function(*args, **kwargs)
except Exception:
_, self.error, _ = sys.exc_info()
thread = threading.Thread(target=proxy, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return thread
def join(self, threads, timeout=120):
"""
Joins a list of threads and checks for errors.
Each thread is joined with a timeout period as specified by the timeout
parameter of this function. If an error was found in the 'error'
attribute, it is re-raised so that it can be caught in the main thread.
Arguments:
threads (list): The list of threading.Threads to join.
timeout (float): A floating-point number specifying the number of
seconds to wait when joining. Defaults to 120
seconds.
Raises:
lnk.errors.InternalError: If a thread could not be joined in the given
timeout period (i.e. if it is still alive after
joining).
Other errors: If a thread threw an exception, this exception is
re-raised in the main thread.
"""
for thread in threads:
thread.join(timeout=timeout)
if thread.is_alive():
raise lnk.errors.InternalError('Could not join thread.')
if self.error:
raise self.error
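# --- Illustrative sketch (not part of the original module) ---
# A hypothetical command built on AbstractCommand; the 'example' service,
# 'info' command and 'lookup' endpoint names are assumptions, not real lnk
# configuration keys.
class InfoCommand(AbstractCommand):
    def __init__(self):
        super(InfoCommand, self).__init__('example', 'info')

    def fetch(self, url):
        # Query one endpoint from this command's configuration and return
        # the decoded JSON body of the response.
        response = self.get(self.endpoints['lookup'], {'url': url})
        return response.json()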
def filter_sets(all_sets, only, hide):
"""
Filters a set of categories.
This method is used by many commands that use some sort of dictionary
of available data sets/categories, which must filter those sets according
to the ones the user wants to have included in the response by that command.
Arguments:
all_sets (dict): The base dictionary of all available sets (from which
a subset should be filtered).
only (tuple): A tuple of the names of the categories/sets to include.
hide (tuple): A tuple of the names of the categories/sets to exclude.
Note that sets are hidden after the 'only' sets have
been processed, i.e. if a certain set is in 'only' and
in 'hide', it will first be selected and then discarded
again (which would make no sense). Usually either 'only'
or 'hide' is empty.
Returns:
A dictionary containing the filtered key/value pairs.
"""
filtered = {}
only = only or all_sets
for key, value in all_sets.items():
if key in only and key not in hide:
filtered[key] = value
return filtered
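# --- Illustrative sketch (not part of the original module) ---
# Demonstrates the filtering behaviour of filter_sets() above; the category
# names are made up for illustration only.
def _example_filter_sets():
    available = {'clicks': 1, 'countries': 2, 'referrers': 3}
    # 'only' keeps just the named sets.
    assert filter_sets(available, ('clicks',), ()) == {'clicks': 1}
    # An empty 'only' falls back to all sets, after which 'hide' removes some.
    assert filter_sets(available, (), ('referrers',)) == {
        'clicks': 1, 'countries': 2}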
|
test_gc.py
|
import unittest
from test.support import (verbose, run_unittest, start_threads,
requires_type_collecting)
import sys
import time
import gc
import weakref
try:
import threading
except ImportError:
threading = None
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
def __init__(self, i):
self.i = i
self.loop = self
class GC_Detector(object):
# Create an instance I. Then gc hasn't happened again so long as
# I.gc_happened is false.
def __init__(self):
self.gc_happened = False
def it_happened(ignored):
self.gc_happened = True
# Create a piece of cyclic trash that triggers it_happened when
# gc collects it.
self.wr = weakref.ref(C1055820(666), it_happened)
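# --- Illustrative sketch (not part of the original test module) ---
# Typical use of the GC_Detector pattern above on CPython: create one, force
# a collection, and gc_happened flips to True once the weakref callback has
# fired. Behaviour may differ on other implementations (e.g. IronPython).
def _example_gc_detector():
    detector = GC_Detector()
    gc.collect()
    assert detector.gc_happened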
### Tests
###############################################################################
@unittest.skipIf(sys.platform == 'cli', 'IronPython does not fully support the gc module.')
class GCTests(unittest.TestCase):
def test_list(self):
l = []
l.append(l)
gc.collect()
del l
self.assertEqual(gc.collect(), 1)
def test_dict(self):
d = {}
d[1] = d
gc.collect()
del d
self.assertEqual(gc.collect(), 1)
def test_tuple(self):
# since tuples are immutable we close the loop with a list
l = []
t = (l,)
l.append(t)
gc.collect()
del t
del l
self.assertEqual(gc.collect(), 2)
def test_class(self):
class A:
pass
A.a = A
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_newstyleclass(self):
class A(object):
pass
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_instance(self):
class A:
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
@requires_type_collecting
def test_newinstance(self):
class A(object):
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
class B(list):
pass
class C(B, A):
pass
a = C()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
del B, C
self.assertNotEqual(gc.collect(), 0)
A.a = A()
del A
self.assertNotEqual(gc.collect(), 0)
self.assertEqual(gc.collect(), 0)
def test_method(self):
# Tricky: self.__init__ is a bound method, it references the instance.
class A:
def __init__(self):
self.init = self.__init__
a = A()
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
def test_finalizer(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
class A:
def __del__(self): pass
class B:
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_finalizer_newclass(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
class A(object):
def __del__(self): pass
class B(object):
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_function(self):
# Tricky: f -> d -> f, code should call d.clear() after the exec to
# break the cycle.
d = {}
exec("def f(): pass\n") in d
gc.collect()
del d
self.assertEqual(gc.collect(), 2)
def test_frame(self):
def f():
frame = sys._getframe()
gc.collect()
f()
self.assertEqual(gc.collect(), 1)
def test_saveall(self):
# Verify that cyclic garbage like lists show up in gc.garbage if the
# SAVEALL option is enabled.
# First make sure we don't save away other stuff that just happens to
# be waiting for collection.
gc.collect()
# if this fails, someone else created immortal trash
self.assertEqual(gc.garbage, [])
L = []
L.append(L)
id_L = id(L)
debug = gc.get_debug()
gc.set_debug(debug | gc.DEBUG_SAVEALL)
del L
gc.collect()
gc.set_debug(debug)
self.assertEqual(len(gc.garbage), 1)
obj = gc.garbage.pop()
self.assertEqual(id(obj), id_L)
def test_del(self):
# __del__ methods can trigger collection; make that happen in this test
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A:
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
def test_del_newclass(self):
# __del__ methods can trigger collection; make that happen in this test
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A(object):
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
# The following two tests are fragile:
# They precisely count the number of allocations,
# which is highly implementation-dependent.
# For example:
# - disposed tuples are not freed, but reused
# - the call to assertEqual somehow avoids building its args tuple
def test_get_count(self):
# Avoid future allocation of method object
assertEqual = self._baseAssertEqual
gc.collect()
assertEqual(gc.get_count(), (0, 0, 0))
a = dict()
# since gc.collect(), we created two objects:
# the dict, and the tuple returned by get_count()
assertEqual(gc.get_count(), (2, 0, 0))
def test_collect_generations(self):
# Avoid future allocation of method object
assertEqual = self.assertEqual
gc.collect()
a = dict()
gc.collect(0)
assertEqual(gc.get_count(), (0, 1, 0))
gc.collect(1)
assertEqual(gc.get_count(), (0, 0, 1))
gc.collect(2)
assertEqual(gc.get_count(), (0, 0, 0))
def test_trashcan(self):
class Ouch:
n = 0
def __del__(self):
Ouch.n = Ouch.n + 1
if Ouch.n % 17 == 0:
gc.collect()
# "trashcan" is a hack to prevent stack overflow when deallocating
# very deeply nested tuples etc. It works in part by abusing the
# type pointer and refcount fields, and that can yield horrible
# problems when gc tries to traverse the structures.
# If this test fails (as it does in 2.0, 2.1 and 2.2), it will
# most likely die via segfault.
# Note: In 2.3 the possibility for compiling without cyclic gc was
# removed, and that in turn allows the trashcan mechanism to work
# via much simpler means (e.g., it never abuses the type pointer or
# refcount fields anymore). Since it's much less likely to cause a
# problem now, the various constants in this expensive (we force a lot
# of full collections) test are cut back from the 2.2 version.
gc.enable()
N = 150
for count in range(2):
t = []
for i in range(N):
t = [t, Ouch()]
u = []
for i in range(N):
u = [u, Ouch()]
v = {}
for i in range(N):
v = {1: v, 2: Ouch()}
gc.disable()
@unittest.skipUnless(threading, "test meaningless on builds without threads")
def test_trashcan_threads(self):
# Issue #13992: trashcan mechanism should be thread-safe
NESTING = 60
N_THREADS = 2
def sleeper_gen():
"""A generator that releases the GIL when closed or dealloc'ed."""
try:
yield
finally:
time.sleep(0.000001)
class C(list):
# Appending to a list is atomic, which avoids the use of a lock.
inits = []
dels = []
def __init__(self, alist):
self[:] = alist
C.inits.append(None)
def __del__(self):
# This __del__ is called by subtype_dealloc().
C.dels.append(None)
# `g` will release the GIL when garbage-collected. This
# helps assert subtype_dealloc's behaviour when threads
# switch in the middle of it.
g = sleeper_gen()
next(g)
# Now that __del__ is finished, subtype_dealloc will proceed
# to call list_dealloc, which also uses the trashcan mechanism.
def make_nested():
"""Create a sufficiently nested container object so that the
trashcan mechanism is invoked when deallocating it."""
x = C([])
for i in range(NESTING):
x = [C([x])]
del x
def run_thread():
"""Exercise make_nested() in a loop."""
while not exit:
make_nested()
old_checkinterval = sys.getcheckinterval()
sys.setcheckinterval(3)
try:
exit = []
threads = []
for i in range(N_THREADS):
t = threading.Thread(target=run_thread)
threads.append(t)
with start_threads(threads, lambda: exit.append(1)):
time.sleep(1.0)
finally:
sys.setcheckinterval(old_checkinterval)
gc.collect()
self.assertEqual(len(C.inits), len(C.dels))
def test_boom(self):
class Boom:
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom()
b = Boom()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# a<->b are in a trash cycle now. Collection will invoke
# Boom.__getattr__ (to see whether a and b have __del__ methods), and
# __getattr__ deletes the internal "attr" attributes as a side effect.
# That causes the trash cycle to get reclaimed via refcounts falling to
# 0, thus mutating the trash graph as a side effect of merely asking
# whether __del__ exists. This used to (before 2.3b1) crash Python.
# Now __getattr__ isn't called.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2(self):
class Boom2:
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2()
b = Boom2()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# Much like test_boom(), except that __getattr__ doesn't break the
# cycle until the second time gc checks for __del__. As of 2.3b1,
# there isn't a second time, so this simply cleans up the trash cycle.
# We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
# reclaimed this way.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom_new(self):
# boom__new and boom2_new are exactly like boom and boom2, except use
# new-style classes.
class Boom_New(object):
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom_New()
b = Boom_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2_new(self):
class Boom2_New(object):
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2_New()
b = Boom2_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
alist = [1, 3, 5]
got = gc.get_referents(alist)
got.sort()
self.assertEqual(got, alist)
atuple = tuple(alist)
got = gc.get_referents(atuple)
got.sort()
self.assertEqual(got, alist)
adict = {1: 3, 5: 7}
expected = [1, 3, 5, 7]
got = gc.get_referents(adict)
got.sort()
self.assertEqual(got, expected)
got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
got.sort()
self.assertEqual(got, [0, 0] + range(5))
self.assertEqual(gc.get_referents(1, 'a', 4j), [])
def test_is_tracked(self):
# Atomic built-in types are not tracked, user-defined objects and
# mutable containers are.
# NOTE: types with special optimizations (e.g. tuple) have tests
# in their own test files instead.
self.assertFalse(gc.is_tracked(None))
self.assertFalse(gc.is_tracked(1))
self.assertFalse(gc.is_tracked(1.0))
self.assertFalse(gc.is_tracked(1.0 + 5.0j))
self.assertFalse(gc.is_tracked(True))
self.assertFalse(gc.is_tracked(False))
self.assertFalse(gc.is_tracked("a"))
self.assertFalse(gc.is_tracked(u"a"))
self.assertFalse(gc.is_tracked(bytearray("a")))
self.assertFalse(gc.is_tracked(type))
self.assertFalse(gc.is_tracked(int))
self.assertFalse(gc.is_tracked(object))
self.assertFalse(gc.is_tracked(object()))
class OldStyle:
pass
class NewStyle(object):
pass
self.assertTrue(gc.is_tracked(gc))
self.assertTrue(gc.is_tracked(OldStyle))
self.assertTrue(gc.is_tracked(OldStyle()))
self.assertTrue(gc.is_tracked(NewStyle))
self.assertTrue(gc.is_tracked(NewStyle()))
self.assertTrue(gc.is_tracked([]))
self.assertTrue(gc.is_tracked(set()))
def test_bug1055820b(self):
# Corresponds to temp2b.py in the bug report.
ouch = []
def callback(ignored):
ouch[:] = [wr() for wr in WRs]
Cs = [C1055820(i) for i in range(2)]
WRs = [weakref.ref(c, callback) for c in Cs]
c = None
gc.collect()
self.assertEqual(len(ouch), 0)
# Make the two instances trash, and collect again. The bug was that
# the callback materialized a strong reference to an instance, but gc
# cleared the instance's dict anyway.
Cs = None
gc.collect()
self.assertEqual(len(ouch), 2) # else the callbacks didn't run
for x in ouch:
# If the callback resurrected one of these guys, the instance
# would be damaged, with an empty __dict__.
self.assertEqual(x, None)
@unittest.skipIf(sys.platform == 'cli', 'IronPython does not fully support the gc module.')
class GCTogglingTests(unittest.TestCase):
def setUp(self):
gc.enable()
def tearDown(self):
gc.disable()
def test_bug1055820c(self):
# Corresponds to temp2c.py in the bug report. This is pretty
# elaborate.
c0 = C1055820(0)
# Move c0 into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_c0_alive = c0
del c0.loop # now only c1 keeps c0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
ouch = []
def callback(ignored):
ouch[:] = [c2wr()]
# The callback gets associated with a wr on an object in generation 2.
c0wr = weakref.ref(c0, callback)
c0 = c1 = c2 = None
# What we've set up: c0, c1, and c2 are all trash now. c0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's a
# global weakref to c2 (c2wr), but that weakref has no callback.
# There's also a global weakref to c0 (c0wr), and that does have a
# callback, and that callback references c2 via c2wr().
#
# c0 has a wr with callback, which references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see c0 at all, and c0 is
# the only object that has a weakref with a callback. gc clears c1
# and c2. Clearing c1 has the side effect of dropping the refcount on
# c0 to 0, so c0 goes away (despite that it's in an older generation)
# and c0's wr callback triggers. That in turn materializes a reference
# to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
junk = []
i = 0
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else the callback wasn't invoked
for x in ouch:
# If the callback resurrected c2, the instance would be damaged,
# with an empty __dict__.
self.assertEqual(x, None)
def test_bug1055820d(self):
# Corresponds to temp2d.py in the bug report. This is very much like
# test_bug1055820c, but uses a __del__ method instead of a weakref
# callback to sneak in a resurrection of cyclic trash.
ouch = []
class D(C1055820):
def __del__(self):
ouch[:] = [c2wr()]
d0 = D(0)
# Move all the above into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_d0_alive = d0
del d0.loop # now only c1 keeps d0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
d0 = c1 = c2 = None
# What we've set up: d0, c1, and c2 are all trash now. d0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's
# a global weakref to c2 (c2wr), but that weakref has no callback.
# There are no other weakrefs.
#
# d0 has a __del__ method that references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see d0 at all. gc clears
# c1 and c2. Clearing c1 has the side effect of dropping the refcount
# on d0 to 0, so d0 goes away (despite that it's in an older
# generation) and d0's __del__ triggers. That in turn materializes
# a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
detector = GC_Detector()
junk = []
i = 0
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked
for x in ouch:
# If __del__ resurrected c2, the instance would be damaged, with an
# empty __dict__.
self.assertEqual(x, None)
def test_main():
enabled = gc.isenabled()
gc.disable()
assert not gc.isenabled()
debug = gc.get_debug()
gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
try:
gc.collect() # Delete 2nd generation garbage
run_unittest(GCTests, GCTogglingTests)
finally:
gc.set_debug(debug)
# test gc.enable() even if GC is disabled by default
if verbose:
print "restoring automatic collection"
# make sure to always test gc.enable()
gc.enable()
assert gc.isenabled()
if not enabled:
gc.disable()
if __name__ == "__main__":
test_main()
|
test_pyepics_compat.py
|
#!/usr/bin/env python
# unit-tests for ca interface
# Lifted almost exactly from pyepics
# The epics python module was originally written by
#
# Matthew Newville <newville@cars.uchicago.edu>
# CARS, University of Chicago
#
# There have been several contributions from many others, notably Angus
# Gratton <angus.gratton@anu.edu.au>. See the Acknowledgements section of
# the documentation for a list of more contributors.
#
# Except where explicitly noted, all files in this distribution are licensed
# under the Epics Open License.:
#
# ------------------------------------------------
#
# Copyright 2010 Matthew Newville, The University of Chicago. All rights reserved.
#
# The epics python module is distributed subject to the following license conditions:
# SOFTWARE LICENSE AGREEMENT
# Software: epics python module
#
# 1. The "Software", below, refers to the epics python module (in either
# source code, or binary form and accompanying documentation). Each
# licensee is addressed as "you" or "Licensee."
#
# 2. The copyright holders shown above and their third-party licensors
# hereby grant Licensee a royalty-free nonexclusive license, subject to
# the limitations stated herein and U.S. Government license rights.
#
# 3. You may modify and make a copy or copies of the Software for use
# within your organization, if you meet the following conditions:
#
# 1. Copies in source code must include the copyright notice and this
# Software License Agreement.
#
# 2. Copies in binary form must include the copyright notice and this
# Software License Agreement in the documentation and/or other
# materials provided with the copy.
#
# 4. You may modify a copy or copies of the Software or any portion of
# it, thus forming a work based on the Software, and distribute copies of
# such work outside your organization, if you meet all of the following
# conditions:
#
# 1. Copies in source code must include the copyright notice and this
# Software License Agreement;
#
# 2. Copies in binary form must include the copyright notice and this
# Software License Agreement in the documentation and/or other
# materials provided with the copy;
#
# 3. Modified copies and works based on the Software must carry
# prominent notices stating that you changed specified portions of
# the Software.
#
# 5. Portions of the Software resulted from work developed under a
# U.S. Government contract and are subject to the following license: the
# Government is granted for itself and others acting on its behalf a
# paid-up, nonexclusive, irrevocable worldwide license in this computer
# software to reproduce, prepare derivative works, and perform publicly
# and display publicly.
#
# 6. WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS" WITHOUT
# WARRANTY OF ANY KIND. THE COPYRIGHT HOLDERS, THEIR THIRD PARTY
# LICENSORS, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY,
# AND THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, TITLE OR NON-INFRINGEMENT, (2) DO NOT
# ASSUME ANY LEGAL LIABILITY OR RESPONSIBILITY FOR THE ACCURACY,
# COMPLETENESS, OR USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT
# USE OF THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4) DO
# NOT WARRANT THAT THE SOFTWARE WILL FUNCTION UNINTERRUPTED, THAT IT IS
# ERROR-FREE OR THAT ANY ERRORS WILL BE CORRECTED.
#
# 7. LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT HOLDERS,
# THEIR THIRD PARTY LICENSORS, THE UNITED STATES, THE UNITED STATES
# DEPARTMENT OF ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
# INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF ANY KIND OR
# NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF PROFITS OR LOSS OF DATA,
# FOR ANY REASON WHATSOEVER, WHETHER SUCH LIABILITY IS ASSERTED ON THE
# BASIS OF CONTRACT, TORT (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR
# OTHERWISE, EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
# POSSIBILITY OF SUCH LOSS OR DAMAGES.
#
# ------------------------------------------------
import pytest
numpy = pytest.importorskip("numpy")
import time
import os
import sys
import threading
from types import SimpleNamespace
from contextlib import contextmanager
from caproto.threading.pyepics_compat import (PV, caput, caget, cainfo,
caget_many, caput_many,
AccessRightsException)
from .conftest import default_setup_module, default_teardown_module
from .test_threading_client import context, shared_broadcaster
def setup_module(module):
default_setup_module(module)
from caproto.benchmarking.util import set_logging_level
set_logging_level('DEBUG')
def teardown_module(module):
default_teardown_module(module)
@pytest.fixture(scope='function')
def pvnames(request, epics_base_ioc, context):
class PVNames:
prefix = epics_base_ioc.prefix
double_pv = prefix + 'ao1'
double_pv_units = 'microns'
double_pv_prec = 4
double_pv2 = prefix + 'ao2'
pause_pv = prefix + 'pause'
str_pv = prefix + 'ao1.DESC'
int_pv = prefix + 'long2'
long_pv = prefix + 'long2'
float_pv = prefix + 'ao3'
enum_pv = prefix + 'mbbo1'
enum_pv_strs = ['Stop', 'Start', 'Pause', 'Resume']
proc_pv = prefix + 'ao1.PROC'
long_arr_pv = prefix + 'long2k'
double_arr_pv = prefix + 'double2k'
string_arr_pv = prefix + 'string128'
char_arr_pv = prefix + 'char128'
char_arrays = [prefix + 'char128',
prefix + 'char2k',
prefix + 'char64k']
long_arrays = [prefix + 'long128',
prefix + 'long2k',
prefix + 'long64k']
double_arrays = [prefix + 'double128',
prefix + 'double2k',
prefix + 'double64k']
updating_pv1 = prefix + 'ao1'
updating_str1 = prefix + 'char256'
updating_pvlist = [prefix + 'ao1',
prefix + 'ai1',
prefix + 'long1',
prefix + 'ao2']
non_updating_pv = prefix + 'ao4'
alarm_pv = prefix + 'long1'
alarm_comp = 'ge'
alarm_trippoint = 7
subarr_driver = prefix + 'wave_test'
subarr1 = prefix + 'subArr1'
subarr2 = prefix + 'subArr2'
subarr3 = prefix + 'subArr3'
subarr4 = prefix + 'subArr4'
zero_len_subarr1 = prefix + 'ZeroLenSubArr1'
# TODO: softIoc does not build with motor
motor1 = 'sim:mtr1'
motor2 = 'sim:mtr2'
def __repr__(self):
return f'<PVNames prefix={epics_base_ioc.prefix}>'
PV._default_context = context
def finalize_context():
print('Cleaning up PV context')
context.disconnect()
assert not context._process_search_results_thread.is_alive()
assert not context._activate_subscriptions_thread.is_alive()
assert not context.selector.thread.is_alive()
sb = context.broadcaster
sb.disconnect()
assert not sb._command_thread.is_alive()
assert not sb.selector.thread.is_alive()
assert not sb._retry_unanswered_searches_thread.is_alive()
print('Done cleaning up PV context')
request.addfinalizer(finalize_context)
return PVNames()
def simulator_main(prefix, ready_event, exit_event):
'simulator.py from pyepics testioc (same license as above)'
import random
epics = pytest.importorskip('epics')
_caput = epics.caput
_PV = epics.PV
class PV(_PV):
def put(self, value, **kw):
rval = repr(value)[:50]
print(f'(simulator: put {self.pvname} {rval})')
return super().put(value, **kw)
def caput(pv, value, **kw):
rval = repr(value)[:50]
print(f'(simulator: caput {pv} {rval})')
return _caput(pv, value, **kw)
NEEDS_INIT = True
SLEEP_TIME = 0.10
def onConnect(pvname=None, conn=None, **kws):
nonlocal NEEDS_INIT
NEEDS_INIT = conn
def make_pvs(*args, **kwds):
# print("Make PVS ' ", prefix, args)
# print( [("%s%s" % (prefix, name)) for name in args])
pvlist = [PV("%s%s" % (prefix, name)) for name in args]
for pv in pvlist:
pv.connect()
pv.connection_callbacks.append(onConnect)
return pvlist
mbbos = make_pvs("mbbo1", "mbbo2")
pause_pv = make_pvs("pause",)[0]
longs = make_pvs("long1", "long2", "long3", "long4")
strs = make_pvs("str1", "str2")
analogs = make_pvs("ao1", "ai1", "ao2", "ao3")
binaries = make_pvs("bo1", "bi1")
char_waves = make_pvs("char128", "char256", "char2k", "char64k")
double_waves = make_pvs("double128", "double2k", "double64k")
long_waves = make_pvs("long128", "long2k", "long64k")
str_waves = make_pvs("string128", "string2k", "string64k")
subarrays = make_pvs("subArr1", "subArr2", "subArr3", "subArr4" )
subarray_driver = make_pvs("wave_test",)[0]
def initialize_data():
subarray_driver.put(numpy.arange(64)/12.0)
for p in mbbos:
p.put(1)
for i, p in enumerate(longs):
p.put((i+1))
for i, p in enumerate(strs):
p.put(("String %s" % (i+1)))
for i, p in enumerate(binaries):
p.put((i+1))
for i, p in enumerate(analogs):
p.put((i+1)*1.7135000 )
caput(f'{prefix}ao1.EGU', 'microns')
caput(f'{prefix}ao1.PREC', 4)
caput(f'{prefix}ai1.PREC', 2)
caput(f'{prefix}ao2.PREC', 3)
char_waves[0].put([60+random.randrange(30) for i in range(128)])
char_waves[1].put([random.randrange(256) for i in range(256)])
char_waves[2].put([random.randrange(256) for i in range(2048)])
char_waves[3].put([random.randrange(256) for i in range(65536)])
long_waves[0].put([i+random.randrange(2) for i in range(128)])
long_waves[1].put([i+random.randrange(128) for i in range(2048)])
long_waves[2].put([i for i in range(65536)])
double_waves[0].put([i+random.randrange(2) for i in range(128)])
double_waves[1].put([random.random() for i in range(2048)])
double_waves[2].put([random.random() for i in range(65536)])
pause_pv.put(0)
str_waves[0].put([(" String %i" % (i+1)) for i in range(128)])
print('Data initialized')
text = '''line 1
this is line 2
and line 3
here is another line
this is the 5th line
line 6
line 7
line 8
line 9
line 10
line 11
'''.split('\n')
start_time = time.time()
count = 0
long_update = 0
lcount = 1
initialized_at = 0
while not exit_event.is_set():
if NEEDS_INIT:
initialize_data()
time.sleep(SLEEP_TIME)
NEEDS_INIT = False
initialized_at = count
time.sleep(SLEEP_TIME)
count = count + 1
if not NEEDS_INIT and count >= initialized_at + 4:
if not ready_event.is_set():
ready_event.set()
print('[Pyepics simulator running!]')
if count > 99999999:
count = 1
t0 = time.time()
if pause_pv.get() == 1:
# pause for up to 120 seconds if pause was selected
t0 = time.time()
while time.time()-t0 < 120:
time.sleep(SLEEP_TIME)
if pause_pv.get() == 0:
break
elif exit_event.is_set():
break
pause_pv.put(0)
if exit_event.is_set():
break
noise = numpy.random.normal
analogs[0].put(100*(random.random()-0.5))
analogs[1].put(76.54321*(time.time()-start_time))
analogs[2].put(0.3*numpy.sin(time.time() / 2.302) + noise(scale=0.4))
char_waves[0].put([45+random.randrange(64)
for i in range(128)])
if count % 3 == 0:
analogs[3].put(
numpy.exp((max(0.001, noise(scale=0.03) +
numpy.sqrt((count/16.0) % 87.)))))
long_waves[1].put([i+random.randrange(128)
for i in range(2048)])
str_waves[0].put([("Str%i_%.3f" % (i+1, 100*random.random()))
for i in range(128)])
if t0-long_update >= 1.0:
long_update=t0
lcount = (lcount + 1) % 10
longs[0].put(lcount)
char_waves[1].put(text[lcount])
double_waves[2].put([random.random()
for i in range(65536)])
double_waves[1].put([random.random()
for i in range(2048)])
print('[Simulator loop exiting]')
@pytest.fixture(scope='function')
def simulator(request, pvnames):
prefix = pvnames.prefix
ready_event = threading.Event()
exit_event = threading.Event()
kwargs = dict(prefix=pvnames.prefix,
ready_event=ready_event,
exit_event=exit_event)
print()
print()
print(f'* Starting up simulator for prefix: {prefix}')
thread = threading.Thread(target=simulator_main, kwargs=kwargs)
thread.start()
def stop_simulator():
print()
print(f'* Joining simulator thread')
exit_event.set()
thread.join(timeout=2)
print()
if thread.is_alive():
print(f'* Dangling simulator thread (prefix={prefix})... :(')
else:
print(f'* Simulator thread exited cleanly (prefix={prefix})')
request.addfinalizer(stop_simulator)
ok = ready_event.wait(15)
if not ok:
raise TimeoutError('Simulator thread failed to start!')
print()
print(f'* Simulator thread started up! (prefix={prefix})')
return thread
@contextmanager
def no_simulator_updates(pvnames):
'''Context manager which pauses and resumes simulator PV updating'''
try:
caput(pvnames.pause_pv, 1)
time.sleep(0.1)
yield
finally:
caput(pvnames.pause_pv, 0)
# Give the simulator some time to start back up
time.sleep(0.5)
@pytest.mark.flaky(reruns=5, reruns_delay=2)
def testA_CreatePV(pvnames):
print('Simple Test: create pv\n')
pv = PV(pvnames.double_pv)
assert pv is not None
@pytest.mark.flaky(reruns=5, reruns_delay=2)
def testA_CreatedWithConn(pvnames):
print('Simple Test: create pv with conn callback\n')
CONN_DAT = {}
def onConnect(pvname=None, conn=None, chid=None, **kws):
nonlocal CONN_DAT
print(' :Connection status changed: %s connected=%s\n' % (pvname, conn))
CONN_DAT[pvname] = conn
print(f'Connecting to {pvnames.int_pv}')
pv = PV(pvnames.int_pv, connection_callback=onConnect)
val = pv.get(timeout=5)
conn = CONN_DAT.get(pvnames.int_pv, None)
assert conn
def test_caget(pvnames):
print('Simple Test of caget() function\n')
pvs = (pvnames.double_pv, pvnames.enum_pv, pvnames.str_pv)
for p in pvs:
val = caget(p)
assert val is not None
sval = caget(pvnames.str_pv)
assert sval == 'ao'
def test_smoke_cainfo(pvnames):
print('Simple Test of cainfo() function\n')
pvs = (pvnames.double_pv, pvnames.enum_pv, pvnames.str_pv)
for p in pvs:
for print_out in (True, False):
val = cainfo(p, print_out=print_out)
if not print_out:
assert val is not None
def test_caget_many(pvnames):
print('Simple Test of caget_many() function\n')
pvs = [pvnames.double_pv, pvnames.enum_pv, pvnames.str_pv]
vals = caget_many(pvs)
assert len(vals) == len(pvs)
assert isinstance(vals[0], float)
print(type(vals[1]))
assert isinstance(vals[1], (int, numpy.uint16))
assert isinstance(vals[2], str)
def test_caput_many_wait_all(pvnames):
print('Test of caput_many() function, waiting for all.\n')
pvs = [pvnames.double_pv, pvnames.enum_pv, 'ceci nest pas une PV']
vals = [0.5, 0, 23]
t0 = time.time()
success = caput_many(pvs, vals, wait='all', connection_timeout=0.5,
put_timeout=5.0)
t1 = time.time()
assert len(success) == len(pvs)
assert success[0] == 1
assert success[1] == 1
assert success[2] < 0
def test_caput_many_wait_each(pvnames):
print('Simple Test of caput_many() function, waiting for each.\n')
pvs = [pvnames.double_pv, pvnames.enum_pv, 'ceci nest pas une PV']
#pvs = ["MTEST:Val1", "MTEST:Val2", "MTEST:SlowVal"]
vals = [0.5, 0, 23]
success = caput_many(pvs, vals, wait='each', connection_timeout=0.5,
put_timeout=1.0)
assert len(success) == len(pvs)
assert success[0] == 1
assert success[1] == 1
assert success[2] < 0
def test_caput_many_no_wait(pvnames):
print('Simple Test of caput_many() function, without waiting.\n')
pvs = [pvnames.double_pv, pvnames.enum_pv, 'ceci nest pas une PV']
vals = [0.5, 0, 23]
success = caput_many(pvs, vals, wait=None, connection_timeout=0.5)
assert len(success) == len(pvs)
# If you don't wait, ca.put returns 1 as long as the PV connects
# and the put request is valid.
assert success[0] == 1
assert success[1] == 1
assert success[2] < 0
def test_get1(pvnames):
print('Simple Test: test value and char_value on an integer\n')
pv = PV(pvnames.int_pv)
val = pv.get()
cval = pv.get(as_string=True)
assert int(cval) == val
@pytest.mark.xfail(os.environ.get('BASE_VER') in ('R3.16.1', 'R7.0.1.1'),
reason='known issues with simulator on some BASE versions')
def test_get_string_waveform(pvnames, simulator):
epics = pytest.importorskip('epics')
print('String Array: \n')
with no_simulator_updates(pvnames):
pv = PV(pvnames.string_arr_pv)
val = pv.get()
assert len(val) > 10
assert isinstance(val[0], str)
assert len(val[0]) > 1
assert isinstance(val[1], str)
assert len(val[1]) > 1
def test_put_string_waveform(pvnames):
print('String Array: \n')
with no_simulator_updates(pvnames):
pv = PV(pvnames.string_arr_pv)
put_value = ['a', 'b', 'c']
pv.put(put_value)
get_value = pv.get(use_monitor=False, count=len(put_value))
numpy.testing.assert_array_equal(get_value, put_value)
@pytest.mark.skipif(os.environ.get("CAPROTO_SKIP_MOTORSIM_TESTS") is not None,
reason='No motorsim IOC')
@pytest.mark.skipif(sys.platform == 'win32',
reason='win32 motorsim IOC')
def test_putcomplete(pvnames):
print('Put with wait and put_complete (using real motor!) \n')
vals = (1.35, 1.50, 1.44, 1.445, 1.45, 1.453, 1.446, 1.447, 1.450,
1.450, 1.490, 1.5, 1.500)
p = PV(pvnames.motor1)
if not p.wait_for_connection():
raise TimeoutError('simulated motor connection failed?')
see_complete = []
for v in vals:
t0 = time.time()
p.put(v, use_complete=True)
count = 0
for i in range(100000):
time.sleep(0.001)
count = count + 1
if p.put_complete:
see_complete.append(True)
print('See completion')
break
# print('made it to value= %.3f, elapsed time= %.4f sec (count=%i)' % (v, time.time()-t0, count))
assert len(see_complete) > (len(vals) - 5)
@pytest.mark.skipif(os.environ.get("CAPROTO_SKIP_MOTORSIM_TESTS") is not None,
reason='No motorsim IOC')
@pytest.mark.skipif(sys.platform == 'win32',
reason='win32 motorsim IOC')
def test_putwait(pvnames):
print('Put with wait (using real motor!) \n')
pv = PV(pvnames.motor1)
if not pv.wait_for_connection():
raise TimeoutError('simulated motor connection failed?')
val = pv.get()
t0 = time.time()
if val < 5:
pv.put(val + 1.0, wait=True)
else:
pv.put(val - 1.0, wait=True)
dt = time.time()-t0
print(' put took %s sec\n' % dt)
assert dt > 0.1
# now with a callback!
put_callback_called = False
def onPutdone(pvname=None, **kws):
print('put done ', pvname, kws)
nonlocal put_callback_called
put_callback_called = True
val = pv.get()
if val < 5:
pv.put(val + 1.0, callback=onPutdone)
else:
pv.put(val - 1.0, callback=onPutdone)
t0 = time.time()
while time.time()-t0 < dt*1.50:
time.sleep(0.02)
print(' put should be done by now? %s \n' % put_callback_called)
assert put_callback_called
# now using pv.put_complete
val = pv.get()
if val < 5:
pv.put(val + 1.0, use_complete=True)
else:
pv.put(val - 1.0, use_complete=True)
t0 = time.time()
count = 0
while time.time()-t0 < dt*1.50:
if pv.put_complete:
break
count = count + 1
time.sleep(0.02)
print(' put_complete=%s (should be True), and count=%i (should be>3)\n' %
(pv.put_complete, count))
assert pv.put_complete
assert count > 3
@pytest.mark.xfail(os.environ.get('BASE_VER') in ('R3.16.1', 'R7.0.1.1'),
reason='known issues with simulator on some BASE versions')
def test_get_callback(pvnames, simulator):
epics = pytest.importorskip('epics')
print("Callback test: changing PV must be updated\n")
mypv = PV(pvnames.updating_pv1)
NEWVALS = []
def onChanges(pvname=None, value=None, char_value=None, **kw):
nonlocal NEWVALS
print('PV %s %s, %s Changed!\n' % (pvname, repr(value), char_value))
NEWVALS.append(repr(value))
mypv.add_callback(onChanges)
print('Added a callback. Now wait for changes...\n')
t0 = time.time()
while time.time() - t0 < 3:
time.sleep(1.e-4)
print(' saw %i changes.\n' % len(NEWVALS))
assert len(NEWVALS) > 3
mypv.clear_callbacks()
def test_subarrays(pvnames):
print("Subarray test: dynamic length arrays\n")
driver = PV(pvnames.subarr_driver)
subarr1 = PV(pvnames.subarr1)
subarr1.connect()
len_full = 64
len_sub1 = 16
full_data = numpy.arange(len_full)/1.0
caput("%s.NELM" % pvnames.subarr1, len_sub1)
caput("%s.INDX" % pvnames.subarr1, 0)
driver.put(full_data)
time.sleep(0.1)
subval = subarr1.get()
assert len(subval) == len_sub1
assert numpy.all(subval == full_data[:len_sub1])
print("Subarray test: C\n")
caput("%s.NELM" % pvnames.subarr2, 19)
caput("%s.INDX" % pvnames.subarr2, 3)
subarr2 = PV(pvnames.subarr2)
subarr2.get()
driver.put(full_data)
time.sleep(0.1)
subval = subarr2.get()
assert len(subval) == 19
assert numpy.all(subval == full_data[3:3+19])
caput("%s.NELM" % pvnames.subarr2, 5)
caput("%s.INDX" % pvnames.subarr2, 13)
driver.put(full_data)
time.sleep(0.1)
subval = subarr2.get()
assert len(subval) == 5
assert numpy.all(subval == full_data[13:5+13])
def test_subarray_zerolen(pvnames):
subarr1 = PV(pvnames.zero_len_subarr1)
subarr1.wait_for_connection()
val = subarr1.get(use_monitor=True, as_numpy=True)
assert isinstance(val, numpy.ndarray), 'using monitor'
assert len(val) == 0, 'using monitor'
# caproto returns things in big endian, not native type
# assert val.dtype == numpy.float64, 'using monitor'
val = subarr1.get(use_monitor=False, as_numpy=True)
assert isinstance(val, numpy.ndarray), 'no monitor'
assert len(val) == 0, 'no monitor'
# caproto returns things in big endian, not native type
# assert val.dtype == numpy.float64, 'no monitor'
def test_waveform_get_with_count_arg(pvnames):
wf = PV(pvnames.char_arr_pv, count=32)
val=wf.get()
assert len(val) == 32
val=wf.get(count=wf.nelm)
assert len(val) == wf.nelm
@pytest.mark.xfail(os.environ.get('BASE_VER') in ('R3.16.1', 'R7.0.1.1'),
reason='known issues with simulator on some BASE versions')
def test_waveform_callback_with_count_arg(pvnames, simulator):
epics = pytest.importorskip('epics')
values = []
wf = PV(pvnames.char_arr_pv, count=32)
def onChanges(pvname=None, value=None, char_value=None, **kw):
print('PV %s %s, %s Changed!\n' % (pvname, repr(value), char_value))
values.append(value)
wf.add_callback(onChanges)
print('Added a callback. Now wait for changes...\n')
t0 = time.time()
while time.time() - t0 < 3:
time.sleep(1.e-4)
if len(values)>0:
break
assert len(values) > 0
assert len(values[0]) == 32
wf.clear_callbacks()
def test_emptyish_char_waveform_no_monitor(pvnames):
'''a test of a char waveform of length 1 (NORD=1): value "\0"
without using auto_monitor
'''
zerostr = PV(pvnames.char_arr_pv, auto_monitor=False)
zerostr.wait_for_connection()
# elem_count = 128, requested count = None, libca returns count = 1
zerostr.put([0], wait=True)
assert zerostr.get(as_string=True) == ''
numpy.testing.assert_array_equal(zerostr.get(as_string=False), [0])
assert zerostr.get(as_string=True, as_numpy=False) == ''
numpy.testing.assert_array_equal(zerostr.get(as_string=False, as_numpy=False), [0])
# elem_count = 128, requested count = None, libca returns count = 2
zerostr.put([0, 0], wait=True)
assert zerostr.get(as_string=True) == ''
numpy.testing.assert_array_equal(zerostr.get(as_string=False), [0, 0])
assert zerostr.get(as_string=True, as_numpy=False) == ''
numpy.testing.assert_array_equal(zerostr.get(as_string=False,
as_numpy=False), [0, 0])
def test_emptyish_char_waveform_monitor(pvnames):
'''a test of a char waveform of length 1 (NORD=1): value "\0"
with using auto_monitor
'''
zerostr = PV(pvnames.char_arr_pv, auto_monitor=True)
zerostr.wait_for_connection()
zerostr.put([0], wait=True)
time.sleep(0.2)
assert zerostr.get(as_string=True) == ''
numpy.testing.assert_array_equal(zerostr.get(as_string=False), [0])
assert zerostr.get(as_string=True, as_numpy=False) == ''
numpy.testing.assert_array_equal(zerostr.get(as_string=False, as_numpy=False), [0])
zerostr.put([0, 0], wait=True)
time.sleep(0.2)
assert zerostr.get(as_string=True) == ''
numpy.testing.assert_array_equal(zerostr.get(as_string=False), [0, 0])
assert zerostr.get(as_string=True, as_numpy=False) == ''
numpy.testing.assert_array_equal(zerostr.get(as_string=False, as_numpy=False), [0, 0])
zerostr.disconnect()
def testEnumPut(pvnames):
pv = PV(pvnames.enum_pv)
assert pv is not None
pv.put('Stop')
time.sleep(0.1)
val = pv.get()
assert val == 0
assert pv.get(as_string=True) == 'Stop'
@pytest.mark.xfail(os.environ.get('BASE_VER') in ('R3.16.1', 'R7.0.1.1'),
reason='known issues with simulator on some BASE versions')
def test_DoubleVal(pvnames, simulator):
epics = pytest.importorskip('epics')
pvn = pvnames.double_pv
pv = PV(pvn)
print('pv', pv)
value = pv.get()
print('pv get', value)
assert pv.connected
print('%s get value %s' % (pvn, value))
cdict = pv.get_ctrlvars()
print('Testing CTRL Values for a Double (%s)\n' % (pvn))
assert 'severity' in cdict
assert len(pv.host) > 1
assert pv.count == 1
assert pv.precision == pvnames.double_pv_prec
assert pv.units == pvnames.double_pv_units
assert pv.access.startswith('read')
def test_waveform_get_1elem(pvnames):
pv = PV(pvnames.double_arr_pv)
val = pv.get(count=1, use_monitor=False)
assert isinstance(val, numpy.ndarray)
assert len(val) == 1
def test_subarray_1elem(pvnames):
# pv = PV(pvnames.zero_len_subarr1)
pv = PV(pvnames.double_arr_pv)
pv.wait_for_connection()
val = pv.get(count=1, use_monitor=False)
print('val is', val, type(val))
assert isinstance(val, numpy.ndarray)
assert len(val) == 1
val = pv.get(count=1, as_numpy=False, use_monitor=False)
print('val is', val, type(val))
assert isinstance(val, list)
assert len(val) == 1
@pytest.mark.skipif(os.environ.get("CAPROTO_SKIP_MOTORSIM_TESTS") is not None,
reason='No motorsim IOC')
@pytest.mark.skipif(sys.platform == 'win32',
reason='win32 motorsim IOC')
def test_pyepics_pv(context):
pv1 = "sim:mtr1"
ctx = context
# Some user function to call when subscriptions receive data.
called = []
def user_callback(*, value, **kwargs):
print()
print('-- user callback', value)
called.append(True)
time_pv = PV(pv1, context=ctx, form='time')
ctrl_pv = PV(pv1, context=ctx, form='ctrl')
time_pv.wait_for_connection()
time_pv.add_callback(user_callback)
print('time read', time_pv.get())
print('ctrl read', ctrl_pv.get())
time_pv.put(3, wait=True)
time_pv.put(6, wait=True)
time.sleep(0.1)
assert time_pv.get() == 6
assert called
print('read', time_pv.get())
print('done')
repr(time_pv)
for k, v in PV.__dict__.items():
if isinstance(v, property):
getattr(time_pv, k)
getattr(ctrl_pv, k)
@pytest.fixture(scope='function')
def access_security_softioc(request, prefix, context):
'From pyepics test_cas.py'
access_rights_db = {
('{}:ao'.format(prefix), 'ao') : {
'ASG': "rps_threshold",
'DRVH': "10",
'DRVL': "0",
},
('{}:bo'.format(prefix), 'bo') : {
'ASG': "rps_lock",
'ZNAM': "OUT",
'ONAM': "IN",
},
('{}:ao2'.format(prefix), 'ao') : {
'DRVH': "5",
'DRVL': "1",
},
('{}:permit'.format(prefix), 'bo') : {
'VAL': "0",
'PINI': "1",
'ZNAM': "DISABLED",
'ONAM': "ENABLED",
},
}
access_rights_asg_rules = '''
ASG(DEFAULT) {
RULE(1,READ)
RULE(1,WRITE,TRAPWRITE)
}
ASG(rps_threshold) {
INPA("$(P):permit")
RULE(1, READ)
RULE(0, WRITE, TRAPWRITE) {
CALC("A=1")
}
RULE(1, WRITE, TRAPWRITE) {
CALC("A=0")
}
}
ASG(rps_lock) {
INPA("$(P):permit")
RULE(1, READ)
RULE(1, WRITE, TRAPWRITE) {
CALC("A=0")
}
}
'''
from .conftest import run_softioc, poll_readiness
handler = run_softioc(request, db=access_rights_db,
access_rules_text=access_rights_asg_rules,
macros={'P': prefix},
)
PV._default_context = context
process = handler.processes[-1]
pvs = {pv[len(prefix) + 1:]: PV(pv)
for pv, rtype in access_rights_db
}
pvs['ao.DRVH'] = PV(prefix + ':ao.DRVH')
poll_readiness(pvs['ao'].pvname, process=process)
for pv in pvs.values():
pv.wait_for_connection()
def finalize_context():
print('Cleaning up PV context')
broadcaster = PV._default_context.broadcaster
broadcaster.disconnect()
PV._default_context.disconnect()
PV._default_context = None
print('Done cleaning up PV context')
request.addfinalizer(finalize_context)
return SimpleNamespace(process=process, prefix=prefix,
name='access_rights', pvs=pvs, type='epics-base')
def test_permit_disabled(access_security_softioc):
# with the permit disabled, all test pvs should be readable/writable
pvs = access_security_softioc.pvs
for pv in pvs.values():
assert pv.read_access and pv.write_access
def test_permit_enabled(access_security_softioc):
pvs = access_security_softioc.pvs
# set the run-permit
pvs['permit'].put(1, wait=True)
assert pvs['permit'].get(as_string=True, use_monitor=False) == 'ENABLED'
# rps_lock rule should disable write access
assert pvs['bo'].write_access is False
with pytest.raises(AccessRightsException):
pvs['bo'].put(1, wait=True)
# rps_threshold rule should disable write access to metadata, not VAL
assert pvs['ao'].write_access is True
assert pvs['ao.DRVH'].write_access is False
with pytest.raises(AccessRightsException):
pvs['ao.DRVH'].put(100, wait=True)
def test_pv_access_event_callback(access_security_softioc):
pvs = access_security_softioc.pvs
# clear the run-permit
pvs['permit'].put(0, wait=True)
assert pvs['permit'].get(as_string=True, use_monitor=False) == 'DISABLED'
def lcb(read_access, write_access, pv=None):
assert pv.read_access == read_access
assert pv.write_access == write_access
pv.flag = True
bo = PV(pvs['bo'].pvname, access_callback=lcb)
bo.flag = False
# set the run-permit to trigger an access rights event
pvs['permit'].put(1, wait=True)
assert pvs['permit'].get(as_string=True, use_monitor=False) == 'ENABLED'
# give the callback a bit of time to run
time.sleep(0.2)
assert bo.flag is True
def test_get_with_metadata(pvnames):
with no_simulator_updates(pvnames):
pv = PV(pvnames.int_pv, form='native')
# Request time type
md = pv.get_with_metadata(use_monitor=False, form='time')
assert 'timestamp' in md
assert 'lower_ctrl_limit' not in md
# Request control type
md = pv.get_with_metadata(use_monitor=False, form='ctrl')
assert 'lower_ctrl_limit' in md
assert 'timestamp' not in md
# Use monitor: all metadata should come through
md = pv.get_with_metadata(use_monitor=True)
assert 'timestamp' in md
assert 'lower_ctrl_limit' in md
# Get a namespace
ns = pv.get_with_metadata(use_monitor=True, as_namespace=True)
assert hasattr(ns, 'timestamp')
assert hasattr(ns, 'lower_ctrl_limit')
|
download_anime.py
|
from subprocess import Popen, PIPE, STDOUT
import shlex
import sys
import threading
import time
import os
import subprocess
from threading import Thread
import queue
# this is a wrapper for: https://github.com/Pfuenzle/anime-loads
# docker run --rm -it -v $PWD/config:/config pfuenzle/anime-loads add
# docker run --rm -it -v $PWD/config:/config pfuenzle/anime-loads edit
# docker run --rm -it -v $PWD/config:/config pfuenzle/anime-loads remove
#HOW TO USE:
# python3 download_anime.py "search_string" [LANGUAGE] [RESOLUTION] [FORCE_ANIME_RESULT_NUMBER] [FORCE_RELEASE_NUMBER] [DRY_RUN]
# python3 download_anime.py "search_string" german -> adds to monitoring and downloads latest german 1080p release
# python3 download_anime.py "search_string" german 1080p -> adds to monitoring and downloads latest german 1080p release
# python3 download_anime.py "search_string" german 720p -> adds to monitoring and downloads latest german 720p release
# python3 download_anime.py "search_string" japanese -> adds to monitoring and downloads latest japanese 1080p release
# python3 download_anime.py "search_string" japanese 1080p -> adds to monitoring and downloads latest japanese 1080p release
# python3 download_anime.py "search_string" japanese 720p -> adds to monitoring and downloads latest japanese 720p release
# Valid language parameters (which is required!):
# japanese or jap
# german or ger
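# Additional illustrative invocations (a sketch based on the optional positional
# arguments parsed below; the numeric values are hypothetical):
# python3 download_anime.py "search_string" german 1080p 2 -> pick the 2nd anime search result
# python3 download_anime.py "search_string" german 1080p 2 3 -> pick the 2nd anime result and force release number 3
# python3 download_anime.py "search_string" german 1080p 1 0 1 -> dry run: exit before the package name is submitted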
#remove a download entry from anime-loads docker config:
# docker exec -i pfuenzle-anime-loads1 python anibot.py --configfile /config/ani.json remove
RUNNING_DOCKER_CONTAINER="pfuenzle-anime-loads1"
CRED = '\033[33m'
CEND = '\033[0m'
RELEASE_ID_TO_DOWNLOAD=0
RELEASE_SELECTED=False
GOT_FIRST_EXIT_MESSAGE=False
SEARCH_RESULTS_FOUND=False
SEARCH_RESULT_TYPE="tv"
DRY_RUN=False
def reader(pipe, queue):
try:
with pipe:
for line in iter(pipe.readline, b''):
queue.put((pipe, line))
finally:
queue.put(None)
p = Popen(shlex.split("docker exec -i " + RUNNING_DOCKER_CONTAINER + " python anibot.py --configfile /config/ani.json add"), stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, bufsize=0)
q = queue.Queue()
Thread(target=reader, args=[p.stdout, q]).start()
Thread(target=reader, args=[p.stderr, q]).start()
if len(sys.argv) >= 7 and int(sys.argv[6]) > 0:
DRY_RUN=True
def threadSelectRelease():
global RELEASE_ID_TO_DOWNLOAD
global p
time.sleep(5)
if len(sys.argv) >= 6 and int(sys.argv[5]) > 0:
RELEASE_ID_TO_DOWNLOAD = int(sys.argv[5])
if RELEASE_ID_TO_DOWNLOAD > 0:
print("Selecting last matched result (because its the newest): " + str(RELEASE_ID_TO_DOWNLOAD))
print("Sending input: " + str(RELEASE_ID_TO_DOWNLOAD))
p.stdin.write((str(RELEASE_ID_TO_DOWNLOAD) + "\n").encode())
p.stdin.flush()
p.stdout.flush()
sys.stdout.flush()
else:
print("Keine Ergebnisse")
p.stdin.write(("exit\n").encode())
p.stdin.flush()
p.stdout.flush()
sys.stdout.flush()
p.terminate()
sys.exit()
#ok = True
#while ok:
while p.poll() is None:
for source, output in iter(q.get, None):
output = str(output.decode())
#output = p.stdout.readline().rstrip().decode()
print(CRED + output + CEND)
if "nach dem du suchen willst" in output and GOT_FIRST_EXIT_MESSAGE == False:
time.sleep(3)
print("Sending input: " + sys.argv[1])
#p.communicate((sys.argv[1] + '\n').encode())
p.stdin.write((sys.argv[1]+"\n").encode())
p.stdin.flush()
p.stdout.flush()
sys.stdout.flush()
if "Keine Ergebnisse" in output:
print("Keine Ergebnisse. Exit...")
p.stdin.write(("exit\n").encode())
p.stdin.flush()
p.stdout.flush()
sys.stdout.flush()
p.terminate()
sys.exit()
if "os error" in output:
print("OS Error. Exit...")
p.stdin.write(("exit\n").encode())
p.stdin.flush()
p.stdout.flush()
sys.stdout.flush()
p.terminate()
sys.exit()
if (("Exit" in output or "exit" in output) or ("nach dem du suchen willst" in output)) and GOT_FIRST_EXIT_MESSAGE == True:
print("Finished! Exit...")
print("Sending input: exit")
p.stdin.write(("exit\n").encode())
p.stdin.flush()
p.stdout.flush()
sys.stdout.flush()
p.terminate()
sys.exit()
elif "Exit" in output or "exit" in output and GOT_FIRST_EXIT_MESSAGE == False:
GOT_FIRST_EXIT_MESSAGE=True
if "Ergebnisse:" in output:
SEARCH_RESULTS_FOUND=True
SELECT_RESULT_ANIME_NAME="1"
if len(sys.argv) >= 5 and int(sys.argv[4]) > 0:
SELECT_RESULT_ANIME_NAME=str(sys.argv[4])
print("Found a matching result! Selecting " + SELECT_RESULT_ANIME_NAME + ". match.")
print("Sending input: " + SELECT_RESULT_ANIME_NAME)
p.stdin.write((SELECT_RESULT_ANIME_NAME + "\n").encode())
p.stdin.flush()
p.stdout.flush()
sys.stdout.flush()
if "[1] Name:" in output and ("Episoden: 1337" in output or "1337/1337" in output):
SEARCH_RESULT_TYPE="movie"
if "Releases:" in output:
th = threading.Thread(target=threadSelectRelease)
th.start()
RESOLUTION="1080p"
if len(sys.argv) >= 4:
RESOLUTION=sys.argv[3]
if sys.argv[2] == "" or sys.argv[2] == "german" or sys.argv[2] == "ger":
if ("Release ID:" in output) and (", Dub" in output and "Deutsch" in output.split(", Dub")[1].split("]")[0]) and ("Resolution: " + RESOLUTION) in output and "Du hast folgendes Release" not in output:
RELEASE_ID_TO_DOWNLOAD=int(output.split(',')[0].split('ID: ')[1])
print("Found german 1080p release. Set RELEASE_ID_TO_DOWNLOAD to: " + str(RELEASE_ID_TO_DOWNLOAD))
if RELEASE_SELECTED == False:
RELEASE_SELECTED=True
if sys.argv[2] == "japanese" or sys.argv[2] == "jap":
if ("Release ID:" in output) and (", Dub" in output and "Japanisch" in output.split(", Dub")[1].split("]")[0]) and ("Resolution: " + RESOLUTION) in output and "Du hast folgendes Release" not in output:
RELEASE_ID_TO_DOWNLOAD=int(output.split(',')[0].split('ID: ')[1])
print("Found japanese 1080p release. Set RELEASE_ID_TO_DOWNLOAD to: " + str(RELEASE_ID_TO_DOWNLOAD))
if RELEASE_SELECTED == False:
RELEASE_SELECTED=True
if ("Das Release hat" in output and "Episode(n)" in output):
# -- download all
print("Sending input: #")
p.stdin.write(("#\n").encode())
# -- download all new, so this script can run multiple times with a same series request without downloading it multiple times
#print("Downloading all new episodes... needs ENTER.")
#print("Sending input: ENTER")
#p.stdin.write(("\n").encode())
p.stdin.flush()
p.stdout.flush()
sys.stdout.flush()
if "Wieviel Episoden hast du bereits runtergeladen" in output and "Fehlerhafte Eingabe" in output:
# -- download all
print("Downloading all episodes so we need to send 0.")
print("Sending input: 0")
p.stdin.write(("0\n").encode())
p.stdin.flush()
p.stdout.flush()
sys.stdout.flush()
#if "dem Anime einen spezifischen Paketnamen geben" in output:
if "Wieviel Episoden hast du bereits runtergeladen" in output and "Fehlerhafte Eingabe" not in output:
# package name: "search_string language resolution"
#sending invalid character to refresh output and get new input
print("Sending input:j")
p.stdin.write(("j\n").encode())
p.stdin.flush()
p.stdout.flush()
sys.stdout.flush()
lang_for_package_name = 'NA'
if sys.argv[2] == "japanese" or sys.argv[2] == "jap":
lang_for_package_name = "japanese"
elif sys.argv[2] == "" or sys.argv[2] == "german" or sys.argv[2] == "ger":
lang_for_package_name = "german"
print("Sending input: " + (sys.argv[1] + " " + lang_for_package_name + " " + RESOLUTION + " " + SEARCH_RESULT_TYPE))
if DRY_RUN == True:
print("DRY RUN aktiviert! Exit...")
p.terminate()
sys.exit()
p.stdin.write((sys.argv[1] + " " + lang_for_package_name + " " + RESOLUTION + " " + SEARCH_RESULT_TYPE + "\n").encode())
p.stdin.flush()
p.stdout.flush()
sys.stdout.flush()
#if "Paketnamen:" in output:
#if "dem Anime einen spezifischen Paketnamen geben" in output:
# #sending invalid character to refresh output and get new input
# p.stdin.write(("#\n").encode())
# p.stdin.flush()
# p.stdout.flush()
# sys.stdout.flush()
#
# lang_for_package_name = 'NA'
# if sys.argv[2] == "japanese" or sys.argv[2] == "jap":
# lang_for_package_name = "japanese"
# elif sys.argv[2] == "" or sys.argv[2] == "german" or sys.argv[2] == "ger":
# lang_for_package_name = "german"
# p.stdin.write((sys.argv[1] + " " + lang_for_package_name + " " + RESOLUTION + "\n").encode())
# p.stdin.flush()
# p.stdout.flush()
# sys.stdout.flush()
#p.stdin.write(("").encode())
#p.stdin.flush()
if output == '' and p.poll() is not None:
break
rc = p.poll()
|
webcam_demo.py
|
import argparse
import time
from collections import deque
from operator import itemgetter
from threading import Thread
import cv2
import numpy as np
import torch
from mmcv import Config, DictAction
from mmcv.parallel import collate, scatter
from mmaction.apis import init_recognizer
from mmaction.datasets.pipelines import Compose
FONTFACE = cv2.FONT_HERSHEY_COMPLEX_SMALL
FONTSCALE = 1
FONTCOLOR = (255, 255, 255) # BGR, white
MSGCOLOR = (128, 128, 128) # BGR, gray
THICKNESS = 1
LINETYPE = 1
EXCLUDED_STEPS = [
'OpenCVInit', 'OpenCVDecode', 'DecordInit', 'DecordDecode', 'PyAVInit',
'PyAVDecode', 'RawFrameDecode', 'FrameSelector'
]
def parse_args():
parser = argparse.ArgumentParser(description='MMAction2 webcam demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('label', help='label file')
parser.add_argument(
'--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument(
'--camera-id', type=int, default=0, help='camera device id')
parser.add_argument(
'--threshold',
type=float,
default=0.01,
help='recognition score threshold')
parser.add_argument(
'--average-size',
type=int,
default=1,
help='number of latest clips to be averaged for prediction')
parser.add_argument(
'--drawing-fps',
type=int,
default=20,
help='Set upper bound FPS value of the output drawing')
parser.add_argument(
'--inference-fps',
type=int,
default=4,
help='Set upper bound FPS value of model inference')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
default={},
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. For example, '
"'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
args = parser.parse_args()
assert args.drawing_fps >= 0 and args.inference_fps >= 0, \
'upper bound FPS value of drawing and inference should be set as a ' \
'positive number, or zero for no limit'
return args
def show_results():
print('Press "Esc", "q" or "Q" to exit')
text_info = {}
cur_time = time.time()
while True:
msg = 'Waiting for action ...'
_, frame = camera.read()
frame_queue.append(np.array(frame[:, :, ::-1]))
if len(result_queue) != 0:
text_info = {}
results = result_queue.popleft()
for i, result in enumerate(results):
selected_label, score = result
if score < threshold:
break
location = (0, 40 + i * 20)
text = selected_label + ': ' + str(round(score, 2))
text_info[location] = text
cv2.putText(frame, text, location, FONTFACE, FONTSCALE,
FONTCOLOR, THICKNESS, LINETYPE)
elif len(text_info) != 0:
for location, text in text_info.items():
cv2.putText(frame, text, location, FONTFACE, FONTSCALE,
FONTCOLOR, THICKNESS, LINETYPE)
else:
cv2.putText(frame, msg, (0, 40), FONTFACE, FONTSCALE, MSGCOLOR,
THICKNESS, LINETYPE)
cv2.imshow('camera', frame)
ch = cv2.waitKey(1)
if ch == 27 or ch == ord('q') or ch == ord('Q'):
break
if drawing_fps > 0:
# add a limiter for actual drawing fps <= drawing_fps
sleep_time = 1 / drawing_fps - (time.time() - cur_time)
if sleep_time > 0:
time.sleep(sleep_time)
cur_time = time.time()
def inference():
score_cache = deque()
scores_sum = 0
cur_time = time.time()
while True:
cur_windows = []
while len(cur_windows) == 0:
if len(frame_queue) == sample_length:
cur_windows = list(np.array(frame_queue))
if data['img_shape'] is None:
data['img_shape'] = frame_queue.popleft().shape[:2]
cur_data = data.copy()
cur_data['imgs'] = cur_windows
cur_data = test_pipeline(cur_data)
cur_data = collate([cur_data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
cur_data = scatter(cur_data, [device])[0]
with torch.no_grad():
scores = model(return_loss=False, **cur_data)[0]
score_cache.append(scores)
scores_sum += scores
if len(score_cache) == average_size:
scores_avg = scores_sum / average_size
num_selected_labels = min(len(label), 5)
scores_tuples = tuple(zip(label, scores_avg))
scores_sorted = sorted(
scores_tuples, key=itemgetter(1), reverse=True)
results = scores_sorted[:num_selected_labels]
result_queue.append(results)
scores_sum -= score_cache.popleft()
if inference_fps > 0:
# add a limiter for actual inference fps <= inference_fps
sleep_time = 1 / inference_fps - (time.time() - cur_time)
if sleep_time > 0:
time.sleep(sleep_time)
cur_time = time.time()
camera.release()
cv2.destroyAllWindows()
def main():
global frame_queue, camera, frame, results, threshold, sample_length, \
data, test_pipeline, model, device, average_size, label, \
result_queue, drawing_fps, inference_fps
args = parse_args()
average_size = args.average_size
threshold = args.threshold
drawing_fps = args.drawing_fps
inference_fps = args.inference_fps
device = torch.device(args.device)
cfg = Config.fromfile(args.config)
cfg.merge_from_dict(args.cfg_options)
model = init_recognizer(cfg, args.checkpoint, device=device)
camera = cv2.VideoCapture(args.camera_id)
data = dict(img_shape=None, modality='RGB', label=-1)
with open(args.label, 'r') as f:
label = [line.strip() for line in f]
# prepare test pipeline from non-camera pipeline
cfg = model.cfg
sample_length = 0
pipeline = cfg.data.test.pipeline
pipeline_ = pipeline.copy()
for step in pipeline:
if 'SampleFrames' in step['type']:
sample_length = step['clip_len'] * step['num_clips']
data['num_clips'] = step['num_clips']
data['clip_len'] = step['clip_len']
pipeline_.remove(step)
if step['type'] in EXCLUDED_STEPS:
# remove step to decode frames
pipeline_.remove(step)
test_pipeline = Compose(pipeline_)
assert sample_length > 0
try:
frame_queue = deque(maxlen=sample_length)
result_queue = deque(maxlen=1)
pw = Thread(target=show_results, args=(), daemon=True)
pr = Thread(target=inference, args=(), daemon=True)
pw.start()
pr.start()
pw.join()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
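# Illustrative sketch (not part of the demo above): the demo hands data between
# its display thread and its inference thread through two bounded deques,
# relying on deque's thread-safe append/popleft rather than explicit locks. The
# toy producer/consumer below shows the same pattern with only the standard
# library; the `_toy_producer_consumer_demo` name and the doubling "inference"
# step are assumptions made for this example.
def _toy_producer_consumer_demo(num_items=5):
    import threading
    import time as _time
    from collections import deque as _deque
    frames = _deque(maxlen=num_items)   # plays the role of frame_queue
    results = _deque()                  # plays the role of result_queue
    done = threading.Event()
    def producer():
        for i in range(num_items):
            frames.append(i)            # "capture" a frame
            _time.sleep(0.01)
        done.set()
    def consumer():
        # Drain frames until the producer has finished and the deque is empty.
        while not (done.is_set() and len(frames) == 0):
            if len(frames):
                results.append(frames.popleft() * 2)   # "run inference"
            else:
                _time.sleep(0.001)
    workers = [threading.Thread(target=producer, daemon=True),
               threading.Thread(target=consumer, daemon=True)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    return list(results)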
|
_base.py
|
# Builtins
import datetime as dt
import time
from pathlib import Path
import yaml
import traceback
import threading
from typing import List, Dict, Any
from os.path import exists
# External libraries
import pandas as pd
# Submodule imports
from harvest.utils import *
class API:
"""
The API class communicates with various API endpoints to perform the
necessary operations. The Base class defines the interface for all API classes to
extend and implement.
Attributes
:interval_list: A list of supported intervals.
:exchange: The market the API trades on. Ignored if the API is not a broker.
"""
# List of supported intervals
interval_list = [
Interval.MIN_1,
Interval.MIN_5,
Interval.MIN_15,
Interval.MIN_30,
Interval.HR_1,
Interval.DAY_1,
]
# Name of the exchange this API trades on
exchange = ""
# List of attributes that are required to be in the secret file
req_keys = []
def __init__(self, path: str = None):
"""
Performs initializations of the class, such as setting the
timestamp and loading credentials.
There are three API class types, 'streamer', 'broker', and 'both'. A
'streamer' is responsible for fetching data and interacting with
the queue to store data. A 'broker' is used solely for buying and
        selling stocks, cryptos, and options. Finally, 'both' indicates
        that the class both fetches data and buys and sells assets.
All subclass implementations should call this __init__ method
using `super().__init__(path)`.
:path: path to the YAML file containing credentials to communicate with the API.
If not specified, defaults to './secret.yaml'
"""
config = {}
if path is None:
path = "./secret.yaml"
# Check if file exists. If not, create a secret file
if not exists(path):
config = self.create_secret()
else:
# Open file
with open(path, "r") as stream:
config = yaml.safe_load(stream)
# Check if the file contains all the required parameters
if any(key not in config for key in self.req_keys):
config.update(self.create_secret())
with open(path, "w") as f:
yaml.dump(config, f)
self.config = config
def create_secret(self):
"""
        This method is called when the YAML file with credentials
        is not found. It should return a dictionary containing the necessary credentials.
"""
# raise Exception(f"{path} was not found.")
debugger.warning("Assuming API does not need account information.")
return None
def refresh_cred(self):
"""
Most API endpoints, for security reasons, require a refresh of the access token
every now and then. This method should perform a refresh of the access token.
"""
debugger.info(f"Refreshing credentials for {type(self).__name__}.")
def setup(self, stats: Stats, account: Account, trader_main=None) -> None:
"""
This function is called right before the algorithm begins,
and initializes several runtime parameters like
the symbols to watch and what interval data is needed.
:trader_main: A callback function to the trader which will pass the data to the algorithms.
"""
self.trader_main = trader_main
self.stats = stats
self.stats.timestamp = now()
self.account = account
min_interval = None
for sym in stats.watchlist_cfg:
inter = stats.watchlist_cfg[sym]["interval"]
# If the specified interval is not supported on this API, raise Exception
if inter < self.interval_list[0]:
raise Exception(f"Specified interval {inter} is not supported.")
            # If the exact interval is not supported but it can be recreated by aggregating
# candles from a more granular interval
if inter not in self.interval_list:
granular_int = [i for i in self.interval_list if i < inter]
new_inter = granular_int[-1]
stats.watchlist_cfg[sym]["aggregations"].append(inter)
stats.watchlist_cfg[sym]["interval"] = new_inter
if (
min_interval is None
or stats.watchlist_cfg[sym]["interval"] < min_interval
):
min_interval = stats.watchlist_cfg[sym]["interval"]
self.poll_interval = min_interval
debugger.debug(f"Poll Interval: {self.poll_interval}")
debugger.debug(f"{type(self).__name__} setup finished")
def start(self):
"""
This method begins streaming data from the API.
The default implementation below is for polling the API.
If your brokerage provides a streaming API, you should override
this method and configure it to use that API. In that case,
make sure to set the callback function to self.main().
"""
cur_min = -1
val, unit = expand_interval(self.poll_interval)
debugger.debug(f"{type(self).__name__} started...")
if unit == "MIN":
sleep = val * 60 - 10
while 1:
cur = now()
minutes = cur.minute
if minutes % val == 0 and minutes != cur_min:
self.stats.timestamp = cur
self.main()
time.sleep(sleep)
cur_min = minutes
elif unit == "HR":
sleep = val * 3600 - 60
while 1:
cur = now()
minutes = cur.minute
if minutes == 0 and minutes != cur_min:
self.stats.timestamp = cur
self.main()
time.sleep(sleep)
cur_min = minutes
else:
while 1:
cur = now()
minutes = cur.minute
hours = cur.hour
if hours == 19 and minutes == 50:
self.stats.timestamp = cur
self.main()
time.sleep(80000)
cur_min = minutes
def main(self):
"""
This method is called at the interval specified by the user.
It should create a dictionary where each key is the symbol for an asset,
and the value is the corresponding data in the following pandas dataframe format:
Symbol
open high low close volume
timestamp
--- --- --- --- --- ---
timestamp should be an offset-aware datetime object in UTC timezone.
The dictionary should be passed to the trader by calling `self.trader_main(dict)`
"""
# Iterate through securities in the watchlist. For those that have
        # intervals that are due now, fetch the latest data
df_dict = {}
for sym in self.stats.watchlist_cfg:
inter = self.stats.watchlist_cfg[sym]["interval"]
if is_freq(self.stats.timestamp, inter):
n = self.stats.timestamp
latest = self.fetch_price_history(
sym, inter, n - interval_to_timedelta(inter) * 2, n
)
debugger.debug(f"{sym} price fetch returned: {latest}")
if latest is None or latest.empty:
continue
df_dict[sym] = latest.iloc[-1]
self.trader_main(df_dict)
def exit(self):
"""
This function is called after every invocation of algo's handler.
        The intended purpose is for brokers to clear any cache they may have created.
"""
debugger.debug(f"{type(self).__name__} exited")
def _exception_handler(func):
"""
Wrapper to handle unexpected errors in the wrapped function.
Most functions should be wrapped with this to properly handle errors, such as
        when the internet connection is lost.
:func: Function to wrap.
:returns: The returned value of func if func runs properly. Raises an Exception if func fails.
"""
def wrapper(*args, **kwargs):
tries = 3
while tries > 0:
try:
return func(*args, **kwargs)
except Exception as e:
self = args[0]
debugger.error(f"Error: {e}")
traceback.print_exc()
debugger.error("Logging out and back in...")
args[0].refresh_cred()
tries -= 1
debugger.error("Retrying...")
continue
return wrapper
def _run_once(func):
"""
        Wrapper that only allows the wrapped function to be run once.
        :func: Function to wrap.
        :returns: The return value of the wrapped function if it has not been run before, and None otherwise.
"""
ran = False
def wrapper(*args, **kwargs):
nonlocal ran
if not ran:
ran = True
return func(*args, **kwargs)
return None
return wrapper
# -------------- Streamer methods -------------- #
def get_current_time(self):
return now()
def fetch_price_history(
self,
symbol: str,
interval: Interval,
start: dt.datetime = None,
end: dt.datetime = None,
) -> pd.DataFrame:
"""
Fetches historical price data for the specified asset and period
using the API. The first row is the earliest entry and the last
row is the latest entry.
:param symbol: The stock/crypto to get data for. Note options are not supported.
:param interval: The interval of requested historical data.
:param start: The starting date of the period, inclusive.
:param end: The ending date of the period, inclusive.
:returns: A pandas dataframe, same format as main()
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this streamer method: `fetch_price_history`."
)
def fetch_latest_price(self, symbol: str) -> float:
interval = self.poll_interval
end = self.get_current_time()
start = end - interval_to_timedelta(interval) * 2
price = self.fetch_price_history(symbol, interval, start, end)
return price[symbol]["close"][-1]
def fetch_chain_info(self, symbol: str):
"""
Returns information about the symbol's options
:param symbol: Stock symbol. Cannot use crypto.
:returns: A dict with the following keys and values:
- chain_id: ID of the option chain
- exp_dates: List of expiration dates as datetime objects
- multiplier: Multiplier of the option, usually 100
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this streamer method: `fetch_chain_info`."
)
def fetch_chain_data(self, symbol: str, date):
"""
Returns the option chain for the specified symbol.
:param symbol: Stock symbol. Cannot use crypto.
:param date: Expiration date.
:returns: A dataframe in the following format:
exp_date strike type
OCC
--- --- --- ---
exp_date should be a timezone-aware datetime object localized to UTC
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this streamer method: `fetch_chain_data`."
)
def fetch_option_market_data(self, symbol: str):
"""
Retrieves data of specified option.
:param symbol: OCC symbol of option
:returns: A dictionary:
- price: price of option
- ask: ask price
- bid: bid price
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this streamer method: `fetch_option_market_data`."
)
def fetch_market_hours(self, date: datetime.date):
"""
Returns the market hours for a given day.
Hours are based on the exchange specified in the class's 'exchange' attribute.
:returns: A dictionary with the following keys and values:
- is_open: Boolean indicating whether the market is open or closed
- open_at: Time the market opens in UTC timezone.
- close_at: Time the market closes in UTC timezone.
"""
# raise NotImplementedError(
# f"{type(self).__name__} does not support this broker method: `fetch_market_hours`."
# )
return {"is_open": True, "open_at": None, "close_at": None}
# ------------- Broker methods ------------- #
def fetch_stock_positions(self):
"""
Returns all current stock positions
:returns: A list of dictionaries with the following keys and values:
- symbol: Ticker symbol of the stock
- avg_price: The average price the stock was bought at
- quantity: Quantity owned
"""
debugger.error(
f"{type(self).__name__} does not support this broker method: `fetch_stock_positions`. Returning an empty list."
)
return []
def fetch_option_positions(self):
"""
Returns all current option positions
:returns: A list of dictionaries with the following keys and values:
- symbol: OCC symbol of the option
- base_symbol: Ticker symbol of the underlying stock
- avg_price: Average price the option was bought at
- quantity: Quantity owned
- multiplier: How many stocks each option represents
- exp_date: When the option expires
- strike_price: Strike price of the option
- type: 'call' or 'put'
"""
debugger.error(
f"{type(self).__name__} does not support this broker method: `fetch_option_positions`. Returning an empty list."
)
return []
def fetch_crypto_positions(self):
"""
Returns all current crypto positions
:returns: A list of dictionaries with the following keys and values:
- symbol: Ticker symbol for the crypto, prepended with an '@'
- avg_price: The average price the crypto was bought at
- quantity: Quantity owned
"""
debugger.error(
f"{type(self).__name__} does not support this broker method: `fetch_crypto_positions`. Returning an empty list."
)
return []
# def update_option_positions(self, positions: List[Any]):
# """
# Updates entries in option_positions list with the latest option price.
# This is needed as options are priced based on various metrics,
# and cannot be easily calculated from stock prices.
# :positions: The option_positions list in the Trader class.
# :returns: Nothing
# """
# debugger.error(
# f"{type(self).__name__} does not support this broker method: `update_option_positions`. Doing nothing."
# )
def fetch_account(self):
"""
Returns current account information from the brokerage.
:returns: A dictionary with the following keys and values:
- equity: Total assets in the brokerage
- cash: Total cash in the brokerage
- buying_power: Total buying power
- multiplier: Scale of leverage, if leveraging
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this broker method: `fetch_account`."
)
def fetch_stock_order_status(self, id):
"""
Returns the status of a stock order with the given id.
:id: ID of the stock order
:returns: A dictionary with the following keys and values:
- type: 'STOCK'
- order_id: ID of the order
- quantity: Quantity ordered
- filled_quantity: Quantity filled so far
- side: 'buy' or 'sell'
- time_in_force: Time the order is in force
- status: Status of the order
- filled_time: Time the order was filled
- filled_price: Price the order was filled at
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this broker method: `fetch_stock_order_status`."
)
def fetch_option_order_status(self, id):
"""
        Returns the status of an option order with the given id.
:id: ID of the option order
:returns: A dictionary with the following keys and values:
- type: 'OPTION'
- order_id: ID of the order
- quantity: Quantity ordered
- filled_quantity: Quantity filled so far
- side: 'buy' or 'sell'
- time_in_force: Time the order is in force
- status: Status of the order
- filled_time: Time the order was filled
- filled_price: Price the order was filled at
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this broker method: `fetch_option_order_status`."
)
def fetch_crypto_order_status(self, id):
"""
Returns the status of a crypto order with the given id.
:id: ID of the crypto order
:returns: A dictionary with the following keys and values:
- type: 'CRYPTO'
- order_id: ID of the order
- quantity: Quantity ordered
- filled_quantity: Quantity filled so far
- side: 'buy' or 'sell'
- time_in_force: Time the order is in force
- status: Status of the order
- filled_time: Time the order was filled
- filled_price: Price the order was filled at
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this broker method: `fetch_crypto_order_status`."
)
def fetch_order_queue(self):
"""
Returns all current pending orders
        :returns: A list of dictionaries with the following keys and values:
For stocks and crypto:
- order_type: "STOCK" or "CRYPTO"
- symbol: Symbol of asset
- quantity: Quantity ordered
- filled_qty: Quantity filled
- order_id: ID of order
- time_in_force: Time in force
- status: Status of the order
- side: 'buy' or 'sell'
- filled_time: Time the order was filled
- filled_price: Price the order was filled at
For options:
- order_type: "OPTION",
- symbol: OCC symbol of option
- base_symbol:
- quantity: Quantity ordered
- filled_qty: Quantity filled
- filled_time: Time the order was filled
- filled_price: Price the order was filled at
- order_id: ID of order
- time_in_force: Time in force
- status: Status of the order
- side: 'buy' or 'sell'
"""
debugger.error(
f"{type(self).__name__} does not support this broker method: `fetch_order_queue`. Returning an empty list."
)
return []
# --------------- Methods for Trading --------------- #
def order_stock_limit(
self,
side: str,
symbol: str,
quantity: float,
limit_price: float,
in_force: str = "gtc",
extended: bool = False,
):
"""
Places a limit order.
:symbol: symbol of stock
:side: 'buy' or 'sell'
:quantity: quantity to buy or sell
:limit_price: limit price
:in_force: 'gtc' by default
:extended: 'False' by default
:returns: A dictionary with the following keys and values:
- order_id: ID of order
- symbol: symbol of asset
Raises an exception if order fails.
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this broker method: `order_stock_limit`."
)
def order_crypto_limit(
self,
side: str,
symbol: str,
quantity: float,
limit_price: float,
in_force: str = "gtc",
extended: bool = False,
):
"""
Places a limit order.
:symbol: symbol of crypto
:side: 'buy' or 'sell'
:quantity: quantity to buy or sell
:limit_price: limit price
:in_force: 'gtc' by default
:extended: 'False' by default
:returns: A dictionary with the following keys and values:
- order_id: ID of order
- symbol: symbol of asset
Raises an exception if order fails.
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this broker method: `order_crypto_limit`."
)
def order_option_limit(
self,
side: str,
symbol: str,
quantity: float,
limit_price: float,
option_type: str,
exp_date: dt.datetime,
strike: float,
in_force: str = "gtc",
):
"""
Order an option.
:side: 'buy' or 'sell'
:symbol: symbol of asset
:in_force:
:limit_price: limit price
:quantity: quantity to sell or buy
:exp_date: expiration date
:strike: strike price
:option_type: 'call' or 'put'
:returns: A dictionary with the following keys and values:
- order_id: ID of order
- symbol: symbol of asset
Raises an exception if order fails.
"""
raise NotImplementedError(
f"{type(self).__name__} does not support this broker method: `order_option_limit`."
)
def cancel_stock_order(self, order_id):
raise NotImplementedError(
f"{type(self).__name__} does not support this broker method: `cancel_stock_order`."
)
def cancel_crypto_order(self, order_id):
raise NotImplementedError(
f"{type(self).__name__} does not support this broker method: `cancel_crypto_order`."
)
def cancel_option_order(self, order_id):
raise NotImplementedError(
f"{type(self).__name__} does not support this broker method: `cancel_option_order`."
)
# -------------- Built-in methods -------------- #
# These do not need to be re-implemented in a subclass
def buy(
self,
symbol: str,
quantity: int,
limit_price: float,
in_force: str = "gtc",
extended: bool = False,
):
"""
Buys the specified asset.
:symbol: Symbol of the asset to buy
:quantity: Quantity of asset to buy
:limit_price: Limit price to buy at
:in_force: Duration the order is in force
:extended: Whether to trade in extended hours or not.
:returns: The result of order_limit(). Returns None if there is an issue with the parameters.
"""
debugger.debug(f"{type(self).__name__} ordered a buy of {quantity} {symbol}")
typ = symbol_type(symbol)
if typ == "STOCK":
return self.order_stock_limit(
"buy", symbol, quantity, limit_price, in_force, extended
)
elif typ == "CRYPTO":
return self.order_crypto_limit(
"buy", symbol[1:], quantity, limit_price, in_force, extended
)
elif typ == "OPTION":
sym, exp_date, option_type, strike = self.occ_to_data(symbol)
return self.order_option_limit(
"buy",
sym,
quantity,
limit_price,
option_type,
exp_date,
strike,
in_force,
)
else:
debugger.error(f"Invalid asset type for {symbol}")
def sell(
self,
symbol: str = None,
quantity: int = 0,
limit_price: float = 0.0,
in_force: str = "gtc",
extended: bool = False,
):
"""Sells the specified asset.
:symbol: Symbol of the asset to buy
:quantity: Quantity of asset to buy
:limit_price: Limit price to buy at
:in_force: Duration the order is in force
:extended: Whether to trade in extended hours or not.
:returns: The result of order_limit(). Returns None if there is an issue with the parameters.
"""
debugger.debug(f"{type(self).__name__} ordered a sell of {quantity} {symbol}")
typ = symbol_type(symbol)
if typ == "STOCK":
return self.order_stock_limit(
"sell", symbol, quantity, limit_price, in_force, extended
)
elif typ == "CRYPTO":
return self.order_crypto_limit(
"sell", symbol[1:], quantity, limit_price, in_force, extended
)
elif typ == "OPTION":
sym, exp_date, option_type, strike = self.occ_to_data(symbol)
return self.order_option_limit(
"sell",
sym,
quantity,
limit_price,
option_type,
exp_date,
strike,
in_force,
)
else:
debugger.error(f"Invalid asset type for {symbol}")
def cancel(self, order_id):
for o in self.account.orders.orders:
if o.order_id == order_id:
asset_type = symbol_type(o.symbol)
if asset_type == "STOCK":
self.cancel_stock_order(order_id)
elif asset_type == "CRYPTO":
self.cancel_crypto_order(order_id)
elif asset_type == "OPTION":
self.cancel_option_order(order_id)
# def buy_option(self, symbol: str, quantity: int = 0, in_force: str = "gtc"):
# """
# Buys the specified option.
# :symbol: Symbol of the asset to buy, in OCC format.
# :quantity: Quantity of asset to buy
# :in_force: Duration the order is in force
# :returns: The result of order_option_limit(). Returns None if there is an issue with the parameters.
# """
# if quantity <= 0.0:
# debugger.error(
# f"Quantity cannot be less than or equal to 0: was given {quantity}"
# )
# return None
# if self.trader is None:
# buy_power = self.fetch_account()["buying_power"]
# price = self.streamer.fetch_option_market_data(symbol)["price"]
# else:
# buy_power = self.trader.account["buying_power"]
# price = self.trader.streamer.fetch_option_market_data(symbol)["price"]
# limit_price = mark_up(price)
# total_price = limit_price * quantity
# if total_price >= buy_power:
# debugger.warning(
# "Not enough buying power.\n" +
# f"Total price ({price} * {quantity} * 1.05 = {limit_price*quantity}) exceeds buying power {buy_power}.\n" +
# "Reduce purchase quantity or increase buying power."
# )
# sym, date, option_type, strike = self.occ_to_data(symbol)
# return self.order_option_limit(
# "buy",
# sym,
# quantity,
# limit_price,
# option_type,
# date,
# strike,
# in_force=in_force,
# )
# def sell_option(self, symbol: str, quantity: int = 0, in_force: str = "gtc"):
# """
# Sells the specified option.
# :symbol: Symbol of the asset to buy, in OCC format.
# :quantity: Quantity of asset to buy
# :in_force: Duration the order is in force
# :returns: The result of order_option_limit(). Returns None if there is an issue with the parameters.
# """
# if quantity <= 0.0:
# debugger.error(
# f"Quantity cannot be less than or equal to 0: was given {quantity}"
# )
# return None
# if self.trader is None:
# price = self.streamer.fetch_option_market_data(symbol)["price"]
# else:
# price = self.trader.streamer.fetch_option_market_data(symbol)["price"]
# limit_price = mark_down(price)
# debugger.debug(f"{type(self).__name__} ordered a sell of {quantity} {symbol}")
# sym, date, option_type, strike = self.occ_to_data(symbol)
# return self.order_option_limit(
# "sell",
# sym,
# quantity,
# limit_price,
# option_type,
# date,
# strike,
# in_force=in_force,
# )
# -------------- Helper methods -------------- #
def has_interval(self, interval: str):
return interval in self.interval_list
def data_to_occ(
self, symbol: str, date: dt.datetime, option_type: str, price: float
):
return data_to_occ(symbol, date, option_type, price)
def occ_to_data(self, symbol: str):
return occ_to_data(symbol)
def current_timestamp(self):
return self.timestamp
class StreamAPI(API):
    """Base class for APIs that stream data to the trader asynchronously."""
def __init__(self, path: str = None):
super().__init__(path)
self.block_lock = (
threading.Lock()
) # Lock for streams that receive data asynchronously.
self.block_queue = {}
self.first = True
def setup(self, stats, account, trader_main=None) -> None:
super().setup(stats, account, trader_main)
self.blocker = {}
def start(self):
debugger.debug(f"{type(self).__name__} started...")
def main(self, df_dict):
"""
        Streaming is event driven, so sometimes not all data comes in at once.
        This method buffers incoming data until every watched symbol that needs
        an update at this timestamp has reported (or a timeout flushes the rest),
        then passes the combined data on to the trader.
        """
self.block_lock.acquire()
got = [k for k in df_dict]
# First, identify which symbols need to have data fetched
# for this timestamp
if self.first:
self.needed = [
sym
for sym in self.stats.watchlist_cfg
if is_freq(now(), self.stats.watchlist_cfg[sym]["interval"])
]
self.stats.timestamp = df_dict[got[0]].index[0]
debugger.debug(f"Needs: {self.needed}")
debugger.debug(f"Got data for: {got}")
missing = list(set(self.needed) - set(got))
debugger.debug(f"Still need data for: {missing}")
self.block_queue.update(df_dict)
# debugger.debug(self.block_queue)
# If all data has been received, pass on the data
if len(missing) == 0:
debugger.debug("All data received")
self.trader_main(self.block_queue)
self.block_queue = {}
self.all_recv = True
self.first = True
self.block_lock.release()
return
        # If some data has not been received yet, start a timeout timer
if self.first:
timer = threading.Thread(target=self.timeout, daemon=True)
timer.start()
self.all_recv = False
self.first = False
self.needed = missing
self.got = got
self.block_lock.release()
def timeout(self):
debugger.debug("Begin timeout timer")
time.sleep(1)
if not self.all_recv:
debugger.debug("Force flush")
self.flush()
def flush(self):
        # For missing symbols, fill in an OHLCV row of all zeroes.
self.block_lock.acquire()
for n in self.needed:
data = pd.DataFrame(
{"open": 0, "high": 0, "low": 0, "close": 0, "volume": 0},
index=[self.stats.timestamp],
)
data.columns = pd.MultiIndex.from_product([[n], data.columns])
self.block_queue[n] = data
self.block_lock.release()
self.trader_main(self.block_queue)
self.block_queue = {}
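# Illustrative sketch (not part of the harvest package): a minimal streamer
# subclass could implement `fetch_price_history` along these lines, returning
# the (symbol, OHLCV) MultiIndex dataframe format documented in `API.main`.
# The `ExampleStreamer` name and the constant placeholder prices are
# assumptions made for this example only.
class ExampleStreamer(API):
    def fetch_price_history(self, symbol, interval, start=None, end=None):
        end = end if end is not None else now()
        start = start if start is not None else end - interval_to_timedelta(interval) * 10
        # One bar per interval, filled with placeholder OHLCV values.
        index = pd.date_range(start, end, freq=interval_to_timedelta(interval))
        df = pd.DataFrame(
            {"open": 100.0, "high": 101.0, "low": 99.0, "close": 100.5, "volume": 1000},
            index=index,
        )
        # The trader expects columns keyed by (symbol, field).
        df.columns = pd.MultiIndex.from_product([[symbol], df.columns])
        return df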
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum
from electrum.bitcoin import TYPE_ADDRESS
from electrum import WalletStorage, Wallet
from electrum_gui.kivy.i18n import _
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword
from electrum.plugins import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# Register a widget cache to keep memory usage down; a timeout of 0 caches
# the data forever.
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum.bitcoin import NetworkConstants
pp = servers.get(host, NetworkConstants.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == self.network.get_blockchain_name(b):
self.network.follow_chain(index)
#self.block
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) >1:
ChoiceDialog(_('Choose your chain'), names, '', cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
return self.electrum_config.get('base_unit', 'ZCL')
def _set_bu(self, value):
assert value in base_units.keys()
self.electrum_config.set_key('base_unit', value, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Current orientation of the device.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
App.__init__(self)#, **kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so that UI updates happen at most twice a second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
        ''' This is the start point of the Kivy UI.
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, instance, wallet):
if wallet:
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
self.on_resume()
def load_wallet_by_name(self, path):
if not path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet != self.wallet:
self.stop_wallet()
self.load_wallet(wallet)
self.on_resume()
else:
Logger.debug('Electrum: Wallet not found. Launching install wizard')
storage = WalletStorage(path)
wizard = Factory.InstallWizard(self.electrum_config, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
def on_stop(self):
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of Electrum. This function performs the basic
        tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
if self.network.interface:
self.server_host = self.network.interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
self.wallet = wallet
self.update_wallet()
        # Once the GUI has been initialized, check whether we need to announce
        # anything, since callbacks may have fired before the GUI was ready
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging (%d blocks)"%server_lag)
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
status = str(text.strip() + ' ' + self.base_unit)
else:
status = _("Disconnected")
n = self.wallet.basename()
self.status = '[size=15dp]%s[/size]\n%s' %(n, status)
#fiat_balance = self.fx.format_amount_and_units(c+u+x) or ''
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
amount = tx.output_value()
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
# workaround p4a bug:
# show an empty info bubble, to refresh the display
self.show_info_bubble('', duration=0.1, pos=(0,0), width=1, arrow_pos=None)
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
        ''' Show an error message bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
        ''' Show an info message bubble.
'''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
        '''Method to show an information bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
@profiler
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def protected(self, msg, f, args):
if self.wallet.has_password():
self.password_dialog(msg, f, args)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = os.path.basename(self.wallet.storage.path)
self.protected(_("Enter your PIN code to confirm deletion of %s") % basename, self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
d = os.listdir(dirname)
name = 'default_wallet'
new_path = os.path.join(dirname, name)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def change_password(self, cb):
if self.wallet.has_password():
self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
else:
self._change_password(cb, None)
def _change_password(self, cb, old_password):
if self.wallet.has_password():
if old_password is None:
return
try:
self.wallet.check_password(old_password)
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))
def _change_password2(self, cb, old_password, new_password):
self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))
def _change_password3(self, cb, old_password, new_password, confirmed_password):
if new_password == confirmed_password:
self.wallet.update_password(old_password, new_password)
cb()
else:
self.show_error("PIN numbers do not match")
def password_dialog(self, msg, f, args):
from .uix.dialogs.password_dialog import PasswordDialog
def callback(pw):
Clock.schedule_once(lambda x: f(*(args + (pw,))), 0.1)
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(msg, callback)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
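# Illustrative sketch (not part of the Electrum GUI above): btc_to_fiat() and
# fiat_to_btc() convert through satoshis, where one coin is 10**8 satoshis and
# the displayed unit shifts the amount by `decimal_point` decimal places. The
# standalone helper below reproduces that arithmetic with plain Decimal values;
# its name and default arguments are assumptions made for this example.
def _example_fiat_round_trip(amount_str="1.5", decimal_point=8, rate=Decimal("30.25")):
    # display-unit amount -> satoshis (as in get_amount())
    satoshis = int(Decimal(amount_str) * pow(10, decimal_point))
    # satoshis -> fiat (as in btc_to_fiat())
    fiat = satoshis * rate / pow(10, 8)
    # fiat -> satoshis (as in fiat_to_btc())
    back = int(pow(10, 8) * fiat / rate)
    return satoshis, fiat, back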
|
server.py
|
#!/usr/bin/python3
import socket as soc
import threading as thread
import os
import subprocess
your_ip = subprocess.getoutput("hostname -i")
your_port = 12345
# Create a UDP socket and bind it to the IP address and port number:
skt2 = soc.socket(soc.AF_INET, soc.SOCK_DGRAM)
skt2.bind((your_ip, your_port))
# Receive and print incoming messages:
def receive_msg():
    while True:
        os.system("tput setaf 2")
        msgRcv = skt2.recvfrom(1024)
        msg = msgRcv[0].decode()
        if msg in ("quit", "bye bye", "exit"):
            print("NOW YOUR FRIEND GOES OFFLINE.....")
            os._exit(1)
        print("\n\t\t\t The value received from the device is: --->" + msg)
        subprocess.getoutput("echo {} >> new.txt".format(msg))
# Run the receive function in its own thread:
t4 = thread.Thread(target=receive_msg)
# Start the thread:
t4.start()
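# Illustrative companion sketch (not part of this script): a minimal sender that
# this UDP server could receive from. The loopback IP below is a placeholder
# assumption; sending "quit", "bye bye" or "exit" makes the server above stop.
def send_message(message, server_ip="127.0.0.1", server_port=12345):
    client = soc.socket(soc.AF_INET, soc.SOCK_DGRAM)
    client.sendto(message.encode(), (server_ip, server_port))
    client.close()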
|
ble.py
|
from time import sleep
from bleson import get_provider, Observer, logger
from multiprocessing import Process, Manager
from queue import Queue
from logging import ERROR
logger.set_level(ERROR)
class BLEObserver(object):
'''Bluetooth LE communications with bleson'''
@staticmethod
def _getDataBackground(queue, sharedData=''):
(observer, q) = BLEObserver.start()
for advertisement in BLEObserver.getQueueLines(q):
if (sharedData['stop']):
break
try:
mac = advertisement.address.address if advertisement.address is not None else None
if (mac not in sharedData['whitelist']):
continue
if (advertisement.mfg_data is None):
continue
data = bytearray(advertisement.mfg_data)
queue.put((mac, data.hex()))
except GeneratorExit:
break
except Exception as e:
print('Error happened:', e)
continue
BLEObserver.stop(observer)
@staticmethod
def start():
adapter = get_provider().get_adapter()
q = Queue()
observer = Observer(adapter)
observer.on_advertising_data = q.put # Put advertisement to queue
observer.start()
return (observer, q)
@staticmethod
def stop(observer):
observer.stop()
@staticmethod
def getQueueLines(queue):
try:
while True:
nextItem = queue.get()
yield nextItem
except KeyboardInterrupt as e:
return
except Exception as e:
print('Exception while reading queue:', e)
@staticmethod
def getData(whitelist):
'''Get data from whitelisted bluetooth LE devices'''
m = Manager()
q = m.Queue()
sharedData = m.dict()
sharedData['whitelist'] = whitelist
sharedData['stop'] = False
p = Process(target=BLEObserver._getDataBackground, args=(q, sharedData))
p.start()
try:
while True:
while not q.empty():
data = q.get()
yield data
sleep(0.1) # sleep a bit
except GeneratorExit:
pass
sharedData['stop'] = True
p.join()
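# Illustrative usage sketch (not part of the class above): stream manufacturer
# data from whitelisted devices until interrupted. The MAC address below is a
# placeholder assumption for this example.
if __name__ == '__main__':
    whitelist = ['AA:BB:CC:DD:EE:FF']
    try:
        for mac, hex_data in BLEObserver.getData(whitelist):
            print(mac, hex_data)
    except KeyboardInterrupt:
        pass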
|
clock.py
|
import tkinter as tk
from tkinter.font import Font
from time import strftime, localtime, sleep
from threading import Thread
class Clock(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.time = strftime("%I:%M:%S %p", localtime())
self.date = strftime("%a, %b, %d", localtime())
self.kill = False
self.time_label = tk.Label(self, text=self.time,
font=Font(family='Helvetica', size=36,
weight='bold'))
self.date_label = tk.Label(self, text=self.date,
font=Font(family='Helvetica', size=27,
weight='bold'))
self.time_label.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.date_label.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.thread = Thread(target=self.update, daemon=True)
def update(self):
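        """Refresh the time and date labels once per second until self.kill is set."""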
while True:
if self.kill:
break
self.time = strftime("%I:%M:%S %p", localtime())
self.date = strftime("%a, %b, %d", localtime())
self.time_label["text"] = self.time
self.date_label["text"] = self.date
sleep(1)
if __name__ == "__main__":
root = tk.Tk()
root.title("Clock")
root.geometry("600x185")
root.minsize(400, 150)
clock = Clock(root)
clock.pack(fill=tk.BOTH, expand=1)
clock.thread.start()
root.mainloop()
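# Illustrative alternative (not used above): Tkinter widgets are generally not
# safe to update from a background thread, so the same refresh can instead be
# scheduled on Tk's own event loop with after(). The helper name below is an
# assumption made for this sketch; call it once after creating the Clock.
def schedule_clock_updates(clock):
    clock.time_label["text"] = strftime("%I:%M:%S %p", localtime())
    clock.date_label["text"] = strftime("%a, %b, %d", localtime())
    clock.after(1000, schedule_clock_updates, clock)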
|
parallel-python-benchmark.py
|
import os
import time
import threading
import multiprocessing
import concurrent.futures
import gevent
import gevent.pool
NUM_WORKERS = os.cpu_count()
def only_sleep(unused_arg=None):
""" Do nothing, wait for a timer to expire """
print("PID: %s, Process Name: %s, Thread Name: %s" % (
os.getpid(),
multiprocessing.current_process().name,
threading.current_thread().name)
)
time.sleep(1)
def only_sleep_gevent(unused_arg=None):
""" Do nothing, wait for a timer to expire """
print("PID: %s, Process Name: %s, Thread Name: %s" % (
os.getpid(),
multiprocessing.current_process().name,
threading.current_thread().name)
)
gevent.sleep(1)
def crunch_numbers(unused_arg=None):
""" Do some computations """
print("PID: %s, Process Name: %s, Thread Name: %s" % (
os.getpid(),
multiprocessing.current_process().name,
threading.current_thread().name)
)
x = 0
while x < 100000000:
x += 1
return x
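# Note: only_sleep releases the GIL while sleeping, so thread-based variants scale
# well for it, whereas crunch_numbers is CPU-bound pure-Python work and, under
# CPython's GIL, only the process-based variants can use multiple cores for it.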
def test_sleep():
print("\n\ntesting sleep...")
## Run tasks serially
start_time = time.time()
for _ in range(NUM_WORKERS):
only_sleep()
end_time = time.time()
print("Serial time=", end_time - start_time, '\n')
# Run tasks using threads
start_time = time.time()
threads = [threading.Thread(target=only_sleep) for _ in range(NUM_WORKERS)]
[thread.start() for thread in threads]
[thread.join() for thread in threads]
end_time = time.time()
print("Threads time=", end_time - start_time, '\n')
# Run tasks using concurrent futures
start_time = time.time()
with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
futures = {executor.submit(only_sleep, arg) for arg in range(NUM_WORKERS)}
concurrent.futures.wait(futures)
end_time = time.time()
print("Concurrent future time=", end_time - start_time, '\n')
# Run tasks using process pool
start_time = time.time()
with multiprocessing.Pool(processes=NUM_WORKERS) as pool:
results = pool.map(only_sleep, range(NUM_WORKERS))
# results.wait()
end_time = time.time()
print("Process pool time=", end_time - start_time, '\n')
# Run tasks using processes
start_time = time.time()
    processes = [multiprocessing.Process(target=only_sleep) for _ in range(NUM_WORKERS)]
[process.start() for process in processes]
[process.join() for process in processes]
end_time = time.time()
print("Parallel time=", end_time - start_time, '\n')
start_time = time.time()
pool = gevent.pool.Pool(NUM_WORKERS)
for arg in range(NUM_WORKERS):
pool.spawn(only_sleep_gevent, arg)
pool.join()
end_time = time.time()
print("gevent poll time=", end_time - start_time, '\n')
def test_crunch():
print("\n\ntesting crunch...")
start_time = time.time()
for _ in range(NUM_WORKERS):
crunch_numbers()
end_time = time.time()
print("Serial time=", end_time - start_time, '\n')
start_time = time.time()
threads = [threading.Thread(target=crunch_numbers) for _ in range(NUM_WORKERS)]
[thread.start() for thread in threads]
[thread.join() for thread in threads]
end_time = time.time()
print("Threads time=", end_time - start_time, '\n')
# Run tasks using concurrent futures
start_time = time.time()
with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
futures = {executor.submit(crunch_numbers, arg) for arg in range(NUM_WORKERS)}
concurrent.futures.wait(futures)
end_time = time.time()
print("Concurrent future time=", end_time - start_time, '\n')
# Run tasks using process pool
start_time = time.time()
with multiprocessing.Pool(processes=NUM_WORKERS) as pool:
results = pool.map_async(crunch_numbers, range(NUM_WORKERS))
results.wait()
end_time = time.time()
print("Process pool time=", end_time - start_time, '\n')
start_time = time.time()
processes = [multiprocessing.Process(target=crunch_numbers) for _ in range(NUM_WORKERS)]
[process.start() for process in processes]
[process.join() for process in processes]
end_time = time.time()
print("Parallel time=", end_time - start_time, '\n')
start_time = time.time()
pool = gevent.pool.Pool(NUM_WORKERS)
pool.map(crunch_numbers, range(NUM_WORKERS))
pool.kill()
end_time = time.time()
print("gevent poll time=", end_time - start_time, '\n')
def main():
test_sleep()
test_crunch()
if __name__ == '__main__':
main()
|
mesh_pool.py
|
import torch
import torch.nn as nn
from threading import Thread
from models.layers.mesh_union import MeshUnion
import numpy as np
from heapq import heappop, heapify
class MeshPool(nn.Module):
def __init__(self, target, multi_thread=False):
super(MeshPool, self).__init__()
self.__out_target = target
self.__multi_thread = multi_thread
self.__fe = None
self.__updated_fe = None
self.__meshes = None
self.__merge_edges = [-1, -1]
def __call__(self, fe, meshes):
return self.forward(fe, meshes)
def forward(self, fe, meshes):
self.__updated_fe = [[] for _ in range(len(meshes))]
pool_threads = []
self.__fe = fe
self.__meshes = meshes
# iterate over batch
for mesh_index in range(len(meshes)):
if self.__multi_thread:
pool_threads.append(Thread(target=self.__pool_main, args=(mesh_index,)))
pool_threads[-1].start()
else:
self.__pool_main(mesh_index)
if self.__multi_thread:
for mesh_index in range(len(meshes)):
pool_threads[mesh_index].join()
out_features = torch.cat(self.__updated_fe).view(len(meshes), -1, self.__out_target)
return out_features
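    # __pool_main collapses edges of one mesh, starting from the smallest squared
    # feature norm, until the target edge count is reached, then rebuilds the
    # pooled feature matrix for that mesh.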
def __pool_main(self, mesh_index):
mesh = self.__meshes[mesh_index]
queue = self.__build_queue(self.__fe[mesh_index, :, :mesh.edges_count], mesh.edges_count)
# recycle = []
# last_queue_len = len(queue)
last_count = mesh.edges_count + 1
        mask = np.ones(mesh.edges_count, dtype=bool)
edge_groups = MeshUnion(mesh.edges_count, self.__fe.device)
self.to_merge_edges = []
#self.to_remove_edges = []
while mesh.edges_count > self.__out_target:
try:
value, edge_id = heappop(queue)
except:
print(self.__out_target)
print(mesh.edges_count)
raise
edge_id = int(edge_id)
if mask[edge_id]:
self.__pool_edge(mesh, edge_id, mask, edge_groups)
MeshPool.__union_multiple_groups(mesh, edge_groups, self.to_merge_edges)
#for k in self.to_remove_edges:
#MeshPool.__remove_group(mesh, edge_groups, k)
#mesh.remove_edge(k)
mesh.clean(mask, edge_groups)
fe = edge_groups.rebuild_features(self.__fe[mesh_index], mask, self.__out_target)
self.__updated_fe[mesh_index] = fe
def __pool_edge(self, mesh, edge_id, mask, edge_groups):
if self.has_boundaries(mesh, edge_id):
return False
elif self.__clean_side(mesh, edge_id, mask, edge_groups, 0)\
and self.__clean_side(mesh, edge_id, mask, edge_groups, 2) \
and self.__is_one_ring_valid(mesh, edge_id):
self.__merge_edges[0] = self.__pool_side(mesh, edge_id, mask, edge_groups, 0)
self.__merge_edges[1] = self.__pool_side(mesh, edge_id, mask, edge_groups, 2)
mesh.merge_vertices(edge_id)
mask[edge_id] = False
MeshPool.__remove_group(mesh, edge_groups, edge_id)
mesh.edges_count -= 1
return True
else:
return False
def __clean_side(self, mesh, edge_id, mask, edge_groups, side):
if mesh.edges_count <= self.__out_target:
return False
invalid_edges = self.__get_invalids(mesh, edge_id, edge_groups, side)
while len(invalid_edges) != 0 and mesh.edges_count > self.__out_target:
self.__remove_triplete(mesh, mask, edge_groups, invalid_edges)
if mesh.edges_count <= self.__out_target:
return False
if self.has_boundaries(mesh, edge_id):
return False
invalid_edges = self.__get_invalids(mesh, edge_id, edge_groups, side)
return True
@staticmethod
def has_boundaries(mesh, edge_id):
for edge in mesh.gemm_edges[edge_id]:
if edge == -1 or -1 in mesh.gemm_edges[edge]:
return True
return False
@staticmethod
def __is_one_ring_valid(mesh, edge_id):
v_a = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 0]]].reshape(-1))
v_b = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 1]]].reshape(-1))
shared = v_a & v_b - set(mesh.edges[edge_id])
return len(shared) == 2
def __pool_side(self, mesh, edge_id, mask, edge_groups, side):
info = MeshPool.__get_face_info(mesh, edge_id, side)
key_a, key_b, side_a, side_b, _, other_side_b, _, other_keys_b = info
self.__redirect_edges(mesh, key_a, side_a - side_a % 2, other_keys_b[0], mesh.sides[key_b, other_side_b])
self.__redirect_edges(mesh, key_a, side_a - side_a % 2 + 1, other_keys_b[1], mesh.sides[key_b, other_side_b + 1])
self.to_merge_edges.append((key_b, key_a))
self.to_merge_edges.append((edge_id, key_a))
#self.to_remove_edges.append(key_b)
#MeshPool.__union_groups(mesh, edge_groups, key_b, key_a)
#MeshPool.__union_groups(mesh, edge_groups, edge_id, key_a)
mask[key_b] = False
MeshPool.__remove_group(mesh, edge_groups, key_b)
mesh.remove_edge(key_b)
mesh.edges_count -= 1
return key_a
def __get_invalids(self, mesh, edge_id, edge_groups, side):
info = MeshPool.__get_face_info(mesh, edge_id, side)
key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b = info
shared_items = MeshPool.__get_shared_items(other_keys_a, other_keys_b)
if len(shared_items) == 0:
return []
else:
assert (len(shared_items) == 2)
middle_edge = other_keys_a[shared_items[0]]
update_key_a = other_keys_a[1 - shared_items[0]]
update_key_b = other_keys_b[1 - shared_items[1]]
update_side_a = mesh.sides[key_a, other_side_a + 1 - shared_items[0]]
update_side_b = mesh.sides[key_b, other_side_b + 1 - shared_items[1]]
MeshPool.__redirect_edges(mesh, edge_id, side, update_key_a, update_side_a)
MeshPool.__redirect_edges(mesh, edge_id, side + 1, update_key_b, update_side_b)
MeshPool.__redirect_edges(mesh, update_key_a, MeshPool.__get_other_side(update_side_a), update_key_b, MeshPool.__get_other_side(update_side_b))
self.to_merge_edges.append((key_a, edge_id))
self.to_merge_edges.append((key_b, edge_id))
self.to_merge_edges.append((key_a, update_key_a))
self.to_merge_edges.append((middle_edge, update_key_a))
self.to_merge_edges.append((key_b, update_key_b))
self.to_merge_edges.append((middle_edge, update_key_b))
return [key_a, key_b, middle_edge]
@staticmethod
def __redirect_edges(mesh, edge_a_key, side_a, edge_b_key, side_b):
mesh.gemm_edges[edge_a_key, side_a] = edge_b_key
mesh.gemm_edges[edge_b_key, side_b] = edge_a_key
mesh.sides[edge_a_key, side_a] = side_b
mesh.sides[edge_b_key, side_b] = side_a
@staticmethod
def __get_shared_items(list_a, list_b):
shared_items = []
for i in range(len(list_a)):
for j in range(len(list_b)):
if list_a[i] == list_b[j]:
shared_items.extend([i, j])
return shared_items
@staticmethod
def __get_other_side(side):
return side + 1 - 2 * (side % 2)
@staticmethod
def __get_face_info(mesh, edge_id, side):
key_a = mesh.gemm_edges[edge_id, side]
key_b = mesh.gemm_edges[edge_id, side + 1]
side_a = mesh.sides[edge_id, side]
side_b = mesh.sides[edge_id, side + 1]
other_side_a = (side_a - (side_a % 2) + 2) % 4
other_side_b = (side_b - (side_b % 2) + 2) % 4
other_keys_a = [mesh.gemm_edges[key_a, other_side_a], mesh.gemm_edges[key_a, other_side_a + 1]]
other_keys_b = [mesh.gemm_edges[key_b, other_side_b], mesh.gemm_edges[key_b, other_side_b + 1]]
return key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b
@staticmethod
def __remove_triplete(mesh, mask, edge_groups, invalid_edges):
vertex = set(mesh.edges[invalid_edges[0]])
for edge_key in invalid_edges:
vertex &= set(mesh.edges[edge_key])
mask[edge_key] = False
MeshPool.__remove_group(mesh, edge_groups, edge_key)
mesh.edges_count -= 3
vertex = list(vertex)
assert(len(vertex) == 1), 'Error in %s' % mesh.filename
mesh.remove_vertex(vertex[0])
def __build_queue(self, features, edges_count):
# delete edges with smallest norm
squared_magnitude = torch.sum(features * features, 0)
if squared_magnitude.shape[-1] != 1:
squared_magnitude = squared_magnitude.unsqueeze(-1)
edge_ids = torch.arange(edges_count, device=squared_magnitude.device, dtype=torch.float32).unsqueeze(-1)
heap = torch.cat((squared_magnitude, edge_ids), dim=-1).tolist()
heapify(heap)
return heap
@staticmethod
def __union_groups(mesh, edge_groups, source, target):
edge_groups.union(source, target)
mesh.union_groups(source, target)
@staticmethod
def __union_multiple_groups(mesh, edge_groups, source_target):
if len(source_target) == 0:
return
combined = [list(t) for t in zip(*source_target)]
edge_groups.union(combined[0], combined[1])
for s, t in source_target:
mesh.union_groups(s, t)
@staticmethod
def __remove_group(mesh, edge_groups, index):
edge_groups.remove_group(index)
mesh.remove_group(index)
|
server.py
|
import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
import os
import logging
import json
from google.protobuf import any_pb2
import grpc
import time
from threading import Thread
import sys
import redis
import cache
import service_pb2
import service_pb2_grpc
import boto3
s3client = None
app = FastAPI()
# Mandatory variables in environment
MANDATORY_ENV_VARS = {
'AWS_REGION': 'ap-northeast-1',
'REDIS_HOST': 'localhost',
'REDIS_PORT': 6379,
'RANK_PORT': 5400
}
# Notice channel
recall_notice_to_rank = 'recall_notice_to_rank'
rank_notice_to_filter = 'rank_notice_to_filter'
sleep_interval = 10 # second
action_model_type = 'action-model'
embedding_type = 'embedding'
pickle_type = 'inverted-list'
json_type = 'ps-result'
def xasync(f):
def wrapper(*args, **kwargs):
thr = Thread(target=f, args=args, kwargs=kwargs)
thr.start()
return wrapper
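# The xasync decorator above runs the wrapped function in a fire-and-forget
# background thread; callers get no handle to the thread and no return value.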
@app.get('/rank/status', tags=["monitoring"])
def status():
logging.info('Collecting status information from server & plugin...')
channel = grpc.insecure_channel('localhost:50051')
stub = service_pb2_grpc.RankStub(channel)
response = stub.Status(service_pb2.google_dot_protobuf_dot_empty__pb2.Empty())
statusAny = any_pb2.Any()
response.status.Unpack(statusAny)
pStatus = json.loads(statusAny.value.decode('utf-8'))
return {
'env': MANDATORY_ENV_VARS,
'redis': rCache.connection_status(),
'plugin_status': pStatus
}
@app.get('/ping', tags=["monitoring"])
def ping():
logging.info('Processing default request...')
return {'result': 'ping'}
def check_plugin_status():
logging.info('check plugin status')
channel = grpc.insecure_channel('localhost:50051')
stub = service_pb2_grpc.RankStub(channel)
response = stub.Status(service_pb2.google_dot_protobuf_dot_empty__pb2.Empty())
if response.code == 0:
logging.info('plugin startup succeed')
return True
else:
logging.info('plugin startup failed')
return False
@xasync
def poll_recall_notice_to_rank():
logging.info('poll_recall_notice_to_rank start')
while True:
try:
message_redis = rCache.lpop_data_from_list(recall_notice_to_rank)
if message_redis:
logging.info('get message {} from {}'.format(message_redis, recall_notice_to_rank))
                message = json.loads(message_redis)
user_id = message['user_id']
recall_result = message['recall_result']
logging.info('start rank_process in poll_recall_notice_to_rank')
logging.info('user_id {}'.format(user_id))
logging.info('recall_result {}'.format(recall_result))
reqDicts = any_pb2.Any()
reqDicts.value = json.dumps({
'user_id': user_id,
'recall_result': recall_result
}).encode('utf-8')
rankProcessRequest = service_pb2.RankProcessRequest(apiVersion='v1', metadata='Rank', type='RankResult')
rankProcessRequest.dicts.Pack(reqDicts)
channel = grpc.insecure_channel('localhost:50051')
stub = service_pb2_grpc.RankStub(channel)
response = stub.RankProcess(rankProcessRequest)
results = any_pb2.Any()
response.results.Unpack(results)
presults = json.loads(results.value.decode('utf-8'))
logging.info('rank result: {}'.format(presults))
if response.code == 0:
rCache.rpush_data_into_list(rank_notice_to_filter, json.dumps({
'user_id': user_id,
'recall_result': recall_result,
'rank_result': presults
}).encode('utf-8'))
else:
time.sleep(sleep_interval)
except Exception:
localtime = time.asctime(time.localtime(time.time()))
logging.info('Rank process error, time: {}'.format(localtime))
def read_stream_messages():
logging.info('read_stream_messages start')
read_action_model_message()
read_embedding_message()
read_pickle_message()
read_json_message()
@xasync
def read_action_model_message():
logging.info('read_action_model_message start')
# Read existed stream message
stream_message = rCache.read_stream_message(action_model_type)
if stream_message:
logging.info("Handle existed stream action_model_type message")
handle_stream_message(stream_message)
while True:
logging.info('wait for reading action_model_type message')
try:
stream_message = rCache.read_stream_message_block(action_model_type)
if stream_message:
handle_stream_message(stream_message)
except redis.ConnectionError:
localtime = time.asctime(time.localtime(time.time()))
logging.info('get ConnectionError, time: {}'.format(localtime))
time.sleep(sleep_interval)
@xasync
def read_embedding_message():
logging.info('read_embedding_message start')
# Read existed stream message
stream_message = rCache.read_stream_message(embedding_type)
if stream_message:
logging.info("Handle existed stream embedding_type message")
handle_stream_message(stream_message)
while True:
logging.info('wait for reading embedding_type message')
try:
stream_message = rCache.read_stream_message_block(embedding_type)
if stream_message:
handle_stream_message(stream_message)
except redis.ConnectionError:
localtime = time.asctime(time.localtime(time.time()))
logging.info('get ConnectionError, time: {}'.format(localtime))
time.sleep(sleep_interval)
@xasync
def read_pickle_message():
logging.info('read_pickle_message start')
# Read existed stream message
stream_message = rCache.read_stream_message(pickle_type)
if stream_message:
logging.info("Handle existed stream pickle_type message")
handle_stream_message(stream_message)
while True:
logging.info('wait for reading pickle_type message')
try:
stream_message = rCache.read_stream_message_block(pickle_type)
if stream_message:
handle_stream_message(stream_message)
except redis.ConnectionError:
localtime = time.asctime(time.localtime(time.time()))
logging.info('get ConnectionError, time: {}'.format(localtime))
time.sleep(sleep_interval)
@xasync
def read_json_message():
logging.info('read_json_message start')
# Read existed stream message
stream_message = rCache.read_stream_message(json_type)
if stream_message:
logging.info("Handle existed stream json_type message")
handle_stream_message(stream_message)
while True:
logging.info('wait for reading json_type message')
try:
stream_message = rCache.read_stream_message_block(json_type)
if stream_message:
handle_stream_message(stream_message)
except redis.ConnectionError:
localtime = time.asctime(time.localtime(time.time()))
logging.info('get ConnectionError, time: {}'.format(localtime))
time.sleep(sleep_interval)
def handle_stream_message(stream_message):
logging.info('get stream message from {}'.format(stream_message))
file_type, file_path, file_list = parse_stream_message(stream_message)
logging.info('start reload data process in handle_stream_message')
logging.info('file_type {}'.format(file_type))
logging.info('file_path {}'.format(file_path))
logging.info('file_list {}'.format(file_list))
reqDicts = any_pb2.Any()
reqDicts.value = json.dumps({
'file_type': file_type,
'file_list': file_list
}).encode('utf-8')
reloadRequest = service_pb2.ReloadRequest()
reloadRequest.dicts.Pack(reqDicts)
channel = grpc.insecure_channel('localhost:50051')
stub = service_pb2_grpc.RankStub(channel)
response = stub.Reload(reloadRequest)
if response.code == 0:
logging.info('reload plugin succeeded')
else:
logging.info('reload plugin failed, description: {}'.format(response.description))
def parse_stream_message(stream_message):
for stream_name, message in stream_message:
for message_id, value in message:
decode_value = convert(value)
file_type = decode_value['file_type']
file_path = decode_value['file_path']
file_list = decode_value['file_list']
return file_type, file_path, file_list
# convert stream data to str
def convert(data):
if isinstance(data, bytes):
return data.decode('ascii')
elif isinstance(data, dict):
return dict(map(convert, data.items()))
elif isinstance(data, tuple):
return map(convert, data)
else:
return data
def wait_for_plugin_service():
while True:
if check_plugin_status():
return
else:
logging.info('wait for plugin startup')
time.sleep(sleep_interval)
def init():
# Check out environments
for var in MANDATORY_ENV_VARS:
if var not in os.environ:
logging.error("Mandatory variable {%s} is not set, using default value {%s}.", var, MANDATORY_ENV_VARS[var])
else:
MANDATORY_ENV_VARS[var] = os.environ.get(var)
aws_region = MANDATORY_ENV_VARS['AWS_REGION']
logging.info("aws_region={}".format(aws_region))
boto3.setup_default_session(region_name=MANDATORY_ENV_VARS['AWS_REGION'])
global s3client
s3client = boto3.client('s3')
logging.info(json.dumps(s3client.list_buckets(), default=str))
# Initial redis connection
global rCache
rCache = cache.RedisCache(host=MANDATORY_ENV_VARS['REDIS_HOST'], port=MANDATORY_ENV_VARS['REDIS_PORT'])
logging.info('redis status is {}'.format(rCache.connection_status()))
wait_for_plugin_service()
logging.info('rank service start')
poll_recall_notice_to_rank()
read_stream_messages()
if __name__ == "__main__":
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
init()
uvicorn.run(app, host="0.0.0.0", port=MANDATORY_ENV_VARS['RANK_PORT'])
|
complex_image.py
|
import threading
from instance import *
class Complex:
def __init__(self):
self.images_info_obj_list = []
self.threading_list = []
self.threading_page = 0
self.max_thread = Vars.cfg.data.get("max_thread")
self.semaphore = threading.Semaphore(self.max_thread)
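        # The semaphore caps concurrent downloads at max_thread even though one
        # Thread object is created and started per image.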
def add_image_info_obj(self, image_info_obj):
self.images_info_obj_list.append(image_info_obj)
def start_download_threading(self):
print("插画列表加载完毕...")
if len(self.images_info_obj_list) != 0:
print("开始下载, 一共:", len(self.images_info_obj_list), "幅插画\n\n")
self.threading_list = [threading.Thread(target=self.thread_download_images, args=(images_info,))
for images_info in self.images_info_obj_list]
for thread_ing in self.threading_list:
thread_ing.start()
for thread_ing in self.threading_list:
thread_ing.join()
self.threading_list.clear()
else:
print("线程队列为空,没有可下载的插画!")
self.images_info_obj_list.clear()
def thread_download_images(self, images_info):
self.semaphore.acquire()
self.threading_page += 1
images_info.show_images_information(thread_status=True)
if images_info.page_count == 1:
images_info.save_image(images_info.original_url)
else:
images_info.save_image(images_info.original_url_list)
        # print(images_info.image_name, "download finished")
        print("Download progress: {}/{}".format(self.threading_page, len(self.images_info_obj_list)), end="\r")
self.semaphore.release()
|
NonSSL.py
|
#!/bin/env python3
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import requests
import os
from threading import Thread
import sys
from multiprocessing import current_process
import sessionvalidation.sessionvalidation as sv
import lib.result as result
import extractHeader
import mainProcess
import json
import gzip
bSTOP = False
def createDummyBodywithLength(numberOfbytes):
if numberOfbytes <= 0:
return None
body = 'a'
while numberOfbytes != 1:
body += 'b'
numberOfbytes -= 1
return body
def handleResponse(response, *args, **kwargs):
print(response.status_code)
# resp=args[0]
#expected_output_split = resp.getHeaders().split('\r\n')[ 0].split(' ', 2)
#expected_output = (int(expected_output_split[1]), str( expected_output_split[2]))
#r = result.Result(session_filename, expected_output[0], response.status_code)
# print(r.getResultString(colorize=True))
# make sure len of the message body is greater than length
def gen():
yield 'pforpersia,champaignurbana'.encode('utf-8')
yield 'there'.encode('utf-8')
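# gen() above supplies a request body as a generator; requests sends generator
# bodies with chunked transfer encoding, which is what the Transfer-Encoding
# replay path below relies on.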
def txn_replay(session_filename, txn, proxy, result_queue, request_session):
""" Replays a single transaction
:param request_session: has to be a valid requests session"""
req = txn.getRequest()
resp = txn.getResponse()
# Construct HTTP request & fire it off
txn_req_headers = req.getHeaders()
txn_req_headers_dict = extractHeader.header_to_dict(txn_req_headers)
txn_req_headers_dict['Content-MD5'] = txn._uuid # used as unique identifier
if 'body' in txn_req_headers_dict:
del txn_req_headers_dict['body']
#print("Replaying session")
try:
# response = request_session.request(extractHeader.extract_txn_req_method(txn_req_headers),
# 'http://' + extractHeader.extract_host(txn_req_headers) + extractHeader.extract_GET_path(txn_req_headers),
# headers=txn_req_headers_dict,stream=False) # making stream=False raises contentdecoding exception? kill me
method = extractHeader.extract_txn_req_method(txn_req_headers)
response = None
body = None
content = None
if 'Transfer-Encoding' in txn_req_headers_dict:
            # Delete the Host header: the requests get/post helpers add their own Host
            # field, and a duplicate Host header makes ATS return 400 "Invalid HTTP request".
            # Oddly, this only causes trouble when the body is chunked-encoded.
del txn_req_headers_dict['Host']
if 'Content-Length' in txn_req_headers_dict:
#print("ewww !")
del txn_req_headers_dict['Content-Length']
body = gen()
if 'Content-Length' in txn_req_headers_dict:
nBytes = int(txn_req_headers_dict['Content-Length'])
body = createDummyBodywithLength(nBytes)
#print("request session is",id(request_session))
if method == 'GET':
r1 = request_session.request('GET', extractHeader.extract_GET_path(
txn_req_headers), headers=txn_req_headers_dict, data=body)
responseHeaders = r1.headers
responseContent = r1.content # byte array
#print("len: {0} received {1}".format(responseHeaders['Content-Length'], responseContent))
elif method == 'POST':
r1 = request_session.request('POST', extractHeader.extract_GET_path(
txn_req_headers), headers=txn_req_headers_dict, data=body)
responseHeaders = r1.headers
responseContent = r1.content
#print("len: {0} received {1}".format(responseHeaders['Content-Length'], responseContent))
elif method == 'HEAD':
r1 = request_session.request('HEAD', extractHeader.extract_GET_path(
txn_req_headers), headers=txn_req_headers_dict, data=body)
responseHeaders = r1.headers
responseContent = r1.content
else: # EXPERIMENTAL
r1 = request_session.request(method, extractHeader.extract_GET_path(
txn_req_headers), headers=txn_req_headers_dict, data=body)
responseHeaders = r1.headers
responseContent = r1.content
#gzip_file = gzip.GzipFile(fileobj=responseContent)
#shutil.copyfileobj(gzip_file, f)
expected = extractHeader.responseHeader_to_dict(resp.getHeaders())
# print("------------EXPECTED-----------")
# print(expected)
# print("------------RESP--------------")
# print(responseHeaders)
# print()
if mainProcess.verbose:
expected_output_split = resp.getHeaders().split('\r\n')[0].split(' ', 2)
expected_output = (int(expected_output_split[1]), str(expected_output_split[2]))
r = result.Result(session_filename, expected_output[0], r1.status_code, responseContent)
b_res, res = r.getResult(responseHeaders, expected, colorize=True)
print(res)
if not b_res:
print("Received response")
print(responseHeaders)
print("Expected response")
print(expected)
# result_queue.put(r)
except UnicodeEncodeError as e:
# these unicode errors are due to the interaction between Requests and our wiretrace data.
# TODO fix
print("UnicodeEncodeError exception")
except requests.exceptions.ContentDecodingError as e:
print("ContentDecodingError", e)
except:
e = sys.exc_info()
print("ERROR in NonSSLReplay: ", e, response, session_filename)
def session_replay(input, proxy, result_queue):
global bSTOP
''' Replay all transactions in session
This entire session will be replayed in one requests.Session (so one socket / TCP connection)'''
# if timing_control:
# time.sleep(float(session._timestamp)) # allow other threads to run
while bSTOP == False:
for session in iter(input.get, 'STOP'):
# print(bSTOP)
if session == 'STOP':
print("Queue is empty")
bSTOP = True
break
with requests.Session() as request_session:
request_session.proxies = proxy
for txn in session.getTransactionIter():
try:
txn_replay(session._filename, txn, proxy, result_queue, request_session)
except:
e = sys.exc_info()
print("ERROR in replaying: ", e, txn.getRequest().getHeaders())
bSTOP = True
#print("Queue is empty")
input.put('STOP')
break
def client_replay(input, proxy, result_queue, nThread):
Threads = []
for i in range(nThread):
t = Thread(target=session_replay, args=[input, proxy, result_queue])
t.start()
Threads.append(t)
for t1 in Threads:
t1.join()
|
Trading.py
|
# -*- coding: UTF-8 -*-
# @yasinkuyu
# Define Python imports
import os
import sys
import time
import config
import threading
import math
# Define Custom imports
from Database import Database
from Orders import Orders
class Trading():
# Define trade vars
order_id = 0
order_data = None
buy_filled = True
sell_filled = True
buy_filled_qty = 0
sell_filled_qty = 0
    # percent (e.g. if the price drops 10%, panic sell.)
stop_loss = 0
# Buy/Sell qty
quantity = 0
# BTC amount
amount = 0
# float(step_size * math.floor(float(free)/step_size))
step_size = 0
# Define static vars
WAIT_TIME_BUY_SELL = 1 # seconds
WAIT_TIME_CHECK_BUY_SELL = 0.2 # seconds
WAIT_TIME_CHECK_SELL = 5 # seconds
WAIT_TIME_STOP_LOSS = 20 # seconds
MAX_TRADE_SIZE = 7 # int
def __init__(self, option):
# Get argument parse options
self.option = option
# Define parser vars
self.order_id = self.option.orderid
self.quantity = self.option.quantity
self.wait_time = self.option.wait_time
self.stop_loss = self.option.stop_loss
self.increasing = self.option.increasing
self.decreasing = self.option.decreasing
# BTC amount
self.amount = self.option.amount
def buy(self, symbol, quantity, buyPrice):
# Do you have an open order?
self.checkorder()
try:
# Create order
orderId = Orders.buy_limit(symbol, quantity, buyPrice)
# Database log
Database.write([orderId, symbol, 0, buyPrice, 'BUY', quantity, self.option.profit])
print ('Buy order created id:%d, q:%.8f, p:%.8f' % (orderId, quantity, float(buyPrice)))
self.order_id = orderId
return orderId
except Exception as e:
print ('bl: %s' % (e))
time.sleep(self.WAIT_TIME_BUY_SELL)
return None
def sell(self, symbol, quantity, orderId, sell_price, last_price):
        '''
        Try to sell at the specified limit price until it is reached.
        If not successful, the order will be cancelled.
        '''
buy_order = Orders.get_order(symbol, orderId)
if buy_order['status'] == 'FILLED' and buy_order['side'] == "BUY":
print ("Buy order filled... Try sell...")
else:
time.sleep(self.WAIT_TIME_CHECK_BUY_SELL)
if buy_order['status'] == 'FILLED' and buy_order['side'] == "BUY":
print ("Buy order filled after 0.1 second... Try sell...")
elif buy_order['status'] == 'PARTIALLY_FILLED' and buy_order['side'] == "BUY":
print ("Buy order partially filled... Try sell... Cancel remaining buy...")
self.cancel(symbol, orderId)
else:
self.cancel(symbol, orderId)
print ("Buy order fail (Not filled) Cancel order...")
self.order_id = 0
return
sell_order = Orders.sell_limit(symbol, quantity, sell_price)
sell_id = sell_order['orderId']
print ('Sell order create id: %d' % sell_id)
time.sleep(self.WAIT_TIME_CHECK_SELL)
if sell_order['status'] == 'FILLED':
print ('Sell order (Filled) Id: %d' % sell_id)
print ('LastPrice : %.8f' % last_price)
print ('Profit: %%%s. Buy price: %.8f Sell price: %.8f' % (self.option.profit, float(sell_order['price']), sell_price))
self.order_id = 0
self.order_data = None
return
        '''
        If all sell attempts fail,
        fall back to the stop-loss.
        '''
if self.stop_loss > 0:
# If sell order failed after 5 seconds, 5 seconds more wait time before selling at loss
time.sleep(self.WAIT_TIME_CHECK_SELL)
if self.stop(symbol, quantity, sell_id, last_price):
if Orders.get_order(symbol, sell_id)['status'] != 'FILLED':
print ('We apologize... Sold at loss...')
else:
                    print ('We apologize... Can\'t sell even at a loss... Please sell manually... Stopping program...')
self.cancel(symbol, sell_id)
exit(1)
        sell_status = sell_order['status']
        while (sell_status != "FILLED"):
time.sleep(self.WAIT_TIME_CHECK_SELL)
sell_status = Orders.get_order(symbol, sell_id)['status']
lastPrice = Orders.get_ticker(symbol)
print ('Status: %s Current price: %.8f Sell price: %.8f' % (sell_status, lastPrice, sell_price))
print ('Sold! Continue trading...')
self.order_id = 0
self.order_data = None
def stop(self, symbol, quantity, orderId, last_price):
# If the target is not reached, stop-loss.
stop_order = Orders.get_order(symbol, orderId)
stopprice = self.calc(float(stop_order['price']))
lossprice = stopprice - (stopprice * self.stop_loss / 100)
status = stop_order['status']
# Order status
if status == 'NEW' or status == 'PARTIALLY_FILLED':
if self.cancel(symbol, orderId):
# Stop loss
if last_price >= lossprice:
sello = Orders.sell_market(symbol, quantity)
print ('Stop-loss, sell market, %s' % (last_price))
sell_id = sello['orderId']
if sello == True:
return True
else:
# Wait a while after the sale to the loss.
time.sleep(self.WAIT_TIME_STOP_LOSS)
statusloss = sello['status']
if statusloss != 'NEW':
print ('Stop-loss, sold')
return True
else:
self.cancel(symbol, sell_id)
return False
else:
sello = Orders.sell_limit(symbol, quantity, lossprice)
print ('Stop-loss, sell limit, %s' % (lossprice))
time.sleep(self.WAIT_TIME_STOP_LOSS)
statusloss = sello['status']
if statusloss != 'NEW':
print ('Stop-loss, sold')
return True
else:
self.cancel(symbol, sell_id)
return False
else:
print ('Cancel did not work... Might have been sold before stop loss...')
return True
elif status == 'FILLED':
self.order_id = 0
self.order_data = None
print('Order filled')
return True
else:
return False
def check(self, symbol, orderId, quantity):
        # If the buy order at the specified price has not been filled, fall back to a market buy.
# Do you have an open order?
self.checkorder()
trading_size = 0
time.sleep(self.WAIT_TIME_BUY_SELL)
while trading_size < self.MAX_TRADE_SIZE:
# Order info
order = Orders.get_order(symbol, orderId)
side = order['side']
price = float(order['price'])
# TODO: Sell partial qty
orig_qty = float(order['origQty'])
self.buy_filled_qty = float(order['executedQty'])
status = order['status']
print ('Wait buy order: %s id:%d, price: %.8f, orig_qty: %.8f' % (symbol, order['orderId'], price, orig_qty))
if status == 'NEW':
if self.cancel(symbol, orderId):
buyo = Orders.buy_market(symbol, quantity)
print ('Buy market order')
self.order_id = buyo['orderId']
self.order_data = buyo
if buyo == True:
break
else:
trading_size += 1
continue
else:
break
elif status == 'FILLED':
self.order_id = order['orderId']
self.order_data = order
print ("Filled")
break
elif status == 'PARTIALLY_FILLED':
print ("Partial filled")
break
else:
trading_size += 1
continue
def cancel(self, symbol, orderId):
# If order is not filled, cancel it.
check_order = Orders.get_order(symbol, orderId)
if not check_order:
self.order_id = 0
self.order_data = None
return True
if check_order['status'] == 'NEW' or check_order['status'] != "CANCELLED":
Orders.cancel_order(symbol, orderId)
self.order_id = 0
self.order_data = None
return True
def calc(self, lastBid):
try:
return lastBid + (lastBid * self.option.profit / 100)
except Exception as e:
print ('c: %s' % (e))
return
def checkorder(self):
# If there is an open order, exit.
if self.order_id > 0:
exit(1)
def action(self, symbol):
# Order amount
quantity = self.quantity
# Fetches the ticker price
lastPrice = Orders.get_ticker(symbol)
# Order book prices
lastBid, lastAsk = Orders.get_order_book(symbol)
# Target buy price, add little increase #87
buyPrice = lastBid + self.increasing
# Target sell price, decrease little
sellPrice = lastAsk - self.decreasing
# Spread ( profit )
profitableSellingPrice = self.calc(lastBid)
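        # Example: with lastBid = 100 and --profit 0.5, profitableSellingPrice
        # = 100 + 100 * 0.5 / 100 = 100.5.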
# Check working mode
if self.option.mode == 'range':
buyPrice = float(self.option.buyprice)
sellPrice = float(self.option.sellprice)
profitableSellingPrice = sellPrice
# Screen log
if self.option.prints and self.order_id == 0:
spreadPerc = (lastAsk/lastBid - 1) * 100.0
print ('price:%.8f buyp:%.8f sellp:%.8f-bid:%.8f ask:%.8f spread:%.2f' % (lastPrice, buyPrice, profitableSellingPrice, lastBid, lastAsk, spreadPerc))
# analyze = threading.Thread(target=analyze, args=(symbol,))
# analyze.start()
if self.order_id > 0:
# Profit mode
if self.order_data is not None:
                order = self.order_data
# Last control
newProfitableSellingPrice = self.calc(float(order['price']))
if (lastAsk >= newProfitableSellingPrice):
profitableSellingPrice = newProfitableSellingPrice
# range mode
if self.option.mode == 'range':
profitableSellingPrice = self.option.sellprice
'''
If the order is complete,
try to sell it.
'''
# Perform buy action
sellAction = threading.Thread(target=self.sell, args=(symbol, quantity, self.order_id, profitableSellingPrice, lastPrice,))
sellAction.start()
return
        '''
        Has the profit target been reached?
        If the ask price is at or above the profitable selling price
        (or, in range mode, the last price has dropped to the buy price),
        place a buy order at the computed buy price.
        '''
if (lastAsk >= profitableSellingPrice and self.option.mode == 'profit') or \
(lastPrice <= float(self.option.buyprice) and self.option.mode == 'range'):
if self.order_id == 0:
self.buy(symbol, quantity, buyPrice)
# Perform check/sell action
# checkAction = threading.Thread(target=self.check, args=(symbol, self.order_id, quantity,))
# checkAction.start()
def logic(self):
return 0
def filters(self):
symbol = self.option.symbol
# Get symbol exchance info
symbol_info = Orders.get_info(symbol)
if not symbol_info:
print ("Invalid symbol, please try again...")
exit(1)
symbol_info['filters'] = {item['filterType']: item for item in symbol_info['filters']}
return symbol_info
def format_step(self, quantity, stepSize):
return float(stepSize * math.floor(float(quantity)/stepSize))
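    # Example for format_step above: format_step(0.123456, 0.001) floors the
    # quantity to the LOT_SIZE step and returns roughly 0.123.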
def validate(self):
valid = True
symbol = self.option.symbol
filters = self.filters()['filters']
# Order book prices
lastBid, lastAsk = Orders.get_order_book(symbol)
lastPrice = Orders.get_ticker(symbol)
minQty = float(filters['LOT_SIZE']['minQty'])
minPrice = float(filters['PRICE_FILTER']['minPrice'])
minNotional = float(filters['MIN_NOTIONAL']['minNotional'])
quantity = float(self.option.quantity)
# stepSize defines the intervals that a quantity/icebergQty can be increased/decreased by.
stepSize = float(filters['LOT_SIZE']['stepSize'])
# tickSize defines the intervals that a price/stopPrice can be increased/decreased by
tickSize = float(filters['PRICE_FILTER']['tickSize'])
# If option increasing default tickSize greater than
if (float(self.option.increasing) < tickSize):
self.increasing = tickSize
# If option decreasing default tickSize greater than
if (float(self.option.decreasing) < tickSize):
self.decreasing = tickSize
# Just for validation
lastBid = lastBid + self.increasing
        # Default sizing: if neither quantity nor amount is given, derive the
        # quantity from minNotional and add a 10% safety margin.
quantity = (minNotional / lastBid)
quantity = quantity + (quantity * 10 / 100)
notional = minNotional
if self.amount > 0:
# Calculate amount to quantity
quantity = (self.amount / lastBid)
if self.quantity > 0:
# Format quantity step
quantity = self.quantity
quantity = self.format_step(quantity, stepSize)
notional = lastBid * float(quantity)
# Set Globals
self.quantity = quantity
self.step_size = stepSize
# minQty = minimum order quantity
if quantity < minQty:
print ("Invalid quantity, minQty: %.8f (u: %.8f)" % (minQty, quantity))
valid = False
if lastPrice < minPrice:
print ("Invalid price, minPrice: %.8f (u: %.8f)" % (minPrice, lastPrice))
valid = False
# minNotional = minimum order value (price * quantity)
if notional < minNotional:
print ("Invalid notional, minNotional: %.8f (u: %.8f)" % (minNotional, notional))
valid = False
if not valid:
exit(1)
def run(self):
cycle = 0
actions = []
symbol = self.option.symbol
print ('@yasinkuyu, 2018')
print ('Auto Trading for Binance.com. --symbol: %s\n' % (symbol))
print ('... \n')
# Validate symbol
self.validate()
print ('Started... --quantity: %.8f\n' % (self.quantity))
if self.option.mode == 'range':
if self.option.buyprice == 0 or self.option.sellprice == 0:
                print ('Please enter --buyprice / --sellprice\n')
exit(1)
print ('Wait buyprice:%.8f sellprice:%.8f' % (self.option.buyprice, self.option.sellprice))
else:
print ('%s%% profit scanning for %s \n' % (self.option.profit, symbol))
print ('Between Ask and Bid %s%% profit hunting' % (self.option.profit))
print ('buyp : BuyPrice (Bid+ --increasing %.8f)' % (self.increasing))
print ('sellp: SellPrice (Bid- --decreasing %.8f)' % (self.decreasing))
print ('... \n')
while (cycle <= self.option.loop):
startTime = time.time()
actionTrader = threading.Thread(target=self.action, args=(symbol,))
actions.append(actionTrader)
actionTrader.start()
endTime = time.time()
if endTime - startTime < self.wait_time:
time.sleep(self.wait_time - (endTime - startTime))
# 0 = Unlimited loop
if self.option.loop > 0:
cycle = cycle + 1
|
engine.py
|
# -*- coding: utf-8 -*-
import sys
import threading
# Import logger
from iemlav.lib.log_monitor.server_log.server_logger import ServerLogger
# Import utilities
from iemlav.lib.log_monitor.server_log import utils
# Import log parser
from iemlav.lib.log_monitor.server_log.parser import apache
from iemlav.lib.log_monitor.server_log.parser import nginx
# Import detection modules
from iemlav.lib.log_monitor.server_log.detect.attacks import xss
from iemlav.lib.log_monitor.server_log.detect.attacks import sqli
from iemlav.lib.log_monitor.server_log.detect.attacks import lfi
from iemlav.lib.log_monitor.server_log.detect.attacks import web_shell
from iemlav.lib.log_monitor.server_log.detect.attacks import ddos
from iemlav.lib.log_monitor.server_log.detect.recon import port_scan
from iemlav.lib.log_monitor.server_log.detect.recon import fuzzer
from iemlav.lib.log_monitor.server_log.detect.recon import spider
from iemlav.lib.log_monitor.server_log import user_filter
class Engine(object):
"""ServerLog Monitor Engine."""
def __init__(self,
debug=False,
log_type=None,
log_file=None,
window=30,
ip_list=None,
status_code=None):
"""
Initialize ServerLog Monitor Engine.
Args:
debug (bool): Log on terminal or not
log_type (str): Type of log file (Apache, Nginx)
log_file (str): Path of the log file
window (int): Days old log to process (default: 30 days)
ip_list (list): List of IPs to filter / grab of the log file
status_code (list): List of status code to filter / grab of the log file
Raises:
None
Returns:
None
"""
# Initialize logger
self.logger = ServerLogger(
__name__,
debug=debug
)
if log_type is None:
self.logger.log(
"No server type selected, exiting.",
logtype="error"
)
sys.exit(0)
# Initialize log file path as None
self.log_file_path = None
# OS to log file path mapping
self.system_log_file_map = {
"apache": {
"debian": "/var/log/apache2/access.log",
"fedora": "/var/log/httpd/access_log",
"freebsd": "/var/log/httpd-access.log"
},
"nginx": {
"debian": "/var/log/nginx/access.log"
}
}
if log_file:
self.log_file_path = str(log_file)
else:
os_name = utils.categorize_os()
if os_name:
try:
self.log_file_path = self.system_log_file_map[log_type][os_name]
except KeyError:
self.logger.log(
"Could not find a suitable log file path, exiting.",
logtype="error"
)
sys.exit(0)
else:
self.logger.log(
"OS not recognized, log file path not selected, exiting.",
logtype="error"
)
sys.exit(0)
# Create specific parser objects
if self.log_file_path: # if log file path is valid
if log_type == "apache": # if Apache log file
self.parser_obj = apache.ApacheParser(debug=debug,
window=window,
path=self.log_file_path)
elif log_type == "nginx": # if Nginx log file
self.parser_obj = nginx.NginxParser(debug=debug,
window=window,
path=self.log_file_path)
if self.log_file_path and self.parser_obj: # if log file path is valid
# Cross Site Scripting (XSS) Detection
            self.xss_obj = xss.CrossSite(debug=debug)
# SQL injection (SQLi) Detection
self.sqli_obj = sqli.SQLi(debug=debug)
# Local File Inclusion (LFI) Detection
self.lfi_obj = lfi.LFI(debug=debug)
# Web Shell Detection
self.web_shell_obj = web_shell.WebShell(debug=debug)
# Port Scan Detection
self.port_scan_obj = port_scan.PortScan(debug=debug)
# URL Fuzzer Detection
self.fuzzer_obj = fuzzer.FuzzerDetect(debug=debug)
# Spider / Web Crawler / Bad user agent
self.spider_obj = spider.SpiderDetect(debug=debug)
# DDoS Detection
self.ddos_obj = ddos.DDoS(debug=debug)
# UserFilter object
self.user_filter_obj = user_filter.UserFilter(debug=debug,
ip_list=ip_list,
status_code=status_code)
def run(self):
"""
Start the ServerLog Monitor Engine.
Args:
None
Raises:
None
Returns:
None
"""
thread_pool = [] # Collection of all the threads
while True: # Run in an endless parent thread loop
# Parse the logfile
data = self.parser_obj.parse()
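            # Every detection module receives the same freshly parsed log data and
            # runs in its own thread; the join() below makes one full pass finish
            # before the log file is parsed again.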
# Create multiple threads for various detection
xss_thread = threading.Thread(target=self.xss_obj.detect_xss, args=(data,))
sqli_thread = threading.Thread(target=self.sqli_obj.detect_sqli, args=(data,))
lfi_thread = threading.Thread(target=self.lfi_obj.detect_lfi, args=(data,))
web_shell_thread = threading.Thread(target=self.web_shell_obj.detect_web_shell, args=(data,))
port_scan_thread = threading.Thread(target=self.port_scan_obj.detect_port_scan, args=(data,))
fuzzer_thread = threading.Thread(target=self.fuzzer_obj.detect_fuzzer, args=(data,))
spider_thread = threading.Thread(target=self.spider_obj.detect_spider, args=(data,))
ddos_thread = threading.Thread(target=self.ddos_obj.detect_ddos, args=(data,))
user_filter_thread = threading.Thread(target=self.user_filter_obj.filter_user_criteria, args=(data,))
# Add created threads to the thread pool
thread_pool.append(xss_thread)
thread_pool.append(sqli_thread)
thread_pool.append(lfi_thread)
thread_pool.append(web_shell_thread)
thread_pool.append(port_scan_thread)
thread_pool.append(fuzzer_thread)
thread_pool.append(spider_thread)
thread_pool.append(ddos_thread)
thread_pool.append(user_filter_thread)
# Start the thread process
xss_thread.start()
sqli_thread.start()
lfi_thread.start()
web_shell_thread.start()
port_scan_thread.start()
fuzzer_thread.start()
spider_thread.start()
ddos_thread.start()
user_filter_thread.start()
# Complete the thread execution
for thread in thread_pool:
thread.join()
|
socks.py
|
#!/usr/bin/env python
"""Minimal non-feature complete socks proxy"""
from __future__ import print_function
import socket
from struct import pack, unpack
import threading
import sys
# Python 3 renamed SocketServer to socketserver
try:
from socketserver import StreamRequestHandler, ThreadingTCPServer
except BaseException:
from SocketServer import StreamRequestHandler, ThreadingTCPServer
def debug(s):
print('socks.py: ', s, file=sys.stderr)
def error(s):
print('socks.py, ERROR: ', s, file=sys.stderr)
class MyTCPServer(ThreadingTCPServer):
allow_reuse_address = True
def handle_timeout(self):
raise Exception('timeout')
CLOSE = object()
VERSION = '\x05'
NOAUTH = '\x00'
USERPASS = '\x02'
CONNECT = '\x01'
UDP_ASSOCIATE = '\x03'
IPV4 = '\x01'
IPV6 = '\x04'
DOMAIN_NAME = '\x03'
SUCCESS = '\x00'
password = None
username = None
allow_v4 = False
def send(dest, msg):
if msg == CLOSE:
try:
dest.shutdown(socket.SHUT_WR)
except BaseException:
pass
dest.close()
return 0
else:
return dest.sendall(msg)
def recv(source, buffer):
data = source.recv(buffer)
if data == '':
return CLOSE
else:
return data
def forward(source, dest, name):
while True:
data = recv(source, 4000)
if data == CLOSE:
send(dest, CLOSE)
debug('%s hung up' % name)
return
# debug('Forwarding (%d) %r' % (len(data), data))
send(dest, data)
def spawn_forwarder(source, dest, name):
t = threading.Thread(target=forward, args=(source, dest, name))
t.daemon = True
t.start()
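# spawn_forwarder pumps bytes in one direction on a daemon thread; handle() uses it
# for the destination-to-client direction and runs forward() inline for the
# client-to-destination direction, so each proxied connection costs one extra thread.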
class SocksHandler(StreamRequestHandler):
"""Highly feature incomplete SOCKS 5 implementation"""
def close_request(self):
self.server.close_request(self.request)
def read(self, n):
data = ''
while len(data) < n:
            extra = self.rfile.read(n - len(data))
if extra == '':
raise Exception('Connection closed')
data += extra
return data
def handle(self):
        # IMPROVEMENT: report in the logging who requests are from
# IMPROVEMENT: Timeout on client
debug('Connection - authenticating')
version = self.read(1)
if allow_v4 and version == '\x04':
cmd = self.read(1)
if cmd != CONNECT and cmd != UDP_ASSOCIATE:
error('Only supports connect and udp-associate method not (%r) closing' % cmd)
self.close_request()
return
raw_dest_port = self.read(2)
dest_port, = unpack('>H', raw_dest_port)
raw_dest_address = self.read(4)
dest_address = '.'.join(map(str, unpack('>4B', raw_dest_address)))
user_id = ''
c = self.read(1)
while c != '\0':
user_id += c
c = self.read(1)
outbound_sock = socket.socket(socket.AF_INET)
out_address = socket.getaddrinfo(dest_address, dest_port)[0][4]
debug("Creating forwarder connection to %s:%d" % (out_address[0], out_address[1]))
outbound_sock.connect(out_address)
self.send_reply_v4(outbound_sock.getsockname())
spawn_forwarder(outbound_sock, self.request, 'destination')
forward(self.request, outbound_sock, 'client')
return
if version != '\x05':
error('Wrong version number (%r) closing...' % version)
self.close_request()
return
nmethods = ord(self.read(1))
method_list = self.read(nmethods)
global password
global username
if password is None and NOAUTH in method_list:
self.send_no_auth_method()
debug('Authenticated (no-auth)')
elif USERPASS in method_list:
self.send_user_pass_auth_method()
auth_version = self.read(1)
if auth_version != '\x01':
error('Wrong sub-negotiation version number (%r) closing...' % version)
self.close_request()
return
usr_len = ord(self.read(1))
usr_name = self.read(usr_len)
pwd_len = ord(self.read(1))
pwd = self.read(pwd_len)
if usr_name != username or pwd != password:
error('Invalid username or password')
self.close_request()
return
debug('Authenticated (user/password)')
self.send_authenticated()
else:
error('Server only supports NOAUTH and user/pass')
self.send_no_method()
return
# If we were authenticating it would go here
version, cmd, zero, address_type = self.read(4)
if version != '\x05':
error('Wrong version number (%r) closing...' % version)
self.close_request()
elif cmd != CONNECT and cmd != UDP_ASSOCIATE:
error('Only supports connect and udp-associate method not (%r) closing' % cmd)
self.close_request()
elif zero != '\x00':
error('Mangled request. Reserved field (%r) is not null' % zero)
self.close_request()
if address_type == IPV4:
raw_dest_address = self.read(4)
dest_address = '.'.join(map(str, unpack('>4B', raw_dest_address)))
elif address_type == IPV6:
raw_dest_address = self.read(16)
dest_address = ":".join([hex(x)[2:] for x in unpack('>8H', raw_dest_address)])
elif address_type == DOMAIN_NAME:
dns_length = ord(self.read(1))
dns_name = self.read(dns_length)
dest_address = dns_name
else:
error('Unknown addressing (%r)' % address_type)
self.close_request()
raw_dest_port = self.read(2)
dest_port, = unpack('>H', raw_dest_port)
if address_type == IPV6:
outbound_sock = socket.socket(socket.AF_INET6)
else:
outbound_sock = socket.socket(socket.AF_INET)
try:
out_address = socket.getaddrinfo(dest_address, dest_port)[0][4]
except Exception as e:
print(e)
return
if cmd == UDP_ASSOCIATE:
debug("no UDP support yet, closing")
return
debug("Creating forwarder connection to %s:%d" % (out_address[0], out_address[1]))
try:
outbound_sock.connect(out_address)
except Exception as e:
print(e)
return
if address_type == IPV6:
self.send_reply6(outbound_sock.getsockname())
else:
self.send_reply(outbound_sock.getsockname())
spawn_forwarder(outbound_sock, self.request, 'destination')
try:
forward(self.request, outbound_sock, 'client')
except Exception as e:
print(e)
def send_reply_v4(self, xxx_todo_changeme):
(bind_addr, bind_port) = xxx_todo_changeme
self.wfile.write('\0\x5a\0\0\0\0\0\0')
self.wfile.flush()
def send_reply(self, xxx_todo_changeme1):
(bind_addr, bind_port) = xxx_todo_changeme1
bind_tuple = tuple(map(int, bind_addr.split('.')))
full_address = bind_tuple + (bind_port,)
debug('Setting up forwarding port %r' % (full_address,))
msg = pack('>cccc4BH', VERSION, SUCCESS, '\x00', IPV4, *full_address)
self.wfile.write(msg)
def send_reply6(self, xxx_todo_changeme2):
(bind_addr, bind_port, unused1, unused2) = xxx_todo_changeme2
bind_tuple = tuple([int(x, 16) for x in bind_addr.split(':')])
full_address = bind_tuple + (bind_port,)
debug('Setting up forwarding port %r' % (full_address,))
msg = pack('>cccc8HH', VERSION, SUCCESS, '\x00', IPV6, *full_address)
self.wfile.write(msg)
def send_no_method(self):
self.wfile.write('\x05\xff')
self.close_request()
def send_no_auth_method(self):
self.wfile.write('\x05\x00')
self.wfile.flush()
def send_user_pass_auth_method(self):
self.wfile.write('\x05\x02')
self.wfile.flush()
def send_authenticated(self):
self.wfile.write('\x01\x00')
self.wfile.flush()
if __name__ == '__main__':
listen_port = 8002
i = 1
while i < len(sys.argv):
if sys.argv[i] == '--username':
username = sys.argv[i + 1]
i += 1
elif sys.argv[i] == '--password':
password = sys.argv[i + 1]
i += 1
elif sys.argv[i] == '--port':
listen_port = int(sys.argv[i + 1])
i += 1
elif sys.argv[i] == '--allow-v4':
allow_v4 = True
else:
if sys.argv[i] != '--help':
debug('unknown option "%s"' % sys.argv[i])
print('usage: socks.py [--username <user> --password <password>] [--port <listen-port>]')
sys.exit(1)
i += 1
debug('Listening on port %d...' % listen_port)
server = MyTCPServer(('localhost', listen_port), SocksHandler)
server.timeout = 190
while True:
server.handle_request()
|
queue_calibs.py
|
'''
This script is used to produce lists of CCDs or bricks.
This is useful for production purposes (building qdo queue, eg).
python legacypipe/queue-calibs.py | qdo load cal -
eg, DR3:
* Staging to $SCRATCH the images required to run the EDR region:
module switch legacysurvey/dr3
python legacypipe/queue-calibs.py --region edr --write-ccds edr-ccds.fits --calibs --touching --nper 100
for x in $(tablist edr-ccds.fits"[col image_filename]" | awk '{print $2}' | sort | uniq); do
rsync -LRarv $LEGACY_SURVEY_DIR/images/./$(echo $x | sed s/o.i/o??/g) /global/cscratch1/sd/desiproc/legacypipe-dir/images/;
done
* Running calibration preprocessing for EDR:
module switch legacysurvey/dr3-cori-scratch
python legacypipe/queue-calibs.py --region edr --calibs --touching --nper 100
qdo load cal jobs
# qdo launch cal 1 --cores_per_worker 1 --batchqueue shared --script "python legacypipe/run-calib.py --splinesky" --walltime 4:00:00 --keep_env --batchopts "-a 0-15"
qdo launch cal 1 --cores_per_worker 1 --batchqueue shared --script $(pwd)/cal.sh --walltime 4:00:00 --keep_env --batchopts "-a 0-15"
cal.sh:
cd /global/cscratch1/sd/desiproc/code/legacypipe/py
python legacypipe/run-calib.py --splinesky $*
* Running bricks for EDR:
python legacypipe/queue-calibs.py --region edr --brickq 0 > bricks
grep '^....[pm]...$' bricks | qdo load edr0 -
qdo launch edr0 16 --cores_per_worker 8 --batchqueue regular --walltime 12:00:00 --script ../bin/pipebrick.sh --keep_env
Brick stage 1:
python legacypipe/queue-calibs.py --region edr --brickq 1 > bricks1
grep '^....[pm]...$' bricks1 | awk '{print "/global/cscratch1/sd/desiproc/code/legacypipe/bin/pipebrick.sh",$1}' | qdo load jobs -
* Staging larger regions:
python legacypipe/queue-calibs.py --region northwest --write-ccds ccds-nw.fits --calibs --near --nper 100 --command --opt "--threads 8"
for x in $(tablist ccds-nw.fits"[col image_filename]" | awk '{print $2}' | sort | uniq); do rsync -LRarv /project/projectdirs/desiproc/dr3/images/./$(echo $x | sed s/o.i/o??/g) /global/cscratch1/sd/desiproc/dr3/images/; done
qdo load --priority -10 jobs jobs
'''
import sys
import os
import numpy as np
from astrometry.util.fits import fits_table
from astrometry.libkd.spherematch import match_radec
from legacypipe.survey import LegacySurveyData, wcs_for_brick, ccds_touching_wcs
def log(*s):
print(' '.join([str(ss) for ss in s]), file=sys.stderr)
def main(args):
"""Main program.
"""
import argparse
parser = argparse.ArgumentParser(description="This script is used to produce lists of CCDs or bricks, for production purposes (building qdo queue, eg).")
parser.add_argument('--calibs', action='store_true',
help='Output CCDs that need to be calibrated.')
parser.add_argument('--nper', type=int, default=None,
help='Batch N calibs per line')
parser.add_argument('--byexp', action='store_true', default=False,
help='Run one whole exposure per job (not one CCD per job)')
parser.add_argument('--forced', action='store_true',
help='Output forced-photometry commands')
parser.add_argument('--lsb', action='store_true',
help='Output Low-Surface-Brightness commands')
parser.add_argument('--stage', help='Stage image files to given directory')
parser.add_argument('--touching', action='store_true',
help='Cut to only CCDs touching selected bricks')
parser.add_argument('--near', action='store_true',
help='Quick cut to only CCDs near selected bricks')
parser.add_argument('--check-coadd', action='store_true',
help='Check which coadds actually need to run.')
parser.add_argument('--out', help='Output filename for calibs, default %(default)s',
default='jobs')
parser.add_argument('--command', action='store_true',
help='Write out full command-line to run calib')
parser.add_argument('--opt', help='With --command, extra options to add')
parser.add_argument('--maxra', type=float, help='Maximum RA to run')
parser.add_argument('--minra', type=float, help='Minimum RA to run')
parser.add_argument('--maxdec', type=float, help='Maximum Dec to run')
parser.add_argument('--mindec', type=float, help='Minimum Dec to run')
parser.add_argument('--region', help='Region to select')
parser.add_argument('--bricks', help='Set bricks.fits file to load')
parser.add_argument('--ccds', help='Set ccds.fits file to load')
parser.add_argument('--ignore_cuts', action='store_true',default=False,help='no photometric cuts')
parser.add_argument('--save_to_fits', action='store_true',default=False,help='save cut brick,ccd to fits table')
parser.add_argument('--name', action='store',default='dr3',help='save with this suffix, e.g. refers to ccds table')
parser.add_argument('--delete-sky', action='store_true',
help='Delete any existing sky calibration files')
parser.add_argument('--write-ccds', help='Write CCDs list as FITS table?')
parser.add_argument('--nccds', action='store_true', default=False, help='Prints number of CCDs per brick')
parser.add_argument('--bands', default='g,r,z', help='Set bands to keep: comma-separated list.')
opt = parser.parse_args(args)
want_ccds = (opt.calibs or opt.forced or opt.lsb)
want_bricks = not want_ccds
survey = LegacySurveyData()
if opt.bricks is not None:
B = fits_table(opt.bricks)
log('Read', len(B), 'from', opt.bricks)
else:
B = survey.get_bricks()
log('Bricks Dec range:', B.dec.min(), B.dec.max())
if opt.ccds is not None:
T = fits_table(opt.ccds)
log('Read', len(T), 'from', opt.ccds)
else:
T = survey.get_ccds()
log(len(T), 'CCDs')
T.index = np.arange(len(T))
if opt.ignore_cuts == False:
log('Applying CCD cuts...')
if 'ccd_cuts' in T.columns():
T.cut(T.ccd_cuts == 0)
log(len(T), 'CCDs survive cuts')
bands = opt.bands.split(',')
log('Filters:', np.unique(T.filter))
T.cut(np.flatnonzero(np.array([f in bands for f in T.filter])))
log('Cut to', len(T), 'CCDs in filters', bands)
log('CCDs Dec range:', T.dec.min(), T.dec.max())
# I,J,d,counts = match_radec(B.ra, B.dec, T.ra, T.dec, 0.2, nearest=True, count=True)
# plt.clf()
# plt.hist(counts, counts.max()+1)
# plt.savefig('bricks.png')
# B.cut(I[counts >= 9])
# plt.clf()
# plt.plot(B.ra, B.dec, 'b.')
# #plt.scatter(B.ra[I], B.dec[I], c=counts)
# plt.savefig('bricks2.png')
# DES Stripe82
#rlo,rhi = 350.,360.
# rlo,rhi = 300., 10.
# dlo,dhi = -6., 4.
# TINY bit
#rlo,rhi = 350.,351.1
#dlo,dhi = 0., 1.1
# EDR+
# 860 bricks
# ~10,000 CCDs
#rlo,rhi = 239,246
#dlo,dhi = 5, 13
# DR1
#rlo,rhi = 0, 360
# part 1
#dlo,dhi = 25, 40
# part 2
#dlo,dhi = 20,25
# part 3
#dlo,dhi = 15,20
# part 4
#dlo,dhi = 10,15
# part 5
#dlo,dhi = 5,10
# the rest
#dlo,dhi = -11, 5
#dlo,dhi = 15,25.5
dlo,dhi = -25, 40
rlo,rhi = 0, 360
# Arjun says 3x3 coverage area is roughly
# RA=240-252 DEC=6-12 (but not completely rectangular)
# COSMOS
#rlo,rhi = 148.9, 151.2
#dlo,dhi = 0.9, 3.5
# A nice well-behaved region (EDR2/3)
# rlo,rhi = 243.6, 244.6
# dlo,dhi = 8.1, 8.6
# 56 bricks, ~725 CCDs
#B.cut((B.ra > 240) * (B.ra < 242) * (B.dec > 5) * (B.dec < 7))
# 240 bricks, ~3000 CCDs
#B.cut((B.ra > 240) * (B.ra < 244) * (B.dec > 5) * (B.dec < 9))
# 535 bricks, ~7000 CCDs
#B.cut((B.ra > 240) * (B.ra < 245) * (B.dec > 5) * (B.dec < 12))
if opt.region in ['test1', 'test2', 'test3', 'test4']:
nm = dict(test1='2446p115', # weird stuff around bright star
test2='1183p292', # faint sources around bright galaxy
test3='3503p005', # DES
test4='1163p277', # Pollux
)[opt.region]
B.cut(np.flatnonzero(np.array([s == nm for s in B.brickname])))
log('Cut to', len(B), 'bricks')
log(B.ra, B.dec)
dlo,dhi = -90,90
rlo,rhi = 0, 360
elif opt.region == 'edr':
# EDR:
# 535 bricks, ~7000 CCDs
rlo,rhi = 240,245
dlo,dhi = 5, 12
elif opt.region == 'dr8-decam':
rlo,rhi = 0, 360
dlo,dhi = -70, 40
log('DR8-DECam region')
elif opt.region == 'edrplus':
rlo,rhi = 235,248
dlo,dhi = 5, 15
elif opt.region == 'edr-south':
rlo,rhi = 240,245
dlo,dhi = 5, 10
elif opt.region == 'cosmos1':
# 16 bricks in the core of the COSMOS field.
rlo,rhi = 149.75, 150.75
dlo,dhi = 1.6, 2.6
elif opt.region == 'pristine':
# Stream?
rlo,rhi = 240,250
dlo,dhi = 10,15
elif opt.region == 'des':
dlo, dhi = -6., 4.
rlo, rhi = 317., 7.
T.cut(np.flatnonzero(np.array(['CPDES82' in fn for fn in T.cpimage])))
log('Cut to', len(T), 'CCDs with "CPDES82" in filename')
elif opt.region == 'subdes':
rlo,rhi = 320., 360.
dlo,dhi = -1.25, 1.25
elif opt.region == 'northwest':
rlo,rhi = 240,360
dlo,dhi = 20,40
elif opt.region == 'north':
rlo,rhi = 120,240
dlo,dhi = 20,40
elif opt.region == 'northeast':
rlo,rhi = 0,120
dlo,dhi = 20,40
elif opt.region == 'southwest':
rlo,rhi = 240,360
dlo,dhi = -20,0
elif opt.region == 'south':
rlo,rhi = 120,240
dlo,dhi = -20,0
elif opt.region == 'southeast':
rlo,rhi = 0,120
dlo,dhi = -20,0
elif opt.region == 'southsoutheast':
rlo,rhi = 0,120
dlo,dhi = -20,-10
elif opt.region == 'midwest':
rlo,rhi = 240,360
dlo,dhi = 0,20
elif opt.region == 'middle':
rlo,rhi = 120,240
dlo,dhi = 0,20
elif opt.region == 'mideast':
rlo,rhi = 0,120
dlo,dhi = 0,20
elif opt.region == 'grz':
# Bricks with grz coverage.
# Be sure to use --bricks survey-bricks-in-dr1.fits
# which has_[grz] columns.
B.cut((B.has_g == 1) * (B.has_r == 1) * (B.has_z == 1))
log('Cut to', len(B), 'bricks with grz coverage')
elif opt.region == 'nogrz':
# Bricks without grz coverage.
# Be sure to use --bricks survey-bricks-in-dr1.fits
# which has_[grz] columns.
B.cut(np.logical_not((B.has_g == 1) * (B.has_r == 1) * (B.has_z == 1)))
log('Cut to', len(B), 'bricks withOUT grz coverage')
elif opt.region == 'deep2':
rlo,rhi = 250,260
dlo,dhi = 30,35
elif opt.region == 'deep2f2':
rlo,rhi = 251.4, 254.4
dlo,dhi = 34.6, 35.3
elif opt.region == 'deep2f3':
rlo,rhi = 351.25, 353.75
dlo,dhi = 0, 0.5
elif opt.region == 'deep3':
rlo,rhi = 214,216
dlo,dhi = 52.25,53.25
elif opt.region == 'virgo':
rlo,rhi = 185,190
dlo,dhi = 10, 15
elif opt.region == 'virgo2':
rlo,rhi = 182,192
dlo,dhi = 8, 18
elif opt.region == 'coma':
# van Dokkum et al Coma cluster ultra-diffuse galaxies: 3x3 field centered on Coma cluster
rc,dc = 195., 28.
dd = 1.5
cosdec = np.cos(np.deg2rad(dc))
rlo,rhi = rc - dd/cosdec, rc + dd/cosdec
dlo,dhi = dc - dd, dc + dd
elif opt.region == 'lsb':
rlo,rhi = 147.2, 147.8
dlo,dhi = -0.4, 0.4
elif opt.region == 'eboss-sgc':
# generous boundaries to make sure get all relevant images
# RA -45 to +45
# Dec -5 to +7
rlo,rhi = 310., 50.
dlo,dhi = -6., 6.
elif opt.region == 'eboss-ngc':
# generous boundaries to make sure get all relevant images
# NGC ELGs
# RA 115 to 175
# Dec 15 to 30
# rlo,rhi = 122., 177.
# dlo,dhi = 12., 32.
rlo,rhi = 126., 168.
dlo,dhi = 18., 33.
elif opt.region == 'mzls':
dlo,dhi = -10., 90. # -10: pull in Stripe 82 data too
elif opt.region == 'dr4-bootes':
# https://desi.lbl.gov/trac/wiki/DecamLegacy/DR4sched
#dlo,dhi = 34., 35.
#rlo,rhi = 209.5, 210.5
dlo,dhi = 33., 36.
rlo,rhi = 216.5, 219.5
elif opt.region == 'des-sn-x3':
#rlo,rhi = 36., 37.
#dlo,dhi = -5., -4.
rlo,rhi = 36., 36.5
dlo,dhi = -4.5, -4.
elif opt.region == 'ngc2632':
# open cluster
rlo,rhi = 129.0, 131.0
dlo,dhi = 19.0, 20.5
elif opt.region == 'dr8sky':
rlo,rhi = 35.0, 37.0
dlo,dhi = -3.0, -1.0
# ADM DR8 test regions, see, e.g.:
# https://desi.lbl.gov/trac/wiki/DecamLegacy/DR8#Testregions
elif opt.region == 'dr8-test-s82':
rlo, rhi = 0, 45
dlo, dhi = -1.25, 1.25
elif opt.region == 'dr8-test-hsc-sgc':
rlo, rhi = 30, 40
dlo, dhi = -6.5, -1.25
elif opt.region == 'dr8-test-hsc-ngc':
rlo, rhi = 177.5, 182.5
dlo, dhi = -1, 1
elif opt.region == 'dr8-test-edr':
rlo, rhi = 240, 245
dlo, dhi = 5, 12
elif opt.region == 'dr8-test-hsc-north':
rlo, rhi = 240, 250
dlo, dhi = 42, 45
elif opt.region == 'dr8-test-deep2-egs':
rlo, rhi = 213, 216.5
dlo, dhi = 52, 54
elif opt.region == 'dr8-test-overlap':
rlo, rhi = 132, 140.5
dlo, dhi = 31.5, 35
if opt.mindec is not None:
dlo = opt.mindec
if opt.maxdec is not None:
dhi = opt.maxdec
if opt.minra is not None:
rlo = opt.minra
if opt.maxra is not None:
rhi = opt.maxra
if rlo < rhi:
B.cut((B.ra >= rlo) * (B.ra <= rhi) *
(B.dec >= dlo) * (B.dec <= dhi))
else: # RA wrap
B.cut(np.logical_or(B.ra >= rlo, B.ra <= rhi) *
(B.dec >= dlo) * (B.dec <= dhi))
log(len(B), 'bricks in range; cut Dec range', B.dec.min(), B.dec.max())
#for name in B.get('brickname'):
# print(name)
#B.writeto('bricks-cut.fits')
bricksize = 0.25
# A bit more than 0.25-degree brick radius + Bok image radius ~ 0.57
search_radius = 1.05 * np.sqrt(2.) * (bricksize +
(0.455 * 4096 / 3600.))/2.
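    # Worked out: the image term is 0.455"/pix * 4096 pix / 3600 ~ 0.52 deg; adding
    # the 0.25 deg brick size, halving, and scaling by 1.05*sqrt(2) gives ~ 0.57 deg,
    # consistent with the note above.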
log(len(T), 'CCDs')
log(len(B), 'Bricks')
I,J,_ = match_radec(B.ra, B.dec, T.ra, T.dec, search_radius,
nearest=True)
B.cut(I)
log('Cut to', len(B), 'bricks near CCDs')
log('Bricks Dec range:', B.dec.min(), B.dec.max())
# plt.clf()
# plt.plot(B.ra, B.dec, 'b.')
# plt.title('DR3 bricks')
# plt.axis([360, 0, np.min(B.dec)-1, np.max(B.dec)+1])
# plt.savefig('bricks.png')
if opt.touching:
I,J,_ = match_radec(T.ra, T.dec, B.ra, B.dec, search_radius,
nearest=True)
# list the ones that will be cut
# drop = np.ones(len(T))
# drop[I] = False
# for i in np.flatnonzero(drop):
# from astrometry.util.starutil_numpy import degrees_between
# dists = degrees_between(B.ra, B.dec, T.ra[i], T.dec[i])
# mindist = min(dists)
# print('Dropping:', T.ra[i], T.dec[i], 'min dist', mindist, 'search_radius', search_radius)
T.cut(I)
log('Cut to', len(T), 'CCDs near bricks')
if opt.forced:
log('Writing forced-photometry commands to', opt.out)
f = open(opt.out,'w')
log('Total of', len(T), 'CCDs')
#T.cut(allI)
camexp = set([(c,e) for c,e in zip(T.camera, T.expnum)])
print(len(camexp), 'unique camera/exposure pairs')
for cam,exp in camexp:
#expstr = '%08i' % exp
#outfn = os.path.join('forced', cam, expstr[:5], 'forced-%s-%s.fits' % (cam, exp))
#f.write('%s %s all %s\n' % (cam, exp, outfn))
f.write('%s %s\n' % (cam, exp))
f.close()
log('Wrote', opt.out)
return 0
# sort by RA increasing
B.cut(np.argsort(B.ra))
if opt.save_to_fits:
assert(opt.touching)
# Write cut tables to file
for tab,typ in zip([B,T],['bricks','ccds']):
fn='%s-%s-cut.fits' % (typ,opt.region)
if os.path.exists(fn):
os.remove(fn)
tab.writeto(fn)
log('Wrote %s' % fn)
# Write text files listing ccd and filename names
# nm1,nm2= 'ccds-%s.txt'% opt.region,'filenames-%s.txt' % opt.region
# if os.path.exists(nm1):
# os.remove(nm1)
# if os.path.exists(nm2):
# os.remove(nm2)
# f1,f2=open(nm1,'w'),open(nm2,'w')
# fns= list(set(T.get('image_filename')))
# for fn in fns:
# f2.write('%s\n' % fn.strip())
# for ti in T:
# f1.write('%s\n' % ti.get('image_filename').strip())
# f1.close()
# f2.close()
# log('Wrote *-names.txt')
if opt.touching:
if want_bricks:
# Shortcut the list of bricks that are definitely touching CCDs --
# a brick-ccd pair within this radius must be touching.
closest_radius = 0.95 * (bricksize + 0.262 * 2048 / 3600.) / 2.
J1,_,_ = match_radec(B.ra, B.dec, T.ra, T.dec, closest_radius, nearest=True)
log(len(J1), 'of', len(B), 'bricks definitely touch CCDs')
tocheck = np.ones(len(B), bool)
tocheck[J1] = False
J2 = []
for j in np.flatnonzero(tocheck):
b = B[j]
wcs = wcs_for_brick(b)
I = ccds_touching_wcs(wcs, T)
log(len(I), 'CCDs for brick', b.brickname)
if len(I) == 0:
continue
J2.append(j)
J = np.hstack((J1, J2))
J = np.sort(J).astype(int)
B.cut(J)
log('Cut to', len(B), 'bricks touching CCDs')
else:
J = []
allI = set()
for j,b in enumerate(B):
wcs = wcs_for_brick(b)
I = ccds_touching_wcs(wcs, T)
log(len(I), 'CCDs for brick', b.brickname)
if len(I) == 0:
continue
allI.update(I)
J.append(j)
allI = list(allI)
allI.sort()
B.cut(np.array(J))
log('Cut to', len(B), 'bricks touching CCDs')
elif opt.near:
# Find CCDs near bricks
allI,_,_ = match_radec(T.ra, T.dec, B.ra, B.dec, search_radius, nearest=True)
# Find bricks near CCDs
J,_,_ = match_radec(B.ra, B.dec, T.ra, T.dec, search_radius, nearest=True)
B.cut(J)
log('Cut to', len(B), 'bricks near CCDs')
else:
allI = np.arange(len(T))
if opt.byexp:
_,eI = np.unique(T.expnum[allI], return_index=True)
allI = allI[eI]
print('Cut to', len(allI), 'expnums')
if opt.nccds:
from queue import Queue
from threading import Thread
log('Checking number of CCDs per brick')
def worker():
while True:
i = q.get()
if i is None:
break
b = B[i]
wcs = wcs_for_brick(b)
I = ccds_touching_wcs(wcs, T)
log(b.brickname, len(I))
q.task_done()
q = Queue()
num_threads = 24
threads = []
for i in range(num_threads):
t = Thread(target=worker)
t.start()
threads.append(t)
for i in range(len(B)):
q.put(i)
q.join()
for i in range(num_threads):
q.put(None)
for t in threads:
t.join()
if opt.write_ccds:
T[allI].writeto(opt.write_ccds)
log('Wrote', opt.write_ccds)
if want_bricks:
# Print the list of bricks and exit.
for b in B:
print(b.brickname)
if opt.save_to_fits:
B.writeto('bricks-%s-touching.fits' % opt.region)
if not want_ccds:
return 0
## Be careful here -- T has been cut; we want to write out T.index.
## 'allI' contains indices into T.
if opt.stage is not None:
cmd_pat = 'rsync -LRarv %s %s'
fns = set()
for iccd in allI:
im = survey.get_image_object(T[iccd])
fns.update([im.imgfn, im.wtfn, im.dqfn, im.psffn, im.merged_psffn,
im.merged_skyfn, im.skyfn])
for i,fn in enumerate(fns):
print('File', i+1, 'of', len(fns), ':', fn)
if not os.path.exists(fn):
print('No such file:', fn)
continue
base = survey.get_survey_dir()
if base.endswith('/'):
base = base[:-1]
rel = os.path.relpath(fn, base)
dest = os.path.join(opt.stage, rel)
print('Dest:', dest)
if os.path.exists(dest):
print('Exists:', dest)
continue
cmd = cmd_pat % ('%s/./%s' % (base, rel), opt.stage)
print(cmd)
rtn = os.system(cmd)
assert(rtn == 0)
return 0
if opt.lsb:
log('Writing LSB commands to', opt.out)
f = open(opt.out,'w')
log('Total of', len(allI), 'CCDs')
for j,i in enumerate(allI):
exp = T.expnum[i]
ext = T.ccdname[i].strip()
outfn = 'lsb/lsb-%s-%s.fits' % (exp, ext)
f.write('python legacyanalysis/lsb.py --expnum %i --extname %s --out %s -F -n > lsb/lsb-%s-%s.log 2>&1\n' % (exp, ext, outfn, exp, ext))
f.close()
log('Wrote', opt.out)
return 0
log('Writing calibs to', opt.out)
f = open(opt.out,'w')
log('Total of', len(allI), 'CCDs')
batch = []
def write_batch(f, batch, cmd):
if cmd is None:
cmd = ''
f.write(cmd + ' '.join(batch) + '\n')
cmd = None
if opt.command:
cmd = 'python legacypipe/run-calib.py '
if opt.opt is not None:
cmd += opt.opt + ' '
for j,i in enumerate(allI):
if opt.delete_sky:
log(j+1, 'of', len(allI))
im = survey.get_image_object(T[i])
if opt.delete_sky and os.path.exists(im.skyfn):
log(' deleting:', im.skyfn)
os.unlink(im.skyfn)
if opt.command:
if opt.byexp:
s = '--expnum %i' % (T.expnum[i])
else:
s = '%i-%s' % (T.expnum[i], T.ccdname[i])
prefix = 'python legacypipe/run-calib.py '
if opt.opt is not None:
prefix = prefix + opt.opt
#('python legacypipe/run-calib.py --expnum %i --ccdname %s' %
# (T.expnum[i], T.ccdname[i]))
else:
s = '%i' % T.index[i]
prefix = ''
if j < 10:
print('Index', T.index[i], 'expnum', T.expnum[i], 'ccdname', T.ccdname[i],
'filename', T.image_filename[i])
if not opt.nper:
f.write(prefix + s + '\n')
else:
batch.append(s)
if len(batch) >= opt.nper:
write_batch(f, batch, cmd)
batch = []
if len(batch):
write_batch(f, batch, cmd)
f.close()
log('Wrote', opt.out)
return 0
if __name__ == '__main__':
main(sys.argv[1:])
|
PrefixSumParallelNaive.py
|
#!/usr/bin/env python
'''
Naive parallel algorithm of prefix sum
http://people.cs.vt.edu/yongcao/teaching/cs5234/spring2013/slides/Lecture10.pdf
'''
import threading
import math
import TestFunction
# test_data = [2,6,2,3,5]
test_data = [3, 1, 7, 0, 4, 1, 6, 3]
'''
Generic sum function
'''
def accumulate(in_list, index, stride, out_list, out_index):
sum = in_list[index] + in_list[index - stride]
out_list[out_index] = sum
'''
What is a prefix sum?
For A = [2,6,2,3,5], the exclusive scan is [0,2,8,10,13,18];
this implementation returns the inclusive scan [2,8,10,13,18].
'''
def prefixSum(num_list):
# create new output holder of the same size
out = list(num_list)
current_list = list(num_list)
size = len(num_list)
    iterations = int(math.ceil(math.log(size, 2)))  # ceil(log2(size)) doubling steps cover the whole list
# print(str(iterations))
stride = 1
for iteration in range(0, iterations):
jobs = []
for i in range(stride, size):
            thread = threading.Thread(target=accumulate, args=(current_list, i, stride, out, i))
jobs.append(thread)
for job in jobs:
job.start()
for job in jobs:
job.join()
stride *= 2
current_list = list(out)
# print(out)
return out
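
# Minimal sanity check, independent of the TestFunction helper used below:
# a plain sequential inclusive scan should match the doubling-stride loop above.
def sequentialPrefixSum(num_list):
    out, running = [], 0
    for value in num_list:
        running += value
        out.append(running)
    return out

assert prefixSum(test_data) == sequentialPrefixSum(test_data)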
result = prefixSum(test_data)
# print(result)
TestFunction.Test(prefixSum, 64)
TestFunction.Test(prefixSum, 128)
TestFunction.Test(prefixSum, 256)
TestFunction.Test(prefixSum, 256)
TestFunction.Test(prefixSum, 512)
TestFunction.Test(prefixSum, 1024)
TestFunction.Test(prefixSum, 2048)
TestFunction.Test(prefixSum, 4096)
TestFunction.Test(prefixSum, 8192)
|
estimator.py
|
"""
"""
import cPickle
import copy
from functools import partial
from multiprocessing import Process, Pipe
import time
import numpy as np
import hyperopt
import scipy.sparse
from . import components
# Constants for partial_fit
# The partial_fit method will not be run if there is less than
# timeout * timeout_buffer number of seconds left before timeout
timeout_buffer = 0.05
# The minimum number of iterations of the partial_fit method that must be run
# before early stopping can kick in is min_n_iters
min_n_iters = 7
# After best_loss_cutoff_n_iters iterations have occurred, the training can be
# stopped early if the validation scores are far from the best scores
best_loss_cutoff_n_iters = 35
# Early stopping can occur when the best validation score of the earlier runs is
# greater than that of the later runs; tipping_pt_ratio determines where the
# split between "early" and "late" scores falls
tipping_pt_ratio = 0.6
# Retraining will be done with all training data for retrain_fraction
# multiplied by the number of iterations used to train the original classifier
retrain_fraction = 1.2
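# Illustrative reading of the criteria above (numbers are hypothetical): with
# tipping_pt_ratio = 0.6 and 10 recorded validation scores, the first 6 scores
# count as "early" and the last 4 as "late"; once at least min_n_iters scores
# exist, a trial stops as soon as max(early) >= max(late), i.e. the later
# partial_fit iterations are no longer improving.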
class NonFiniteFeature(Exception):
"""
"""
def _cost_fn(argd, Xfit, yfit, Xval, yval, info, timeout,
_conn, best_loss=None):
try:
t_start = time.time()
if 'classifier' in argd:
classifier = argd['classifier']
preprocessings = argd['preprocessing']
else:
classifier = argd['model']['classifier']
preprocessings = argd['model']['preprocessing']
untrained_classifier = copy.deepcopy( classifier )
# -- N.B. modify argd['preprocessing'] in-place
for pp_algo in preprocessings:
info('Fitting', pp_algo, 'to X of shape', Xfit.shape)
pp_algo.fit(Xfit)
info('Transforming fit and Xval', Xfit.shape, Xval.shape)
Xfit = pp_algo.transform(Xfit)
Xval = pp_algo.transform(Xval)
# np.isfinite() does not work on sparse matrices
if not (scipy.sparse.issparse(Xfit) or scipy.sparse.issparse(Xval)):
if not (
np.all(np.isfinite(Xfit))
and np.all(np.isfinite(Xval))):
# -- jump to NonFiniteFeature handler below
raise NonFiniteFeature(pp_algo)
info('Training classifier', classifier,
'on X of dimension', Xfit.shape)
def should_stop(scores):
#TODO: possibly extend min_n_iters based on how close the current
# score is to the best score, up to some larger threshold
if len(scores) < min_n_iters:
return False
tipping_pt = int(tipping_pt_ratio * len(scores))
early_scores = scores[:tipping_pt]
late_scores = scores[tipping_pt:]
if max(early_scores) >= max(late_scores):
return True
#TODO: make this less confusing and possibly more accurate
if len(scores) > best_loss_cutoff_n_iters and \
max(scores) < 1 - best_loss and \
3 * ( max(late_scores) - max(early_scores) ) < \
1 - best_loss - max(late_scores):
info("stopping early due to best_loss cutoff criterion")
return True
return False
n_iters = 0 # Keep track of the number of training iterations
best_classifier = None
if hasattr( classifier, "partial_fit" ):
if timeout is not None:
timeout_tolerance = timeout * timeout_buffer
rng = np.random.RandomState(6665)
train_idxs = rng.permutation(Xfit.shape[0])
validation_scores = []
while timeout is not None and \
time.time() - t_start < timeout - timeout_tolerance:
n_iters += 1
rng.shuffle(train_idxs)
classifier.partial_fit(Xfit[train_idxs], yfit[train_idxs],
classes=np.unique( yfit ))
validation_scores.append(classifier.score(Xval, yval))
if max(validation_scores) == validation_scores[-1]:
best_classifier = copy.deepcopy(classifier)
if should_stop(validation_scores):
break
info('VSCORE', validation_scores[-1])
classifier = best_classifier
else:
classifier.fit( Xfit, yfit )
if classifier is None:
t_done = time.time()
rval = {
'status': hyperopt.STATUS_FAIL,
'failure': 'Not enough time to train anything',
'duration': t_done - t_start,
}
rtype = 'return'
else:
info('Scoring on Xval of shape', Xval.shape)
loss = 1.0 - classifier.score(Xval,yval)
# -- squared standard error of mean
lossvar = (loss * (1 - loss)) / max(1, len(yval) - 1)
info('OK trial with accuracy %.1f +- %.1f' % (
100 * (1.0 - loss),
100 * np.sqrt(lossvar)))
t_done = time.time()
rval = {
'loss': loss,
'loss_variance': lossvar,
'classifier': untrained_classifier,
'preprocs': preprocessings,
'status': hyperopt.STATUS_OK,
'duration': t_done - t_start,
'iterations': n_iters,
}
rtype = 'return'
except (NonFiniteFeature,), exc:
print 'Failing trial due to NaN in', str(exc)
t_done = time.time()
rval = {
'status': hyperopt.STATUS_FAIL,
'failure': str(exc),
'duration': t_done - t_start,
}
rtype = 'return'
except (ValueError,), exc:
if ('k must be less than or equal'
' to the number of training points') in str(exc):
t_done = time.time()
rval = {
'status': hyperopt.STATUS_FAIL,
'failure': str(exc),
'duration': t_done - t_start,
}
rtype = 'return'
else:
rval = exc
rtype = 'raise'
except (AttributeError,), exc:
print 'Failing due to k_means_ weirdness'
if "'NoneType' object has no attribute 'copy'" in str(exc):
# -- sklearn/cluster/k_means_.py line 270 raises this sometimes
t_done = time.time()
rval = {
'status': hyperopt.STATUS_FAIL,
'failure': str(exc),
'duration': t_done - t_start,
}
rtype = 'return'
else:
rval = exc
rtype = 'raise'
except Exception, exc:
rval = exc
rtype = 'raise'
# -- return the result to calling process
_conn.send((rtype, rval))
class hyperopt_estimator(object):
def __init__(self,
preprocessing=None,
classifier=None,
space=None,
algo=None,
max_evals=100,
verbose=0,
trial_timeout=None,
fit_increment=1,
fit_increment_dump_filename=None,
seed=None,
):
"""
Parameters
----------
preprocessing: pyll.Apply node
This should evaluate to a list of sklearn-style preprocessing
modules (may include hyperparameters).
classifier: pyll.Apply node
            This should evaluate to an sklearn-style classifier (may include
hyperparameters).
algo: hyperopt suggest algo (e.g. rand.suggest)
max_evals: int
Fit() will evaluate up to this-many configurations. Does not apply
to fit_iter, which continues to search indefinitely.
trial_timeout: float (seconds), or None for no timeout
Kill trial evaluations after this many seconds.
fit_increment: int
Every this-many trials will be a synchronization barrier for
ongoing trials, and the hyperopt Trials object may be
check-pointed. (Currently evaluations are done serially, but
that might easily change in future to allow e.g. MongoTrials)
fit_increment_dump_filename : str or None
Periodically dump self.trials to this file (via cPickle) during
            fit(). Saves after every `fit_increment` trial evaluations.
"""
self.max_evals = max_evals
self.verbose = verbose
self.trial_timeout = trial_timeout
self.fit_increment = fit_increment
self.fit_increment_dump_filename = fit_increment_dump_filename
if space is None:
if classifier is None:
classifier = components.any_classifier('classifier')
if preprocessing is None:
preprocessing = components.any_preprocessing('preprocessing')
self.space = hyperopt.pyll.as_apply({
'classifier': classifier,
'preprocessing': preprocessing,
})
else:
assert classifier is None
assert preprocessing is None
self.space = hyperopt.pyll.as_apply(space)
if algo is None:
self.algo=hyperopt.rand.suggest
else:
self.algo = algo
if seed is not None:
self.rstate = np.random.RandomState(seed)
else:
self.rstate = np.random.RandomState()
def info(self, *args):
if self.verbose:
print ' '.join(map(str, args))
def fit_iter(self, X, y, weights=None, increment=None):
"""Generator of Trials after ever-increasing numbers of evaluations
"""
assert weights is None
increment = self.fit_increment if increment is None else increment
# len does not work on sparse matrices, so using shape[0] instead
# shape[0] does not work on lists, so using len() for those
if scipy.sparse.issparse(X):
data_length = X.shape[0]
else:
data_length = len(X)
if type(X) is list:
X = np.array(X)
if type(y) is list:
y = np.array(y)
p = np.random.RandomState(123).permutation( data_length )
n_fit = int(.8 * data_length)
Xfit = X[p[:n_fit]]
yfit = y[p[:n_fit]]
Xval = X[p[n_fit:]]
yval = y[p[n_fit:]]
self.trials = hyperopt.Trials()
self._best_loss = float('inf')
fn=partial(_cost_fn,
Xfit=Xfit, yfit=yfit,
Xval=Xval, yval=yval,
info=self.info,
timeout=self.trial_timeout)
self._best_loss = float('inf')
def fn_with_timeout(*args, **kwargs):
conn1, conn2 = Pipe()
kwargs['_conn'] = conn2
th = Process(target=partial(fn, best_loss=self._best_loss),
args=args, kwargs=kwargs)
th.start()
if conn1.poll(self.trial_timeout):
fn_rval = conn1.recv()
th.join()
else:
self.info('TERMINATING DUE TO TIMEOUT')
th.terminate()
th.join()
fn_rval = 'return', {
'status': hyperopt.STATUS_FAIL,
'failure': 'TimeOut'
}
assert fn_rval[0] in ('raise', 'return')
if fn_rval[0] == 'raise':
raise fn_rval[1]
# -- remove potentially large objects from the rval
# so that the Trials() object below stays small
# We can recompute them if necessary, and it's usually
# not necessary at all.
if fn_rval[1]['status'] == hyperopt.STATUS_OK:
fn_loss = float(fn_rval[1].get('loss'))
fn_preprocs = fn_rval[1].pop('preprocs')
fn_classif = fn_rval[1].pop('classifier')
fn_iters = fn_rval[1].pop('iterations')
if fn_loss < self._best_loss:
self._best_preprocs = fn_preprocs
self._best_classif = fn_classif
self._best_loss = fn_loss
self._best_iters = fn_iters
return fn_rval[1]
while True:
new_increment = yield self.trials
if new_increment is not None:
increment = new_increment
hyperopt.fmin(fn_with_timeout,
space=self.space,
algo=self.algo,
trials=self.trials,
max_evals=len(self.trials.trials) + increment,
rstate=self.rstate,
# -- let exceptions crash the program,
# so we notice them.
catch_eval_exceptions=False,
return_argmin=False, # -- in case no success so far
)
def retrain_best_model_on_full_data(self, X, y, weights=None):
for pp_algo in self._best_preprocs:
pp_algo.fit(X)
X = pp_algo.transform(X * 1) # -- * 1 avoids read-only copy bug
if hasattr(self._best_classif, 'partial_fit'):
rng = np.random.RandomState(6665)
train_idxs = rng.permutation(X.shape[0])
for i in xrange(int(self._best_iters * retrain_fraction)):
rng.shuffle(train_idxs)
self._best_classif.partial_fit(X[train_idxs], y[train_idxs],
classes=np.unique(y))
else:
self._best_classif.fit(X,y)
def fit(self, X, y, weights=None):
"""
Search the space of classifiers and preprocessing steps for a good
predictive model of y <- X. Store the best model for predictions.
"""
filename = self.fit_increment_dump_filename
fit_iter = self.fit_iter(X, y,
weights=weights,
increment=self.fit_increment)
fit_iter.next()
while len(self.trials.trials) < self.max_evals:
increment = min(self.fit_increment,
self.max_evals - len(self.trials.trials))
fit_iter.send(increment)
if filename is not None:
with open(filename, 'wb') as dump_file:
self.info('---> dumping trials to', filename)
cPickle.dump(self.trials, dump_file)
self.retrain_best_model_on_full_data(X, y, weights)
def predict(self, X):
"""
Use the best model found by previous fit() to make a prediction.
"""
# -- copy because otherwise np.utils.check_arrays sometimes does not
# produce a read-write view from read-only memory
if scipy.sparse.issparse(X):
X = scipy.sparse.csr_matrix(X)
else:
X = np.array(X)
for pp in self._best_preprocs:
self.info("Transforming X of shape", X.shape)
X = pp.transform(X)
self.info("Predicting X of shape", X.shape)
return self._best_classif.predict(X)
def score( self, X, y ):
"""
Return the accuracy of the classifier on a given set of data
"""
# -- copy because otherwise np.utils.check_arrays sometimes does not
# produce a read-write view from read-only memory
X = np.array(X)
for pp in self._best_preprocs:
self.info("Transforming X of shape", X.shape)
X = pp.transform(X)
self.info("Classifying X of shape", X.shape)
return self._best_classif.score(X, y)
def best_model( self ):
"""
Returns the best model found by the previous fit()
"""
return {'classifier': self._best_classif,
'preprocs': self._best_preprocs}
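
# Minimal usage sketch (illustrative only; assumes scikit-learn's digits dataset
# and the default search space built from components.any_classifier and
# components.any_preprocessing):
#
#     from sklearn.datasets import load_digits
#     digits = load_digits()
#     est = hyperopt_estimator(max_evals=10, trial_timeout=60.0, seed=1)
#     est.fit(digits.data, digits.target)
#     print est.score(digits.data, digits.target)
#     print est.best_model()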
|
process_manager.py
|
"""Tools for managing subprocesses."""
from enum import Enum, auto as enum_auto
import subprocess
from typing import NamedTuple, Optional
from threading import Thread
import os
import signal
class ProcessExitResult(Enum):
"""Result of the process termination."""
# Process exited successfully.
Ok = enum_auto()
# Process did not exit successfully and was killed.
Killed = enum_auto()
class ProcessOutput(NamedTuple):
"""Outputs collected during process execution."""
exit_result: ProcessExitResult
exit_code: int
stdout: str
stderr: str
class ProcessManager:
"""ProcessManager is an entity capable of running a process
in a separate thread, joining it and collecting outputs."""
def __init__(self, command: str):
self._thread_handle = Thread(target=self._start_subprocess)
self._command = command
self._process: Optional[subprocess.Popen] = None
self._killed = False
self._output: Optional[ProcessOutput] = None
def _start_subprocess(self) -> None:
# We specify "shell=True" to be able to safely kill the process if we'll have to.
# With this argument process will start in separate shell, not related to the shell
# in which script is executed.
self._process = subprocess.Popen(
self._command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid
)
exit_code = self._process.wait()
stdout, stderr = map(lambda x: str(x, "utf-8"), self._process.communicate())
exit_result = ProcessExitResult.Ok if not self._killed else ProcessExitResult.Killed
self._output = ProcessOutput(exit_result, exit_code, stdout, stderr)
def _kill_process(self) -> None:
assert self._process is not None
self._killed = True
os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)
def run_sync(self) -> ProcessOutput:
"""Runs the process in the current thread, blocking until it exits."""
self._start_subprocess()
assert self._output is not None
return self._output
def start(self) -> None:
"""Launches the shell command in the separate thread."""
        self._thread_handle.daemon = True
self._thread_handle.start()
def join_process(self, timeout: float, kill_on_timeout: bool = True) -> ProcessOutput:
"""Tries to wait until process is terminated, kills it otherwise."""
self._thread_handle.join(timeout=timeout)
if self._thread_handle.is_alive() and kill_on_timeout:
# Process didn't stop, kill it.
self._kill_process()
        # After the process is killed, we can finally wait for the thread to finish.
self._thread_handle.join()
assert self._output is not None
return self._output
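
# Minimal usage sketch; "sleep 5" and "echo hello" are arbitrary shell commands
# chosen only for illustration (the preexec_fn/os.killpg calls above already
# assume a POSIX system).
if __name__ == "__main__":
    # A command that outlives its one-second budget is killed.
    slow = ProcessManager("sleep 5")
    slow.start()
    result = slow.join_process(timeout=1.0)
    assert result.exit_result is ProcessExitResult.Killed

    # A quick command finishes normally and its stdout is captured.
    quick = ProcessManager("echo hello").run_sync()
    assert quick.exit_result is ProcessExitResult.Ok
    assert quick.stdout.strip() == "hello"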
|
map_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.map()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import threading
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
def _make_coordinated_sloppy_dataset(num_elements, num_parallel_calls):
"""Produces a dataset iterator and events to control the order of elements.
Args:
num_elements: the number of input elements
num_parallel_calls: the degree of map parallelism
Returns:
A dataset iterator (represented as `get_next` op) and events that can be
used to control the order of output elements.
"""
# Set up threading events used to sequence when items are produced that
# are subsequently interleaved. These events allow us to deterministically
# simulate slowdowns and force sloppiness.
coordination_events = {i: threading.Event() for i in range(num_elements)}
def map_py_fn(x):
coordination_events[x].wait()
coordination_events[x].clear()
return x * x
def map_fn(x):
return script_ops.py_func(map_py_fn, [x], x.dtype)
options = dataset_ops.Options()
options.experimental_deterministic = False
dataset = dataset_ops.Dataset.range(num_elements).map(
map_fn, num_parallel_calls).with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
return next_element, coordination_events
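# Sketch of intended use (hypothetical ordering): with num_parallel_calls > 1 and
# deterministic execution disabled as above, a test can set coordination_events[1]
# before coordination_events[0] and expect sess.run(next_element) to yield 1*1
# before 0*0, demonstrating sloppy, out-of-order production.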
class MapDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
def _buildMapDataset(self, components, count):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(count))
def testMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildMapDataset(components, count)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.cached_session() as sess:
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={count: 14})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={count: 18})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
def _buildParallelMapDataset(self, components, count, num_parallel_calls,
output_buffer_size):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components)
.map(_map_fn, num_parallel_calls=num_parallel_calls)
.prefetch(output_buffer_size)
.repeat(count))
def testParallelMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
num_parallel_calls = array_ops.placeholder(dtypes.int32, shape=[])
output_buffer_size = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildParallelMapDataset(
components, count, num_parallel_calls, output_buffer_size)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.cached_session() as sess:
def do_test(num_parallel_calls_val, output_buffer_size_val):
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={
count: 14,
num_parallel_calls: num_parallel_calls_val,
output_buffer_size: output_buffer_size_val})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={
count: 18,
num_parallel_calls: num_parallel_calls_val,
output_buffer_size: output_buffer_size_val})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread)
for _ in range(64)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
for num_parallel_calls_val, output_buffer_size_val in [
(1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:
do_test(num_parallel_calls_val, output_buffer_size_val)
def testImplicitDisposeParallelMapDataset(self):
# Tests whether a parallel map dataset will be cleaned up correctly when
# the pipeline does not run it until exhaustion.
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(1000).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
dataset = self._buildParallelMapDataset(components, 1000, 100, 100)
# NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
dataset = dataset.prefetch(100)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
def testParallelMapUnspecifiedOutputSize(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
def testParallelMapError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetchError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"))
.prefetch(2))
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureIterator(self):
def _build_ds(iterator):
def _map_fn(x):
get_next = iterator.get_next()
return x * get_next
return dataset_ops.Dataset.range(10).map(_map_fn)
def _build_graph():
captured_iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10))
ds = _build_ds(captured_iterator)
iterator = ds.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
return captured_iterator.initializer, init_op, get_next
with ops.Graph().as_default() as g:
captured_init_op, init_op, get_next = _build_graph()
with self.session(graph=g) as sess:
sess.run(captured_init_op)
sess.run(init_op)
for i in range(10):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureHashTable(self):
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
iterator = dataset_ops.make_initializable_iterator(
input_sentences
.map(lambda x: string_ops.string_split([x]).values).map(table.lookup))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(table.initializer)
sess.run(init_op)
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureQueue(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: queue.dequeue()))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for element in elements:
self.assertEqual(element, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureSameResourceMultipleTimes(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
queue_2 = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: (queue.dequeue(), queue_2.dequeue())))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for i in range(100):
self.assertEqual(sorted([elements[i * 2], elements[i * 2 + 1]]),
sorted(sess.run(get_next)))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureVariable(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1)))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(counter_var.initializer)
sess.run(init_op)
for i in range(10):
self.assertEqual(i, sess.run(counter_var))
self.assertEqual(i + 1, sess.run(get_next))
self.assertEqual(10, sess.run(counter_var))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
self.assertEqual(10, sess.run(counter_var))
def testCaptureUninitializedVariableError(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1)))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.NotFoundError):
sess.run(get_next)
def testSeededStatefulOperatorIsProperlyStateful(self):
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: random_ops.random_uniform((), seed=11)).batch(2))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(sess.run(get_next))
self.assertEqual(10, len(random_values))
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
sess.run(init_op)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(sess.run(get_next))
# Randomness is repeatable given same seed
self.assertAllClose(random_values, random_values_2)
def testStatefulMapKeepsStateAcrossIterators(self):
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: random_ops.random_uniform((), seed=11))
.repeat(1000)
.batch(10))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
random_values = sess.run(get_next)
# Assert that one of the next 99 batches yielded by the iterator is
# different from the first.
i = 0
while i < 99:
if np.any(random_values != sess.run(get_next)):
break
i += 1
self.assertLess(i, 99)
def testStatefulOperationInShortCircuit(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
def increment_fn(x):
counter_var.assign_add(1)
return x
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).map(increment_fn))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(counter_var.initializer)
sess.run(init_op)
for i in range(10):
self.assertEqual(i, sess.run(counter_var))
self.assertEqual(i, sess.run(get_next))
self.assertEqual(10, sess.run(counter_var))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
self.assertEqual(10, sess.run(counter_var))
def testMapDict(self):
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10)
.map(lambda x: {"foo": x * 2, "bar": x ** 2})
.map(lambda d: d["foo"] + d["bar"]))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual(i * 2 + i**2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMapNamedtuple(self, count=10):
# construct dataset of tuples
labels = dataset_ops.Dataset.range(count)
images = labels.map(lambda l: -l)
dataset_tuple = dataset_ops.Dataset.zip((labels, images))
# convert dataset of tuples to dataset of namedtuples
example = namedtuple("Example", ["label", "image"])
dataset_namedtuple = dataset_tuple.map(example)
def preprocess_tuple(label, image):
image = 2 * image
return label, image
def preprocess_namedtuple(example):
return example._replace(image=2 * example.image)
# preprocess both datasets
dataset_tuple = dataset_tuple.map(preprocess_tuple)
dataset_namedtuple = dataset_namedtuple.map(preprocess_namedtuple)
next_tuple = dataset_ops.make_one_shot_iterator(dataset_tuple).get_next()
next_namedtuple = dataset_ops.make_one_shot_iterator(
dataset_namedtuple).get_next()
# make sure both datasets contain the same data
with self.cached_session() as sess:
for i in range(count):
tuple_, namedtuple_ = sess.run([next_tuple, next_namedtuple])
self.assertEqual(tuple_, namedtuple_)
self.assertEqual(tuple_, (i, -2 * i))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_namedtuple)
def testUseStepContainerInMap(self):
row = np.arange(6)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensors(row)
.map(lambda elems: functional_ops.map_fn(lambda x: x * x, elems)))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
self.assertAllEqual(row**2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaseAndCondInMap(self):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
def defaults_two():
return control_flow_ops.cond(
math_ops.equal(math_ops.mod(x, 2), 0),
multiply,
divide,
name="cond_mult")
pred_fn_pairs = {
math_ops.logical_or(math_ops.equal(y, 2), math_ops.equal(y, 3)):
defaults_two,
}
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
def build_dataset(row, num):
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensor_slices(row).map(
lambda x: control_map_fn(x, num)))
init_op = iterator.initializer
get_next = iterator.get_next()
return init_op, get_next
with self.cached_session() as sess:
row = np.arange(6)
for num in [2, 3, 4]:
init_op, get_next = build_dataset(row, num)
sess.run(init_op)
for i in range(6):
self.assertEqual(
(i // 2 if i % 2 else i * 2) if (num == 2 or num == 3) else i * 2,
sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaseInWhileInMap(self):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
pred_fn_pairs = {
math_ops.logical_or(math_ops.equal(y, 2), math_ops.equal(y, 3)):
divide,
}
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
def build_dataset(row, num):
# pylint: disable=g-long-lambda
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensors(row).map(
lambda elems: functional_ops.map_fn(
lambda x: control_map_fn(x, num), elems)))
init_op = iterator.initializer
get_next = iterator.get_next()
return init_op, get_next
with self.cached_session() as sess:
row = np.arange(6)
for num in [2, 3, 4]:
init_op, get_next = build_dataset(row, num)
sess.run(init_op)
self.assertAllEqual(
[x // 2 if (num == 2 or num == 3) else x * 2 for x in row],
sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaseAndCondInWhileInMap(self):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
def defaults_two():
return control_flow_ops.cond(
math_ops.equal(math_ops.mod(x, 2), 0),
multiply,
divide,
name="cond_mult")
pred_fn_pairs = {
math_ops.logical_or(math_ops.equal(y, 2), math_ops.equal(y, 3)):
defaults_two,
}
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
row = np.arange(6)
num = 2
# pylint: disable=g-long-lambda
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensors(row).map(
lambda elems: functional_ops.map_fn(
lambda x: control_map_fn(x, num), elems)))
# pylint: enable=g-long-lambda
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
self.assertAllEqual([(x // 2 if x % 2 else x * 2) if
(num == 2 or num == 3) else x * 2 for x in row],
sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetch(self):
# We will use this event to test that `_map_py_func()` has been
# invoked a certain number of times (6 times, to be exact) after
# consuming fewer elements from the iterator.
ev = threading.Event()
set_event_during_invocation = 5
def _map_py_func(x):
if x == set_event_during_invocation:
ev.set()
return x * x
def _map_fn(x):
return script_ops.py_func(_map_py_func, [x], x.dtype)
buffer_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(100)
.map(_map_fn)
.prefetch(buffer_size_placeholder))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
# Simple test that prefetch yields the expected values in the
# expected order.
for buffer_size in [1, 10, 100, 1000]:
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# We can indirectly observe that varying the buffer size has the
# intended effect by observing when `ev` is set (on the 6th
# invocation of `_map_py_func()`).
# NOTE(mrry): We do not test with `buffer_size ==
# set_event_during_invocation`, because we must consume at least
# one element to start the prefetching.
for buffer_size in range(1, set_event_during_invocation):
event_will_be_set_after_consuming = (
set_event_during_invocation - buffer_size + 1)
ev.clear()
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(event_will_be_set_after_consuming):
self.assertFalse(ev.is_set())
self.assertEqual(i * i, sess.run(get_next))
ev.wait()
for i in range(event_will_be_set_after_consuming, 100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testReturnList(self):
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10)
.map(lambda x: [x, constant_op.constant(37.0)]))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMultiOutputPyFunc(self):
# The `tf.py_func()` op returns a list of tensors for its outputs.
def _map_fn(x_tensor):
def _map_py_func(x):
return x, np.array(37.0, dtype=np.float64)
return script_ops.py_func(
_map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).map(_map_fn))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).map(_sparse))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for i in range(10):
actual = sess.run(get_next)
self.assertIsInstance(actual, sparse_tensor.SparseTensorValue)
self.assertSparseValuesEqual(actual, _sparse(i))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSparseChain(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def _check(i):
self.assertTrue(sparse_tensor.is_sparse(i))
return sparse_ops.sparse_concat(0, [i, i])
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).map(_sparse).map(_check))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for i in range(10):
actual = sess.run(get_next)
self.assertIsInstance(actual, sparse_tensor.SparseTensorValue)
self.assertSparseValuesEqual(actual, _check(_sparse(i)).eval())
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testParallelMapOutOfRangeError(self):
def raising_py_func(i):
if i == 100:
raise StopIteration()
else:
return i
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(105)
.map(lambda x: script_ops.py_func(raising_py_func, [x], dtypes.int64),
num_parallel_calls=2))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for i in range(100):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testConstantOutput(self):
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).map(lambda x: [x, "hello", 10]))
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, b"hello", 10), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testWarnOnLookupTable(self):
def collecting_function(x):
_ = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer([], []), 0.0, name="t1")
return x
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(collecting_function)
# NOTE(mrry): Python 3 prints other warnings in addition to the one we are
# testing, so we search for the expected warning.
self.assertGreaterEqual(len(w), 1)
found_warning = False
for warning in w:
if ("Creating lookup tables inside a function passed to Dataset.map() is "
"not supported." in str(warning)):
found_warning = True
break
self.assertTrue(found_warning)
def testNestedDatasetMap(self):
# TODO(b/110122868): When iterators can yield a `tf.data.Dataset`, remove
# the `get_single_element()` call.
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0]).map(
dataset_ops.Dataset.from_tensor_slices).map(
lambda ds: ds.batch(3)).flat_map(lambda x: x)
self.assertDatasetProduces(dataset, [[1.0, 2.0, 3.0]])
def testReturnValueError(self):
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
with self.assertRaisesRegexp(
TypeError, r"Unsupported return value from function passed to "
r"Dataset.map\(\): None."):
_ = dataset.map(lambda x: None)
def testBrokenFunctionErrorOnInitialization(self):
dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0, 3.0])
def broken_function(_):
"""A function deliberately designed to fail on instantiation."""
value = []
tensor_value = attr_value_pb2.AttrValue()
tensor_value.tensor.CopyFrom(
tensor_util.make_tensor_proto(
value, dtype=dtypes.float32, shape=[0], verify_shape=False))
dtype_value = attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum)
# Create a "Const" op with a `tf.float32` value and a `tf.int32` type
# attr.
const_tensor = ops.get_default_graph().create_op(
"Const", [], [dtypes.int32],
attrs={
"value": tensor_value,
"dtype": dtype_value
},
name="BrokenConst").outputs[0]
return const_tensor
dataset = dataset.map(broken_function)
iterator = dataset_ops.make_initializable_iterator(dataset)
with self.cached_session() as sess:
with self.assertRaisesRegexp(errors.InvalidArgumentError, "BrokenConst"):
sess.run(iterator.initializer)
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
("Map", lambda dataset, func:
dataset_ops.MapDataset(dataset, func, use_inter_op_parallelism=False)),
("ParallelMap", lambda dataset, func:
dataset_ops.ParallelMapDataset(dataset, func, num_parallel_calls=1,
use_inter_op_parallelism=False)),
)
def testNoInterOpParallelism(self, make_dataset_fn):
dataset = dataset_ops.Dataset.from_tensors(0)
def _get_tid():
return np.int64(threading.current_thread().ident)
def _map_fn(_):
tids = []
for _ in range(10):
tids.append(script_ops.py_func(_get_tid, [], dtypes.int64))
return tids
dataset = make_dataset_fn(dataset, _map_fn)
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
with self.cached_session() as sess:
tids = sess.run(get_next)
self.assertTrue(all(tids[0] == tid for tid in tids))
# pylint: enable=g-long-lambda
@parameterized.named_parameters(
("SequentialIdentity", None, lambda x: x, None),
("SequentialReplicate", None, lambda x: (x, x), None),
("SequentialSwap", (None, None), lambda x, y: (y, x), None),
("SequentialProject", (None, None), lambda x, y: x, None),
("ParallelIdentity", None, lambda x: x, 10),
("ParallelReplicate", None, lambda x: (x, x), 10),
("ParallelSwap", (None, None), lambda x, y: (y, x), 10),
("ParallelProject", (None, None), lambda x, y: x, 10),
)
def testShortCircuit(self, structure, map_fn, num_parallel_calls):
dataset = self.structuredDataset(structure).repeat().map(
map_fn, num_parallel_calls=num_parallel_calls)
get_next = dataset_ops.make_one_shot_iterator(dataset).get_next()
with self.cached_session() as sess:
if isinstance(structure, tuple):
expected = map_fn(*sess.run(self.structuredElement(structure)))
else:
expected = map_fn(sess.run(self.structuredElement(structure)))
self.assertEqual(expected, sess.run(get_next))
@parameterized.named_parameters(
("Sequential", None),
("Parallel", 10),
)
def testShortCircuitCapturedInput(self, num_parallel_calls):
captured_t = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self.structuredDataset(None).repeat().map(
lambda x: captured_t, num_parallel_calls=num_parallel_calls)
iterator = dataset_ops.make_initializable_iterator(dataset)
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(iterator.initializer, feed_dict={captured_t: 42})
self.assertEqual(42, sess.run(get_next))
@parameterized.named_parameters(
("1", 1, 1),
("2", 10, 1),
("3", 10, 10),
("4", 100, 1),
("5", 100, 10),
("6", 100, 100),
)
def testSloppyInterleaveInOrder(self, num_elements, num_parallel_calls):
get_next, coordination_events = _make_coordinated_sloppy_dataset(
num_elements, num_parallel_calls)
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=num_parallel_calls + 1,
use_per_session_threads=True)
with self.cached_session(config=config) as sess:
for i in range(num_elements):
coordination_events[i].set()
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@parameterized.named_parameters(
("1", 10, 10),
("2", 100, 10),
("3", 100, 100),
)
def testSloppyInterleaveOutOfOrder(self, num_elements, num_parallel_calls):
get_next, coordination_events = _make_coordinated_sloppy_dataset(
num_elements, num_parallel_calls)
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=num_parallel_calls + 1,
use_per_session_threads=True)
with self.cached_session(config=config) as sess:
elements = [x for x in range(num_elements)]
for i in [1, 4, 7]:
elements[i], elements[i + 1] = elements[i + 1], elements[i]
for element in elements:
coordination_events[element].set()
self.assertEqual(element * element, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
|
getters.py
|
"""Store classes that retrieve data."""
import logging
import sys
import webbrowser # lazy loaded
import textwrap # lazy loaded
from pathlib import Path
from threading import RLock, Thread
from time import sleep
from typing import TYPE_CHECKING, Optional, Type, Union
from wiki_music.constants import (API_KEY_FILE, API_KEY_MESSAGE,
GOOGLE_API_URL, NLTK_DOWNLOAD_MESSAGE)
from .parser_utils import ThreadPool
from .sync import Action, IniSettings
from .utils import MultiLog, limited_input
log = logging.getLogger(__name__)
if TYPE_CHECKING:
import nltk
Nltk = Type[nltk]
__all__ = ["GoogleApiKey", "NLTK"]
class GoogleApiKey:
"""Class that reads and stores google API key.
If the key is not found, prompt the user to obtain one.
"""
_log: MultiLog = MultiLog(log)
_api_key: Optional[str] = None
@classmethod
def value(cls, GUI: bool) -> Optional[str]:
"""Reads google api key needed by lyricsfinder from file.
Returns
-------
Optional[str]
google API key
"""
if not cls._api_key:
# load google api key for lyrics search
try:
cls._api_key = API_KEY_FILE.read_text().strip()
except Exception:
cls._log.debug("api key not present in file")
if not IniSettings.read("api_key_dont_bother", False):
cls._log.debug("will try to obtain api key from internet")
cls._api_key = cls.get(GUI)
else:
cls._log.debug("don't bother with api key in settings")
cls._api_key = None
return cls._api_key
@classmethod
def get(cls, GUI: bool, in_thread: bool = True) -> Optional[str]:
"""Prompt user to input google API key.
Ask the user through GUI or CLI whether they want to get a Google API key.
Three options are available: yes, no, and don't bother me again.
Parameters
----------
GUI: bool
if we are running in GUI or CLI mode
in_thread: bool
when running from GUI create a separate thread,
so GUI is not halted
Returns
-------
Optional[str]
key in string format or none if key was not retrieved
Note
----
If the method is called with in_thread=True then a new thread is
spawned and download runs in it. This is for use in GUI so the main
thread is not halted.
"""
if in_thread:
cls._log.debug("api key download started from GUI, "
"spawning new background thread")
Thread(target=cls.get, args=(GUI, False),
name="API-key-getter", daemon=True).start()
return None
# ask user if he wants to get the google API key
cls._log.debug("show api key prompt")
if GUI:
a = Action("api_key", API_KEY_MESSAGE)
inpt = a.response
else:
print(textwrap.fill(API_KEY_MESSAGE, width=70))
inpt = limited_input(dont_bother=True)
if inpt == "d":
cls._log.debug("answer is don't bother")
IniSettings.write("api_key_dont_bother", True)
return None
elif inpt:
cls._log.debug("opening browser page")
# open page in browser
webbrowser.open_new_tab(GOOGLE_API_URL)
cls._log.debug("waiting for api key input")
if GUI:
api_key = Action("load_api_key").response
else:
# wait for key input
api_key = str(input("Paste the key here: ")).strip()
cls._log.debug("saving api key file to file")
# delete don't bother setting
IniSettings.delete("api_key_dont_bother")
# write key to file
API_KEY_FILE.write_text(api_key)
return api_key
else:
return None
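# A minimal usage sketch (illustrative only, not part of the original module):
# in CLI mode the whole lookup is a single call; reading the key file, the
# "don't bother" setting and the interactive prompt are handled internally.
#
#     key = GoogleApiKey.value(GUI=False)
#     if key is None:
#         pass  # continue without lyrics search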
class _NltkMeta(type):
"""Metaclass which defines thread safe nltk property for NLTK class.
See Also
--------
:class:`NLTK`
References
----------
https://stackoverflow.com/questions/128573/using-property-on-classmethods
"""
_lock: RLock = RLock()
_nltk: "Nltk"
@property
def nltk(cls) -> "Nltk":
"""Thread safe property which holds reference to nltk lib.
:type: nltk
"""
with cls._lock:
return cls._nltk
class NLTK(metaclass=_NltkMeta):
"""A thread safe nltk importer. Checks if nltk data is downloaded.
Will make other threads wait if they want to access nltk
until it is imported. If the data is not downloaded it will ask to download
it.
Attributes
----------
nltk: nltk
thread safe attribute is provided by metaclass
"""
_import_running: bool = False
_GUI: bool = False
_multi_threaded: bool = True
_log: MultiLog = MultiLog(log)
@classmethod
def run_import(cls, GUI: bool = False, delay: float = 1,
multi_threaded_download: bool = True):
"""Import nltk in separate thread and assign it to class attribute.
Parameters
----------
GUI: bool
tells if we are running in GUI mode
delay: float
delays the start of import by specified amount of seconds
multi_threaded_download: bool
if NLTK data needs to be downloaded, this switch controls whether
the download will run in parallel
"""
def imp(delay: float):
if delay:
sleep(delay)
with cls._lock:
cls._log.debug("import nltk")
try:
import nltk
cls._nltk = nltk
except ImportError as e:
cls._log.debug(f"failed to import nltk: {e}")
else:
cls._log.debug("import nltk done, checking data...")
cls._check_nltk_data()
cls._GUI = GUI
cls._multi_threaded = multi_threaded_download
if not cls._import_running:
cls._import_running = True
# load NLTK in separate thread
Thread(target=imp, args=(delay, ), name="ImportNLTK").start()
else:
cls._log.debug("nltk import already running")
@classmethod
def _check_nltk_data(cls):
# check if user rejected download previously
if IniSettings.read("nltk_dont_bother", False):
cls._log.debug("do not bother with nltk data")
return
# try to read package defined path
path = IniSettings.read("nltk_data_path", "")
if path:
cls.nltk.data.path.append(path)
# check if any of the paths exists
for p in cls._nltk.data.path:
if Path(p).exists():
nltk_downloaded = True
cls._log.debug("NLTK data present")
break
else:
nltk_downloaded = False
# if none exists ask user for download
if not nltk_downloaded:
if cls._GUI:
a = Action("nltk_data", NLTK_DOWNLOAD_MESSAGE)
inpt = a.response
else:
print(textwrap.fill(NLTK_DOWNLOAD_MESSAGE, width=70))
inpt = limited_input(dont_bother=True)
if inpt == "d":
IniSettings.write("nltk_dont_bother", True)
elif inpt:
cls.download_data()
else:
pass
@classmethod
def download_data(cls, in_thread: bool = False):
"""Get download path from GUI or CLI, then download data there.
Parameters
----------
in_thread: bool
when running from GUI create a separate thread,
so GUI is not halted
Note
----
If the method is called with in_thread=True then a new thread is
spawned and download runs in it. This is for use in GUI so the main
thread is not halted.
"""
if in_thread:
Thread(target=cls.download_data, args=(False,),
name="NLTK-downloader", daemon=True).start()
return
NLTK_DATA = cls._get_default_path()
# get nltk path from CLI or GUI
if cls._GUI:
a = Action("download_nltk_data", str(NLTK_DATA))
NLTK_DATA = Path(a.response)
else:
while True:
inpt = str(input(f"({NLTK_DATA}[ENTER]): ")).strip()
if not inpt and NLTK_DATA:
break
else:
NLTK_DATA = Path(inpt)
# check if the path is valid
try:
NLTK_DATA.mkdir(parents=True, exist_ok=True)
except FileExistsError:
cls._log.warning("Invalid path, a file with this name already "
"exists.")
cls.download_data()
except PermissionError:
cls._log.warning("Invalid path, cannot write in this directory.")
cls.download_data()
# append path to defaults and save to settings so app may find it
cls._nltk.data.path.append(NLTK_DATA)
IniSettings.write("nltk_data_path", str(NLTK_DATA))
# delete don't bother setting
IniSettings.delete("nltk_dont_bother")
# download data
cls._log.info("Downloading nltk data")
datas = ("words", "stopwords", "maxent_ne_chunker",
"averaged_perceptron_tagger", "punkt")
t = ThreadPool(cls._nltk.download, [(d, NLTK_DATA) for d in datas])
if cls._multi_threaded:
t.run()
else:
t.run_serial()
# clear unnecessary files
cls._download_postprocess(NLTK_DATA)
@classmethod
def _get_default_path(cls) -> Path:
"""Get platform specific nltk default data path.
Raises
------
Warn
if running on an unsupported platform
Returns
-------
Path
default nltk data path for current platform
"""
if sys.platform.startswith("win32"):
return Path("C:/nltk_data")
elif sys.platform.startswith("linux"):
return Path("/usr/share/nltk_data")
elif sys.platform.startswith("darwin"):
return Path("/usr/local/share/nltk_data")
else:
msg = "Usupported platform! you must specify path manually."
cls._log.warning(msg)
return ""
@classmethod
def _download_postprocess(cls, path: Path):
cls._log.info("Deleting unnecessary files...")
for p in path.rglob("*"):
if p.name.endswith((".zip", ".pickle", "README")):
# avoid deleting some files
if ("english" not in p.name and
"_perceptron_tagger.p" not in p.name): # noqa E129
p.unlink()
cls._log.info("Done")
|
EfficientDetFinetuningModel.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright 2020-2021 antillia.com Toshiyuki Arai
"""The main training script."""
#2021/09/01 Merged with the google/automl/efficientdet.
#2021/09/20 Fixed ckpt method in TrainConfigParser.py
#This is based on the google/automl/efficientdet/main.py
import multiprocessing
import os
# <added date="2021/0810"> arai
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
# </added>
import sys
import traceback
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import dataloader
#import det_model_fn
#2021/11/17
import det_model_fn2 as det_model_fn
import hparams_config
import utils
import pprint
from io import StringIO
from LabelMapReader import LabelMapReader
from TrainConfigParser import TrainConfigParser
from mAPEarlyStopping import mAPEarlyStopping
from FvalueEarlyStopping import FvalueEarlyStopping
from COCOMetricsWriter import COCOMetricsWriter
from EpochChangeNotifier import EpochChangeNotifier
from TrainingLossesWriter import TrainingLossesWriter
from CategorizedAPWriter import CategorizedAPWriter
class EfficientDetFinetuningModel(object):
def __init__(self, train_config):
self.TRAIN = 'train'
self.EVAL = 'eval'
self.TRAIN_AND_EVAL = 'train_and_eval'
self.parser = TrainConfigParser(train_config)
self.model_dir = self.parser.model_dir()
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
eval_dir = self.parser.eval_dir()
print("=== eval_dir {}",format(eval_dir))
if os.path.exists(eval_dir) == False:
os.makedirs(eval_dir)
training_losses_file = self.parser.training_losses_file()
print("=== training_losses_file{}".format(training_losses_file))
self.label_map_pbtxt = self.parser.label_map_pbtxt()
labelMapReader = LabelMapReader()
self.label_map, classes = labelMapReader.read( self.label_map_pbtxt)
print("=== label_map {}".format(self.label_map))
self.training_losses_writer = TrainingLossesWriter(training_losses_file)
coco_ap_per_class_file = self.parser.coco_ap_per_class_file()
print("=== coco_ap_per_class_file {}".format(coco_ap_per_class_file ))
self.disable_per_class_ap = self.parser.disable_per_class_ap()
self.categorized_ap_writer = None
if self.disable_per_class_ap == False:
self.categorized_ap_writer = CategorizedAPWriter(self.label_map_pbtxt, coco_ap_per_class_file)
coco_metrics_file = self.parser.coco_metrics_file()
print("=== evaluation_results_file {}".format(coco_metrics_file))
self.coco_metrics_writer = COCOMetricsWriter(coco_metrics_file)
self.early_stopping_metric = self.parser.early_stopping_metric()
patience = self.parser.early_stopping_patience()
self.early_stopping = None
if patience > 0:
# 2021/10/13
if self.early_stopping_metric == "map":
self.early_stopping = mAPEarlyStopping(patience=patience, verbose=1)
elif self.early_stopping_metric == "fvalue":
self.early_stopping = FvalueEarlyStopping(patience=patience, verbose=1)
def train(self):
ipaddress = self.parser.epoch_change_notifier_ipaddress()
port = self.parser.epoch_change_notifier_port()
self.epoch_change_notifier = EpochChangeNotifier(ipaddress, port)
self.epoch_change_notifier.begin_training()
if self.parser.strategy() == 'tpu':
self.tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
self.parser.tpu(), zone=self.parser.tpu_zone(), project=self.parser.gcp_project() )
tpu_grpc_url = self.tpu_cluster_resolver.get_master()
tf.Session.reset(tpu_grpc_url)
else:
self.tpu_cluster_resolver = None
# Check data path
if self.parser.mode() in ('train', 'train_and_eval'):
if self.parser.train_file_pattern() is None:
raise RuntimeError('Must specify --train_file_pattern for train.')
if self.parser.mode() in ('eval', 'train_and_eval'):
if self.parser.val_file_pattern() is None:
raise RuntimeError('Must specify --val_file_pattern for eval.')
# Parse and override hparams
config = hparams_config.get_detection_config(self.parser.model_name())
#hparams="image_size=416x416"
hparams = self.parser.hparams()
#2021/11/10 Checking hparams
if hparams:
config.override(self.parser.hparams())
if self.parser.num_epochs(): # NOTE: remove this flag after updating all docs.
config.num_epochs = self.parser.num_epochs()
# Parse image size in case it is in string format.
config.image_size = utils.parse_image_size(config.image_size)
# The following is for spatial partitioning. `features` has one tensor while
# `labels` has 4 + (`max_level` - `min_level` + 1) * 2 tensors. The input
# partition is performed on `features` and all partitionable tensors of
# `labels`, see the partition logic below.
# In the TPUEstimator context, the meaning of `shard` and `replica` is the
# same; following the API, both terms are used interchangeably here.
if self.parser.use_spatial_partition():
# Checks input_partition_dims agrees with num_cores_per_replica.
if self.parser.num_cores_per_replica() != np.prod(self.parser.input_partition_dims() ):
raise RuntimeError('--num_cores_per_replica must be a product of array '
'elements in --input_partition_dims.')
labels_partition_dims = {
'mean_num_positives': None,
'source_ids': None,
'groundtruth_data': None,
'image_scales': None,
'image_masks': None,
}
# The Input Partition Logic: We partition only the partition-able tensors.
feat_sizes = utils.get_feat_sizes(
config.get('image_size'), config.get('max_level'))
for level in range(config.get('min_level'), config.get('max_level') + 1):
def _can_partition(spatial_dim):
partitionable_index = np.where(
spatial_dim % np.array(self.parser.input_partition_dims() ) == 0)
return len(partitionable_index[0]) == len(self.parser.input_partition_dims() )
spatial_dim = feat_sizes[level]
if _can_partition(spatial_dim['height']) and _can_partition(
spatial_dim['width']):
labels_partition_dims['box_targets_%d' %
level] = self.parser.input_partition_dims()
labels_partition_dims['cls_targets_%d' %
level] = self.parser.input_partition_dims()
else:
labels_partition_dims['box_targets_%d' % level] = None
labels_partition_dims['cls_targets_%d' % level] = None
num_cores_per_replica = self.parser.num_cores_per_replica()
input_partition_dims = [self.parser.input_partition_dims(), labels_partition_dims]
num_shards = self.parser.num_cores() // num_cores_per_replica
else:
num_cores_per_replica = None
input_partition_dims = None
num_shards = self.parser.num_cores()
params = dict(
config.as_dict(),
model_name=self.parser.model_name(),
iterations_per_loop=self.parser.iterations_per_loop(),
model_dir=self.parser.model_dir(),
num_shards=num_shards,
num_examples_per_epoch=self.parser.num_examples_per_epoch(),
strategy=self.parser.strategy(),
backbone_ckpt=self.parser.backbone_ckpt(),
ckpt=self.parser.ckpt(),
val_json_file=self.parser.val_json_file(),
testdev_dir=self.parser.testdev_dir(),
profile=self.parser.profile(),
mode=self.parser.mode())
config_proto = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False)
print("=== params ")
pprint.pprint(params)
if self.parser.strategy() != 'tpu':
if self.parser.use_xla():
config_proto.graph_options.optimizer_options.global_jit_level = (
tf.OptimizerOptions.ON_1)
config_proto.gpu_options.allow_growth = True
model_dir = self.parser.model_dir()
model_fn_instance = det_model_fn.get_model_fn(self.parser.model_name())
max_instances_per_image = config.max_instances_per_image
if self.parser.eval_samples():
self.eval_steps = int((self.parser.eval_samples() + self.parser.eval_batch_size() - 1) //
self.parser.eval_batch_size())
else:
self.eval_steps = None
total_examples = int(config.num_epochs * self.parser.num_examples_per_epoch())
train_steps = total_examples // self.parser.train_batch_size()
logging.info(params)
if not tf.io.gfile.exists(model_dir):
tf.io.gfile.makedirs(model_dir)
config_file = os.path.join(model_dir, 'config.yaml')
if not tf.io.gfile.exists(config_file):
tf.io.gfile.GFile(config_file, 'w').write(str(config))
self.train_input_fn = dataloader.InputReader(
self.parser.train_file_pattern(),
is_training=True,
use_fake_data=self.parser.use_fake_data(),
max_instances_per_image=max_instances_per_image)
self.eval_input_fn = dataloader.InputReader(
self.parser.val_file_pattern(),
is_training=False,
use_fake_data=self.parser.use_fake_data(),
max_instances_per_image=max_instances_per_image)
if self.parser.strategy() == 'tpu':
tpu_config = tf.estimator.tpu.TPUConfig(
self.parser.iterations_per_loop() if self.parser.strategy() == 'tpu' else 1,
num_cores_per_replica=num_cores_per_replica,
input_partition_dims=input_partition_dims,
per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
.PER_HOST_V2)
run_config = tf.estimator.tpu.RunConfig(
cluster=self.tpu_cluster_resolver,
model_dir=model_dir,
log_step_count_steps=self.parser.iterations_per_loop(),
session_config=config_proto,
tpu_config=tpu_config,
save_checkpoints_steps=self.parser.save_checkpoints_steps(),
tf_random_seed=self.parser.tf_random_seed(),
)
# TPUEstimator can do both train and eval.
train_est = tf.estimator.tpu.TPUEstimator(
model_fn=model_fn_instance,
train_batch_size=self.parser.train_batch_size(),
eval_batch_size=self.parser.eval_batch_size(),
config=run_config,
params=params)
eval_est = train_est
else:
strategy = None
if self.parser.strategy() == 'gpus':
strategy = tf.distribute.MirroredStrategy()
run_config = tf.estimator.RunConfig(
model_dir=model_dir,
train_distribute=strategy,
log_step_count_steps=self.parser.iterations_per_loop(),
session_config=config_proto,
save_checkpoints_steps=self.parser.save_checkpoints_steps(),
tf_random_seed=self.parser.tf_random_seed(),
)
def get_estimator(global_batch_size):
params['num_shards'] = getattr(strategy, 'num_replicas_in_sync', 1)
params['batch_size'] = global_batch_size // params['num_shards']
params['eval_dir'] = self.parser.eval_dir() #2021/11/14
params['label_map'] = self.label_map #2021/11/14
params['disable_per_class_ap'] = self.parser.disable_per_class_ap() #2021/11/15
print("------------------------disable_per_class_ap {}".format(params['disable_per_class_ap']))
return tf.estimator.Estimator(
model_fn=model_fn_instance, config=run_config, params=params)
# train and eval need different estimator due to different batch size.
self.train_est = get_estimator(self.parser.train_batch_size())
self.eval_est = get_estimator(self.parser.eval_batch_size())
# start train/eval flow.
if self.parser.mode() == "train": #'train':
print("=== train ")
self.train_est.train(input_fn=self.train_input_fn, max_steps=train_steps)
if self.parser.eval_after_train():
self.eval_est.evaluate(input_fn=self.eval_input_fn, steps=self.eval_steps)
elif self.parser.mode() == 'eval':
# Run evaluation when there's a new checkpoint
for ckpt in tf.train.checkpoints_iterator(
self.parser.model_dir(),
min_interval_secs=self.parser.min_eval_interval(),
timeout=self.parser.eval_timeout()):
logging.info('Starting to evaluate.')
try:
eval_results = self.eval_est.evaluate(
self.eval_input_fn, steps=self.eval_steps, name=self.parser.eval_name())
# Terminate eval job when final checkpoint is reached.
try:
current_step = int(os.path.basename(ckpt).split('-')[1])
except IndexError:
logging.info('%s has no global step info: stop!', ckpt)
print("=== IndexError ")
break
utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
if current_step >= train_steps:
logging.info('Eval finished step %d/%d', current_step, train_steps)
break
except tf.errors.NotFoundError:
# The checkpoint might have been deleted by the time eval finished.
# We simply skip such cases.
print("=== tf.errors.NotFoundError")
logging.info('Checkpoint %s no longer exists, skipping.', ckpt)
elif self.parser.mode() == 'train_and_eval':
print("=== train_and_eval --------------------")
ckpt = tf.train.latest_checkpoint(self.parser.model_dir())
try:
step = int(os.path.basename(ckpt).split('-')[1])
current_epoch = (
step * self.parser.train_batch_size() // self.parser.num_examples_per_epoch())
logging.info('found ckpt at step %d (epoch %d)', step, current_epoch)
except (IndexError, TypeError):
logging.info('Folder %s has no ckpt with valid step.', self.parser.model_dir())
current_epoch = 0
epochs_per_cycle = 1 # higher number has less graph construction overhead.
for e in range(current_epoch + 1, config.num_epochs + 1, epochs_per_cycle):
if self.parser.run_epoch_in_child_process():
p = multiprocessing.Process(target=self.run_train_and_eval, args=(e,))
p.start()
p.join()
if p.exitcode != 0:
return p.exitcode
else:
tf.compat.v1.reset_default_graph()
# call self.run_train_and_eval
breaking_loop = self.run_train_and_eval(e)
if breaking_loop == True:
print("=== Breaking the train_and_eval loop by mAPEarlyStopping epoch={}".format(e) )
break
else:
logging.info('Invalid mode: %s', self.parser.mode())
def run_train_and_eval(self, e):
"""
2021/09/13
Return True if breaking_loop_by_earlystopping is True, else False.
"""
print("=== run_train_and_eval -------------------------------")
print('\n==> Starting training, epoch: %d.' % e)
max_steps = e * self.parser.num_examples_per_epoch() // self.parser.train_batch_size()
#print("=== examples_per_epoch {}".format(self.parser.examples_per_epoch()))
print("=== train_batch_size {}".format(self.parser.train_batch_size()))
print("=== num_examples_per_epoch {}".format(self.parser.num_examples_per_epoch()))
print("=== max_steps {}".format(max_steps))
# 2021/11/15
os.environ['epoch'] = str(e)
print("=== environ[['epoch']={}".format(os.environ['epoch']))
self.train_est.train(
input_fn = self.train_input_fn,
max_steps = max_steps)
print('\n =====> Starting evaluation, epoch: {}'.format(e) )
eval_results = self.eval_est.evaluate(
input_fn = self.eval_input_fn,
steps = self.eval_steps)
#print("=== eval_results")
#pprint.pprint(eval_results)
map = eval_results['AP']
loss = eval_results['loss']
self.epoch_change_notifier.epoch_end(e, loss, map)
self.coco_metrics_writer.write(e, eval_results)
# 2021/11/15
if self.categorized_ap_writer:
self.categorized_ap_writer.write(e, eval_results)
self.training_losses_writer.write(e, eval_results)
ckpt = tf.train.latest_checkpoint(self.parser.model_dir() )
utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
breaking_loop_by_earlystopping = False
if self.early_stopping != None:
ap = eval_results['AP']
ar = eval_results['ARmax1']
breaking_loop_by_earlystopping = self.early_stopping.validate(e, ap, ar)
return breaking_loop_by_earlystopping
###
#
def main(_):
train_config = ""
if len(sys.argv) == 2:
train_config = sys.argv[1]
else:
raise Exception("Usage: python EfficientDetFinetuningModel.py train_config")
if os.path.exists(train_config) == False:
raise Exception("Not found train_config {}".format(train_config))
model = EfficientDetFinetuningModel(train_config)
model.train()
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
tf.enable_v2_tensorshape()
tf.disable_eager_execution()
app.run(main)
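# Launch sketch (the config path below is hypothetical, not from the original sources):
#
#     python EfficientDetFinetuningModel.py ./configs/train.config
#
# The single positional argument is handed to TrainConfigParser; mode, model_dir,
# train/val file patterns and the early-stopping settings are all read from it.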
|
util.py
|
import atexit
import os
import shutil
import sys
import ctypes
if sys.version_info < (3, 5):
print("\nPlease restart with python3. \n(Taichi supports Python 3.5+)\n")
print("Current version:", sys.version_info)
exit(-1)
tc_core = None
def in_docker():
if os.environ.get("TI_IN_DOCKER", "") == "":
return False
else:
return True
def import_tc_core():
global tc_core
if get_os_name() != 'win':
old_flags = sys.getdlopenflags()
sys.setdlopenflags(258) # 258 = RTLD_NOW | RTLD_GLOBAL
else:
pyddir = os.path.join(package_root(), 'lib')
os.environ['PATH'] += ';' + pyddir
import taichi_core as core
tc_core = core
if get_os_name() != 'win':
sys.setdlopenflags(old_flags)
core.set_lib_dir(os.path.join(package_root(), 'lib'))
def is_ci():
return os.environ.get('TC_CI', '') == '1'
def package_root():
return os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
def is_release():
return os.environ.get('TAICHI_REPO_DIR', '') == ''
from colorama import Fore, Back, Style
def get_core_shared_object():
if is_release():
directory = os.path.join(package_root(), 'lib')
else:
directory = get_bin_directory()
return os.path.join(directory, 'libtaichi_core.so')
def get_repo():
from git import Repo
import taichi as tc
repo = Repo(tc.get_repo_directory())
return repo
def print_red_bold(*args, **kwargs):
print(Fore.RED + Style.BRIGHT, end='')
print(*args, **kwargs)
print(Style.RESET_ALL, end='')
def format():
import os
import taichi as tc
from yapf.yapflib.yapf_api import FormatFile
repo = get_repo()
print('* Formatting code', end='')
for item in repo.index.diff('HEAD'):
fn = os.path.join(tc.get_repo_directory(), item.a_path)
print(end='.')
if fn.endswith('.py'):
FormatFile(
fn,
in_place=True,
style_config=os.path.join(tc.get_repo_directory(), '.style.yapf'))
if fn.endswith('.cpp'):
os.system('clang-format -i -style=file {}'.format(fn))
repo.git.add(item.a_path)
print('* Done!')
from taichi.misc.settings import get_output_directory, get_build_directory, get_bin_directory, get_repo_directory, get_runtime_directory
from taichi.misc.util import get_os_name, get_unique_task_id
CREATE_SAND_BOX_ON_WINDOWS = True
def build():
assert False
tmp_cwd = os.getcwd()
bin_dir = get_build_directory()
try:
os.mkdir(bin_dir)
except:
pass
os.chdir(bin_dir)
flags = ' -DPYTHON_EXECUTABLE:FILEPATH="{}"'.format(sys.executable)
print('Running cmake...')
if is_ci():
print(' Note: building for CI.')
if get_os_name() == 'win':
flags += ' -G "Visual Studio 15 Win64"'
cmake_ret = os.system('cmake .. ' + flags)
if cmake_ret != 0:
print(' Error: CMake failed.')
exit(-1)
import multiprocessing
print('Building taichi...')
num_make_threads = min(20, multiprocessing.cpu_count())
if get_os_name() == 'win':
make_ret = os.system("msbuild /p:Configuration=Release /p:Platform=x64 /m taichi.sln")
else:
make_ret = os.system('make -j {}'.format(num_make_threads))
if make_ret != 0:
print(' Error: Build failed.')
exit(-1)
os.chdir(tmp_cwd)
if is_release():
print("[Release mode]")
sys.path.append(os.path.join(package_root(), 'lib'))
if get_os_name() != 'win':
link_src = os.path.join(package_root(), 'lib', 'taichi_core.so')
link_dst = os.path.join(package_root(), 'lib', 'libtaichi_core.so')
# For llvm jit to find the runtime symbols
if not os.path.exists(link_dst):
os.symlink(link_src, link_dst)
import_tc_core()
if get_os_name() != 'win':
dll = ctypes.CDLL(get_core_shared_object(), mode=ctypes.RTLD_GLOBAL)
tc_core.set_python_package_dir(package_root())
os.makedirs(tc_core.get_repo_dir(), exist_ok=True)
else:
if get_os_name() == 'osx':
bin_dir = get_bin_directory()
os.environ['DYLD_FALLBACK_LIBRARY_PATH'] = get_runtime_directory()
assert os.path.exists(os.path.join(bin_dir, 'libtaichi_core.dylib'))
tmp_cwd = os.getcwd()
os.chdir(bin_dir)
shutil.copy('libtaichi_core.dylib', 'taichi_core.so')
sys.path.append(bin_dir)
import taichi_core as tc_core
os.chdir(tmp_cwd)
elif get_os_name() == 'linux':
bin_dir = get_bin_directory()
if 'LD_LIBRARY_PATH' in os.environ:
os.environ['LD_LIBRARY_PATH'] += ':/usr/lib64/'
else:
os.environ['LD_LIBRARY_PATH'] = '/usr/lib64/'
assert os.path.exists(os.path.join(bin_dir, 'libtaichi_core.so'))
tmp_cwd = os.getcwd()
os.chdir(bin_dir)
sys.path.append(bin_dir)
# https://stackoverflow.com/questions/3855004/overwriting-library-file-causes-segmentation-fault
if os.path.exists('taichi_core.so'):
try:
os.unlink('taichi_core.so')
except:
print('Warning: taichi_core.so already removed. This may be caused by '
'simultaneously starting two taichi instances.')
pass
shutil.copy('libtaichi_core.so', 'taichi_core.so')
try:
import_tc_core()
except Exception as e:
from colorama import Fore, Back, Style
print_red_bold("Taichi core import failed: ", end='')
print(e)
exit(-1)
os.chdir(tmp_cwd)
elif get_os_name() == 'win':
bin_dir = get_bin_directory()
dll_path1 = os.path.join(bin_dir, 'RelWithDebInfo', 'taichi_core.dll')
dll_path2 = os.path.join(bin_dir, 'libtaichi_core.dll')
assert os.path.exists(dll_path1) and not os.path.exists(dll_path2)
# On Windows, once a dll/pyd is loaded we can no longer write to it
old_wd = os.getcwd()
os.chdir(bin_dir)
if CREATE_SAND_BOX_ON_WINDOWS:
# Create a sandbox for separated core lib development and loading
dir = os.path.join(get_output_directory(), 'tmp', get_unique_task_id())
lib_dir = os.path.join(get_repo_directory(), 'external', 'lib')
os.environ['PATH'] += ';' + lib_dir
os.makedirs(dir)
if os.path.exists(dll_path1):
shutil.copy(dll_path1, os.path.join(dir, 'taichi_core.pyd'))
else:
shutil.copy(dll_path2, os.path.join(dir, 'taichi_core.pyd'))
os.environ['PATH'] += ';' + dir
sys.path.append(dir)
else:
shutil.copy(dll_path1, os.path.join(bin_dir, 'taichi_core.pyd'))
sys.path.append(bin_dir)
try:
import taichi_core as tc_core
except Exception as e:
print(e)
print()
print(r'Is taichi\external\lib correctly set to branch msvc or mingw?')
print()
raise e
os.chdir(old_wd)
def get_dll_name(name):
if get_os_name() == 'linux':
return 'libtaichi_%s.so' % name
elif get_os_name() == 'osx':
return 'libtaichi_%s.dylib' % name
elif get_os_name() == 'win':
return 'taichi_%s.dll' % name
else:
assert False, "Unknown OS"
def load_module(name, verbose=True):
if verbose:
print('Loading module', name)
try:
if get_os_name() == 'osx':
mode = ctypes.RTLD_LOCAL
else:
mode = ctypes.RTLD_GLOBAL
if '.so' in name:
ctypes.PyDLL(name, mode=mode)
else:
ctypes.PyDLL(
os.path.join(get_repo_directory(), 'build',
get_dll_name(name)), mode=mode)
except Exception as e:
print(Fore.YELLOW + "Warning: module [{}] loading failed: {}".format(
name, e) + Style.RESET_ALL)
def at_startup():
if not is_release():
output_dir = get_output_directory()
if not os.path.exists(output_dir):
print('Making output directory')
os.mkdir(output_dir)
# Load modules
# load_module('lang_core')
tc_core.set_core_state_python_imported(True)
def start_memory_monitoring(output_fn, pid=-1, interval=1):
# removing dependency on psutil
return
import os, psutil, time
if pid == -1:
pid = os.getpid()
import multiprocessing
def task():
with open(output_fn, 'w') as f:
process = psutil.Process(pid)
while True:
try:
mem = process.memory_info().rss
except:
mem = -1
time.sleep(interval)
print(time.time(), mem, file=f)
f.flush()
proc = multiprocessing.Process(target=task, daemon=True)
proc.start()
@atexit.register
def clean_libs():
pass
at_startup()
device_string = 'cpu only' if not tc_core.with_cuda() else 'cuda {}'.format(tc_core.cuda_version())
print('[Taichi version {}, {}, commit {}]'.format(tc_core.get_version_string(), device_string, tc_core.get_commit_hash()[:8]))
if not is_release():
tc_core.set_core_trigger_gdb_when_crash(True)
|
Final_Run_FaceRecognition.py
|
import datetime
import threading
import time
import cv2
import timed_face_recognition
import numpy as np
import pandas as pd
def timer():
time_limit = 100 # countdown limit in seconds
while time_limit >= 0:
m, s = divmod(time_limit, 60)
h, m = divmod(m, 60)
time_left = str(h).zfill(2) + ":" + str(m).zfill(2) + ":" + str(s).zfill(2)
print("\rEXIT ROOM BEFORE " + time_left, end='')
time.sleep(1)
time_limit -= 1
if stop_thread:
break
if time_limit == -1:
print("\nSorry Time Limit has been reached, Door locked")
else:
print("\nScanning Interrupted")
video_capture.release()
cv2.destroyAllWindows()
stop_thread = False
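# stop_thread is a module-level flag polled once per second by the timer thread;
# it is set to True after the recognition loop below exits so the countdown stops.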
timer_thread = threading.Thread(target = timer)
df1 = pd.read_excel("Data_of_Employees.xlsx", engine='openpyxl')
names = ['None'] + df1['Name'].tolist() # CHANGE NAMES HERE
timed_face_recognition.face_recognition(names) # ENTRY FACE RECOGNITION
entry_time = datetime.datetime.now()
print("Entry time",entry_time)
timer_thread.start()
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('Trainer/trainer.yml')
cascPathface = "haarcascade_frontalface_alt2.xml"
faceCascade = cv2.CascadeClassifier(cascPathface)
font = cv2.FONT_HERSHEY_SIMPLEX
id = 0
video_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW) # TURNING VIDEO ON
blank = np.zeros((500, 1000, 3), dtype=np.uint8) # CREATING A BLANK IMAGE TO DISPLAY THE ERROR MESSAGE
video_capture.set(3, 640) # set video width
video_capture.set(4, 480) # set video height
# Define min window size to be recognized as a face
minW = 0.1 * video_capture.get(3)
minH = 0.1 * video_capture.get(4)
door_closed = cv2.imread("door_1.png")
door_open = cv2.imread("door_14.png")
resized_door_closed = cv2.resize(door_closed, None, fx=0.5, fy=0.5)
resized_door_open = cv2.resize(door_open, None, fx=0.5, fy=0.5)
while video_capture.isOpened():
cv2.imshow("Door", resized_door_closed)
ret, frame = video_capture.read()
# IF CONDITION TO CHECK PROPER WORKING
if not ret:
print("Unable to open video")
break
frame = cv2.flip(frame, 1) # FLIPPING IT HORIZONTALLY (MIRROR VIEW); flipCode=1 flips around the y-axis
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # CONVERTING INTO GRAY SCALE
faces = faceCascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5, minSize=(int(minW), int(minH)),
flags=cv2.CASCADE_SCALE_IMAGE)
# IF MORE THAN 1 FACE IS DETECTED THEN STOP
if len(faces) > 1:
cv2.destroyWindow('Video')
cv2.putText(blank, "'Sorry' Stopped due to more faces", (0, 50), None, 1, (255, 255, 255), 2)
cv2.imshow('Error! Closed', blank)
if cv2.waitKey(0) & 0xFF == ord('q'):
break
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) # CREATING RECTANGLE AROUND FACE
id, confidence = recognizer.predict(gray[y:y + h, x:x + w]) # PREDICTING USING TRAINED MODEL
# LBPH "confidence" is a distance, so lower is better: 0 means a perfect match
if (confidence < 100):
id = names[id]
confidence = " {0}%".format(round(100 - confidence))
cv2.putText(frame, "Face Matched with the person", (50, 50), font, 1, (255, 255, 255), 2)
cv2.imshow("Door", resized_door_open)
# print("Face Matched with",id,",Door Opened")
else:
id = "Unknown"
confidence = " {0}%".format(round(100 - confidence))
cv2.putText(frame, "For Emergency SOS, Press 's'", (50, 50), font, 1, (255, 255, 255), 2)
# ADD SOS FUNCTION
cv2.putText(frame, str(id), (x + 5, y - 5), font, 1, (255, 255, 255), 2) # Displaying text "NAME"
cv2.putText(frame, str(confidence), (x + 5, y + h - 5), font, 1, (255, 255, 0),
1) # Displaying text "CONFIDENCE"
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
stop_thread = True
timer_thread.join()
|
context.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import signal
import sys
import threading
import warnings
import importlib
from threading import RLock
from tempfile import NamedTemporaryFile
from py4j.protocol import Py4JError
from py4j.java_gateway import is_instance_of
from pyspark import accumulators, since
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast, BroadcastPickleRegistry
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway, local_connect_and_auth
from pyspark.serializers import (
CPickleSerializer,
BatchedSerializer,
UTF8Deserializer,
PairDeserializer,
AutoBatchedSerializer,
NoOpSerializer,
ChunkedStream,
)
from pyspark.storagelevel import StorageLevel
from pyspark.resource.information import ResourceInformation
from pyspark.rdd import RDD, _load_from_socket
from pyspark.taskcontext import TaskContext
from pyspark.traceback_utils import CallSite, first_spark_call
from pyspark.status import StatusTracker
from pyspark.profiler import ProfilerCollector, BasicProfiler
__all__ = ["SparkContext"]
# These are special default configs for PySpark, they will overwrite
# the default ones for Spark if they are not configured by user.
DEFAULT_CONFIGS = {
"spark.serializer.objectStreamReset": 100,
"spark.rdd.compress": True,
}
class SparkContext(object):
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create :class:`RDD` and
broadcast variables on that cluster.
When you create a new SparkContext, at least the master and app name should
be set, either through the named parameters here or through `conf`.
Parameters
----------
master : str, optional
Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
appName : str, optional
A name for your job, to display on the cluster web UI.
sparkHome : str, optional
Location where Spark is installed on cluster nodes.
pyFiles : list, optional
Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
environment : dict, optional
A dictionary of environment variables to set on
worker nodes.
batchSize : int, optional
The number of Python objects represented as a single
Java object. Set 1 to disable batching, 0 to automatically choose
the batch size based on object sizes, or -1 to use an unlimited
batch size
serializer : :class:`pyspark.serializers.Serializer`, optional
The serializer for RDDs.
conf : :py:class:`pyspark.SparkConf`, optional
An object setting Spark properties.
gateway : :py:class:`py4j.java_gateway.JavaGateway`, optional
Use an existing gateway and JVM, otherwise a new JVM
will be instantiated. This is only used internally.
jsc : :py:class:`py4j.java_gateway.JavaObject`, optional
The JavaSparkContext instance. This is only used internally.
profiler_cls : type, optional
A class of custom Profiler used to do profiling
(default is :class:`pyspark.profiler.BasicProfiler`).
Notes
-----
Only one :class:`SparkContext` should be active per JVM. You must `stop()`
the active :class:`SparkContext` before creating a new one.
A :class:`SparkContext` instance cannot be shared across multiple
processes out of the box, and PySpark does not guarantee multi-processing execution.
Use threads instead for concurrent processing purposes.
Examples
--------
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: ...
"""
_gateway = None
_jvm = None
_next_accum_id = 0
_active_spark_context = None
_lock = RLock()
_python_includes = None # zip and egg files that need to be added to PYTHONPATH
PACKAGE_EXTENSIONS = (".zip", ".egg", ".jar")
def __init__(
self,
master=None,
appName=None,
sparkHome=None,
pyFiles=None,
environment=None,
batchSize=0,
serializer=CPickleSerializer(),
conf=None,
gateway=None,
jsc=None,
profiler_cls=BasicProfiler,
):
if conf is None or conf.get("spark.executor.allowSparkContext", "false").lower() != "true":
# In order to prevent SparkContext from being created in executors.
SparkContext._assert_on_driver()
self._callsite = first_spark_call() or CallSite(None, None, None)
if gateway is not None and gateway.gateway_parameters.auth_token is None:
raise ValueError(
"You are trying to pass an insecure Py4j gateway to Spark. This"
" is not allowed as it is a security risk."
)
SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
try:
self._do_init(
master,
appName,
sparkHome,
pyFiles,
environment,
batchSize,
serializer,
conf,
jsc,
profiler_cls,
)
except:
# If an error occurs, clean up in order to allow future SparkContext creation:
self.stop()
raise
def _do_init(
self,
master,
appName,
sparkHome,
pyFiles,
environment,
batchSize,
serializer,
conf,
jsc,
profiler_cls,
):
self.environment = environment or {}
# java gateway must have been launched at this point.
if conf is not None and conf._jconf is not None:
# conf has been initialized in JVM properly, so use conf directly. This represents the
# scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
# created and then stopped, and we create a new SparkConf and new SparkContext again)
self._conf = conf
else:
self._conf = SparkConf(_jvm=SparkContext._jvm)
if conf is not None:
for k, v in conf.getAll():
self._conf.set(k, v)
self._batchSize = batchSize # -1 represents an unlimited batch size
self._unbatched_serializer = serializer
if batchSize == 0:
self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
else:
self.serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
# Set any parameters passed directly to us on the conf
if master:
self._conf.setMaster(master)
if appName:
self._conf.setAppName(appName)
if sparkHome:
self._conf.setSparkHome(sparkHome)
if environment:
for key, value in environment.items():
self._conf.setExecutorEnv(key, value)
for key, value in DEFAULT_CONFIGS.items():
self._conf.setIfMissing(key, value)
# Check that we have at least the required parameters
if not self._conf.contains("spark.master"):
raise RuntimeError("A master URL must be set in your configuration")
if not self._conf.contains("spark.app.name"):
raise RuntimeError("An application name must be set in your configuration")
# Read back our properties from the conf in case we loaded some of them from
# the classpath or an external config file
self.master = self._conf.get("spark.master")
self.appName = self._conf.get("spark.app.name")
self.sparkHome = self._conf.get("spark.home", None)
for (k, v) in self._conf.getAll():
if k.startswith("spark.executorEnv."):
varName = k[len("spark.executorEnv.") :]
self.environment[varName] = v
self.environment["PYTHONHASHSEED"] = os.environ.get("PYTHONHASHSEED", "0")
# Create the Java SparkContext through Py4J
self._jsc = jsc or self._initialize_context(self._conf._jconf)
# Reset the SparkConf to the one actually used by the SparkContext in JVM.
self._conf = SparkConf(_jconf=self._jsc.sc().conf())
# Create a single Accumulator in Java that we'll send all our updates through;
# they will be passed back to us through a TCP server
auth_token = self._gateway.gateway_parameters.auth_token
self._accumulatorServer = accumulators._start_update_server(auth_token)
(host, port) = self._accumulatorServer.server_address
self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port, auth_token)
self._jsc.sc().register(self._javaAccumulator)
# If encryption is enabled, we need to setup a server in the jvm to read broadcast
# data via a socket.
# scala's mangled names w/ $ in them require special treatment.
self._encryption_enabled = self._jvm.PythonUtils.isEncryptionEnabled(self._jsc)
os.environ["SPARK_AUTH_SOCKET_TIMEOUT"] = str(
self._jvm.PythonUtils.getPythonAuthSocketTimeout(self._jsc)
)
os.environ["SPARK_BUFFER_SIZE"] = str(self._jvm.PythonUtils.getSparkBufferSize(self._jsc))
self.pythonExec = os.environ.get("PYSPARK_PYTHON", "python3")
self.pythonVer = "%d.%d" % sys.version_info[:2]
if sys.version_info[:2] < (3, 7):
with warnings.catch_warnings():
warnings.simplefilter("once")
warnings.warn("Python 3.6 support is deprecated in Spark 3.2.", FutureWarning)
# Broadcast's __reduce__ method stores Broadcast instances here.
# This allows other code to determine which Broadcast instances have
# been pickled, so it can determine which Java broadcast objects to
# send.
self._pickled_broadcast_vars = BroadcastPickleRegistry()
SparkFiles._sc = self
root_dir = SparkFiles.getRootDirectory()
sys.path.insert(1, root_dir)
# Deploy any code dependencies specified in the constructor
self._python_includes = list()
for path in pyFiles or []:
self.addPyFile(path)
# Deploy code dependencies set by spark-submit; these will already have been added
# with SparkContext.addFile, so we just need to add them to the PYTHONPATH
for path in self._conf.get("spark.submit.pyFiles", "").split(","):
if path != "":
(dirname, filename) = os.path.split(path)
try:
filepath = os.path.join(SparkFiles.getRootDirectory(), filename)
if not os.path.exists(filepath):
# In case of YARN with shell mode, 'spark.submit.pyFiles' files are
# not added via SparkContext.addFile. Here we check if the file exists,
# try to copy and then add it to the path. See SPARK-21945.
shutil.copyfile(path, filepath)
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
sys.path.insert(1, filepath)
except Exception:
warnings.warn(
"Failed to add file [%s] specified in 'spark.submit.pyFiles' to "
"Python path:\n %s" % (path, "\n ".join(sys.path)),
RuntimeWarning,
)
# Create a temporary directory inside spark.local.dir:
local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
self._temp_dir = self._jvm.org.apache.spark.util.Utils.createTempDir(
local_dir, "pyspark"
).getAbsolutePath()
# profiling stats collected for each PythonRDD
if self._conf.get("spark.python.profile", "false") == "true":
dump_path = self._conf.get("spark.python.profile.dump", None)
self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
else:
self.profiler_collector = None
# create a signal handler which would be invoked on receiving SIGINT
def signal_handler(signal, frame):
self.cancelAllJobs()
raise KeyboardInterrupt()
# see http://stackoverflow.com/questions/23206787/
if isinstance(threading.current_thread(), threading._MainThread):
signal.signal(signal.SIGINT, signal_handler)
def __repr__(self):
return "<SparkContext master={master} appName={appName}>".format(
master=self.master,
appName=self.appName,
)
def _repr_html_(self):
return """
<div>
<p><b>SparkContext</b></p>
<p><a href="{sc.uiWebUrl}">Spark UI</a></p>
<dl>
<dt>Version</dt>
<dd><code>v{sc.version}</code></dd>
<dt>Master</dt>
<dd><code>{sc.master}</code></dd>
<dt>AppName</dt>
<dd><code>{sc.appName}</code></dd>
</dl>
</div>
""".format(
sc=self
)
def _initialize_context(self, jconf):
"""
Initialize SparkContext in function to allow subclass specific initialization
"""
return self._jvm.JavaSparkContext(jconf)
@classmethod
def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
"""
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (
SparkContext._active_spark_context
and SparkContext._active_spark_context != instance
):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (
currentAppName,
currentMaster,
callsite.function,
callsite.file,
callsite.linenum,
)
)
else:
SparkContext._active_spark_context = instance
def __getnewargs__(self):
# This method is called when attempting to pickle SparkContext, which is always an error:
raise RuntimeError(
"It appears that you are attempting to reference SparkContext from a broadcast "
"variable, action, or transformation. SparkContext can only be used on the driver, "
"not in code that it run on workers. For more information, see SPARK-5063."
)
def __enter__(self):
"""
Enable 'with SparkContext(...) as sc: app(sc)' syntax.
"""
return self
def __exit__(self, type, value, trace):
"""
Enable 'with SparkContext(...) as sc: app' syntax.
Specifically stop the context on exit of the with block.
"""
self.stop()
@classmethod
def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
Parameters
----------
conf : :py:class:`pyspark.SparkConf`, optional
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or SparkConf())
return SparkContext._active_spark_context
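# Usage sketch (illustrative, not from the original docstring):
#
#     conf = SparkConf().setMaster("local[2]").setAppName("example")
#     sc = SparkContext.getOrCreate(conf)
#
# Repeated calls return the same active context, so this is safe to call from
# code that does not know whether a context already exists.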
def setLogLevel(self, logLevel):
"""
Control our logLevel. This overrides any user-defined log settings.
Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
"""
self._jsc.setLogLevel(logLevel)
@classmethod
def setSystemProperty(cls, key, value):
"""
Set a Java system property, such as spark.executor.memory. This must
be invoked before instantiating :class:`SparkContext`.
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
@property
def version(self):
"""
The version of Spark on which this application is running.
"""
return self._jsc.version()
@property
def applicationId(self):
"""
A unique identifier for the Spark application.
Its format depends on the scheduler implementation.
* in case of local spark app something like 'local-1433865536131'
* in case of YARN something like 'application_1433865536131_34483'
Examples
--------
>>> sc.applicationId # doctest: +ELLIPSIS
'local-...'
"""
return self._jsc.sc().applicationId()
@property
def uiWebUrl(self):
"""Return the URL of the SparkUI instance started by this SparkContext"""
return self._jsc.sc().uiWebUrl().get()
@property
def startTime(self):
"""Return the epoch time when the Spark Context was started."""
return self._jsc.startTime()
@property
def defaultParallelism(self):
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
@property
def defaultMinPartitions(self):
"""
Default min number of partitions for Hadoop RDDs when not given by user
"""
return self._jsc.sc().defaultMinPartitions()
def stop(self):
"""
Shut down the SparkContext.
"""
if getattr(self, "_jsc", None):
try:
self._jsc.stop()
except Py4JError:
# Case: SPARK-18523
warnings.warn(
"Unable to cleanly shutdown Spark JVM process."
" It is possible that the process has crashed,"
" been killed or may also be in a zombie state.",
RuntimeWarning,
)
finally:
self._jsc = None
if getattr(self, "_accumulatorServer", None):
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
def emptyRDD(self):
"""
Create an RDD that has no partitions or elements.
"""
return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())
def range(self, start, end=None, step=1, numSlices=None):
"""
Create a new RDD of int containing elements from `start` to `end`
(exclusive), increased by `step` every element. Can be called the same
way as python's built-in range() function. If called with a single argument,
the argument is interpreted as `end`, and `start` is set to 0.
Parameters
----------
start : int
the start value
end : int, optional
the end value (exclusive)
step : int, optional
the incremental step (default: 1)
numSlices : int, optional
the number of partitions of the new RDD
Returns
-------
:py:class:`pyspark.RDD`
An RDD of int
Examples
--------
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.range(2, 4).collect()
[2, 3]
>>> sc.range(1, 7, 2).collect()
[1, 3, 5]
"""
if end is None:
end = start
start = 0
return self.parallelize(range(start, end, step), numSlices)
def parallelize(self, c, numSlices=None):
"""
Distribute a local Python collection to form an RDD. Using range
is recommended if the input represents a range, for better performance.
Examples
--------
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(range(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, range):
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1
start0 = c[0]
def getStart(split):
return start0 + int((split * size / numSlices)) * step
def f(split, iterator):
# it's an empty iterator here but we need this line for triggering the
# logic of signal handling in FramedSerializer.load_stream, for instance,
# SpecialLengths.END_OF_DATA_SECTION in _read_with_length. Since
# FramedSerializer.load_stream produces a generator, the control should
# at least be in that function once. Here we do it by explicitly converting
# the empty iterator to a list, thus make sure worker reuse takes effect.
# See more details in SPARK-26549.
assert len(list(iterator)) == 0
return range(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
def reader_func(temp_filename):
return self._jvm.PythonRDD.readRDDFromFile(self._jsc, temp_filename, numSlices)
def createRDDServer():
return self._jvm.PythonParallelizeServer(self._jsc.sc(), numSlices)
jrdd = self._serialize_to_jvm(c, serializer, reader_func, createRDDServer)
return RDD(jrdd, self, serializer)
def _serialize_to_jvm(self, data, serializer, reader_func, createRDDServer):
"""
Using py4j to send a large dataset to the jvm is really slow, so we use either a file
or a socket if we have encryption enabled.
Parameters
----------
data
object to be serialized
serializer : :py:class:`pyspark.serializers.Serializer`
reader_func : function
A function which takes a filename and reads in the data in the jvm and
returns a JavaRDD. Only used when encryption is disabled.
createRDDServer : function
A function which creates a PythonRDDServer in the jvm to
accept the serialized data, for use when encryption is enabled.
"""
if self._encryption_enabled:
# with encryption, we open a server in java and send the data directly
server = createRDDServer()
(sock_file, _) = local_connect_and_auth(server.port(), server.secret())
chunked_out = ChunkedStream(sock_file, 8192)
serializer.dump_stream(data, chunked_out)
chunked_out.close()
# this call will block until the server has read all the data and processed it (or
# throws an exception)
r = server.getResult()
return r
else:
# without encryption, we serialize to a file, and we read the file in java and
# parallelize from there.
tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
try:
try:
serializer.dump_stream(data, tempFile)
finally:
tempFile.close()
return reader_func(tempFile.name)
finally:
# we eagerly read the file so we can delete it right after.
os.unlink(tempFile.name)
def pickleFile(self, name, minPartitions=None):
"""
Load an RDD previously saved using :meth:`RDD.saveAsPickleFile` method.
Examples
--------
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.objectFile(name, minPartitions), self)
def textFile(self, name, minPartitions=None, use_unicode=True):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
The text files must be encoded as UTF-8.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
Examples
--------
>>> path = os.path.join(tempdir, "sample-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello world!")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
['Hello world!']
"""
minPartitions = minPartitions or min(self.defaultParallelism, 2)
return RDD(self._jsc.textFile(name, minPartitions), self, UTF8Deserializer(use_unicode))
def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
"""
Read a directory of text files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system
URI. Each file is read as a single record and returned in a
key-value pair, where the key is the path of each file, the
value is the content of each file.
The text files must be encoded as UTF-8.
If `use_unicode` is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
For example, if you have the following files:
.. code-block:: text
hdfs://a-hdfs-path/part-00000
hdfs://a-hdfs-path/part-00001
...
hdfs://a-hdfs-path/part-nnnnn
Do ``rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")``,
then ``rdd`` contains:
.. code-block:: text
(a-hdfs-path/part-00000, its content)
(a-hdfs-path/part-00001, its content)
...
(a-hdfs-path/part-nnnnn, its content)
Notes
-----
Small files are preferred, as each file will be loaded fully in memory.
Examples
--------
>>> dirPath = os.path.join(tempdir, "files")
>>> os.mkdir(dirPath)
>>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
... _ = file1.write("1")
>>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
... _ = file2.write("2")
>>> textFiles = sc.wholeTextFiles(dirPath)
>>> sorted(textFiles.collect())
[('.../1.txt', '1'), ('.../2.txt', '2')]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(
self._jsc.wholeTextFiles(path, minPartitions),
self,
PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)),
)
def binaryFiles(self, path, minPartitions=None):
"""
Read a directory of binary files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system URI
as a byte array. Each file is read as a single record and returned
in a key-value pair, where the key is the path of each file, the
value is the content of each file.
Notes
-----
Small files are preferred; large files are also allowed, but may cause poor performance.
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(
self._jsc.binaryFiles(path, minPartitions),
self,
PairDeserializer(UTF8Deserializer(), NoOpSerializer()),
)
def binaryRecords(self, path, recordLength):
"""
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
Parameters
----------
path : str
Directory to the input data files
recordLength : int
The length at which to split the records
"""
return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())
def _dictToJavaMap(self, d):
jm = self._jvm.java.util.HashMap()
if not d:
d = {}
for k, v in d.items():
jm[k] = v
return jm
def sequenceFile(
self,
path,
keyClass=None,
valueClass=None,
keyConverter=None,
valueConverter=None,
minSplits=None,
batchSize=0,
):
"""
Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is as follows:
1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
and value Writable classes
2. Serialization is attempted via Pickle pickling
3. If this fails, the fallback is to call 'toString' on each key and value
4. :class:`CPickleSerializer` is used to deserialize pickled objects on the Python side
Parameters
----------
path : str
path to sequencefile
keyClass : str, optional
fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text")
valueClass : str, optional
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
valueConverter : str, optional
fully qualified name of a function returning value WritableConverter
minSplits : int, optional
minimum splits in dataset (default min(2, sc.defaultParallelism))
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
jrdd = self._jvm.PythonRDD.sequenceFile(
self._jsc,
path,
keyClass,
valueClass,
keyConverter,
valueConverter,
minSplits,
batchSize,
)
return RDD(jrdd, self)
def newAPIHadoopFile(
self,
path,
inputFormatClass,
keyClass,
valueClass,
keyConverter=None,
valueConverter=None,
conf=None,
batchSize=0,
):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java
Parameters
----------
path : str
path to Hadoop file
inputFormatClass : str
fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
keyClass : str
fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
valueClass : str
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
None by default
valueConverter : str, optional
fully qualified name of a function returning value WritableConverter
None by default
conf : dict, optional
Hadoop configuration, passed in as a dict
None by default
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopFile(
self._jsc,
path,
inputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
batchSize,
)
return RDD(jrdd, self)
def newAPIHadoopRDD(
self,
inputFormatClass,
keyClass,
valueClass,
keyConverter=None,
valueConverter=None,
conf=None,
batchSize=0,
):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.
Parameters
----------
inputFormatClass : str
fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
keyClass : str
fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text")
valueClass : str
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
(None by default)
valueConverter : str, optional
fully qualified name of a function returning value WritableConverter
(None by default)
conf : dict, optional
Hadoop configuration, passed in as a dict (None by default)
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(
self._jsc,
inputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
batchSize,
)
return RDD(jrdd, self)
def hadoopFile(
self,
path,
inputFormatClass,
keyClass,
valueClass,
keyConverter=None,
valueConverter=None,
conf=None,
batchSize=0,
):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java.
Parameters
----------
path : str
path to Hadoop file
inputFormatClass : str
fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
keyClass : str
fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text")
valueClass : str
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
(None by default)
valueConverter : str, optional
fully qualified name of a function returning value WritableConverter
(None by default)
conf : dict, optional
Hadoop configuration, passed in as a dict (None by default)
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopFile(
self._jsc,
path,
inputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
batchSize,
)
return RDD(jrdd, self)
def hadoopRDD(
self,
inputFormatClass,
keyClass,
valueClass,
keyConverter=None,
valueConverter=None,
conf=None,
batchSize=0,
):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.
Parameters
----------
inputFormatClass : str
fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
keyClass : str
fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text")
valueClass : str
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
(None by default)
valueConverter : str, optional
fully qualified name of a function returning value WritableConverter
(None by default)
conf : dict, optional
Hadoop configuration, passed in as a dict (None by default)
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopRDD(
self._jsc,
inputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
batchSize,
)
return RDD(jrdd, self)
def _checkpointFile(self, name, input_deserializer):
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self, input_deserializer)
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
Examples
--------
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
['Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
['Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
gw = SparkContext._gateway
jvm = SparkContext._jvm
jrdd_cls = jvm.org.apache.spark.api.java.JavaRDD
jpair_rdd_cls = jvm.org.apache.spark.api.java.JavaPairRDD
jdouble_rdd_cls = jvm.org.apache.spark.api.java.JavaDoubleRDD
if is_instance_of(gw, rdds[0]._jrdd, jrdd_cls):
cls = jrdd_cls
elif is_instance_of(gw, rdds[0]._jrdd, jpair_rdd_cls):
cls = jpair_rdd_cls
elif is_instance_of(gw, rdds[0]._jrdd, jdouble_rdd_cls):
cls = jdouble_rdd_cls
else:
cls_name = rdds[0]._jrdd.getClass().getCanonicalName()
raise TypeError("Unsupported Java RDD class %s" % cls_name)
jrdds = gw.new_array(cls, len(rdds))
for i in range(0, len(rdds)):
jrdds[i] = rdds[i]._jrdd
return RDD(self._jsc.union(jrdds), self, rdds[0]._jrdd_deserializer)
def broadcast(self, value):
"""
Broadcast a read-only variable to the cluster, returning a :class:`Broadcast`
object for reading it in distributed functions. The variable will
be sent to each cluster only once.
"""
return Broadcast(self, value, self._pickled_broadcast_vars)
def accumulator(self, value, accum_param=None):
"""
Create an :class:`Accumulator` with the given initial value, using a given
:class:`AccumulatorParam` helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integers
and floating-point numbers if you do not provide one. For other types,
a custom AccumulatorParam can be used.
"""
if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
else:
raise TypeError("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
def addFile(self, path, recursive=False):
"""
Add a file to be downloaded with this Spark job on every node.
The `path` passed can be either a local file, a file in HDFS
(or other Hadoop-supported filesystems), or an HTTP, HTTPS or
FTP URI.
To access the file in Spark jobs, use :meth:`SparkFiles.get` with the
filename to find its download location.
A directory can be given if the recursive option is set to True.
Currently directories are only supported for Hadoop-supported filesystems.
Notes
-----
A path can be added only once. Subsequent additions of the same path are ignored.
Examples
--------
>>> from pyspark import SparkFiles
>>> path = os.path.join(tempdir, "test.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("100")
>>> sc.addFile(path)
>>> def func(iterator):
... with open(SparkFiles.get("test.txt")) as testFile:
... fileVal = int(testFile.readline())
... return [x * fileVal for x in iterator]
>>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
[100, 200, 300, 400]
"""
self._jsc.sc().addFile(path, recursive)
def addPyFile(self, path):
"""
Add a .py or .zip dependency for all tasks to be executed on this
SparkContext in the future. The `path` passed can be either a local
file, a file in HDFS (or other Hadoop-supported filesystems), or an
HTTP, HTTPS or FTP URI.
Notes
-----
A path can be added only once. Subsequent additions of the same path are ignored.
"""
self.addFile(path)
(dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
# for tests in local mode
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
importlib.invalidate_caches()
def setCheckpointDir(self, dirName):
"""
Set the directory under which RDDs are going to be checkpointed. The
directory must be an HDFS path if running on a cluster.
"""
self._jsc.sc().setCheckpointDir(dirName)
@since(3.1)
def getCheckpointDir(self):
"""
Return the directory where RDDs are checkpointed. Returns None if no
checkpoint directory has been set.
"""
if not self._jsc.sc().getCheckpointDir().isEmpty():
return self._jsc.sc().getCheckpointDir().get()
return None
def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise TypeError("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(
storageLevel.useDisk,
storageLevel.useMemory,
storageLevel.useOffHeap,
storageLevel.deserialized,
storageLevel.replication,
)
def setJobGroup(self, groupId, description, interruptOnCancel=False):
"""
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use :meth:`SparkContext.cancelJobGroup` to cancel all
running jobs in this group.
Notes
-----
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
If you run jobs in parallel, use :class:`pyspark.InheritableThread` for thread-local
inheritance and to prevent resource leaks.
Examples
--------
>>> import threading
>>> from time import sleep
>>> from pyspark import InheritableThread
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise RuntimeError("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
>>> suppress = lock.acquire()
>>> suppress = InheritableThread(target=start_job, args=(10,)).start()
>>> suppress = InheritableThread(target=stop_job).start()
>>> suppress = lock.acquire()
>>> print(result)
Cancelled
"""
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
def setLocalProperty(self, key, value):
"""
Set a local property that affects jobs submitted from this thread, such as the
Spark fair scheduler pool.
Notes
-----
If you run jobs in parallel, use :class:`pyspark.InheritableThread` for thread-local
inheritance and to prevent resource leaks.
"""
self._jsc.setLocalProperty(key, value)
def getLocalProperty(self, key):
"""
Get a local property set in this thread, or None if it is missing. See
:meth:`setLocalProperty`.
"""
return self._jsc.getLocalProperty(key)
def setJobDescription(self, value):
"""
Set a human-readable description of the current job.
Notes
-----
If you run jobs in parallel, use :class:`pyspark.InheritableThread` for thread-local
inheritance and to prevent resource leaks.
"""
self._jsc.setJobDescription(value)
def sparkUser(self):
"""
Get SPARK_USER for user who is running SparkContext.
"""
return self._jsc.sc().sparkUser()
def cancelJobGroup(self, groupId):
"""
Cancel active jobs for the specified group. See :meth:`SparkContext.setJobGroup`
for more information.
"""
self._jsc.sc().cancelJobGroup(groupId)
def cancelAllJobs(self):
"""
Cancel all jobs that have been scheduled or are running.
"""
self._jsc.sc().cancelAllJobs()
def statusTracker(self):
"""
Return :class:`StatusTracker` object
"""
return StatusTracker(self._jsc.statusTracker())
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
Examples
--------
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
"""
if partitions is None:
partitions = range(rdd._jrdd.partitions().size())
# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer))
def show_profiles(self):
"""Print the profile stats to stdout"""
if self.profiler_collector is not None:
self.profiler_collector.show_profiles()
else:
raise RuntimeError(
"'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile."
)
def dump_profiles(self, path):
"""Dump the profile stats into directory `path`"""
if self.profiler_collector is not None:
self.profiler_collector.dump_profiles(path)
else:
raise RuntimeError(
"'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile."
)
def getConf(self):
conf = SparkConf()
conf.setAll(self._conf.getAll())
return conf
@property
def resources(self):
resources = {}
jresources = self._jsc.resources()
for x in jresources:
name = jresources[x].name()
jaddresses = jresources[x].addresses()
addrs = [addr for addr in jaddresses]
resources[name] = ResourceInformation(name, addrs)
return resources
@staticmethod
def _assert_on_driver():
"""
Called to ensure that SparkContext is created only on the Driver.
Throws an exception if a SparkContext is about to be created in executors.
"""
if TaskContext.get() is not None:
raise RuntimeError("SparkContext should only be created and accessed on the driver.")
def _test():
import atexit
import doctest
import tempfile
globs = globals().copy()
globs["sc"] = SparkContext("local[4]", "PythonTest")
globs["tempdir"] = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(globs["tempdir"]))
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs["sc"].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
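# A hypothetical sketch (not part of the original module) of the Hadoop input-format
# readers documented above. Paths and configuration values are placeholders; a reachable
# Hadoop-compatible file system and matching data layout are assumed.
def _example_hadoop_readers(sc):
    # SequenceFile of (Text, LongWritable) pairs, deserialized as described in
    # SparkContext.sequenceFile.
    pairs = sc.sequenceFile(
        "hdfs:///data/example.seq",
        keyClass="org.apache.hadoop.io.Text",
        valueClass="org.apache.hadoop.io.LongWritable",
    )
    # 'new API' InputFormat plus an extra Hadoop configuration dict; the dict is turned
    # into a java Configuration via _dictToJavaMap.
    lines = sc.newAPIHadoopFile(
        "hdfs:///data/logs",
        "org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
        "org.apache.hadoop.io.LongWritable",
        "org.apache.hadoop.io.Text",
        conf={"textinputformat.record.delimiter": "\n"},
    )
    # Flat binary file split into fixed-length records of 16 bytes each.
    records = sc.binaryRecords("hdfs:///data/records.bin", recordLength=16)
    return pairs, lines, records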
|
test_smtplib.py
|
import asyncore
import email.utils
import socket
import smtpd
import smtplib
import StringIO
import sys
import time
import select
import unittest
from test import test_support
try:
import threading
except ImportError:
threading = None
HOST = test_support.HOST
def server(evt, buf, serv):
serv.listen(5)
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
@unittest.skipUnless(threading, 'Threading required for this test.')
class GeneralTests(unittest.TestCase):
def setUp(self):
self._threads = test_support.threading_setup()
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = test_support.bind_port(self.sock)
servargs = (self.evt, "220 Hola mundo\n", self.sock)
self.thread = threading.Thread(target=server, args=servargs)
self.thread.start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
self.thread.join()
test_support.threading_cleanup(*self._threads)
def testBasic1(self):
# connects
smtp = smtplib.SMTP(HOST, self.port)
smtp.close()
def testBasic2(self):
# connects, include port in host name
smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
smtp.close()
def testLocalHostName(self):
# check that supplied local_hostname is used
smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
self.assertEqual(smtp.local_hostname, "testhost")
smtp.close()
def testTimeoutDefault(self):
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def testTimeoutNone(self):
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(smtp.sock.gettimeout())
smtp.close()
def testTimeoutValue(self):
smtp = smtplib.SMTP(HOST, self.port, timeout=30)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
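# Hypothetical helper (not used by the tests) illustrating the NOTE above: supplying
# local_hostname up front means the SMTP constructor never has to do the slow FQDN
# lookup it would otherwise use to build the HELO/EHLO name.
def _example_explicit_local_hostname(port):
    smtp = smtplib.SMTP(HOST, port, local_hostname='localhost', timeout=15)
    try:
        return smtp.helo()  # identifies itself as "localhost"
    finally:
        smtp.quit()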
# Test behavior of smtpd.DebuggingServer
@unittest.skipUnless(threading, 'Threading required for this test.')
class DebuggingServerTests(unittest.TestCase):
def setUp(self):
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = StringIO.StringIO()
sys.stdout = self.output
self._threads = test_support.threading_setup()
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1))
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
test_support.threading_cleanup(*self._threads)
# restore sys.stdout
sys.stdout = self.old_stdout
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.quit()
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
expected = (250, 'Ok')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
expected = (250, 'Ok')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testNotImplemented(self):
# EHLO isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
expected = (502, 'Error: command "EHLO" not implemented')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testVRFY(self):
# VRFY isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
expected = (502, 'Error: command "VRFY" not implemented')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.helo()
expected = (503, 'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
self.assertEqual(smtp.help(), 'Error: command "HELP" not implemented')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
# Test various operations on an unconnected SMTP object that
# should raise exceptions (at present the attempt in SMTP.send
# to reference the nonexistent 'sock' attribute of the SMTP object
# causes an AttributeError)
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises socket.error
self.assertRaises(socket.error, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(socket.error, smtplib.SMTP,
"localhost:bogus")
# test response of client to a non-successful HELO message
@unittest.skipUnless(threading, 'Threading required for this test.')
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
self.old_stdout = sys.stdout
self.output = StringIO.StringIO()
sys.stdout = self.output
self._threads = test_support.threading_setup()
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = test_support.bind_port(self.sock)
servargs = (self.evt, "199 no hello for you!\n", self.sock)
self.thread = threading.Thread(target=server, args=servargs)
self.thread.start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
self.thread.join()
test_support.threading_cleanup(*self._threads)
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
@unittest.skipUnless(threading, 'Threading required for this test.')
class TooLongLineTests(unittest.TestCase):
respdata = '250 OK' + ('.' * smtplib._MAXLINE * 2) + '\n'
def setUp(self):
self.old_stdout = sys.stdout
self.output = StringIO.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = test_support.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
threading.Thread(target=server, args=servargs).start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com':'John A',
'Ms.B@somewhere.com':'Sally B',
'Mrs.C@somewhereesle.com':'Ruth C',
}
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_auth_credentials = {
'login': 'TXIuQUBzb21ld2hlcmUuY29t',
'plain': 'AE1yLkFAc29tZXdoZXJlLmNvbQBzb21lcGFzc3dvcmQ=',
'cram-md5': ('TXIUQUBZB21LD2HLCMUUY29TIDG4OWQ0MJ'
'KWZGQ4ODNMNDA4NTGXMDRLZWMYZJDMODG1'),
}
sim_auth_login_password = 'C29TZXBHC3N3B3JK'
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
'list-2':['Ms.B@somewhere.com',],
}
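# Hypothetical helper (not used by the tests) showing where the opaque strings above come
# from: they are just base64 encodings of the simulated credentials, which the AUTH tests
# below compare (upper-cased by the error path) against the server's responses.
def _encode_sim_credentials():
    import base64
    user, password = sim_auth
    assert base64.b64encode(user) == sim_auth_credentials['login']
    assert base64.b64encode("\0%s\0%s" % (user, password)) == sim_auth_credentials['plain']
    assert base64.b64encode(password).upper() == sim_auth_login_password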
# Simulated SMTP channel & server
class SimSMTPChannel(smtpd.SMTPChannel):
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
smtpd.SMTPChannel.__init__(self, *args, **kw)
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
def smtp_AUTH(self, arg):
if arg.strip().lower()=='cram-md5':
self.push('334 {0}'.format(sim_cram_md5_challenge))
return
mech, auth = arg.split()
mech = mech.lower()
if mech not in sim_auth_credentials:
self.push('504 auth type unimplemented')
return
if mech == 'plain' and auth==sim_auth_credentials['plain']:
self.push('235 plain auth ok')
elif mech=='login' and auth==sim_auth_credentials['login']:
self.push('334 Password:')
else:
self.push('550 No access for you!')
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
def __init__(self, *args, **kw):
self._extra_features = []
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accept(self):
conn, addr = self.accept()
self._SMTPchannel = SimSMTPChannel(self._extra_features,
self, conn, addr)
def process_message(self, peer, mailfrom, rcpttos, data):
pass
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self._threads = test_support.threading_setup()
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1))
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
test_support.threading_cleanup(*self._threads)
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for email, name in sim_users.items():
expected_known = (250, '%s %s' % (name, smtplib.quoteaddr(email)))
self.assertEqual(smtp.vrfy(email), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, 'No such user: %s' % u)
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, '\n'.join(users))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, 'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
expected_auth_ok = (235, b'plain auth ok')
self.assertEqual(smtp.login(sim_auth[0], sim_auth[1]), expected_auth_ok)
# SimSMTPChannel doesn't fully support LOGIN or CRAM-MD5 auth because they
# require a synchronous read to obtain the credentials...so instead smtpd
# sees the credential sent by smtplib's login method as an unknown command,
# which results in smtplib raising an auth error. Fortunately the error
# message contains the encoded credential, so we can partially check that it
# was generated correctly (partially, because the 'word' is uppercased in
# the error message).
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
if sim_auth_login_password not in str(err):
raise "expected encoded password not found in error message"
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
if sim_auth_credentials['cram-md5'] not in str(err):
raise "expected encoded credentials not found in error message"
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=15)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_main(verbose=None):
test_support.run_unittest(GeneralTests, DebuggingServerTests,
NonConnectingTests,
BadHELOServerTests, SMTPSimTests,
TooLongLineTests)
if __name__ == '__main__':
test_main()
|
dask.py
|
# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module
# pylint: disable=missing-class-docstring, invalid-name
# pylint: disable=too-many-lines, fixme
# pylint: disable=import-error
"""Dask extensions for distributed training. See
https://xgboost.readthedocs.io/en/latest/tutorials/dask.html for simple
tutorial. Also xgboost/demo/dask for some examples.
There are two sets of APIs in this module: one is the functional API, including the
``train`` and ``predict`` methods; the other is the stateful Scikit-Learn wrapper
inherited from the single-node Scikit-Learn interface.
The implementation is heavily influenced by dask_xgboost:
https://github.com/dask/dask-xgboost
"""
import platform
import logging
from collections import defaultdict
from collections.abc import Sequence
from threading import Thread
from typing import TYPE_CHECKING, List, Tuple, Callable, Optional, Any, Union, Dict, Set
from typing import Awaitable, Generator, TypeVar
import numpy
from . import rabit, config
from .callback import TrainingCallback
from .compat import LazyLoader
from .compat import sparse, scipy_sparse
from .compat import PANDAS_INSTALLED, DataFrame, Series, pandas_concat
from .compat import lazy_isinstance
from .core import DMatrix, DeviceQuantileDMatrix, Booster, _expect, DataIter
from .core import Objective, Metric
from .core import _deprecate_positional_args
from .training import train as worker_train
from .tracker import RabitTracker, get_host_ip
from .sklearn import XGBModel, XGBRegressorBase, XGBClassifierBase, _objective_decorator
from .sklearn import xgboost_model_doc
from .sklearn import _cls_predict_proba
from .sklearn import XGBRanker
if TYPE_CHECKING:
from dask import dataframe as dd
from dask import array as da
import dask
import distributed
else:
dd = LazyLoader('dd', globals(), 'dask.dataframe')
da = LazyLoader('da', globals(), 'dask.array')
dask = LazyLoader('dask', globals(), 'dask')
distributed = LazyLoader('distributed', globals(), 'dask.distributed')
_DaskCollection = Union["da.Array", "dd.DataFrame", "dd.Series"]
try:
from mypy_extensions import TypedDict
TrainReturnT = TypedDict('TrainReturnT', {
'booster': Booster,
'history': Dict,
})
except ImportError:
TrainReturnT = Dict[str, Any] # type:ignore
# Current status is considered as initial support, many features are not properly
# supported yet.
#
# TODOs:
# - CV
# - Ranking
#
# Note for developers:
#
# As of writing asyncio is still a new feature of Python and in depth documentation is
# rare. Best examples of various asyncio tricks are in dask (luckily). Classes like
# Client, Worker are awaitable. Some general rules for the implementation here:
#
# - Synchronous world is different from asynchronous one, and they don't mix well.
# - Write everything with async, then use distributed Client sync function to do the
# switch.
# - Use Any for type hint when the return value can be union of Awaitable and plain
# value. This is caused by Client.sync can return both types depending on context.
# Right now there's no good way to silence:
#
# await train(...)
#
# if train returns a Union type.
LOGGER = logging.getLogger('[xgboost.dask]')
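# A hypothetical sketch (not part of this module) of the async-first pattern described in
# the note above: the implementation lives in a coroutine and the public entry point only
# performs the sync/async switch through ``Client.sync``.  The return type is ``Any``
# because ``Client.sync`` may hand back an Awaitable when the client is asynchronous.
def _example_sync_entry_point(client: Optional["distributed.Client"], value: int) -> Any:
    import asyncio
    async def _work_async(c: "distributed.Client", v: int) -> int:
        await asyncio.sleep(0)  # stand-in for real awaitable work (wait, compute, ...)
        return v + 1
    client = _xgb_get_client(client)  # defined later in this module; resolved at call time
    return client.sync(_work_async, client, value)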
def _start_tracker(n_workers: int) -> Dict[str, Any]:
"""Start Rabit tracker """
env = {'DMLC_NUM_WORKER': n_workers}
host = get_host_ip('auto')
rabit_context = RabitTracker(hostIP=host, nslave=n_workers)
env.update(rabit_context.slave_envs())
rabit_context.start(n_workers)
thread = Thread(target=rabit_context.join)
thread.daemon = True
thread.start()
return env
def _assert_dask_support() -> None:
try:
import dask # pylint: disable=W0621,W0611
except ImportError as e:
raise ImportError(
'Dask needs to be installed in order to use this module') from e
if platform.system() == 'Windows':
msg = 'Windows is not officially supported for dask/xgboost,'
msg += ' contributions are welcome.'
LOGGER.warning(msg)
class RabitContext:
'''A context controlling rabit initialization and finalization.'''
def __init__(self, args: List[bytes]) -> None:
self.args = args
worker = distributed.get_worker()
self.args.append(
('DMLC_TASK_ID=[xgboost.dask]:' + str(worker.address)).encode())
def __enter__(self) -> None:
rabit.init(self.args)
LOGGER.debug('-------------- rabit say hello ------------------')
def __exit__(self, *args: List) -> None:
rabit.finalize()
LOGGER.debug('--------------- rabit say bye ------------------')
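# Hypothetical sketch (not used by this module) of how _start_tracker and RabitContext fit
# together.  It must run inside a dask worker task (RabitContext calls
# distributed.get_worker()), e.g. submitted via client.submit, and the tracker here only
# expects a single participant so the example does not block waiting for other workers.
def _example_rabit_worker_task() -> None:
    env = _start_tracker(n_workers=1)
    args = [('%s=%s' % (k, v)).encode() for k, v in env.items()]
    with RabitContext(args):
        # every participating worker gets a distinct rank once rabit.init succeeds
        LOGGER.debug('rabit rank: %d', rabit.get_rank())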
def concat(value: Any) -> Any: # pylint: disable=too-many-return-statements
'''To be replaced with dask builtin.'''
if isinstance(value[0], numpy.ndarray):
return numpy.concatenate(value, axis=0)
if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):
return scipy_sparse.vstack(value, format='csr')
if sparse and isinstance(value[0], sparse.SparseArray):
return sparse.concatenate(value, axis=0)
if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):
return pandas_concat(value, axis=0)
if lazy_isinstance(value[0], 'cudf.core.dataframe', 'DataFrame') or \
lazy_isinstance(value[0], 'cudf.core.series', 'Series'):
from cudf import concat as CUDF_concat # pylint: disable=import-error
return CUDF_concat(value, axis=0)
if lazy_isinstance(value[0], 'cupy.core.core', 'ndarray'):
import cupy
# pylint: disable=c-extension-no-member,no-member
d = cupy.cuda.runtime.getDevice()
for v in value:
d_v = v.device.id
assert d_v == d, 'Concatenating arrays on different devices.'
return cupy.concatenate(value, axis=0)
return dd.multi.concat(list(value), axis=0)
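# Hypothetical illustration (not used by this module): ``concat`` dispatches on the type
# of the first element, so a list of numpy arrays is joined with numpy, pandas frames
# with pandas, and anything else falls back to dask.dataframe.
def _example_concat() -> None:
    assert concat([numpy.array([1, 2]), numpy.array([3])]).tolist() == [1, 2, 3]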
def _xgb_get_client(client: Optional["distributed.Client"]) -> "distributed.Client":
'''Simple wrapper around testing None.'''
if not isinstance(client, (type(distributed.get_client()), type(None))):
raise TypeError(
_expect([type(distributed.get_client()), type(None)], type(client)))
ret = distributed.get_client() if client is None else client
return ret
# From the implementation point of view, DaskDMatrix complicates a lot of
# things. A large portion of the code base is about syncing and extracting
# data from DaskDMatrix. But having an independent data structure gives us a
# chance to perform some specialized optimizations, like building histogram
# index directly.
class DaskDMatrix:
# pylint: disable=missing-docstring, too-many-instance-attributes
'''DMatrix holding references to a Dask DataFrame or Dask Array. Constructing
a `DaskDMatrix` forces all lazy computation to be carried out. Wait on the
input data explicitly beforehand if you want to separate that computation from
the construction of the `DaskDMatrix` itself.
.. note::
DaskDMatrix does not repartition or move data between workers. It's
the caller's responsibility to balance the data.
.. versionadded:: 1.0.0
Parameters
----------
client :
Specify the dask client used for training. Use default client returned from dask
if it's set to None.
data :
data source of DMatrix.
label :
label used for training.
missing :
Value in the input data (e.g. `numpy.ndarray`) that should be treated as a
missing value. If None, defaults to np.nan.
weight :
Weight for each instance.
base_margin :
Global bias for each instance.
qid :
Query ID for ranking.
label_lower_bound :
Lower bound for survival training.
label_upper_bound :
Upper bound for survival training.
feature_weights :
Weight for features used in column sampling.
feature_names :
Set names for features.
feature_types :
Set types for features
'''
@_deprecate_positional_args
def __init__(
self,
client: "distributed.Client",
data: _DaskCollection,
label: Optional[_DaskCollection] = None,
*,
missing: float = None,
weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
qid: Optional[_DaskCollection] = None,
label_lower_bound: Optional[_DaskCollection] = None,
label_upper_bound: Optional[_DaskCollection] = None,
feature_weights: Optional[_DaskCollection] = None,
feature_names: Optional[Union[str, List[str]]] = None,
feature_types: Optional[Union[Any, List[Any]]] = None
) -> None:
_assert_dask_support()
client = _xgb_get_client(client)
self.feature_names = feature_names
self.feature_types = feature_types
self.missing = missing
if qid is not None and weight is not None:
raise NotImplementedError('per-group weight is not implemented.')
if len(data.shape) != 2:
raise ValueError(
'Expecting 2 dimensional input, got: {shape}'.format(
shape=data.shape))
if not isinstance(data, (dd.DataFrame, da.Array)):
raise TypeError(_expect((dd.DataFrame, da.Array), type(data)))
if not isinstance(label, (dd.DataFrame, da.Array, dd.Series,
type(None))):
raise TypeError(
_expect((dd.DataFrame, da.Array, dd.Series), type(label)))
self.worker_map: Dict[str, "distributed.Future"] = defaultdict(list)
self.is_quantile: bool = False
self._init = client.sync(self.map_local_data,
client, data, label=label, weights=weight,
base_margin=base_margin,
qid=qid,
feature_weights=feature_weights,
label_lower_bound=label_lower_bound,
label_upper_bound=label_upper_bound)
def __await__(self) -> Generator:
return self._init.__await__()
async def map_local_data(
self,
client: "distributed.Client",
data: _DaskCollection,
label: Optional[_DaskCollection] = None,
weights: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
qid: Optional[_DaskCollection] = None,
feature_weights: Optional[_DaskCollection] = None,
label_lower_bound: Optional[_DaskCollection] = None,
label_upper_bound: Optional[_DaskCollection] = None
) -> "DaskDMatrix":
'''Obtain references to local data.'''
def inconsistent(
left: List[Any], left_name: str, right: List[Any], right_name: str
) -> str:
msg = 'Partitions between {a_name} and {b_name} are not ' \
'consistent: {a_len} != {b_len}. ' \
'Please try to repartition/rechunk your data.'.format(
a_name=left_name, b_name=right_name, a_len=len(left),
b_len=len(right)
)
return msg
def check_columns(parts: Any) -> None:
# x is required to be 2 dim in __init__
assert parts.ndim == 1 or parts.shape[1], 'Data should be' \
' partitioned by row. To avoid this specify the number' \
' of columns for your dask Array explicitly. e.g.' \
' chunks=(partition_size, X.shape[1])'
data = data.persist()
for meta in [label, weights, base_margin, label_lower_bound,
label_upper_bound]:
if meta is not None:
meta = meta.persist()
# Breaking data into partitions, a trick borrowed from dask_xgboost.
# `to_delayed` downgrades high-level objects into numpy or pandas
# equivalents.
X_parts = data.to_delayed()
if isinstance(X_parts, numpy.ndarray):
check_columns(X_parts)
X_parts = X_parts.flatten().tolist()
def flatten_meta(
meta: Optional[_DaskCollection]
) -> "Optional[List[dask.delayed.Delayed]]":
if meta is not None:
meta_parts = meta.to_delayed()
if isinstance(meta_parts, numpy.ndarray):
check_columns(meta_parts)
meta_parts = meta_parts.flatten().tolist()
return meta_parts
return None
y_parts = flatten_meta(label)
w_parts = flatten_meta(weights)
margin_parts = flatten_meta(base_margin)
qid_parts = flatten_meta(qid)
ll_parts = flatten_meta(label_lower_bound)
lu_parts = flatten_meta(label_upper_bound)
parts = [X_parts]
meta_names = []
def append_meta(
m_parts: Optional[List["dask.delayed.delayed"]], name: str
) -> None:
if m_parts is not None:
assert len(X_parts) == len(
m_parts), inconsistent(X_parts, 'X', m_parts, name)
parts.append(m_parts)
meta_names.append(name)
append_meta(y_parts, 'labels')
append_meta(w_parts, 'weights')
append_meta(margin_parts, 'base_margin')
append_meta(qid_parts, 'qid')
append_meta(ll_parts, 'label_lower_bound')
append_meta(lu_parts, 'label_upper_bound')
# At this point, `parts` looks like:
# [(x0, x1, ..), (y0, y1, ..), ..] in delayed form
# delay the zipped result
parts = list(map(dask.delayed, zip(*parts))) # pylint: disable=no-member
# At this point, the mental model should look like:
# [(x0, y0, ..), (x1, y1, ..), ..] in delayed form
parts = client.compute(parts)
await distributed.wait(parts) # async wait for parts to be computed
for part in parts:
assert part.status == 'finished', part.status
# Preserving the partition order for prediction.
self.partition_order = {}
for i, part in enumerate(parts):
self.partition_order[part.key] = i
key_to_partition = {part.key: part for part in parts}
who_has = await client.scheduler.who_has(keys=[part.key for part in parts])
worker_map: Dict[str, "distributed.Future"] = defaultdict(list)
for key, workers in who_has.items():
worker_map[next(iter(workers))].append(key_to_partition[key])
self.worker_map = worker_map
self.meta_names = meta_names
if feature_weights is None:
self.feature_weights = None
else:
self.feature_weights = await client.compute(feature_weights).result()
return self
def create_fn_args(self, worker_addr: str) -> Dict[str, Any]:
'''Create a dictionary of objects that can be pickled for function
arguments.
'''
return {'feature_names': self.feature_names,
'feature_types': self.feature_types,
'feature_weights': self.feature_weights,
'meta_names': self.meta_names,
'missing': self.missing,
'parts': self.worker_map.get(worker_addr, None),
'is_quantile': self.is_quantile}
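# A minimal usage sketch for the class above (variable names are hypothetical;
# assumes a running dask cluster and the imports already made in this module):
#
#     from distributed import Client
#     import dask.array as da
#     client = Client("scheduler-address:8786")
#     X = da.random.random((1000, 10), chunks=(100, 10))
#     y = da.random.random((1000,), chunks=(100,))
#     dtrain = DaskDMatrix(client, X, y)
#
# Construction only records references to the partitions; each worker later
# rebuilds a local DMatrix from the entries returned by `create_fn_args`.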
_DataParts = List[Tuple[Any, Optional[Any], Optional[Any], Optional[Any], Optional[Any],
Optional[Any], Optional[Any]]]
def _get_worker_parts_ordered(
meta_names: List[str], list_of_parts: _DataParts
) -> _DataParts:
# List of partitions like: [(x3, y3, w3, m3, ..), ..], order is not preserved.
assert isinstance(list_of_parts, list)
result = []
for i, _ in enumerate(list_of_parts):
data = list_of_parts[i][0]
labels = None
weights = None
base_margin = None
qid = None
label_lower_bound = None
label_upper_bound = None
        # Iterate through all possible meta info; this adds only a small
        # overhead, since xgboost has a constant number of meta info fields.
for j, blob in enumerate(list_of_parts[i][1:]):
if meta_names[j] == 'labels':
labels = blob
elif meta_names[j] == 'weights':
weights = blob
elif meta_names[j] == 'base_margin':
base_margin = blob
elif meta_names[j] == 'qid':
qid = blob
elif meta_names[j] == 'label_lower_bound':
label_lower_bound = blob
elif meta_names[j] == 'label_upper_bound':
label_upper_bound = blob
else:
raise ValueError('Unknown metainfo:', meta_names[j])
result.append((data, labels, weights, base_margin, qid, label_lower_bound,
label_upper_bound))
return result
def _unzip(list_of_parts: _DataParts) -> List[Tuple[Any, ...]]:
return list(zip(*list_of_parts))
def _get_worker_parts(
list_of_parts: _DataParts, meta_names: List[str]
) -> List[Tuple[Any, ...]]:
partitions = _get_worker_parts_ordered(meta_names, list_of_parts)
partitions_unzipped = _unzip(partitions)
return partitions_unzipped
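# Illustration of the transposition performed by `_get_worker_parts` above
# (partition names are hypothetical):
#
#     list_of_parts == [(x0, y0, w0, ...), (x1, y1, w1, ...)]
#     _unzip(list_of_parts) -> [(x0, x1), (y0, y1), (w0, w1), ...]
#
# i.e. per-partition tuples become per-field tuples, which is the layout
# `DaskPartitionIter` below expects for `data`, `label`, `weight`, etc.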
class DaskPartitionIter(DataIter): # pylint: disable=R0902
"""A data iterator for `DaskDeviceQuantileDMatrix`."""
def __init__(
self,
data: Tuple[Any, ...],
label: Optional[Tuple[Any, ...]] = None,
weight: Optional[Tuple[Any, ...]] = None,
base_margin: Optional[Tuple[Any, ...]] = None,
qid: Optional[Tuple[Any, ...]] = None,
label_lower_bound: Optional[Tuple[Any, ...]] = None,
label_upper_bound: Optional[Tuple[Any, ...]] = None,
feature_names: Optional[Union[str, List[str]]] = None,
feature_types: Optional[Union[Any, List[Any]]] = None
) -> None:
self._data = data
self._labels = label
self._weights = weight
self._base_margin = base_margin
self._qid = qid
self._label_lower_bound = label_lower_bound
self._label_upper_bound = label_upper_bound
self._feature_names = feature_names
self._feature_types = feature_types
assert isinstance(self._data, Sequence)
types = (Sequence, type(None))
assert isinstance(self._labels, types)
assert isinstance(self._weights, types)
assert isinstance(self._base_margin, types)
assert isinstance(self._label_lower_bound, types)
assert isinstance(self._label_upper_bound, types)
self._iter = 0 # set iterator to 0
super().__init__()
def data(self) -> Any:
'''Utility function for obtaining current batch of data.'''
return self._data[self._iter]
def labels(self) -> Any:
'''Utility function for obtaining current batch of label.'''
if self._labels is not None:
return self._labels[self._iter]
return None
def weights(self) -> Any:
        '''Utility function for obtaining current batch of weight.'''
if self._weights is not None:
return self._weights[self._iter]
return None
def qids(self) -> Any:
'''Utility function for obtaining current batch of query id.'''
if self._qid is not None:
return self._qid[self._iter]
return None
def base_margins(self) -> Any:
'''Utility function for obtaining current batch of base_margin.'''
if self._base_margin is not None:
return self._base_margin[self._iter]
return None
def label_lower_bounds(self) -> Any:
'''Utility function for obtaining current batch of label_lower_bound.
'''
if self._label_lower_bound is not None:
return self._label_lower_bound[self._iter]
return None
def label_upper_bounds(self) -> Any:
'''Utility function for obtaining current batch of label_upper_bound.
'''
if self._label_upper_bound is not None:
return self._label_upper_bound[self._iter]
return None
def reset(self) -> None:
'''Reset the iterator'''
self._iter = 0
def next(self, input_data: Callable) -> int:
'''Yield next batch of data'''
if self._iter == len(self._data):
# Return 0 when there's no more batch.
return 0
feature_names: Optional[Union[List[str], str]] = None
if self._feature_names:
feature_names = self._feature_names
else:
if hasattr(self.data(), 'columns'):
feature_names = self.data().columns.format()
else:
feature_names = None
input_data(data=self.data(), label=self.labels(),
weight=self.weights(), group=None,
qid=self.qids(),
label_lower_bound=self.label_lower_bounds(),
label_upper_bound=self.label_upper_bounds(),
feature_names=feature_names,
feature_types=self._feature_types)
self._iter += 1
return 1
class DaskDeviceQuantileDMatrix(DaskDMatrix):
    '''Specialized data type for the `gpu_hist` tree method.  This class is used
    to reduce memory usage by eliminating data copies.  Internally, all
    partitions/chunks of data are merged by weighted GK sketching, so the number
    of partitions from dask may affect training accuracy, as GK sketching
    produces a bounded error for each merge.
.. versionadded:: 1.2.0
Parameters
----------
max_bin : Number of bins for histogram construction.
'''
def __init__(
self,
client: "distributed.Client",
data: _DaskCollection,
label: Optional[_DaskCollection] = None,
missing: float = None,
weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
qid: Optional[_DaskCollection] = None,
label_lower_bound: Optional[_DaskCollection] = None,
label_upper_bound: Optional[_DaskCollection] = None,
feature_weights: Optional[_DaskCollection] = None,
feature_names: Optional[Union[str, List[str]]] = None,
feature_types: Optional[Union[Any, List[Any]]] = None,
max_bin: int = 256
) -> None:
super().__init__(
client=client,
data=data,
label=label,
missing=missing,
feature_weights=feature_weights,
weight=weight,
base_margin=base_margin,
qid=qid,
label_lower_bound=label_lower_bound,
label_upper_bound=label_upper_bound,
feature_names=feature_names,
feature_types=feature_types
)
self.max_bin = max_bin
self.is_quantile = True
def create_fn_args(self, worker_addr: str) -> Dict[str, Any]:
args = super().create_fn_args(worker_addr)
args['max_bin'] = self.max_bin
return args
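# Hedged usage sketch (illustrative values only): the quantile variant mirrors
# `DaskDMatrix` but is intended for the `gpu_hist` tree method, e.g.
#
#     dtrain = DaskDeviceQuantileDMatrix(client, X, y, max_bin=256)
#     train(client, {'tree_method': 'gpu_hist'}, dtrain)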
def _create_device_quantile_dmatrix(
feature_names: Optional[Union[str, List[str]]],
feature_types: Optional[Union[Any, List[Any]]],
feature_weights: Optional[Any],
meta_names: List[str],
missing: float,
parts: Optional[_DataParts],
max_bin: int
) -> DeviceQuantileDMatrix:
worker = distributed.get_worker()
if parts is None:
msg = 'worker {address} has an empty DMatrix. '.format(
address=worker.address)
LOGGER.warning(msg)
import cupy
d = DeviceQuantileDMatrix(cupy.zeros((0, 0)),
feature_names=feature_names,
feature_types=feature_types,
max_bin=max_bin)
return d
(data, labels, weights, base_margin, qid,
label_lower_bound, label_upper_bound) = _get_worker_parts(
parts, meta_names)
it = DaskPartitionIter(data=data, label=labels, weight=weights,
base_margin=base_margin,
qid=qid,
label_lower_bound=label_lower_bound,
label_upper_bound=label_upper_bound)
dmatrix = DeviceQuantileDMatrix(it,
missing=missing,
feature_names=feature_names,
feature_types=feature_types,
nthread=worker.nthreads,
max_bin=max_bin)
dmatrix.set_info(feature_weights=feature_weights)
return dmatrix
def _create_dmatrix(
feature_names: Optional[Union[str, List[str]]],
feature_types: Optional[Union[Any, List[Any]]],
feature_weights: Optional[Any],
meta_names: List[str],
missing: float,
parts: Optional[_DataParts]
) -> DMatrix:
'''Get data that local to worker from DaskDMatrix.
Returns
-------
A DMatrix object.
'''
worker = distributed.get_worker()
list_of_parts = parts
if list_of_parts is None:
msg = 'worker {address} has an empty DMatrix. '.format(address=worker.address)
LOGGER.warning(msg)
d = DMatrix(numpy.empty((0, 0)),
feature_names=feature_names,
feature_types=feature_types)
return d
T = TypeVar('T')
def concat_or_none(data: Tuple[Optional[T], ...]) -> Optional[T]:
if any([part is None for part in data]):
return None
return concat(data)
(data, labels, weights, base_margin, qid,
label_lower_bound, label_upper_bound) = _get_worker_parts(list_of_parts, meta_names)
_labels = concat_or_none(labels)
_weights = concat_or_none(weights)
_base_margin = concat_or_none(base_margin)
_qid = concat_or_none(qid)
_label_lower_bound = concat_or_none(label_lower_bound)
_label_upper_bound = concat_or_none(label_upper_bound)
_data = concat(data)
dmatrix = DMatrix(
_data,
_labels,
missing=missing,
feature_names=feature_names,
feature_types=feature_types,
nthread=worker.nthreads
)
dmatrix.set_info(
base_margin=_base_margin, qid=_qid, weight=_weights,
label_lower_bound=_label_lower_bound,
label_upper_bound=_label_upper_bound,
feature_weights=feature_weights
)
return dmatrix
def _dmatrix_from_list_of_parts(
is_quantile: bool, **kwargs: Any
) -> Union[DMatrix, DeviceQuantileDMatrix]:
if is_quantile:
return _create_device_quantile_dmatrix(**kwargs)
return _create_dmatrix(**kwargs)
async def _get_rabit_args(n_workers: int, client: "distributed.Client") -> List[bytes]:
'''Get rabit context arguments from data distribution in DaskDMatrix.'''
env = await client.run_on_scheduler(_start_tracker, n_workers)
rabit_args = [('%s=%s' % item).encode() for item in env.items()]
return rabit_args
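# The tracker environment is serialised above as byte strings of the form
# b'KEY=value'; for example, a (hypothetical) entry {'DMLC_NUM_WORKER': 2}
# becomes b'DMLC_NUM_WORKER=2' before being handed to the rabit context.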
# The train and predict methods are supposed to be "functional", which fits the
# dask paradigm.  As a side effect, the `evals_result` argument from the
# single-node API is no longer supported, since it mutates its input and there
# is no intuitive way to synchronise that mutation back to the caller.
# Instead, a dictionary containing the evaluation history is returned.
def _get_workers_from_data(
dtrain: DaskDMatrix,
evals: Optional[List[Tuple[DaskDMatrix, str]]]
) -> Set[str]:
X_worker_map: Set[str] = set(dtrain.worker_map.keys())
if evals:
for e in evals:
assert len(e) == 2
assert isinstance(e[0], DaskDMatrix) and isinstance(e[1], str)
worker_map = set(e[0].worker_map.keys())
X_worker_map = X_worker_map.union(worker_map)
return X_worker_map
async def _train_async(
client: "distributed.Client",
global_config: Dict[str, Any],
params: Dict[str, Any],
dtrain: DaskDMatrix,
num_boost_round: int,
evals: Optional[List[Tuple[DaskDMatrix, str]]],
obj: Optional[Objective],
feval: Optional[Metric],
early_stopping_rounds: Optional[int],
verbose_eval: Union[int, bool],
xgb_model: Optional[Booster],
callbacks: Optional[List[TrainingCallback]]
) -> Optional[TrainReturnT]:
workers = list(_get_workers_from_data(dtrain, evals))
_rabit_args = await _get_rabit_args(len(workers), client)
def dispatched_train(
worker_addr: str,
rabit_args: List[bytes],
dtrain_ref: Dict,
dtrain_idt: int,
evals_ref: Dict
) -> Optional[Dict[str, Union[Booster, Dict]]]:
'''Perform training on a single worker. A local function prevents pickling.
'''
LOGGER.debug('Training on %s', str(worker_addr))
worker = distributed.get_worker()
with RabitContext(rabit_args), config.config_context(**global_config):
local_dtrain = _dmatrix_from_list_of_parts(**dtrain_ref)
local_evals = []
if evals_ref:
for ref, name, idt in evals_ref:
if idt == dtrain_idt:
local_evals.append((local_dtrain, name))
continue
local_evals.append((_dmatrix_from_list_of_parts(**ref), name))
local_history: Dict = {}
local_param = params.copy() # just to be consistent
msg = 'Overriding `nthreads` defined in dask worker.'
override = ['nthread', 'n_jobs']
for p in override:
val = local_param.get(p, None)
if val is not None and val != worker.nthreads:
LOGGER.info(msg)
else:
local_param[p] = worker.nthreads
bst = worker_train(params=local_param,
dtrain=local_dtrain,
num_boost_round=num_boost_round,
evals_result=local_history,
evals=local_evals,
obj=obj,
feval=feval,
early_stopping_rounds=early_stopping_rounds,
verbose_eval=verbose_eval,
xgb_model=xgb_model,
callbacks=callbacks)
ret: Optional[Dict[str, Union[Booster, Dict]]] = {
'booster': bst, 'history': local_history}
if local_dtrain.num_row() == 0:
ret = None
return ret
    # Note for function purity:
    # XGBoost is deterministic in most cases, which means the train function
    # is supposed to be idempotent.  One known exception is gblinear with the
    # shotgun updater.  We haven't been able to verify this fully, so we keep
    # `pure=False` here.
futures = []
for i, worker_addr in enumerate(workers):
if evals:
evals_per_worker = [(e.create_fn_args(worker_addr), name, id(e))
for e, name in evals]
else:
evals_per_worker = []
f = client.submit(dispatched_train,
worker_addr,
_rabit_args,
dtrain.create_fn_args(workers[i]),
id(dtrain),
evals_per_worker,
pure=False,
workers=[worker_addr])
futures.append(f)
results = await client.gather(futures)
return list(filter(lambda ret: ret is not None, results))[0]
def train(
client: "distributed.Client",
params: Dict[str, Any],
dtrain: DaskDMatrix,
num_boost_round: int = 10,
evals: Optional[List[Tuple[DaskDMatrix, str]]] = None,
obj: Optional[Objective] = None,
feval: Optional[Metric] = None,
early_stopping_rounds: Optional[int] = None,
xgb_model: Optional[Booster] = None,
verbose_eval: Union[int, bool] = True,
callbacks: Optional[List[TrainingCallback]] = None
) -> Any:
'''Train XGBoost model.
.. versionadded:: 1.0.0
.. note::
        Other parameters are the same as `xgboost.train` except for
        `evals_result`, which is returned as part of the function's return
        value instead of being passed in as an argument.
Parameters
----------
client :
Specify the dask client used for training. Use default client returned from dask
if it's set to None.
Returns
-------
results: dict
A dictionary containing trained booster and evaluation history. `history` field
is the same as `eval_result` from `xgboost.train`.
.. code-block:: python
{'booster': xgboost.Booster,
'history': {'train': {'logloss': ['0.48253', '0.35953']},
'eval': {'logloss': ['0.480385', '0.357756']}}}
'''
_assert_dask_support()
client = _xgb_get_client(client)
# Get global configuration before transferring computation to another thread or
# process.
global_config = config.get_config()
return client.sync(_train_async,
client=client,
global_config=global_config,
num_boost_round=num_boost_round,
obj=obj,
feval=feval,
params=params,
dtrain=dtrain,
evals=evals,
early_stopping_rounds=early_stopping_rounds,
verbose_eval=verbose_eval,
xgb_model=xgb_model,
callbacks=callbacks)
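# Hedged usage sketch for `train` (assumes a live dask client; parameter values
# and variable names are illustrative only):
#
#     dtrain = DaskDMatrix(client, X, y)
#     output = train(client, {'tree_method': 'hist'}, dtrain,
#                    num_boost_round=10)
#     booster = output['booster']
#     history = output['history']
#
# Unlike the single-node `xgboost.train`, the evaluation history is part of the
# return value instead of being written into an `evals_result` argument.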
async def _direct_predict_impl(
client: "distributed.Client",
data: _DaskCollection,
predict_fn: Callable
) -> _DaskCollection:
if isinstance(data, da.Array):
predictions = await client.submit(
da.map_blocks,
predict_fn, data, False, drop_axis=1,
dtype=numpy.float32
).result()
return predictions
if isinstance(data, dd.DataFrame):
predictions = await client.submit(
dd.map_partitions,
predict_fn, data, True,
meta=dd.utils.make_meta({'prediction': 'f4'})
).result()
return predictions.iloc[:, 0]
raise TypeError('data of type: ' + str(type(data)) +
' is not supported by direct prediction')
# pylint: disable=too-many-statements
async def _predict_async(
client: "distributed.Client",
global_config: Dict[str, Any],
model: Union[Booster, Dict],
data: _DaskCollection,
output_margin: bool,
missing: float,
pred_leaf: bool,
pred_contribs: bool,
approx_contribs: bool,
pred_interactions: bool,
validate_features: bool
) -> _DaskCollection:
if isinstance(model, Booster):
booster = model
elif isinstance(model, dict):
booster = model['booster']
else:
raise TypeError(_expect([Booster, dict], type(model)))
if not isinstance(data, (DaskDMatrix, da.Array, dd.DataFrame)):
raise TypeError(_expect([DaskDMatrix, da.Array, dd.DataFrame],
type(data)))
def mapped_predict(partition: Any, is_df: bool) -> Any:
worker = distributed.get_worker()
with config.config_context(**global_config):
booster.set_param({'nthread': worker.nthreads})
m = DMatrix(partition, missing=missing, nthread=worker.nthreads)
predt = booster.predict(
data=m,
output_margin=output_margin,
pred_leaf=pred_leaf,
pred_contribs=pred_contribs,
approx_contribs=approx_contribs,
pred_interactions=pred_interactions,
validate_features=validate_features
)
if is_df:
                if lazy_isinstance(partition, 'cudf.core.dataframe', 'DataFrame'):
import cudf
predt = cudf.DataFrame(predt, columns=['prediction'])
else:
predt = DataFrame(predt, columns=['prediction'])
return predt
# Predict on dask collection directly.
if isinstance(data, (da.Array, dd.DataFrame)):
return await _direct_predict_impl(client, data, mapped_predict)
# Prediction on dask DMatrix.
worker_map = data.worker_map
partition_order = data.partition_order
feature_names = data.feature_names
feature_types = data.feature_types
missing = data.missing
meta_names = data.meta_names
def dispatched_predict(
worker_id: int, list_of_orders: List[int], list_of_parts: _DataParts
) -> List[Tuple[Tuple["dask.delayed.Delayed", int], int]]:
'''Perform prediction on each worker.'''
LOGGER.debug('Predicting on %d', worker_id)
with config.config_context(**global_config):
worker = distributed.get_worker()
list_of_parts = _get_worker_parts_ordered(meta_names, list_of_parts)
predictions = []
booster.set_param({'nthread': worker.nthreads})
for i, parts in enumerate(list_of_parts):
(data, _, _, base_margin, _, _, _) = parts
order = list_of_orders[i]
local_part = DMatrix(
data,
base_margin=base_margin,
feature_names=feature_names,
feature_types=feature_types,
missing=missing,
nthread=worker.nthreads
)
predt = booster.predict(
data=local_part,
output_margin=output_margin,
pred_leaf=pred_leaf,
pred_contribs=pred_contribs,
approx_contribs=approx_contribs,
pred_interactions=pred_interactions,
validate_features=validate_features
)
columns = 1 if len(predt.shape) == 1 else predt.shape[1]
ret = ((dask.delayed(predt), columns), order) # pylint: disable=no-member
predictions.append(ret)
return predictions
def dispatched_get_shape(
worker_id: int, list_of_orders: List[int], list_of_parts: _DataParts
) -> List[Tuple[int, int]]:
'''Get shape of data in each worker.'''
LOGGER.debug('Get shape on %d', worker_id)
list_of_parts = _get_worker_parts_ordered(meta_names, list_of_parts)
shapes = []
for i, parts in enumerate(list_of_parts):
(data, _, _, _, _, _, _) = parts
shapes.append((data.shape, list_of_orders[i]))
return shapes
async def map_function(
func: Callable[[int, List[int], _DataParts], Any]
) -> List[Any]:
'''Run function for each part of the data.'''
futures = []
workers_address = list(worker_map.keys())
for wid, worker_addr in enumerate(workers_address):
worker_addr = workers_address[wid]
list_of_parts = worker_map[worker_addr]
list_of_orders = [partition_order[part.key] for part in list_of_parts]
f = client.submit(func, worker_id=wid,
list_of_orders=list_of_orders,
list_of_parts=list_of_parts,
pure=True, workers=[worker_addr])
assert isinstance(f, distributed.client.Future)
futures.append(f)
# Get delayed objects
results = await client.gather(futures)
# flatten into 1 dim list
results = [t for list_per_worker in results for t in list_per_worker]
# sort by order, l[0] is the delayed object, l[1] is its order
results = sorted(results, key=lambda l: l[1])
results = [predt for predt, order in results] # remove order
return results
results = await map_function(dispatched_predict)
shapes = await map_function(dispatched_get_shape)
# Constructing a dask array from list of numpy arrays
# See https://docs.dask.org/en/latest/array-creation.html
arrays = []
for i, shape in enumerate(shapes):
arrays.append(da.from_delayed(
results[i][0], shape=(shape[0],)
if results[i][1] == 1 else (shape[0], results[i][1]),
dtype=numpy.float32))
predictions = await da.concatenate(arrays, axis=0)
return predictions
def predict(
client: "distributed.Client",
model: Union[TrainReturnT, Booster],
data: Union[DaskDMatrix, _DaskCollection],
output_margin: bool = False,
missing: float = numpy.nan,
pred_leaf: bool = False,
pred_contribs: bool = False,
approx_contribs: bool = False,
pred_interactions: bool = False,
validate_features: bool = True
) -> Any:
'''Run prediction with a trained booster.
.. note::
Only default prediction mode is supported right now.
.. versionadded:: 1.0.0
Parameters
----------
client:
Specify the dask client used for training. Use default client
returned from dask if it's set to None.
model:
The trained model.
data:
Input data used for prediction. When input is a dataframe object,
prediction output is a series.
missing:
Used when input data is not DaskDMatrix. Specify the value
considered as missing.
Returns
-------
prediction: dask.array.Array/dask.dataframe.Series
'''
_assert_dask_support()
client = _xgb_get_client(client)
global_config = config.get_config()
return client.sync(
_predict_async, client, global_config, model, data,
output_margin=output_margin,
missing=missing,
pred_leaf=pred_leaf,
pred_contribs=pred_contribs,
approx_contribs=approx_contribs,
pred_interactions=pred_interactions,
validate_features=validate_features
)
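# Hedged usage sketch for `predict` (names are illustrative):
#
#     predt = predict(client, output['booster'], dtrain)  # lazy dask collection
#     local = predt.compute()                             # materialise locally
#
# When `data` is a dask dataframe, the result is a series built from the
# 'prediction' column constructed in `mapped_predict` above.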
async def _inplace_predict_async(
client: "distributed.Client",
global_config: Dict[str, Any],
model: Union[Booster, Dict],
data: _DaskCollection,
iteration_range: Tuple[int, int] = (0, 0),
predict_type: str = 'value',
missing: float = numpy.nan
) -> _DaskCollection:
client = _xgb_get_client(client)
if isinstance(model, Booster):
booster = model
elif isinstance(model, dict):
booster = model['booster']
else:
raise TypeError(_expect([Booster, dict], type(model)))
if not isinstance(data, (da.Array, dd.DataFrame)):
raise TypeError(_expect([da.Array, dd.DataFrame], type(data)))
def mapped_predict(data: Any, is_df: bool) -> Any:
worker = distributed.get_worker()
config.set_config(**global_config)
booster.set_param({'nthread': worker.nthreads})
prediction = booster.inplace_predict(
data,
iteration_range=iteration_range,
predict_type=predict_type,
missing=missing)
if is_df:
if lazy_isinstance(data, 'cudf.core.dataframe', 'DataFrame'):
import cudf
prediction = cudf.DataFrame({'prediction': prediction},
dtype=numpy.float32)
else:
# If it's from pandas, the partition is a numpy array
prediction = DataFrame(prediction, columns=['prediction'],
dtype=numpy.float32)
return prediction
return await _direct_predict_impl(client, data, mapped_predict)
def inplace_predict(
client: "distributed.Client",
model: Union[TrainReturnT, Booster],
data: _DaskCollection,
iteration_range: Tuple[int, int] = (0, 0),
predict_type: str = 'value',
missing: float = numpy.nan
) -> Any:
'''Inplace prediction.
.. versionadded:: 1.1.0
Parameters
----------
client:
Specify the dask client used for training. Use default client
returned from dask if it's set to None.
model:
The trained model.
iteration_range:
Specify the range of trees used for prediction.
predict_type:
* 'value': Normal prediction result.
* 'margin': Output the raw untransformed margin value.
missing:
Value in the input data which needs to be present as a missing
value. If None, defaults to np.nan.
Returns
-------
prediction
'''
_assert_dask_support()
client = _xgb_get_client(client)
global_config = config.get_config()
return client.sync(_inplace_predict_async, client, global_config, model=model,
data=data,
iteration_range=iteration_range,
predict_type=predict_type,
missing=missing)
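# Hedged sketch (illustrative): in-place prediction skips DMatrix construction
# and takes the dask collection directly, e.g.
#
#     predt = inplace_predict(client, output['booster'], X)
#
# Note that `X` must be a dask array or dataframe here, not a DaskDMatrix
# (see the type check in `_inplace_predict_async`).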
async def _evaluation_matrices(
client: "distributed.Client",
validation_set: Optional[List[Tuple[_DaskCollection, _DaskCollection]]],
sample_weight: Optional[List[_DaskCollection]],
sample_qid: Optional[List[_DaskCollection]],
missing: float
) -> Optional[List[Tuple[DaskDMatrix, str]]]:
'''
Parameters
----------
validation_set: list of tuples
Each tuple contains a validation dataset including input X and label y.
E.g.:
.. code-block:: python
[(X_0, y_0), (X_1, y_1), ... ]
    sample_weight: list of arrays
        The weight vector for validation data.
    sample_qid: list of arrays
        Query IDs for validation data, used for ranking.
Returns
-------
evals: list of validation DMatrix
'''
evals: Optional[List[Tuple[DaskDMatrix, str]]] = []
if validation_set is not None:
assert isinstance(validation_set, list)
for i, e in enumerate(validation_set):
w = sample_weight[i] if sample_weight is not None else None
qid = sample_qid[i] if sample_qid is not None else None
dmat = await DaskDMatrix(client=client, data=e[0], label=e[1],
weight=w, missing=missing, qid=qid)
assert isinstance(evals, list)
evals.append((dmat, 'validation_{}'.format(i)))
else:
evals = None
return evals
class DaskScikitLearnBase(XGBModel):
'''Base class for implementing scikit-learn interface with Dask'''
_client = None
# pylint: disable=arguments-differ
@_deprecate_positional_args
async def _predict_async(
self, data: _DaskCollection,
output_margin: bool = False,
validate_features: bool = True,
base_margin: Optional[_DaskCollection] = None
) -> Any:
test_dmatrix = await DaskDMatrix(
client=self.client, data=data, base_margin=base_margin,
missing=self.missing
)
pred_probs = await predict(client=self.client,
model=self.get_booster(), data=test_dmatrix,
output_margin=output_margin,
validate_features=validate_features)
return pred_probs
def predict(
self,
data: _DaskCollection,
output_margin: bool = False,
ntree_limit: Optional[int] = None,
validate_features: bool = True,
base_margin: Optional[_DaskCollection] = None
) -> Any:
_assert_dask_support()
msg = '`ntree_limit` is not supported on dask, use model slicing instead.'
assert ntree_limit is None, msg
return self.client.sync(self._predict_async, data,
output_margin=output_margin,
validate_features=validate_features,
base_margin=base_margin)
def __await__(self) -> Awaitable[Any]:
# Generate a coroutine wrapper to make this class awaitable.
async def _() -> Awaitable[Any]:
return self
return self.client.sync(_).__await__()
@property
def client(self) -> "distributed.Client":
'''The dask client used in this model.'''
client = _xgb_get_client(self._client)
return client
@client.setter
def client(self, clt: "distributed.Client") -> None:
self._client = clt
@xgboost_model_doc(
"""Implementation of the Scikit-Learn API for XGBoost.""", ["estimators", "model"]
)
class DaskXGBRegressor(DaskScikitLearnBase, XGBRegressorBase):
# pylint: disable=missing-class-docstring
async def _fit_async(
self,
X: _DaskCollection,
y: _DaskCollection,
sample_weight: Optional[_DaskCollection],
base_margin: Optional[_DaskCollection],
eval_set: Optional[List[Tuple[_DaskCollection, _DaskCollection]]],
eval_metric: Optional[Union[str, List[str], Metric]],
sample_weight_eval_set: Optional[List[_DaskCollection]],
early_stopping_rounds: int,
verbose: bool,
xgb_model: Optional[Union[Booster, XGBModel]],
feature_weights: Optional[_DaskCollection],
callbacks: Optional[List[TrainingCallback]],
) -> _DaskCollection:
dtrain = await DaskDMatrix(
client=self.client,
data=X,
label=y,
weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
missing=self.missing,
)
params = self.get_xgb_params()
evals = await _evaluation_matrices(
self.client, eval_set, sample_weight_eval_set, None, self.missing
)
if callable(self.objective):
obj = _objective_decorator(self.objective)
else:
obj = None
model, metric, params = self._configure_fit(
booster=xgb_model, eval_metric=eval_metric, params=params
)
results = await train(
client=self.client,
params=params,
dtrain=dtrain,
num_boost_round=self.get_num_boosting_rounds(),
evals=evals,
feval=metric,
obj=obj,
verbose_eval=verbose,
early_stopping_rounds=early_stopping_rounds,
callbacks=callbacks,
xgb_model=model,
)
self._Booster = results["booster"]
# pylint: disable=attribute-defined-outside-init
self.evals_result_ = results["history"]
return self
# pylint: disable=missing-docstring
@_deprecate_positional_args
def fit(
self,
X: _DaskCollection,
y: _DaskCollection,
*,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: Optional[List[Tuple[_DaskCollection, _DaskCollection]]] = None,
eval_metric: Optional[Union[str, List[str], Metric]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: bool = True,
xgb_model: Optional[Union[Booster, XGBModel]] = None,
sample_weight_eval_set: Optional[List[_DaskCollection]] = None,
feature_weights: Optional[_DaskCollection] = None,
callbacks: Optional[List[TrainingCallback]] = None
) -> "DaskXGBRegressor":
_assert_dask_support()
return self.client.sync(
self._fit_async,
X=X,
y=y,
sample_weight=sample_weight,
base_margin=base_margin,
eval_set=eval_set,
eval_metric=eval_metric,
sample_weight_eval_set=sample_weight_eval_set,
early_stopping_rounds=early_stopping_rounds,
verbose=verbose,
xgb_model=xgb_model,
feature_weights=feature_weights,
callbacks=callbacks,
)
@xgboost_model_doc(
'Implementation of the scikit-learn API for XGBoost classification.',
['estimators', 'model'])
class DaskXGBClassifier(DaskScikitLearnBase, XGBClassifierBase):
# pylint: disable=missing-class-docstring
async def _fit_async(
self, X: _DaskCollection, y: _DaskCollection,
sample_weight: Optional[_DaskCollection],
base_margin: Optional[_DaskCollection],
eval_set: Optional[List[Tuple[_DaskCollection, _DaskCollection]]],
eval_metric: Optional[Union[str, List[str], Metric]],
sample_weight_eval_set: Optional[List[_DaskCollection]],
early_stopping_rounds: int,
verbose: bool,
xgb_model: Optional[Union[Booster, XGBModel]],
feature_weights: Optional[_DaskCollection],
callbacks: Optional[List[TrainingCallback]]
) -> "DaskXGBClassifier":
dtrain = await DaskDMatrix(client=self.client,
data=X,
label=y,
weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
missing=self.missing)
params = self.get_xgb_params()
# pylint: disable=attribute-defined-outside-init
if isinstance(y, (da.Array)):
self.classes_ = await self.client.compute(da.unique(y))
else:
self.classes_ = await self.client.compute(y.drop_duplicates())
self.n_classes_ = len(self.classes_)
if self.n_classes_ > 2:
params["objective"] = "multi:softprob"
params['num_class'] = self.n_classes_
else:
params["objective"] = "binary:logistic"
evals = await _evaluation_matrices(self.client, eval_set,
sample_weight_eval_set,
None,
self.missing)
if callable(self.objective):
obj = _objective_decorator(self.objective)
else:
obj = None
model, metric, params = self._configure_fit(
booster=xgb_model,
eval_metric=eval_metric,
params=params
)
results = await train(
client=self.client,
params=params,
dtrain=dtrain,
num_boost_round=self.get_num_boosting_rounds(),
evals=evals,
obj=obj,
feval=metric,
verbose_eval=verbose,
early_stopping_rounds=early_stopping_rounds,
callbacks=callbacks,
xgb_model=model,
)
self._Booster = results['booster']
if not callable(self.objective):
self.objective = params["objective"]
# pylint: disable=attribute-defined-outside-init
self.evals_result_ = results['history']
return self
@_deprecate_positional_args
def fit(
self,
X: _DaskCollection,
y: _DaskCollection,
*,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: Optional[List[Tuple[_DaskCollection, _DaskCollection]]] = None,
eval_metric: Optional[Union[str, List[str], Metric]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: bool = True,
xgb_model: Optional[Union[Booster, XGBModel]] = None,
sample_weight_eval_set: Optional[List[_DaskCollection]] = None,
feature_weights: Optional[_DaskCollection] = None,
callbacks: Optional[List[TrainingCallback]] = None
) -> "DaskXGBClassifier":
_assert_dask_support()
return self.client.sync(
self._fit_async,
X=X,
y=y,
sample_weight=sample_weight,
base_margin=base_margin,
eval_set=eval_set,
eval_metric=eval_metric,
sample_weight_eval_set=sample_weight_eval_set,
early_stopping_rounds=early_stopping_rounds,
verbose=verbose,
xgb_model=xgb_model,
feature_weights=feature_weights,
callbacks=callbacks,
)
async def _predict_proba_async(
self,
X: _DaskCollection,
validate_features: bool,
output_margin: bool,
base_margin: Optional[_DaskCollection]
) -> _DaskCollection:
test_dmatrix = await DaskDMatrix(
client=self.client, data=X, base_margin=base_margin,
missing=self.missing
)
pred_probs = await predict(client=self.client,
model=self.get_booster(),
data=test_dmatrix,
validate_features=validate_features,
output_margin=output_margin)
return _cls_predict_proba(self.objective, pred_probs, da.vstack)
# pylint: disable=missing-docstring
def predict_proba(
self,
X: _DaskCollection,
ntree_limit: Optional[int] = None,
validate_features: bool = True,
output_margin: bool = False,
base_margin: Optional[_DaskCollection] = None
) -> Any:
_assert_dask_support()
msg = '`ntree_limit` is not supported on dask, use model slicing instead.'
assert ntree_limit is None, msg
return self.client.sync(
self._predict_proba_async,
X=X,
validate_features=validate_features,
output_margin=output_margin,
base_margin=base_margin
)
async def _predict_async(
self, data: _DaskCollection,
output_margin: bool = False,
validate_features: bool = True,
base_margin: Optional[_DaskCollection] = None
) -> _DaskCollection:
pred_probs = await super()._predict_async(
data, output_margin, validate_features, base_margin
)
if output_margin:
return pred_probs
if self.n_classes_ == 2:
preds = (pred_probs > 0.5).astype(int)
else:
preds = da.argmax(pred_probs, axis=1)
return preds
@xgboost_model_doc(
"Implementation of the Scikit-Learn API for XGBoost Ranking.",
["estimators", "model"],
end_note="""
Note
----
For dask implementation, group is not supported, use qid instead.
""",
)
class DaskXGBRanker(DaskScikitLearnBase):
def __init__(self, objective: str = "rank:pairwise", **kwargs: Any):
if callable(objective):
raise ValueError("Custom objective function not supported by XGBRanker.")
        super().__init__(objective=objective, **kwargs)
async def _fit_async(
self,
X: _DaskCollection,
y: _DaskCollection,
qid: Optional[_DaskCollection],
sample_weight: Optional[_DaskCollection],
base_margin: Optional[_DaskCollection],
eval_set: Optional[List[Tuple[_DaskCollection, _DaskCollection]]],
sample_weight_eval_set: Optional[List[_DaskCollection]],
eval_qid: Optional[List[_DaskCollection]],
eval_metric: Optional[Union[str, List[str], Metric]],
early_stopping_rounds: int,
verbose: bool,
xgb_model: Optional[Union[XGBModel, Booster]],
feature_weights: Optional[_DaskCollection],
callbacks: Optional[List[TrainingCallback]],
) -> "DaskXGBRanker":
dtrain = await DaskDMatrix(
client=self.client,
data=X,
label=y,
qid=qid,
weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
missing=self.missing,
)
params = self.get_xgb_params()
evals = await _evaluation_matrices(
self.client,
eval_set,
sample_weight_eval_set,
sample_qid=eval_qid,
missing=self.missing,
)
if eval_metric is not None:
if callable(eval_metric):
raise ValueError(
'Custom evaluation metric is not yet supported for XGBRanker.')
model, metric, params = self._configure_fit(
booster=xgb_model,
eval_metric=eval_metric,
params=params
)
results = await train(
client=self.client,
params=params,
dtrain=dtrain,
num_boost_round=self.get_num_boosting_rounds(),
evals=evals,
feval=metric,
verbose_eval=verbose,
early_stopping_rounds=early_stopping_rounds,
callbacks=callbacks,
xgb_model=model,
)
self._Booster = results["booster"]
self.evals_result_ = results["history"]
return self
@_deprecate_positional_args
def fit( # pylint: disable=arguments-differ
self,
X: _DaskCollection,
y: _DaskCollection,
*,
group: Optional[_DaskCollection] = None,
qid: Optional[_DaskCollection] = None,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: Optional[List[Tuple[_DaskCollection, _DaskCollection]]] = None,
sample_weight_eval_set: Optional[List[_DaskCollection]] = None,
eval_group: Optional[List[_DaskCollection]] = None,
eval_qid: Optional[List[_DaskCollection]] = None,
eval_metric: Optional[Union[str, List[str], Metric]] = None,
        early_stopping_rounds: Optional[int] = None,
verbose: bool = False,
xgb_model: Optional[Union[XGBModel, Booster]] = None,
feature_weights: Optional[_DaskCollection] = None,
callbacks: Optional[List[TrainingCallback]] = None
) -> "DaskXGBRanker":
_assert_dask_support()
msg = "Use `qid` instead of `group` on dask interface."
if not (group is None and eval_group is None):
raise ValueError(msg)
if qid is None:
raise ValueError("`qid` is required for ranking.")
return self.client.sync(
self._fit_async,
X=X,
y=y,
qid=qid,
sample_weight=sample_weight,
base_margin=base_margin,
eval_set=eval_set,
sample_weight_eval_set=sample_weight_eval_set,
eval_qid=eval_qid,
eval_metric=eval_metric,
early_stopping_rounds=early_stopping_rounds,
verbose=verbose,
xgb_model=xgb_model,
feature_weights=feature_weights,
callbacks=callbacks,
)
# FIXME(trivialfis): arguments differ due to additional parameters like group and qid.
fit.__doc__ = XGBRanker.fit.__doc__
|
record2.py
|
import time
import os
from threading import Thread
from bluepy.btle import BTLEException
from bluepy.sensortag import SensorTag
from config import SENSOR_TAG_LIST
OUT_FILE = "lux.csv"
TIME_BETWEEN_READS = 5
TIME_BETWEEN_WRITES = 10
TIME_BETWEEN_RETRY = 5
LUX_READINGS = []
def get_light_and_battery(tag):
light = -1
battery = -1
try:
light = tag.lightmeter.read()
battery = tag.battery.read()
except Exception as e:
print(e)
return int(light), int(battery)
def get_time():
return int(time.time())
def collect_lux_readings(label, ble_mac):
print(ble_mac, label, "starting collection thread")
print(ble_mac, label, "connecting...")
tag = None
while not tag:
try:
tag = SensorTag(ble_mac)
tag.lightmeter.enable()
tag.battery.enable()
time.sleep(1.0)
except Exception as e:
print(ble_mac, label, str(e))
print("will retry in %d seconds" % TIME_BETWEEN_RETRY)
time.sleep(TIME_BETWEEN_RETRY)
print(ble_mac, label, "connected")
while 1:
light, battery = get_light_and_battery(tag)
reading = {"timestamp": get_time(), "lux": light, "battery": battery, "label": label}
LUX_READINGS.append(reading)
time.sleep(TIME_BETWEEN_READS)
def process_readings():
print("starting processing thread")
while 1:
current_records_number = len(LUX_READINGS)
if current_records_number > 0:
if not os.path.isfile(OUT_FILE):
create_csv_file_with_header(OUT_FILE, sorted(LUX_READINGS[0].keys()))
i = 0
            # Open the output file once and append every pending reading.
            with open(OUT_FILE, 'a') as f:
                while i < current_records_number:
                    values = []
                    readings = LUX_READINGS.pop()
                    for k in sorted(readings):
                        values.append(readings[k])
                    f.write(",".join([str(x) for x in values]) + "\n")
                    i += 1
time.sleep(TIME_BETWEEN_WRITES)
def create_csv_file_with_header(file_name, header):
header_line = ','.join(header)
print("creating file with header,", header)
with open(file_name, 'w') as f:
f.write(header_line + '\n')
def main():
start_time = int(time.time())
print('init time', start_time)
for sensor_tag in SENSOR_TAG_LIST:
Thread(target=collect_lux_readings, args=(sensor_tag["label"], sensor_tag["ble_mac"])).start()
time.sleep(1)
process_readings()
if __name__ == "__main__":
main()
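# A hypothetical `config.py` consumed above might look like the following
# (labels and MAC addresses are placeholders, not real devices):
#
#     SENSOR_TAG_LIST = [
#         {"label": "desk", "ble_mac": "AA:BB:CC:DD:EE:01"},
#         {"label": "window", "ble_mac": "AA:BB:CC:DD:EE:02"},
#     ]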
|
agents.py
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os.path
import threading
import time
try:
import queue # Python 3.x
except ImportError:
import Queue as queue
from cloudify.logs import create_event_message_prefix
from ..cli import cfy
from ..execution_events_fetcher import (wait_for_execution,
WAIT_FOR_EXECUTION_SLEEP_INTERVAL)
from ..exceptions import CloudifyCliError
from .. import env, utils
from ..table import print_data
_NODE_INSTANCE_STATE_STARTED = 'started'
AGENT_COLUMNS = ['id', 'ip', 'deployment', 'node', 'system', 'version',
'install_method', 'tenant_name']
MAX_TRACKER_THREADS = 20
@cfy.group(name='agents')
@cfy.options.common_options
@cfy.assert_manager_active()
def agents():
"""Handle a deployment's agents
"""
pass
@agents.command(name='list',
short_help='List installed agents [manager only]')
@cfy.options.common_options
@cfy.options.tenant_name(required=False,
mutually_exclusive_with=['all_tenants'],
resource_name_for_help='relevant deployment(s)')
@cfy.options.agent_filters
@cfy.options.all_tenants
@cfy.pass_logger
@cfy.pass_client()
@cfy.options.extended_view
def agents_list(agent_filters, tenant_name, client, logger, all_tenants):
utils.explicit_tenant_name_message(tenant_name, logger)
agent_filters['_all_tenants'] = all_tenants
agent_list = client.agents.list(**agent_filters)
logger.info('Listing agents...')
print_data(AGENT_COLUMNS, agent_list, 'Agents:')
@agents.command(name='install',
short_help='Install deployment agents [manager only]')
@cfy.options.common_options
@cfy.options.tenant_name(required=False,
mutually_exclusive_with=['all_tenants'],
resource_name_for_help='relevant deployment(s)')
@cfy.options.all_tenants
@cfy.options.stop_old_agent
@cfy.options.manager_ip
@cfy.options.manager_certificate
@cfy.options.agent_filters
@cfy.options.agents_wait
@cfy.options.install_agent_timeout
@cfy.pass_logger
@cfy.pass_client()
def install(agent_filters,
tenant_name,
logger,
client,
all_tenants,
stop_old_agent,
manager_ip,
manager_certificate,
wait,
install_agent_timeout):
"""Install agents on the hosts of existing deployments.
"""
if manager_certificate:
manager_certificate = _validate_certificate_file(manager_certificate)
params = dict()
# We only want to pass this arg if it's true, because of backwards
# compatibility with blueprints that don't support it
if stop_old_agent:
params['stop_old_agent'] = stop_old_agent
if manager_ip or manager_certificate:
params['manager_ip'] = manager_ip
params['manager_certificate'] = manager_certificate
params['install_agent_timeout'] = install_agent_timeout
utils.explicit_tenant_name_message(tenant_name, logger)
get_deployments_and_run_workers(
client, agent_filters, all_tenants,
logger, 'install_new_agents', wait, params)
def get_filters_map(
client,
logger,
agent_filters,
all_tenants):
# We need to analyze the filters.
#
# If node instance ID's are given, then we only process these node
# instances. The filters for deployment ID's and node ID's
# must not be specified.
#
# Otherwise, we perform an intersection between:
#
# * Union of all specified node ID's
# * Union of all specified deployment ID's
#
# This will end up being a mapping of this form:
#
# tenant1 |- dep1 |- nodeinstance_1
# |- |- nodeinstance_2
# |- |- nodeinstance_3
# tenant2 |- dep2 |- nodeinstance_4
# |- dep3 |- nodeinstance_5
# |- |- nodeinstance_6
#
# It is possible that one of the keys in the dict is 'None',
# and that means - the current tenant.
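    # As a concrete (hypothetical) example, the mapping built below may end up
    # looking like:
    #
    #     {'tenant1': {'dep1': {'node_instance_ids': ['ni_1', 'ni_2']}},
    #      None: {'dep2': {'node_ids': ['node_a']}, 'dep3': {}}}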
if agent_filters[cfy.AGENT_FILTER_NODE_INSTANCE_IDS] and (
agent_filters[cfy.AGENT_FILTER_DEPLOYMENT_ID] or
agent_filters[cfy.AGENT_FILTER_NODE_IDS]):
        raise CloudifyCliError(
            "If node instance ID's are provided, neither deployment ID's nor "
            "node ID's are allowed.")
tenants_to_deployments = dict()
requested_node_instance_ids = agent_filters[
cfy.AGENT_FILTER_NODE_INSTANCE_IDS]
if requested_node_instance_ids:
candidate_ids = requested_node_instance_ids
candidates = client.node_instances.list(
id=candidate_ids,
_include=['id', 'tenant_name', 'deployment_id'],
_get_all_results=True, _all_tenants=True)
# Ensure that all requested node instance ID's actually exist.
missing = set(candidate_ids) - set([
node_instance.id for node_instance in candidates])
if missing:
raise CloudifyCliError("Node instances do not exist: "
"%s" % ', '.join(missing))
for node_instance in candidates:
tenant_map = tenants_to_deployments.setdefault(
node_instance['tenant_name'], dict())
deployment = tenant_map.setdefault(
node_instance['deployment_id'], dict())
deployment_node_instances = deployment.setdefault(
'node_instance_ids', list())
deployment_node_instances.append(node_instance.id)
else:
requested_deployment_ids = agent_filters[
cfy.AGENT_FILTER_DEPLOYMENT_ID]
requested_node_ids = agent_filters[cfy.AGENT_FILTER_NODE_IDS]
existing_deployments = client.deployments.list(
id=requested_deployment_ids or None,
_include=['id', 'tenant_name'],
_get_all_results=True,
_all_tenants=all_tenants)
# If at least one deployment ID was provided, then ensure
# all specified deployment ID's indeed exist.
if requested_deployment_ids:
missing = set(requested_deployment_ids) - set([
deployment.id for deployment in existing_deployments])
if missing:
raise CloudifyCliError("Deployments do not exist: "
"%s" % ', '.join(missing))
if requested_node_ids:
existing_nodes = client.nodes.list(
id=requested_node_ids,
_include=['id', 'deployment_id', 'tenant_name'],
_get_all_results=True,
_all_tenants=all_tenants
)
deps_with_req_nodes = set([
(node['tenant_name'], node.deployment_id)
for node in existing_nodes])
# Collect all deployments (from 'existing_deployments')
# that includes at least one of the requested nodes.
deployments_to_execute = list()
for deployment in existing_deployments:
if (deployment['tenant_name'], deployment.id) in \
deps_with_req_nodes:
deployments_to_execute.append(deployment)
else:
deployments_to_execute = existing_deployments
for deployment in deployments_to_execute:
tenant_map = tenants_to_deployments.setdefault(
deployment['tenant_name'], dict())
deployment_filters = tenant_map.setdefault(deployment.id, dict())
if requested_node_ids:
deployment_filters['node_ids'] = requested_node_ids
# If no deployment ID's were requested, then filter out deployments
# that have at least one Compute instance that is not in "started"
# state.
# We skip this check if specific deployment ID's were requested.
if not requested_deployment_ids:
for tenant_name in list(tenants_to_deployments):
tenant_client = env.get_rest_client(tenant_name=tenant_name)
deps_to_execute = tenants_to_deployments[tenant_name]
offset = 0
while True:
node_instances = tenant_client.node_instances.list(
_include=['id', 'host_id', 'deployment_id', 'state'],
_offset=offset,
)
# Find all unstarted Compute instances.
unstarted_computes = [
ni for ni in node_instances
if ni.id == ni.host_id and
ni.state != _NODE_INSTANCE_STATE_STARTED
]
for unstarted_ni in unstarted_computes:
logger.info("Node instance '%s' is not in '%s' state; "
"deployment '%s' will be skipped",
unstarted_ni.id,
_NODE_INSTANCE_STATE_STARTED,
unstarted_ni.deployment_id)
deps_to_execute.pop(unstarted_ni.deployment_id, None)
if not deps_to_execute:
del tenants_to_deployments[tenant_name]
size = node_instances.metadata.pagination.size
total = node_instances.metadata.pagination.total
if len(node_instances) < size or size == total:
break
offset += size
return tenants_to_deployments
def get_deployments_and_run_workers(
client,
agent_filters,
all_tenants,
logger,
workflow_id,
agents_wait,
parameters=None):
tenants_to_deployments = get_filters_map(
client, logger, agent_filters, all_tenants)
if not tenants_to_deployments:
raise CloudifyCliError("No eligible deployments found")
started_executions = []
requested_install_methods = agent_filters[cfy.AGENT_FILTER_INSTALL_METHODS]
for tenant_name, deployments in tenants_to_deployments.items():
tenant_client = env.get_rest_client(tenant_name=tenant_name)
for deployment_id, dep_filters in deployments.items():
execution_params = dep_filters.copy() # Shallow is fine.
if requested_install_methods:
execution_params['install_methods'] = requested_install_methods
if parameters:
execution_params.update(parameters)
execution = tenant_client.executions.start(
deployment_id, workflow_id, execution_params,
allow_custom_parameters=True)
started_executions.append((tenant_name, execution))
logger.info(
"Started execution for deployment '%s' on tenant '%s': %s",
deployment_id, tenant_name, execution.id
)
if not agents_wait:
logger.info("Executions started for all applicable deployments. "
"You may now use the 'cfy events list' command to "
"view the events associated with these executions.")
return
executions_queue = queue.Queue()
for execution_info in started_executions:
executions_queue.put(execution_info)
errors_summary = []
def _events_handler(events):
for event in events:
output = create_event_message_prefix(event)
if output:
logger.info(output)
def _tracker_thread():
while True:
try:
tenant_name, execution = executions_queue.get_nowait()
except queue.Empty:
break
try:
tenant_client = env.get_rest_client(tenant_name=tenant_name)
execution = wait_for_execution(
tenant_client, execution, events_handler=_events_handler,
include_logs=True, timeout=None)
if execution.error:
message = "Execution of workflow '{0}' for " \
"deployment '{1}' failed. [error={2}]".format(
workflow_id, execution.deployment_id,
execution.error)
logger.error(message)
errors_summary.append(message)
else:
logger.info("Finished executing workflow "
"'{0}' on deployment"
" '{1}'".format(workflow_id,
execution.deployment_id))
except Exception as ex:
# Log to the logger with a full traceback.
# Add to errors summary with only the exception message,
# to avoid clutter.
logger.exception("Failed waiting for execution {0} to "
"finish".format(execution.id))
                errors_summary.append(
                    "Failed waiting for execution {0} to finish; error "
                    "message: {1}".format(execution.id, ex)
                )
threads = []
for i in range(MAX_TRACKER_THREADS):
thread = threading.Thread(target=_tracker_thread)
threads.append(thread)
thread.daemon = True
thread.start()
while True:
if all(not thread.is_alive() for thread in threads):
break
time.sleep(WAIT_FOR_EXECUTION_SLEEP_INTERVAL)
# No need to join any thread, because if we get to this point,
# all threads have already ended (see loop above).
if errors_summary:
raise CloudifyCliError("At least one execution ended with an error:\n"
"{0}".format('\n'.join(errors_summary)))
@agents.command(name='validate',
short_help='Validates the connection between the'
' Cloudify Manager and the live Cloudify Agents'
' (installed on remote hosts). [manager only]')
@cfy.options.common_options
@cfy.options.agent_filters
@cfy.options.tenant_name(required=False,
mutually_exclusive_with=['all_tenants'],
resource_name_for_help='relevant deployment(s)')
@cfy.options.all_tenants
@cfy.options.agents_wait
@cfy.pass_logger
@cfy.pass_client()
def validate(agent_filters,
tenant_name,
logger,
client,
all_tenants,
wait):
"""Validates the connection between the Cloudify Manager and the
live Cloudify Agents (installed on remote hosts).
"""
utils.explicit_tenant_name_message(tenant_name, logger)
get_deployments_and_run_workers(
client, agent_filters, all_tenants,
logger, 'validate_agents', wait, None)
def _validate_certificate_file(certificate):
if not os.path.exists(certificate):
raise IOError("Manager's SSL certificate file does not exist in the"
" following path: {0}".format(certificate))
try:
with open(certificate, 'r') as ssl_file:
manager_certificate = ssl_file.read()
except IOError as e:
raise IOError("Could not read Manager's SSL certificate from the given"
" path: {0}\nError:{1}".format(certificate, e))
return manager_certificate
|
executormarionette.py
|
import os
import socket
import threading
import traceback
import urlparse
import uuid
errors = None
marionette = None
pytestrunner = None
here = os.path.join(os.path.split(__file__)[0])
from .base import (ExecutorException,
Protocol,
RefTestExecutor,
RefTestImplementation,
TestExecutor,
TestharnessExecutor,
WdspecExecutor,
WdspecRun,
WebDriverProtocol,
extra_timeout,
testharness_result_converter,
reftest_result_converter,
strip_server)
from ..testrunner import Stop
from ..webdriver_server import GeckoDriverServer
def do_delayed_imports():
global errors, marionette
# Marionette client used to be called marionette, recently it changed
# to marionette_driver for unfathomable reasons
try:
import marionette
from marionette import errors
except ImportError:
from marionette_driver import marionette, errors
class MarionetteProtocol(Protocol):
def __init__(self, executor, browser, timeout_multiplier=1):
do_delayed_imports()
Protocol.__init__(self, executor, browser)
self.marionette = None
self.marionette_port = browser.marionette_port
self.timeout_multiplier = timeout_multiplier
self.timeout = None
self.runner_handle = None
def setup(self, runner):
"""Connect to browser via Marionette."""
Protocol.setup(self, runner)
self.logger.debug("Connecting to Marionette on port %i" % self.marionette_port)
startup_timeout = marionette.Marionette.DEFAULT_STARTUP_TIMEOUT * self.timeout_multiplier
self.marionette = marionette.Marionette(host='localhost',
port=self.marionette_port,
socket_timeout=None,
startup_timeout=startup_timeout)
# XXX Move this timeout somewhere
self.logger.debug("Waiting for Marionette connection")
while True:
success = self.marionette.wait_for_port(60 * self.timeout_multiplier)
            # When running in a debugger, wait indefinitely for Firefox to start
if success or self.executor.debug_info is None:
break
session_started = False
if success:
try:
self.logger.debug("Starting Marionette session")
self.marionette.start_session()
except Exception as e:
self.logger.warning("Starting marionette session failed: %s" % e)
else:
self.logger.debug("Marionette session started")
session_started = True
if not success or not session_started:
self.logger.warning("Failed to connect to Marionette")
self.executor.runner.send_message("init_failed")
else:
try:
self.after_connect()
except Exception:
self.logger.warning("Post-connection steps failed")
self.logger.error(traceback.format_exc())
self.executor.runner.send_message("init_failed")
else:
self.executor.runner.send_message("init_succeeded")
def teardown(self):
try:
self.marionette._request_in_app_shutdown()
self.marionette.delete_session(send_request=False, reset_session_id=True)
except Exception:
# This is typically because the session never started
pass
if self.marionette is not None:
del self.marionette
@property
def is_alive(self):
"""Check if the Marionette connection is still active."""
try:
self.marionette.current_window_handle
except Exception:
return False
return True
def after_connect(self):
self.load_runner(self.executor.last_environment["protocol"])
def set_timeout(self, timeout):
"""Set the Marionette script timeout.
:param timeout: Script timeout in seconds
"""
self.marionette.timeout.script = timeout
self.timeout = timeout
def load_runner(self, protocol):
# Check if we previously had a test window open, and if we did make sure it's closed
self.marionette.execute_script("if (window.wrappedJSObject.win) {window.wrappedJSObject.win.close()}")
url = urlparse.urljoin(self.executor.server_url(protocol), "/testharness_runner.html")
self.logger.debug("Loading %s" % url)
self.runner_handle = self.marionette.current_window_handle
try:
self.marionette.navigate(url)
except Exception as e:
            self.logger.critical(
                "Loading initial page %s failed. Ensure that there "
                "are no other programs bound to this port and "
                "that your firewall rules or network setup does not "
                "prevent access.\n%s" % (url, traceback.format_exc()))
self.marionette.execute_script(
"document.title = '%s'" % threading.current_thread().name.replace("'", '"'))
def close_old_windows(self, protocol):
handles = self.marionette.window_handles
runner_handle = None
try:
handles.remove(self.runner_handle)
runner_handle = self.runner_handle
except ValueError:
            # The runner window probably changed id but we can restore it.
            # This isn't supposed to happen, but marionette ids are not yet
            # stable.  We assume that the first handle returned corresponds to
            # the runner, but it hopefully doesn't matter too much if that
            # assumption is wrong, since we reload the runner in that tab
            # anyway.
runner_handle = handles.pop(0)
for handle in handles:
self.marionette.switch_to_window(handle)
self.marionette.close()
self.marionette.switch_to_window(runner_handle)
if runner_handle != self.runner_handle:
self.load_runner(protocol)
def wait(self):
socket_timeout = self.marionette.client.sock.gettimeout()
if socket_timeout:
self.marionette.timeout.script = socket_timeout / 2
self.marionette.switch_to_window(self.runner_handle)
while True:
try:
self.marionette.execute_async_script("")
except errors.ScriptTimeoutException:
self.logger.debug("Script timed out")
pass
except (socket.timeout, IOError):
self.logger.debug("Socket closed")
break
except Exception as e:
self.logger.error(traceback.format_exc(e))
break
def on_environment_change(self, old_environment, new_environment):
        # Unset all the old prefs
for name in old_environment.get("prefs", {}).iterkeys():
value = self.executor.original_pref_values[name]
if value is None:
self.clear_user_pref(name)
else:
self.set_pref(name, value)
for name, value in new_environment.get("prefs", {}).iteritems():
self.executor.original_pref_values[name] = self.get_pref(name)
self.set_pref(name, value)
def set_pref(self, name, value):
if value.lower() not in ("true", "false"):
try:
int(value)
except ValueError:
value = "'%s'" % value
else:
value = value.lower()
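        # Examples of the coercion above (values are illustrative): "true" and
        # "false" are lowercased to JS booleans, "42" is kept as a number, and
        # anything else (e.g. "foo") is quoted as 'foo' for the script below.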
self.logger.info("Setting pref %s (%s)" % (name, value))
script = """
let prefInterface = Components.classes["@mozilla.org/preferences-service;1"]
.getService(Components.interfaces.nsIPrefBranch);
let pref = '%s';
let type = prefInterface.getPrefType(pref);
let value = %s;
switch(type) {
case prefInterface.PREF_STRING:
prefInterface.setCharPref(pref, value);
break;
case prefInterface.PREF_BOOL:
prefInterface.setBoolPref(pref, value);
break;
case prefInterface.PREF_INT:
prefInterface.setIntPref(pref, value);
break;
}
""" % (name, value)
with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
self.marionette.execute_script(script)
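    # A minimal standalone sketch of the value coercion performed by set_pref
    # above (illustrative only; "coerce_pref_value" is a hypothetical name, not
    # part of this module): booleans stay as lowercase JS literals, integers
    # pass through unquoted, and everything else becomes a quoted JS string.
    #
    #     def coerce_pref_value(value):
    #         if value.lower() in ("true", "false"):
    #             return value.lower()
    #         try:
    #             int(value)
    #             return value
    #         except ValueError:
    #             return "'%s'" % value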
def clear_user_pref(self, name):
self.logger.info("Clearing pref %s" % (name))
script = """
let prefInterface = Components.classes["@mozilla.org/preferences-service;1"]
.getService(Components.interfaces.nsIPrefBranch);
let pref = '%s';
prefInterface.clearUserPref(pref);
""" % name
with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
self.marionette.execute_script(script)
def get_pref(self, name):
script = """
let prefInterface = Components.classes["@mozilla.org/preferences-service;1"]
.getService(Components.interfaces.nsIPrefBranch);
let pref = '%s';
let type = prefInterface.getPrefType(pref);
switch(type) {
case prefInterface.PREF_STRING:
return prefInterface.getCharPref(pref);
case prefInterface.PREF_BOOL:
return prefInterface.getBoolPref(pref);
case prefInterface.PREF_INT:
return prefInterface.getIntPref(pref);
case prefInterface.PREF_INVALID:
return null;
}
""" % name
with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
            return self.marionette.execute_script(script)
def clear_origin(self, url):
self.logger.info("Clearing origin %s" % (url))
script = """
let url = '%s';
let uri = Components.classes["@mozilla.org/network/io-service;1"]
.getService(Ci.nsIIOService)
.newURI(url);
let ssm = Components.classes["@mozilla.org/scriptsecuritymanager;1"]
.getService(Ci.nsIScriptSecurityManager);
let principal = ssm.createCodebasePrincipal(uri, {});
let qms = Components.classes["@mozilla.org/dom/quota-manager-service;1"]
.getService(Components.interfaces.nsIQuotaManagerService);
qms.clearStoragesForPrincipal(principal, "default", true);
""" % url
with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
self.marionette.execute_script(script)
class ExecuteAsyncScriptRun(object):
def __init__(self, logger, func, protocol, url, timeout):
self.logger = logger
self.result = (None, None)
self.protocol = protocol
self.marionette = protocol.marionette
self.func = func
self.url = url
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
        index = self.url.rfind("/storage/")
if index != -1:
# Clear storage
self.protocol.clear_origin(self.url)
timeout = self.timeout
try:
if timeout is not None:
if timeout + extra_timeout != self.protocol.timeout:
self.protocol.set_timeout(timeout + extra_timeout)
else:
# We just want it to never time out, really, but marionette doesn't
# make that possible. It also seems to time out immediately if the
# timeout is set too high. This works at least.
self.protocol.set_timeout(2**28 - 1)
except IOError:
self.logger.error("Lost marionette connection before starting test")
return Stop
        executor = threading.Thread(target=self._run)
executor.start()
if timeout is not None:
wait_timeout = timeout + 2 * extra_timeout
else:
wait_timeout = None
flag = self.result_flag.wait(wait_timeout)
if self.result == (None, None):
self.logger.debug("Timed out waiting for a result")
self.result = False, ("EXTERNAL-TIMEOUT", None)
elif self.result[1] is None:
# We didn't get any data back from the test, so check if the
# browser is still responsive
if self.protocol.is_alive:
self.result = False, ("ERROR", None)
else:
self.result = False, ("CRASH", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.marionette, self.url, self.timeout)
except errors.ScriptTimeoutException:
self.logger.debug("Got a marionette timeout")
self.result = False, ("EXTERNAL-TIMEOUT", None)
except (socket.timeout, IOError):
# This can happen on a crash
# Also, should check after the test if the firefox process is still running
# and otherwise ignore any other result and set it to crash
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message", "")
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("ERROR", e)
finally:
self.result_flag.set()
class MarionetteTestharnessExecutor(TestharnessExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
close_after_done=True, debug_info=None, **kwargs):
"""Marionette-based executor for testharness.js tests"""
TestharnessExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = MarionetteProtocol(self, browser, timeout_multiplier)
        with open(os.path.join(here, "testharness_marionette.js")) as f:
            self.script = f.read()
self.close_after_done = close_after_done
self.window_id = str(uuid.uuid4())
self.original_pref_values = {}
if marionette is None:
do_delayed_imports()
def is_alive(self):
return self.protocol.is_alive
def on_environment_change(self, new_environment):
self.protocol.on_environment_change(self.last_environment, new_environment)
if new_environment["protocol"] != self.last_environment["protocol"]:
self.protocol.load_runner(new_environment["protocol"])
def do_test(self, test):
timeout = (test.timeout * self.timeout_multiplier if self.debug_info is None
else None)
success, data = ExecuteAsyncScriptRun(self.logger,
self.do_testharness,
self.protocol,
self.test_url(test),
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_testharness(self, marionette, url, timeout):
if self.close_after_done:
marionette.execute_script("if (window.wrappedJSObject.win) {window.wrappedJSObject.win.close()}")
self.protocol.close_old_windows(self.protocol)
if timeout is not None:
timeout_ms = str(timeout * 1000)
else:
timeout_ms = "null"
script = self.script % {"abs_url": url,
"url": strip_server(url),
"window_id": self.window_id,
"timeout_multiplier": self.timeout_multiplier,
"timeout": timeout_ms,
"explicit_timeout": timeout is None}
rv = marionette.execute_async_script(script, new_sandbox=False)
return rv
class MarionetteRefTestExecutor(RefTestExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, close_after_done=True,
debug_info=None, reftest_internal=False,
reftest_screenshot="unexpected",
group_metadata=None, **kwargs):
"""Marionette-based executor for reftests"""
RefTestExecutor.__init__(self,
browser,
server_config,
screenshot_cache=screenshot_cache,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = MarionetteProtocol(self, browser)
self.implementation = (InternalRefTestImplementation
if reftest_internal
else RefTestImplementation)(self)
self.implementation_kwargs = ({"screenshot": reftest_screenshot} if
reftest_internal else {})
self.close_after_done = close_after_done
self.has_window = False
self.original_pref_values = {}
self.group_metadata = group_metadata
with open(os.path.join(here, "reftest.js")) as f:
self.script = f.read()
with open(os.path.join(here, "reftest-wait_marionette.js")) as f:
self.wait_script = f.read()
def setup(self, runner):
super(self.__class__, self).setup(runner)
self.implementation.setup(**self.implementation_kwargs)
def teardown(self):
self.implementation.teardown()
handle = self.protocol.marionette.window_handles[0]
self.protocol.marionette.switch_to_window(handle)
super(self.__class__, self).teardown()
def is_alive(self):
return self.protocol.is_alive
def on_environment_change(self, new_environment):
self.protocol.on_environment_change(self.last_environment, new_environment)
def do_test(self, test):
if not isinstance(self.implementation, InternalRefTestImplementation):
if self.close_after_done and self.has_window:
self.protocol.marionette.close()
self.protocol.marionette.switch_to_window(
self.protocol.marionette.window_handles[-1])
self.has_window = False
if not self.has_window:
self.protocol.marionette.execute_script(self.script)
self.protocol.marionette.switch_to_window(self.protocol.marionette.window_handles[-1])
self.has_window = True
result = self.implementation.run_test(test)
return self.convert_result(test, result)
def screenshot(self, test, viewport_size, dpi):
# https://github.com/w3c/wptrunner/issues/166
assert viewport_size is None
assert dpi is None
timeout = self.timeout_multiplier * test.timeout if self.debug_info is None else None
test_url = self.test_url(test)
return ExecuteAsyncScriptRun(self.logger,
self._screenshot,
self.protocol,
test_url,
timeout).run()
def _screenshot(self, marionette, url, timeout):
marionette.navigate(url)
marionette.execute_async_script(self.wait_script)
screenshot = marionette.screenshot(full=False)
        # strip off the "data:image/png;base64," prefix of the data URL
if screenshot.startswith("data:image/png;base64,"):
screenshot = screenshot.split(",", 1)[1]
return screenshot
class InternalRefTestImplementation(object):
def __init__(self, executor):
self.timeout_multiplier = executor.timeout_multiplier
self.executor = executor
@property
def logger(self):
return self.executor.logger
def setup(self, screenshot="unexpected"):
data = {"screenshot": screenshot}
if self.executor.group_metadata is not None:
data["urlCount"] = {urlparse.urljoin(self.executor.server_url(key[0]), key[1]):value
for key, value in self.executor.group_metadata.get("url_count", {}).iteritems()
if value > 1}
self.executor.protocol.marionette.set_context(self.executor.protocol.marionette.CONTEXT_CHROME)
self.executor.protocol.marionette._send_message("reftest:setup", data)
def run_test(self, test):
viewport_size = test.viewport_size
dpi = test.dpi
references = self.get_references(test)
rv = self.executor.protocol.marionette._send_message("reftest:run",
{"test": self.executor.test_url(test),
"references": references,
"expected": test.expected(),
"timeout": test.timeout * 1000})["value"]
return rv
def get_references(self, node):
rv = []
for item, relation in node.references:
rv.append([self.executor.test_url(item), self.get_references(item), relation])
return rv
def teardown(self):
try:
self.executor.protocol.marionette._send_message("reftest:teardown", {})
self.executor.protocol.marionette.set_context(self.executor.protocol.marionette.CONTEXT_CONTENT)
except socket.error:
pass
class GeckoDriverProtocol(WebDriverProtocol):
server_cls = GeckoDriverServer
class MarionetteWdspecExecutor(WdspecExecutor):
protocol_cls = GeckoDriverProtocol
|
test_boostrap_link_via_app_data.py
|
from __future__ import absolute_import, unicode_literals
import os
import sys
from stat import S_IREAD, S_IRGRP, S_IROTH, S_IWUSR
from threading import Thread
import pytest
from virtualenv.discovery.py_info import PythonInfo
from virtualenv.info import fs_supports_symlink
from virtualenv.run import cli_run
from virtualenv.seed.wheels.embed import BUNDLE_FOLDER, BUNDLE_SUPPORT
from virtualenv.util.six import ensure_text
from virtualenv.util.subprocess import Popen
@pytest.mark.slow
@pytest.mark.parametrize("copies", [False, True] if fs_supports_symlink() else [True])
def test_seed_link_via_app_data(tmp_path, coverage_env, current_fastest, copies):
current = PythonInfo.current_system()
bundle_ver = BUNDLE_SUPPORT[current.version_release_str]
create_cmd = [
ensure_text(str(tmp_path / "en v")), # space in the name to ensure generated scripts work when path has space
"--seeder",
"app-data",
"--extra-search-dir",
ensure_text(str(BUNDLE_FOLDER)),
"--download",
"--pip",
bundle_ver["pip"].split("-")[1],
"--setuptools",
bundle_ver["setuptools"].split("-")[1],
"--reset-app-data",
"--creator",
current_fastest,
"-vv",
]
if not copies:
create_cmd.append("--symlink-app-data")
result = cli_run(create_cmd)
coverage_env()
assert result
    # uninstalling pip/setuptools later in the test should still leave us with a usable env
site_package = result.creator.purelib
pip = site_package / "pip"
setuptools = site_package / "setuptools"
files_post_first_create = list(site_package.iterdir())
assert pip in files_post_first_create
assert setuptools in files_post_first_create
for pip_exe in [
result.creator.script_dir / "pip{}{}".format(suffix, result.creator.exe.suffix)
for suffix in (
"",
"{}".format(current.version_info.major),
"{}.{}".format(current.version_info.major, current.version_info.minor),
"-{}.{}".format(current.version_info.major, current.version_info.minor),
)
]:
assert pip_exe.exists()
process = Popen([ensure_text(str(pip_exe)), "--version", "--disable-pip-version-check"])
_, __ = process.communicate()
assert not process.returncode
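    # For illustration (assuming CPython 3.8 on a POSIX platform), the loop above
    # checks the generated console scripts pip, pip3, pip3.8 and pip-3.8; on
    # Windows the creator's ".exe" suffix is appended to each name.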
remove_cmd = [
str(result.creator.script("pip")),
"--verbose",
"--disable-pip-version-check",
"uninstall",
"-y",
"setuptools",
]
process = Popen(remove_cmd)
_, __ = process.communicate()
assert not process.returncode
assert site_package.exists()
files_post_first_uninstall = list(site_package.iterdir())
assert pip in files_post_first_uninstall
assert setuptools not in files_post_first_uninstall
# check we can run it again and will work - checks both overwrite and reuse cache
result = cli_run(create_cmd)
coverage_env()
assert result
files_post_second_create = list(site_package.iterdir())
assert files_post_first_create == files_post_second_create
    # Windows does not allow removing an executable while it is running, so when
    # uninstalling pip we need to do it via python -m pip
remove_cmd = [str(result.creator.exe), "-m", "pip"] + remove_cmd[1:]
process = Popen(remove_cmd + ["pip", "wheel"])
_, __ = process.communicate()
assert not process.returncode
# pip is greedy here, removing all packages removes the site-package too
if site_package.exists():
purelib = result.creator.purelib
patch_files = {purelib / "{}.{}".format("_virtualenv", i) for i in ("py", "pyc", "pth")}
patch_files.add(purelib / "__pycache__")
post_run = set(site_package.iterdir()) - patch_files
assert not post_run, "\n".join(str(i) for i in post_run)
if sys.version_info[0:2] == (3, 4) and os.environ.get(str("PIP_REQ_TRACKER")):
os.environ.pop(str("PIP_REQ_TRACKER"))
@pytest.fixture()
def read_only_app_data(temp_app_data):
temp_app_data.mkdir()
try:
os.chmod(str(temp_app_data), S_IREAD | S_IRGRP | S_IROTH)
yield temp_app_data
finally:
os.chmod(str(temp_app_data), S_IWUSR | S_IREAD)
@pytest.mark.skipif(sys.platform == "win32", reason="Windows only applies R/O to files")
def test_base_bootstrap_link_via_app_data_not_writable(tmp_path, current_fastest, read_only_app_data, monkeypatch):
dest = tmp_path / "venv"
result = cli_run(["--seeder", "app-data", "--creator", current_fastest, "--reset-app-data", "-vv", str(dest)])
assert result
@pytest.mark.slow
@pytest.mark.parametrize("pkg", ["pip", "setuptools", "wheel"])
def test_base_bootstrap_link_via_app_data_no(tmp_path, coverage_env, current_fastest, session_app_data, pkg):
create_cmd = [str(tmp_path), "--seeder", "app-data", "--no-{}".format(pkg)]
result = cli_run(create_cmd)
assert not (result.creator.purelib / pkg).exists()
for key in {"pip", "setuptools", "wheel"} - {pkg}:
assert (result.creator.purelib / key).exists()
def test_app_data_parallel_ok(tmp_path, temp_app_data):
exceptions = _run_parallel_threads(tmp_path)
assert not exceptions, "\n".join(exceptions)
def test_app_data_parallel_fail(tmp_path, temp_app_data, mocker):
mocker.patch("virtualenv.seed.embed.via_app_data.pip_install.base.PipInstall.build_image", side_effect=RuntimeError)
exceptions = _run_parallel_threads(tmp_path)
assert len(exceptions) == 2
for exception in exceptions:
assert exception.startswith("failed to build image wheel because:\nTraceback")
assert "RuntimeError" in exception, exception
def _run_parallel_threads(tmp_path):
exceptions = []
def _run(name):
try:
cli_run(["--seeder", "app-data", str(tmp_path / name), "--no-pip", "--no-setuptools"])
except Exception as exception: # noqa
as_str = str(exception)
exceptions.append(as_str)
threads = [Thread(target=_run, args=("env{}".format(i),)) for i in range(1, 3)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return exceptions
|
util.py
|
"""
:mod:`miraiml.util` provides utility functions that are used by higher level
modules.
"""
from threading import Thread
import random as rnd
import pickle
import string
import math
def load(path):
"""
A clean `pickle.load` wrapper for binary files.
:type path: string
:param path: The path of the binary file to be loaded.
:rtype: object
:returns: The loaded object.
"""
return pickle.load(open(path, 'rb'))
def dump(obj, path):
"""
    Writes the object to disk on a background thread so that the caller does not block.
:type obj: object
:param obj: The object to be dumped to the binary file.
:type path: string
:param path: The path of the binary file to be written.
"""
Thread(target=lambda: pickle.dump(obj, open(path, 'wb'))).start()
def sample_random_len(lst):
"""
    Returns a sample of random size from the list ``lst``. The returned list
    contains at least one element, unless ``lst`` is empty, in which case an
    empty list is returned.
    :type lst: list
    :param lst: A list containing the elements to be sampled.
    :rtype: list
:returns: The randomly sampled elements from ``lst``.
"""
if len(lst) == 0:
return []
return rnd.sample(lst, max(1, math.ceil(rnd.random()*len(lst))))
__valid_filename_chars__ = frozenset('-_.() %s%s' % (string.ascii_letters,
string.digits))
def is_valid_filename(filename):
"""
Tells whether a string can be used as a safe file name or not.
:type filename: str
:param filename: The file name.
:rtype: bool
:returns: Whether ``filename`` is a valid file name or not.
"""
filename = filename.strip()
if len(filename) == 0 or '..' in filename or filename == '.':
return False
for char in filename:
if char not in __valid_filename_chars__:
return False
return True
__valid_pipeline_chars__ = frozenset('_%s%s' % (string.ascii_letters,
string.digits))
def is_valid_pipeline_name(pipeline_name):
"""
Tells whether a string can be used to compose pipelines or not.
:type pipeline_name: str
    :param pipeline_name: The pipeline name.
:rtype: bool
:returns: Whether ``pipeline_name`` is a valid name or not.
"""
if len(pipeline_name) == 0 or '__' in pipeline_name \
or pipeline_name[0] in string.digits:
return False
for char in pipeline_name:
if char not in __valid_pipeline_chars__:
return False
return True
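# A minimal, self-contained smoke test of the helpers above. This is an
# illustrative sketch, not part of the original module; the file name, values
# and the 0.5s wait for the background dump thread are arbitrary assumptions.
if __name__ == "__main__":
    import os
    import tempfile
    import time

    # Pure helpers: filename/pipeline-name validation and random-length sampling.
    assert is_valid_filename("model-01.pkl")
    assert not is_valid_filename("../evil")
    assert is_valid_pipeline_name("step_1")
    assert not is_valid_pipeline_name("1_step")
    assert 1 <= len(sample_random_len(list(range(10)))) <= 10

    # dump() writes on a background thread and returns no handle, so wait
    # briefly before loading the file back.
    path = os.path.join(tempfile.mkdtemp(), "obj.pkl")
    dump({"answer": 42}, path)
    time.sleep(0.5)
    assert load(path) == {"answer": 42}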
|
_websocket.py
|
from __future__ import absolute_import
import websocket
import threading
import logging
import json
import random
import time
class WebsocketHandler(object):
"""WebsocketHandler handles websocket connections to a ConnectorDB server. It allows
subscribing and unsubscribing from inputs/outputs. The handler also deals with dropped
connections, repeatedly attempting to reconnect to the server whenever connection is lost."""
"""The maximum time to wait between reconnection attempts"""
reconnect_time_max_seconds = 8 * 60.0
"""Multiply the wait time by this factor when a reconnect fails"""
reconnect_time_backoff_multiplier = 1.5
"""The time in seconds to wait before an initial attempt at reconnecting"""
reconnect_time_starting_seconds = 1.0
"""The time between pings that results in a connection timeout"""
connection_ping_timeout = 60 * 2
def __init__(self, server_url, basic_auth):
"""
The handler is initialized as follows::
from requests.auth import HTTPBasicAuth
req = HTTPBasicAuth(username,password)
ws = WebsocketHandler("https://connectordb.com",req)
"""
# The websocket is at /api/v1/websocket, and the server_url includes the /api/v1/
server_url += "websocket"
# First we must get the websocket URI from the server URL
self.ws_url = "wss://" + server_url[8:]
if server_url.startswith("http://"):
self.ws_url = "ws://" + server_url[7:]
self.setauth(basic_auth)
# Set up the variable which will hold all of the subscriptions
self.subscriptions = {}
self.subscription_lock = threading.Lock()
# The server periodically sends ping messages during websocket connection.
# we keep track of the pings so that we notice loss of connection
self.lastpingtime = time.time()
self.pingtimer = None
# Now set up the websocket
self.ws = None
self.ws_thread = None # The thread where the websocket runs
self.ws_openlock = threading.Lock()
self.ws_sendlock = threading.Lock()
# Set up the websocket status
self._status = "disconnected"
self._status_lock = threading.Lock()
# Set up the reconnect time
self.reconnect_time = self.reconnect_time_starting_seconds
# Set up the times that we were connected and disconnected. These allow for
# setting up reconnect delays correctly
self.connected_time = 0
self.disconnected_time = 0
def setauth(self,basic_auth):
""" setauth can be used during runtime to make sure that authentication is reset.
it can be used when changing passwords/apikeys to make sure reconnects succeed """
self.headers = []
# If we have auth
if basic_auth is not None:
# we use a cheap hack to get the basic auth header out of the auth object.
# This snippet ends up with us having an array of the necessary headers
# to perform authentication.
class auth_extractor():
def __init__(self):
self.headers = {}
extractor = auth_extractor()
basic_auth(extractor)
for header in extractor.headers:
self.headers.append("%s: %s" % (header, extractor.headers[header]))
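            # For example (illustrative, assuming requests' HTTPBasicAuth("user", "pass")
            # was passed in): the extractor ends up holding
            # {"Authorization": "Basic dXNlcjpwYXNz"}, so self.headers becomes
            # ["Authorization: Basic dXNlcjpwYXNz"].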
@property
def status(self):
status = ""
with self._status_lock:
status = self._status
return status
@status.setter
def status(self, newstatus):
with self._status_lock:
self._status = newstatus
logging.debug("ConnectorDB:WS:STATUS: %s", newstatus)
def send(self, cmd):
"""Send the given command thru the websocket"""
with self.ws_sendlock:
self.ws.send(json.dumps(cmd))
def insert(self, stream, data):
"""Insert the given datapoints into the stream"""
self.send({"cmd": "insert", "arg": stream, "d": data})
def subscribe(self, stream, callback, transform=""):
"""Given a stream, a callback and an optional transform, sets up the subscription"""
if self.status == "disconnected" or self.status == "disconnecting" or self.status == "connecting":
self.connect()
        if self.status != "connected":
return False
logging.debug("Subscribing to %s", stream)
self.send({"cmd": "subscribe", "arg": stream, "transform": transform})
with self.subscription_lock:
self.subscriptions[stream + ":" + transform] = callback
return True
def unsubscribe(self, stream, transform=""):
"""Unsubscribe from the given stream (with the optional transform)"""
        if self.status != "connected":
return False
logging.debug("Unsubscribing from %s", stream)
self.send(
{"cmd": "unsubscribe",
"arg": stream,
"transform": transform})
self.subscription_lock.acquire()
del self.subscriptions[stream + ":" + transform]
        if len(self.subscriptions) == 0:
self.subscription_lock.release()
self.disconnect()
else:
self.subscription_lock.release()
def connect(self):
"""Attempt to connect to the websocket - and returns either True or False depending on if
the connection was successful or not"""
# Wait for the lock to be available (ie, the websocket is not being used (yet))
self.ws_openlock.acquire()
self.ws_openlock.release()
if self.status == "connected":
return True # Already connected
if self.status == "disconnecting":
# If currently disconnecting, wait a moment, and retry connect
time.sleep(0.1)
return self.connect()
if self.status == "disconnected" or self.status == "reconnecting":
self.ws = websocket.WebSocketApp(self.ws_url,
header=self.headers,
on_message=self.__on_message,
on_ping=self.__on_ping,
on_open=self.__on_open,
on_close=self.__on_close,
on_error=self.__on_error)
self.ws_thread = threading.Thread(target=self.ws.run_forever)
self.ws_thread.daemon = True
self.status = "connecting"
self.ws_openlock.acquire()
self.ws_thread.start()
self.ws_openlock.acquire()
self.ws_openlock.release()
return self.status == "connected"
def disconnect(self):
if self.status == "connected":
self.status = "disconnecting"
with self.subscription_lock:
self.subscriptions = {}
self.ws.close()
self.__on_close(self.ws)
def __reconnect(self):
"""This is called when a connection is lost - it attempts to reconnect to the server"""
self.status = "reconnecting"
# Reset the disconnect time after 15 minutes
if self.disconnected_time - self.connected_time > 15 * 60:
self.reconnect_time = self.reconnect_time_starting_seconds
else:
self.reconnect_time *= self.reconnect_time_backoff_multiplier
if self.reconnect_time > self.reconnect_time_max_seconds:
self.reconnect_time = self.reconnect_time_max_seconds
# We want to add some randomness to the reconnect rate - necessary so that we don't pound the server
# if it goes down
self.reconnect_time *= 1 + random.uniform(-0.2, 0.2)
if self.reconnect_time < self.reconnect_time_starting_seconds:
self.reconnect_time = self.reconnect_time_starting_seconds
logging.warn("ConnectorDB:WS: Attempting to reconnect in %fs",
self.reconnect_time)
self.reconnector = threading.Timer(self.reconnect_time,
self.__reconnect_fnc)
self.reconnector.daemon = True
self.reconnector.start()
def __reconnect_fnc(self):
"""This function is called by reconnect after the time delay"""
if self.connect():
self.__resubscribe()
else:
self.__reconnect()
def __resubscribe(self):
"""Send subscribe command for all existing subscriptions. This allows to resume a connection
that was closed"""
with self.subscription_lock:
for sub in self.subscriptions:
logging.debug("Resubscribing to %s", sub)
stream_transform = sub.split(":", 1)
self.send({
"cmd": "subscribe",
"arg": stream_transform[0],
"transform": stream_transform[1]
})
def __on_open(self, ws):
"""Called when the websocket is opened"""
logging.debug("ConnectorDB: Websocket opened")
# Connection success - decrease the wait time for next connection
self.reconnect_time /= self.reconnect_time_backoff_multiplier
self.status = "connected"
self.lastpingtime = time.time()
self.__ensure_ping()
self.connected_time = time.time()
# Release the lock that connect called
self.ws_openlock.release()
def __on_close(self, ws):
"""Called when the websocket is closed"""
if self.status == "disconnected":
return # This can be double-called on disconnect
logging.debug("ConnectorDB:WS: Websocket closed")
# Turn off the ping timer
if self.pingtimer is not None:
self.pingtimer.cancel()
self.disconnected_time = time.time()
if self.status == "disconnecting":
self.status = "disconnected"
elif self.status == "connected":
self.__reconnect()
def __on_error(self, ws, err):
"""Called when there is an error in the websocket"""
logging.debug("ConnectorDB:WS: Connection Error")
if self.status == "connecting":
self.status = "errored"
self.ws_openlock.release() # Release the lock of connecting
def __on_message(self, ws, msg):
"""This function is called whenever there is a message received from the server"""
msg = json.loads(msg)
logging.debug("ConnectorDB:WS: Msg '%s'", msg["stream"])
# Build the subcription key
stream_key = msg["stream"] + ":"
if "transform" in msg:
stream_key += msg["transform"]
self.subscription_lock.acquire()
if stream_key in self.subscriptions:
subscription_function = self.subscriptions[stream_key]
self.subscription_lock.release()
fresult = subscription_function(msg["stream"], msg["data"])
if fresult is True:
# This is a special result - if the subscription function of a downlink returns True,
# then the datapoint is acknowledged automatically (ie, reinserted in non-downlink stream)
fresult = msg["data"]
if fresult is not False and fresult is not None and msg["stream"].endswith(
"/downlink") and msg["stream"].count("/") == 3:
# If the above conditions are true, it means that the datapoints were from a downlink,
# and the subscriber function chooses to acknowledge them, so we reinsert them.
self.insert(msg["stream"][:-9], fresult)
else:
self.subscription_lock.release()
logging.warn(
"ConnectorDB:WS: Msg '%s' not subscribed! Subscriptions: %s",
msg["stream"], list(self.subscriptions.keys()))
def __on_ping(self, ws, data):
"""The server periodically sends us websocket ping messages to keep the connection alive. To
ensure that the connection to the server is still active, we memorize the most recent ping's time
and we periodically ensure that a ping was received in __ensure_ping"""
logging.debug("ConnectorDB:WS: ping")
self.lastpingtime = time.time()
def __ensure_ping(self):
"""Each time the server sends a ping message, we record the timestamp. If we haven't received a ping
within the given interval, then we assume that the connection was lost, close the websocket and
attempt to reconnect"""
logging.debug("ConnectorDB:WS: pingcheck")
if (time.time() - self.lastpingtime > self.connection_ping_timeout):
logging.warn("ConnectorDB:WS: Websocket ping timed out!")
if self.ws is not None:
self.ws.close()
self.__on_close(self.ws)
else:
# reset the ping timer
self.pingtimer = threading.Timer(self.connection_ping_timeout,
self.__ensure_ping)
self.pingtimer.daemon = True
self.pingtimer.start()
def __del__(self):
"""Make sure that all threads shut down when needed"""
self.disconnect()
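# A minimal usage sketch of WebsocketHandler (illustrative only; the server URL,
# credentials and stream path below are placeholders, not values this module
# defines). The callback runs on the websocket thread; returning True from a
# downlink subscription auto-acknowledges the datapoints as described in
# __on_message above.
#
#     from requests.auth import HTTPBasicAuth
#
#     def on_data(stream, data):
#         print(stream, data)
#         return False
#
#     ws = WebsocketHandler("https://connectordb.com/api/v1/", HTTPBasicAuth("user", "apikey"))
#     if ws.subscribe("user/device/stream", on_data):
#         ...  # keep the process alive; datapoints arrive via on_data
#     ws.unsubscribe("user/device/stream")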
|
test_subprocess.py
|
import unittest
from test import script_helper
from test import support
import subprocess
import sys
import signal
import io
import locale
import os
import errno
import tempfile
import time
import re
import selectors
import sysconfig
import warnings
import select
import shutil
import gc
import textwrap
try:
import threading
except ImportError:
threading = None
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
try:
mkstemp = tempfile.mkstemp
except AttributeError:
# tempfile.mkstemp is not available
def mkstemp():
"""Replacement for mkstemp, calling mktemp."""
fname = tempfile.mktemp()
return os.open(fname, os.O_RDWR|os.O_CREAT), fname
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
support.reap_children()
def tearDown(self):
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(subprocess._active, "subprocess._active not empty")
def assertStderrEqual(self, stderr, expected, msg=None):
# In a debug build, stuff like "[6580 refs]" is printed to stderr at
# shutdown time. That frustrates tests trying to check stderr produced
# from a spawned Python process.
actual = support.strip_python_stderr(stderr)
# strip_python_stderr also strips whitespace, so we do too.
expected = expected.strip()
self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(0)"])
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(FileNotFoundError, self._assert_python, pre_args,
executable="doesnotexist")
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
original_cwd = os.getcwd()
os.chdir(cwd)
cwd = os.getcwd()
os.chdir(original_cwd)
return cwd
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"sys.stdout.write(os.getcwd()); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode("utf-8")))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with script_helper.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
self.addCleanup(p.stderr.close)
self.assertStderrEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertStderrEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"strawberry")
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.addCleanup(p.stdout.close)
self.assertStderrEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') is not None,
'the python library cannot be loaded '
'with an empty environment')
def test_empty_env(self):
with subprocess.Popen([sys.executable, "-c",
'import os; '
'print(list(os.environ.keys()))'],
stdout=subprocess.PIPE,
env={}) as p:
stdout, stderr = p.communicate()
self.assertIn(stdout.strip(),
(b"[]",
# Mac OS X adds __CF_USER_TEXT_ENCODING variable to an empty
# environment
b"['__CF_USER_TEXT_ENCODING']"))
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertStderrEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_ouput(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen((sys.executable, "-c", "pass"), **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("x" * %d);'
'sys.stdout.write(sys.stdin.read())' %
support.PIPE_MAX_SIZE],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"a" * support.PIPE_MAX_SIZE
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertStderrEqual(stderr, b"")
def test_universal_newlines(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen([sys.executable, "-c", "pass"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
        # A Python debug build pushes something like "[42442 refs]\n"
        # to stderr when the subprocess exits.
# Don't use assertStderrEqual because it strips CR and LF from output.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# UTF-16 and UTF-32-BE are sufficient to check both with BOM and
# without, and UTF-16 and UTF-32.
import _bootlocale
for encoding in ['utf-16', 'utf-32-be']:
old_getpreferredencoding = _bootlocale.getpreferredencoding
# Indirectly via io.TextIOWrapper, Popen() defaults to
# locale.getpreferredencoding(False) and earlier in Python 3.2 to
# locale.getpreferredencoding().
def getpreferredencoding(do_setlocale=True):
return encoding
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
try:
_bootlocale.getpreferredencoding = getpreferredencoding
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args, universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = popen.communicate(input='')
finally:
_bootlocale.getpreferredencoding = old_getpreferredencoding
self.assertEqual(stdout, '1\n2\n3\n4')
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, support.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen([sys.executable, "-c", "pass"])
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
# Some heavily loaded buildbots (sparc Debian 3.x) require this much
# time to start.
self.assertEqual(p.wait(timeout=3), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen([sys.executable, "-c", "pass"], "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen([sys.executable, "-c", "pass"], None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None)
self.assertEqual(p.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
# subprocess may deadlock with bufsize=1, see issue #21332
with subprocess.Popen([sys.executable, "-c", "import sys;"
"sys.stdout.write(sys.stdin.readline());"
"sys.stdout.flush()"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=1,
universal_newlines=universal_newlines) as p:
p.stdin.write(line) # expect that it flushes the line in text mode
os.close(p.stdin.fileno()) # close it without flushing the buffer
read_line = p.stdout.readline()
try:
p.stdin.close()
except OSError:
pass
p.stdin = None
self.assertEqual(p.returncode, 0)
self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
# line is flushed in text mode with bufsize=1.
# we should get the full line in return
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
# line is not flushed in binary mode with bufsize=1.
# we should get empty response
line = b'line' + os.linesep.encode() # assume ascii-based locale
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
with self.assertRaises(OSError) as c:
subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# ignore errors that indicate the command was not found
if c.exception.errno not in (errno.ENOENT, errno.EACCES):
raise c.exception
@unittest.skipIf(threading is None, "threading required")
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(['nonexisting_i_hope'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
@unittest.skipIf(threading is None, "threading required")
def test_threadsafe_wait(self):
"""Issue21291: Popen.wait() needs to be threadsafe for returncode."""
proc = subprocess.Popen([sys.executable, '-c',
'import time; time.sleep(12)'])
self.assertEqual(proc.returncode, None)
results = []
def kill_proc_timer_thread():
results.append(('thread-start-poll-result', proc.poll()))
# terminate it from the thread and wait for the result.
proc.kill()
proc.wait()
results.append(('thread-after-kill-and-wait', proc.returncode))
# this wait should be a no-op given the above.
proc.wait()
results.append(('thread-after-second-wait', proc.returncode))
# This is a timing sensitive test, the failure mode is
# triggered when both the main thread and this thread are in
# the wait() call at once. The delay here is to allow the
# main thread to most likely be blocked in its wait() call.
t = threading.Timer(0.2, kill_proc_timer_thread)
t.start()
if mswindows:
expected_errorcode = 1
else:
# Should be -9 because of the proc.kill() from the thread.
expected_errorcode = -9
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
proc.wait(timeout=20)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
# This should be a no-op with no change in returncode.
proc.wait()
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in second main wait.")
t.join()
# Ensure that all of the thread results are as expected.
# When a race condition occurs in wait(), the returncode could
# be set by the wrong thread that doesn't actually have it
# leading to an incorrect value.
self.assertEqual([('thread-start-poll-result', None),
('thread-after-kill-and-wait', expected_errorcode),
('thread-after-second-wait', expected_errorcode)],
results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = mkstemp()
ofhandle, ofname = mkstemp()
efhandle, efname = mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
"Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
"Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
"Requires os.getppid")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGUSR1, handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
args = [sys.executable, "-c",
'import os, signal;'
'os.kill(os.getppid(), signal.SIGUSR1)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
# communicate() will be interrupted by SIGUSR1
process.communicate()
# This test is Linux-ish specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
[sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
# This avoids hard coding the errno value or the OS perror()
# string and instead capture the exception that we want to see
# below for comparison.
desired_exception = e
desired_exception.strerror += ': ' + repr(self._nonexistent_dir)
else:
self.fail("chdir to nonexistant directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_restore_signals(self):
# Code coverage for both values of restore_signals to make sure it
# at least does not blow up.
# A test for behavior would be complex. Contributions welcome.
subprocess.call([sys.executable, "-c", ""], restore_signals=True)
subprocess.call([sys.executable, "-c", ""], restore_signals=False)
def test_start_new_session(self):
# For code coverage of calling setsid(). We don't care if we get an
# EPERM error from it depending on the test execution environment, that
# still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getpgid(os.getpid()))"],
start_new_session=True)
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
parent_pgid = os.getpgid(os.getpid())
child_pgid = int(output)
self.assertNotEqual(parent_pgid, child_pgid)
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_preexec(self):
# DISCLAIMER: Setting environment variables is *not* a good use
# of a preexec_fn. This is merely a test.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), b"apple")
def test_preexec_exception(self):
def raise_it():
raise ValueError("What if two swallows carried a coconut?")
try:
p = subprocess.Popen([sys.executable, "-c", ""],
preexec_fn=raise_it)
except subprocess.SubprocessError as e:
self.assertTrue(
subprocess._posixsubprocess,
"Expected a ValueError from the preexec_fn")
except ValueError as e:
self.assertIn("coconut", e.args[0])
else:
self.fail("Exception raised by preexec_fn did not make it "
"to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(self, *args, **kwargs):
try:
subprocess.Popen._execute_child(self, *args, **kwargs)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()),
msg="At least one fd was closed early.")
finally:
for fd in devzero_fds:
os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise subprocess.SubprocessError(
"force the _execute_child() errpipe_data path.")
with self.assertRaises(subprocess.SubprocessError):
self._TestExecuteChildPopen(
self, [sys.executable, "-c", "pass"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_preexec_gc_module_failure(self):
# This tests the code that disables garbage collection if the child
# process will execute any Python.
def raise_runtime_error():
raise RuntimeError("this shouldn't escape")
enabled = gc.isenabled()
orig_gc_disable = gc.disable
orig_gc_isenabled = gc.isenabled
try:
gc.disable()
self.assertFalse(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertFalse(gc.isenabled(),
"Popen enabled gc when it shouldn't.")
gc.enable()
self.assertTrue(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
gc.disable = raise_runtime_error
self.assertRaises(RuntimeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
del gc.isenabled # force an AttributeError
self.assertRaises(AttributeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
finally:
gc.disable = orig_gc_disable
gc.isenabled = orig_gc_isenabled
if not enabled:
gc.disable()
def test_args_string(self):
# args is a string
fd, fname = mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!/bin/sh\n")
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!/bin/sh\n")
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
# Also set the SIGINT handler to the default to make sure it's not
# being ignored (some tests rely on that.)
old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
signal.signal(signal.SIGINT, old_handler)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
saved_fds = self._save_fds(fds)
for fd, saved, inheritable in saved_fds:
if fd == 0:
stdin = saved
break
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
err = support.strip_python_stderr(err)
self.assertEqual((out, err), (b'apple', b'orange'))
finally:
self._restore_fds(saved_fds)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
"""Issue #15798: Popen should work when stdio fds are available."""
new_stdin = os.dup(0)
new_stdout = os.dup(1)
try:
os.close(0)
os.close(1)
# Side test: if errpipe_write fails to have its CLOEXEC
# flag set this should cause the parent to think the exec
# failed. Extremely unlikely: everyone supports CLOEXEC.
subprocess.Popen([
sys.executable, "-c",
"print('AssertionError:0:CLOEXEC failure.')"]).wait()
finally:
# Restore original stdin and stdout
os.dup2(new_stdin, 0)
os.dup2(new_stdout, 1)
os.close(new_stdin)
os.close(new_stdout)
def test_remapping_std_fds(self):
# open up some temporary files
temps = [mkstemp() for i in range(3)]
try:
temp_fds = [fd for fd, fname in temps]
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# write some data to what will become stdin, and rewind
os.write(temp_fds[1], b"STDIN")
os.lseek(temp_fds[1], 0, 0)
# move the standard file descriptors out of the way
saved_fds = self._save_fds(range(3))
try:
# duplicate the file objects over the standard fd's
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# now use those files in the "wrong" order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=temp_fds[1],
stdout=temp_fds[2],
stderr=temp_fds[0])
p.wait()
finally:
self._restore_fds(saved_fds)
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(temp_fds[2], 1024)
err = support.strip_python_stderr(os.read(temp_fds[0], 1024))
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = self._save_fds(range(3))
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = support.strip_python_stderr(os.read(stderr_no, 1024))
finally:
self._restore_fds(saved_fds)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def test_surrogates_error_message(self):
def prepare():
raise ValueError("surrogate:\uDCff")
try:
subprocess.call(
[sys.executable, "-c", "pass"],
preexec_fn=prepare)
except ValueError as err:
            # The pure Python implementation keeps the message
self.assertIsNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "surrogate:\uDCff")
except subprocess.SubprocessError as err:
# _posixsubprocess uses a default message
self.assertIsNotNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "Exception occurred in preexec_fn.")
else:
self.fail("Expected ValueError or subprocess.SubprocessError")
def test_undecodable_env(self):
for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
encoded_value = value.encode("ascii", "surrogateescape")
# test str with surrogates
script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
env = os.environ.copy()
env[key] = value
# Use C locale to get ASCII for the locale encoding to force
# surrogate-escaping of \xFF in the child process; otherwise it can
# be decoded as-is if the default locale is latin-1.
env['LC_ALL'] = 'C'
if sys.platform.startswith("aix"):
# On AIX, the C locale uses the Latin1 encoding
decoded_value = encoded_value.decode("latin1", "surrogateescape")
else:
# On other UNIXes, the C locale uses the ASCII encoding
decoded_value = value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
# test bytes
key = key.encode("ascii", "surrogateescape")
script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
env = os.environ.copy()
env[key] = encoded_value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
abs_program = os.fsencode(sys.executable)
path, program = os.path.split(sys.executable)
program = os.fsencode(program)
# absolute bytes path
exitcode = subprocess.call([abs_program, "-c", "pass"])
self.assertEqual(exitcode, 0)
# absolute bytes path as a string
cmd = b"'" + abs_program + b"' -c pass"
exitcode = subprocess.call(cmd, shell=True)
self.assertEqual(exitcode, 0)
# bytes program, unicode PATH
env = os.environ.copy()
env["PATH"] = path
exitcode = subprocess.call([program, "-c", "pass"], env=env)
self.assertEqual(exitcode, 0)
# bytes program, bytes PATH
envb = os.environb.copy()
envb[b"PATH"] = os.fsencode(path)
exitcode = subprocess.call([program, "-c", "pass"], env=envb)
self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
p1 = subprocess.Popen([sys.executable, sleeper],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
self.addCleanup(p1.communicate, b'')
p2 = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, error = p2.communicate()
result_fds = set(map(int, output.split(b',')))
unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
p1.stderr.fileno()])
self.assertFalse(result_fds & unwanted_fds,
"Expected no fds from %r to be open in child, "
"found %r" %
(unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
qcat = support.findfile("qcat.py", subdir="subprocessdata")
qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
subdata = b'zxcvbn'
data = subdata * 4 + b'\n'
p1 = subprocess.Popen([sys.executable, qcat],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=False)
p2 = subprocess.Popen([sys.executable, qgrep, subdata],
stdin=p1.stdout, stdout=subprocess.PIPE,
close_fds=False)
self.addCleanup(p1.wait)
self.addCleanup(p2.wait)
def kill_p1():
try:
p1.terminate()
except ProcessLookupError:
pass
def kill_p2():
try:
p2.terminate()
except ProcessLookupError:
pass
self.addCleanup(kill_p1)
self.addCleanup(kill_p2)
p1.stdin.write(data)
p1.stdin.close()
readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
self.assertTrue(readfiles, "The child hung")
self.assertEqual(p2.stdout.read(), data)
p1.stdout.close()
p2.stdout.close()
def test_close_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds = set(fds)
# add a bunch more fds
for _ in range(9):
fd = os.open("/dev/null", os.O_RDONLY)
self.addCleanup(os.close, fd)
open_fds.add(fd)
for fd in open_fds:
os.set_inheritable(fd, True)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertEqual(remaining_fds & open_fds, open_fds,
"Some fds were closed")
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & open_fds,
"Some fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Keep some of the fd's we opened open in the subprocess.
# This tests _posixsubprocess.c's proper handling of fds_to_keep.
fds_to_keep = set(open_fds.pop() for _ in range(8))
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=())
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & fds_to_keep & open_fds,
"Some fds not in pass_fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
"Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
"""Confirm that issue21618 is fixed (may fail under valgrind)."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# This launches the meat of the test in a child process to
        # avoid messing with the larger unittest process's maximum
# number of file descriptors.
# This process launches:
        #   +--> Process that lowers its RLIMIT_NOFILE after setting up
# a bunch of high open fds above the new lower rlimit.
# Those are reported via stdout before launching a new
# process with close_fds=False to run the actual test:
# +--> The TEST: This one launches a fd_status.py
# subprocess with close_fds=True so we can find out if
# any of the fds above the lowered rlimit are still open.
p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
'''
import os, resource, subprocess, sys, textwrap
open_fds = set()
# Add a bunch more fds to pass down.
for _ in range(40):
fd = os.open("/dev/null", os.O_RDONLY)
open_fds.add(fd)
            # Leave two pairs of low ones available for use by the
# internal child error pipe and the stdout pipe.
# We also leave 10 more open as some Python buildbots run into
# "too many open files" errors during the test if we do not.
for fd in sorted(open_fds)[:14]:
os.close(fd)
open_fds.remove(fd)
for fd in open_fds:
#self.addCleanup(os.close, fd)
os.set_inheritable(fd, True)
max_fd_open = max(open_fds)
# Communicate the open_fds to the parent unittest.TestCase process.
print(','.join(map(str, sorted(open_fds))))
sys.stdout.flush()
rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
# 29 is lower than the highest fds we are leaving open.
resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
# Launch a new Python interpreter with our low fd rlim_cur that
# inherits open fds above that limit. It then uses subprocess
# with close_fds=True to get a report of open fds in the child.
# An explicit list of fds to check is passed to fd_status.py as
# letting fd_status rely on its default logic would miss the
# fds above rlim_cur as it normally only checks up to that limit.
subprocess.Popen(
[sys.executable, '-c',
textwrap.dedent("""
import subprocess, sys
subprocess.Popen([sys.executable, %r] +
[str(x) for x in range({max_fd})],
close_fds=True).wait()
""".format(max_fd=max_fd_open+1))],
close_fds=False).wait()
finally:
resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
''' % fd_status)], stdout=subprocess.PIPE)
output, unused_stderr = p.communicate()
output_lines = output.splitlines()
self.assertEqual(len(output_lines), 2,
msg="expected exactly two lines of output:\n%r" % output)
opened_fds = set(map(int, output_lines[0].strip().split(b',')))
remaining_fds = set(map(int, output_lines[1].strip().split(b',')))
self.assertFalse(remaining_fds & opened_fds,
msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
open_fds = set()
for x in range(5):
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
os.set_inheritable(fds[0], True)
os.set_inheritable(fds[1], True)
open_fds.update(fds)
for fd in open_fds:
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=(fd, ))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
to_be_closed = open_fds - {fd}
self.assertIn(fd, remaining_fds, "fd to be passed not passed")
self.assertFalse(remaining_fds & to_be_closed,
"fd to be closed passed")
# pass_fds overrides close_fds with a warning.
with self.assertWarns(RuntimeWarning) as context:
self.assertFalse(subprocess.call(
[sys.executable, "-c", "import sys; sys.exit(0)"],
close_fds=False, pass_fds=(fd, )))
self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
script = support.findfile("fd_status.py", subdir="subprocessdata")
inheritable, non_inheritable = os.pipe()
self.addCleanup(os.close, inheritable)
self.addCleanup(os.close, non_inheritable)
os.set_inheritable(inheritable, True)
os.set_inheritable(non_inheritable, False)
pass_fds = (inheritable, non_inheritable)
args = [sys.executable, script]
args += list(map(str, pass_fds))
p = subprocess.Popen(args,
stdout=subprocess.PIPE, close_fds=True,
pass_fds=pass_fds)
output, ignored = p.communicate()
fds = set(map(int, output.split(b',')))
# the inheritable file descriptor must be inherited, so its inheritable
# flag must be set in the child process after fork() and before exec()
self.assertEqual(fds, set(pass_fds), "output=%a" % output)
# inheritable flag must not be changed in the parent process
self.assertEqual(os.get_inheritable(inheritable), True)
self.assertEqual(os.get_inheritable(non_inheritable), False)
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" %
stderr.decode('utf-8'))
def test_select_unbuffered(self):
# Issue #11459: bufsize=0 should really set the pipes as
# unbuffered (and therefore let select() work properly).
select = support.import_module("select")
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple")'],
stdout=subprocess.PIPE,
bufsize=0)
f = p.stdout
self.addCleanup(f.close)
try:
self.assertEqual(f.read(4), b"appl")
self.assertIn(f, select.select([f], [], [], 0.0)[0])
finally:
p.wait()
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
del p
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
del p
os.kill(pid, signal.SIGKILL)
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
# let some time for the process to exit, and create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(OSError) as c:
with subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_close_fds_after_preexec(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# this FD is used as dup2() target by preexec_fn, and should be closed
# in the child process
fd = os.dup(1)
self.addCleanup(os.close, fd)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
preexec_fn=lambda: os.dup2(1, fd))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertNotIn(fd, remaining_fds)
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
        # We use hardcoded constants because we do not want to
        # depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
stdout=subprocess.PIPE,
close_fds=True)
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertIn(b"physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertIn(b"physalis", p.stdout.read())
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
        # Some win32 buildbots raise EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
class CommandTests(unittest.TestCase):
def test_getoutput(self):
self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
(0, 'xyzzy'))
# we use mkdtemp in the next line to create an empty directory
# under our exclusive control; from that, we can invent a pathname
# that we _know_ won't exist. This is guaranteed to fail.
dir = None
try:
dir = tempfile.mkdtemp()
name = os.path.join(dir, "foo")
status, output = subprocess.getstatusoutput(
("type " if mswindows else "cat ") + name)
self.assertNotEqual(status, 0)
finally:
if dir is not None:
os.rmdir(dir)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
"Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
self.orig_selector = subprocess._PopenSelector
subprocess._PopenSelector = selectors.SelectSelector
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._PopenSelector = self.orig_selector
ProcessTestCase.tearDown(self)
class HelperFunctionTests(unittest.TestCase):
@unittest.skipIf(mswindows, "errno and EINTR make no sense on windows")
def test_eintr_retry_call(self):
record_calls = []
def fake_os_func(*args):
record_calls.append(args)
if len(record_calls) == 2:
raise OSError(errno.EINTR, "fake interrupted system call")
return tuple(reversed(args))
self.assertEqual((999, 256),
subprocess._eintr_retry_call(fake_os_func, 256, 999))
self.assertEqual([(256, 999)], record_calls)
# This time there will be an EINTR so it will loop once.
self.assertEqual((666,),
subprocess._eintr_retry_call(fake_os_func, 666))
self.assertEqual([(256, 999), (666,), (666,)], record_calls)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
def setUp(self):
super().setUp()
f, fname = mkstemp(".py", "te st")
self.fname = fname.lower ()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super().tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
self.addCleanup(p.stdout.close)
self.assertEqual(
p.stdout.read ().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
def test_pipe(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('stdout');"
"sys.stderr.write('stderr');"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
self.assertEqual(proc.stdout.read(), b"stdout")
self.assertStderrEqual(proc.stderr.read(), b"stderr")
self.assertTrue(proc.stdout.closed)
self.assertTrue(proc.stderr.closed)
def test_returncode(self):
with subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(100)"]) as proc:
pass
# __exit__ calls wait(), so the returncode should be set
self.assertEqual(proc.returncode, 100)
def test_communicate_stdin(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.exit(sys.stdin.read() == 'context')"],
stdin=subprocess.PIPE) as proc:
proc.communicate(b"context")
self.assertEqual(proc.returncode, 1)
def test_invalid_args(self):
with self.assertRaises(FileNotFoundError) as c:
with subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
def test_main():
unit_tests = (ProcessTestCase,
POSIXProcessTestCase,
Win32ProcessTestCase,
CommandTests,
ProcessTestCaseNoPoll,
HelperFunctionTests,
CommandsWithSpaces,
ContextManagerTests,
)
support.run_unittest(*unit_tests)
support.reap_children()
if __name__ == "__main__":
unittest.main()
|
VytServerApp.py
|
import socket
import threading
# struct aligns to four-byte boundaries by default, so to send a single byte,
# prefix the format string with '=' to disable the alignment padding
import struct
from vyterm.cryptography import Base64
from vyterm.vythug.Handlers import handle_packet, handle_logout
_cipher = Base64()
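# Editor's note: a minimal sketch of the alignment comment above (assuming the
# point is native '@' alignment vs. the '=' standard-size prefix in struct).
# With native alignment, a 1-byte field followed by a 4-byte int is padded to
# 8 bytes; '=' keeps it at 5.  The two names below are illustrative only.
_NATIVE_SIZE = struct.calcsize('bi')     # typically 8: the int is padded to a 4-byte boundary
_STANDARD_SIZE = struct.calcsize('=bi')  # 5: '=' disables the padding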
class ClientPeer:
@staticmethod
def SendProc(*args, **kwargs):
peer = kwargs['peer']
while True:
try:
peer.sendlock.acquire()
packets = peer.packets
peer.packets = []
peer.sendlock.release()
for packet in packets:
peer.socket.send(packet)
            except OSError:
                # WindowsError is only defined on Windows; OSError covers it portably
                print("SendProc caught OSError")
                return
            except Exception as e:
                print("SendProc caught unexpected exception:", e)
                return
def __init__(self, socket):
self.socket = socket
self.packets = []
self.sendlock = threading.Lock()
# self.cipher = Base64()
sendThread = threading.Thread(name="Send Thread", target=ClientPeer.SendProc, kwargs={'peer':self})
sendThread.setDaemon(True)
sendThread.start()
def send(self, opcmd: int, subcmd: int, packet = bytes()):
packet = struct.pack('ii', opcmd, subcmd) + packet
packet = _cipher.Encrypt(packet)
packet = struct.pack('i', len(packet)) + packet
self.sendlock.acquire()
self.packets.append(packet)
self.sendlock.release()
def recv(self, size):
return self.socket.recv(size)
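# Editor's note: a hedged sketch, not used by the server above, of how a client
# written against the same framing could build and split one frame.  The wire
# format implied by ClientPeer.send() and RecvProc() is:
#     [4-byte packed int: ciphertext length][cipher([4-byte opcmd][4-byte subcmd][payload])]
# The two helper names are illustrative, not part of the original protocol code.
def _demo_build_frame(opcmd: int, subcmd: int, payload: bytes = bytes()) -> bytes:
    body = _cipher.Encrypt(struct.pack('ii', opcmd, subcmd) + payload)
    return struct.pack('i', len(body)) + body
def _demo_split_frame(frame: bytes):
    size, = struct.unpack('i', frame[:4])
    body = _cipher.Decrypt(frame[4:4 + size])
    opcmd, subcmd = struct.unpack('ii', body[:8])
    return opcmd, subcmd, body[8:]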
def RecvProc(*args, **kwargs):
client = ClientPeer(kwargs['socket'])
ip, port = kwargs['addr']
while True:
try:
packetsize = client.recv(4)
packetsize, = struct.unpack('i', packetsize)
packet = client.recv(packetsize)
packet = _cipher.Decrypt(packet)
opCommand,subCommand, = struct.unpack('ii', packet[:8])
packet = packet[8:]
print("收到来自客户端的数据包,大小为" + str(packetsize).ljust(5, ' ')
+ " 主操作码为:" + str(opCommand).ljust(2, ' ')
+ " 副操作码为:" + str(subCommand).ljust(2, ' '))
handle_packet(client, opCommand, subCommand, packet)
except ConnectionResetError:
            # Raised when the client disconnects
            print("User " + ip + " has disconnected")
handle_logout(client)
return
def AcceptProc(*args, **kwargs):
server = kwargs['socket']
while True:
client, addr = server.accept()
ip, port = addr
print("用户IP:" + ip + " " + "端口号:" + str(port))
# 建立接收客户端消息线程
recvThread = threading.Thread(name='Recv Thread', target=RecvProc, kwargs={'socket': client, 'addr': addr})
recvThread.setDaemon(True)
        # Start the receive thread
recvThread.start()
if __name__ == '__main__':
    # Create the server socket
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
    # Bind the IP address and port
    server.bind(("localhost", 38564))
    # Start listening for connections
    server.listen(socket.SOMAXCONN)
    # Create the accept thread
    acceptThread = threading.Thread(name='Accept Thread', target=AcceptProc, kwargs={'socket': server})
    acceptThread.setDaemon(True)
    # Start the accept thread
acceptThread.start()
while "Quit" != input(">>> "):
continue
print("Exit Done")
|
server.py
|
#!/usr/bin/env python3
# Copyright 2021 Nicolas Surbayrole
# Copyright 2021 Quarkslab
# Copyright 2021 Association STIC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from enum import IntEnum, unique
from hashlib import sha256
import json
import os
import multiprocessing as mp
from solver import Tracer
import socket
import struct
import tempfile
import time
import traceback
import urllib.request
import subprocess
from hexdump import hexdump
@unique
class ReqType(IntEnum):
CHECK = 0
GETKEY = 1
REQ_EXEC_CODE = 2
REQ_EXEC_FILE = 3
@unique
class RespType(IntEnum):
ACK = 0
CHECK_OK = 1
CHECK_EXPIRED = 2
GETKEY_OK = 3
GETKEY_EXPIRED = 4
GETKEY_INVALID_PERMS = 5
GETKEY_UNKNOW = 6
GETKEY_DEBUG_DEVICE = 7
EXEC_CODE_OK = 8
EXEC_CODE_ERROR = 9
EXEC_FILE_KEY_OK = 10
EXEC_FILE_BAD_KEY = 11
EXEC_FILE_OK = 12
EXEC_FILE_ERROR = 13
REQUEST_ERROR = 0xfe
UNEXPECTED_ERROR = 0xff
def connect_key_serv(ctx):
if ctx['rsock'] is not None:
ctx['rsock'].close()
rsock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
rsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
rsock.connect(ctx['rsock_addr'])
print("connected to {}:{}".format(*ctx['rsock_addr']))
header = rsock.recv(4, socket.MSG_WAITALL)
if header != b'STIC':
print("Fail recevied header: exit")
os.exit(1)
ctx['rsock'] = rsock
ctx['rsock_timeout'] = int(time.time()) + 3500
print("connection ready")
def get_ident_getter(ident):
with open("get_key.c", "r") as f:
c = f.read()
c = c.replace("uint64_t ident = 0x0011223344deed;", "uint64_t ident = 0x{:x};".format(ident))
with tempfile.TemporaryDirectory() as builddir:
with open(builddir + "/input.c", "w") as f:
f.write(c)
subprocess.run(["gcc", "-static", "-o", builddir + "/output", builddir + "/input.c"], check=True)
with open(builddir + "/output", 'rb') as f:
return f.read()
def get_key_with_exec(ident, ctx):
PASS_RESOLV = b"expand 32-byte kb\xcc'=\xe8\x90U\x81\xc4\xfa\xc9\x1c\xbeE\x104\x1a\t\x16\xca\xfa\x05\x14\xf6\x80\xe4`J\xa8\x97\xba\xd4\xadb\xa0-\xcd\x9b5t\x87\xf6z\xb4q4\xb6\x97\x0e\x03\x05\n\x08\x04\t\x0b\x00\x0c\r\x07\x0f\x02\x06\x01"
binary = get_ident_getter(ident)
header = gen_zero_headers(ctx)
ctx['rsock'].send(bytes([ReqType.REQ_EXEC_FILE.value]) + header + PASS_RESOLV)
r = ctx['rsock'].recv(1, socket.MSG_WAITALL)
try:
resType = RespType(int(r[0]))
except ValueError:
print("reqexec_file RESPONSE_ERROR unknown reqType", r)
return None
print("recv {}".format(resType))
if resType != RespType.EXEC_FILE_KEY_OK:
return None
print("send binary len = {}".format(len(binary)))
ctx['rsock'].send(struct.pack('<Q', len(binary)) + binary)
r = ctx['rsock'].recv(1, socket.MSG_WAITALL)
try:
resType = RespType(int(r[0]))
except ValueError:
print("reqexec_file RESPONSE_ERROR unknown reqType", r)
return None
print("recv {}".format(resType))
if resType != RespType.EXEC_FILE_OK:
return None
err = b""
err_end = False
while not err_end:
err += ctx['rsock'].recv(1024)
err_end = err.endswith(b"---EXEC OUTPUT END---\n")
l = err.decode('ascii').split('\n')[1]
print("get {}".format(l))
if l.startswith('key: '):
r = bytes.fromhex(l[5:])
if r != bytes([0] * 16):
return r
print("Error null key")
return None
def getResp(ctx):
res = ctx['rsock'].recv(1, socket.MSG_WAITALL)
try:
respType = RespType(int(res[0]))
except ValueError:
print("reqCheck unknown respType : {}".format(int(respType)))
sock.send(bytes([RespType.REQUEST_ERROR.value]))
return None, res
if respType in [RespType.CHECK_OK, RespType.CHECK_EXPIRED, RespType.GETKEY_OK]:
res += ctx['rsock'].recv(16, socket.MSG_WAITALL)
return respType, res
def oracle(m, ctx):
ctx['rsock'].send(bytes([ReqType.CHECK.value]) + m)
respType, res = getResp(ctx)
assert respType in [RespType.CHECK_OK, RespType.CHECK_EXPIRED]
return res[1:]
def create_solver(ctx):
response = urllib.request.urlopen(ctx['wb_address'])
data = response.read()
with tempfile.NamedTemporaryFile(suffix='.so', prefix='lib') as f:
f.write(data)
solver = Tracer(f.name)
solver.generate_mask(lambda m: oracle(m + solver.wb_ident, ctx))
return solver
def gen_zero_headers(ctx):
ts = int(time.time()) - 1800
if ctx['solver'] == None or struct.unpack('<I', ctx['solver'].wb_ident)[0] < ts:
if ctx['solver'] != None:
del ctx['solver']
ctx['solver'] = create_solver(ctx)
return ctx['solver'].encryptAny(bytes([0] * 16)) + ctx['solver'].wb_ident
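# Layout sketch (added; inferred from the 20-byte reads in reqCheck/reqExecCode
# below): gen_zero_headers() returns a 20-byte header consisting of
#   header[0:16]  -> solver.encryptAny(b'\x00' * 16)   (whitebox-encrypted zero block)
#   header[16:20] -> solver.wb_ident                   (little-endian uint32 timestamp)
# and the solver is rebuilt whenever that embedded timestamp is older than
# roughly 30 minutes.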
def reqCheck(sock, address, ctx):
payload = sock.recv(20, socket.MSG_WAITALL)
if len(payload) != 20:
print("reqCheck REQUEST_ERROR")
sock.send(bytes([RespType.REQUEST_ERROR.value]))
return
print("CHECK request payload:{} ident:{}".format(payload[:16].hex(), payload[16:].hex()))
m = bytes([ReqType.CHECK.value]) + payload
ctx['rsock'].send(m)
respType, res = getResp(ctx)
sock.send(res)
print("{}: {}".format(respType, res[1:].hex()))
return
def reqGetKey(sock, address, ctx):
payload = sock.recv(20, socket.MSG_WAITALL)
if len(payload) != 20:
print("reqGetKey REQUEST_ERROR")
sock.send(bytes([RespType.REQUEST_ERROR.value]))
return
print("GETKEY request payload:{} ident:{}".format(payload[:16].hex(), payload[16:].hex()))
if ctx['solver'] == None or ctx['solver'].wb_ident != payload[16:]:
if ctx['solver'] != None:
del ctx['solver']
ctx['solver'] = create_solver(ctx)
if ctx['solver'].wb_ident != payload[16:]:
print("Return GETKEY_EXPIRED to reload whitebox")
sock.send(bytes([RespType.GETKEY_EXPIRED.value]))
return
m = bytes([ReqType.GETKEY.value]) + payload
ctx['rsock'].send(m)
respType, res = getResp(ctx)
if respType == RespType.GETKEY_INVALID_PERMS:
print("{}: {}".format(respType, res[1:].hex()))
m = oracle(payload, ctx)
print("Use solver on {}".format(m.hex()))
clear = m[:8] + bytes([0] * 8)
cipher = ctx['solver'].encryptAny(clear)
print("get {} for {}".format(cipher.hex(), clear.hex()))
to_send = bytes([ReqType.GETKEY.value]) + cipher + payload[16:]
ctx['rsock'].send(to_send)
respType, res = getResp(ctx)
# the key still requires permission level 0; fall back to extracting it with get_key_with_exec (remote code execution on the key server)
if respType == RespType.GETKEY_INVALID_PERMS:
key = get_key_with_exec(struct.unpack('<Q', m[:8])[0], ctx)
if key != None:
respType = RespType.GETKEY_OK
res = bytes([RespType.GETKEY_OK.value]) + key
sock.send(res)
print("{}: {}".format(respType, res[1:].hex()))
return
def reqExecCode(sock, address, ctx):
# dummy header, create a valid header
sock.recv(0x14, socket.MSG_WAITALL)
header = gen_zero_headers(ctx)
code_size = sock.recv(8, socket.MSG_WAITALL)
code = sock.recv(struct.unpack('<Q', code_size)[0], socket.MSG_WAITALL)
input_size = sock.recv(8, socket.MSG_WAITALL)
input_buff = sock.recv(struct.unpack('<Q', input_size)[0], socket.MSG_WAITALL)
output_size = sock.recv(8, socket.MSG_WAITALL)
output_size_ = struct.unpack('<Q', output_size)[0]
print("send code")
print(hexdump(code, 'return'))
ctx['rsock'].send(bytes([ReqType.REQ_EXEC_CODE.value]) + header + code_size + code + input_size + input_buff + output_size)
c = ctx['rsock'].recv(1, socket.MSG_WAITALL)
sock.send(c)
if int(c[0]) != RespType.EXEC_CODE_OK.value:
err = RespType(c[0])
print("reqexec_code received {}".format(err))
return
output = b""
output_end = False
while not output_end:
output += ctx['rsock'].recv(1024)
output_end = output.endswith(b"---DEBUG LOG END---\n")
print("reqexec_code confirm")
print("output:")
print(hexdump(output[:output_size_], 'return'))
print("stderr:")
print(output[output_size_:].decode('ascii'))
sock.send(output)
return
def reqExecFile(sock, address, ctx):
sock.recv(0x14, socket.MSG_WAITALL)
header = gen_zero_headers(ctx)
input_buff = sock.recv(0x50, socket.MSG_WAITALL)
print("begin send exec_file")
print(hexdump(input_buff, 'return'))
ctx['rsock'].send(bytes([ReqType.REQ_EXEC_FILE.value]) + header + input_buff)
r = ctx['rsock'].recv(1, socket.MSG_WAITALL)
try:
resType = RespType(int(r[0]))
except ValueError:
print("reqexec_file RESPONSE_ERROR unknown reqType", r)
sock.send(bytes([RespType.REQUEST_ERROR.value]))
return
print("recv {}".format(resType))
sock.send(r)
if resType != RespType.EXEC_FILE_KEY_OK:
return
f_size = sock.recv(0x8, socket.MSG_WAITALL)
f_buff = sock.recv(struct.unpack('<Q', f_size)[0], socket.MSG_WAITALL)
print("send binary len = {}".format(struct.unpack('<Q', f_size)[0]))
ctx['rsock'].send(f_size + f_buff)
r = ctx['rsock'].recv(1, socket.MSG_WAITALL)
try:
resType = RespType(int(r[0]))
except ValueError:
print("reqexec_file RESPONSE_ERROR unknown reqType", r)
sock.send(bytes([RespType.REQUEST_ERROR.value]))
return
print("recv {}".format(resType))
sock.send(r)
if resType != RespType.EXEC_FILE_OK:
return
err = b""
err_end = False
while not err_end:
err += ctx['rsock'].recv(1024)
err_end = err.endswith(b"---EXEC OUTPUT END---\n")
print("msg :")
print(err)
sock.send(err)
return
def process_main(sock, address, ctx):
try:
print("Begin connexion {}".format(address))
sock.send(b'STIC');
while True:
m = sock.recv(1, socket.MSG_WAITALL)
if len(m) < 1:
sock.send(bytes([RespType.REQUEST_ERROR.value]))
sock.close()
return
req = int(m[0])
try:
reqType = ReqType(req)
except ValueError:
print("process_main REQUEST_ERROR unknown reqType")
sock.send(bytes([RespType.REQUEST_ERROR.value]))
continue
with ctx['lock']:
if ctx['rsock_timeout'] < int(time.time()):
print("recreate connexion")
connect_key_serv(ctx)
if reqType == ReqType.CHECK:
reqCheck(sock, address, ctx)
elif reqType == ReqType.GETKEY:
reqGetKey(sock, address, ctx)
elif reqType == ReqType.REQ_EXEC_CODE:
reqExecCode(sock, address, ctx)
elif reqType == ReqType.REQ_EXEC_FILE:
reqExecFile(sock, address, ctx)
else:
print("process_main REQUEST_ERROR no handler for reqType {}".format(reqType))
sock.send(bytes([RespType.REQUEST_ERROR.value]))
finally:
print("End connexion {}".format(address))
sock.close()
def worker(sock, ctx):
cont = True
while cont:
client = None
try:
client, address = sock.accept()
process_main(client, address, ctx)
except KeyboardInterrupt:
cont = False
except Exception as e:
traceback.print_exc()
if client != None:
client.close()
def main():
import argparse
parser = argparse.ArgumentParser()
class hexArg:
def __call__(self, raw):
try:
b = bytes.fromhex(raw)
except ValueError:
raise argparse.ArgumentTypeError('Not a hex value')
return b
#parser.add_argument("-W", "--workers", type=int, help="worker", default=1)
parser.add_argument("-l", "--listen-port", type=int, help="listening port", default=65430)
parser.add_argument("-a", "--remote-address", type=str, default="127.0.0.1")
parser.add_argument("-p", "--remote-port", type=int, default=1337)
parser.add_argument("-w", "--wb-address", type=str, default='http://127.0.0.1:8080/api/guest.so')
args = parser.parse_args()
context = {
"lock": mp.Lock(),
"rsock": None,
"rsock_addr": (args.remote_address, args.remote_port),
"rsock_timeout": 0,
"wb_address": args.wb_address,
"solver": None,
}
connect_key_serv(context)
sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(("0.0.0.0", args.listen_port))
#sock.listen(8 * args.workers)
sock.listen(8)
print("connection ready, listen on {}".format(args.listen_port))
worker(sock, context)
#workers = [mp.Process(target=worker, args=(sock, context), daemon=True) for i in range(args.workers)]
#for w in workers:
# w.start()
#while True:
# for i in range(len(workers)):
# workers[i].join(0.001)
# if workers[i].exitcode != None:
# workers[i] = mp.Process(target=worker, args=(sock, context), daemon=True)
# workers[i].start()
# time.sleep(1)
if __name__ == '__main__':
main()
|
__init__.py
|
#!/usr/bin/env false
# -*- coding: utf-8 -*-
import sys
import socket
import base64
import time
import re
import traceback
import os
from threading import Lock, Thread
# Netcat module taken from here: https://gist.github.com/leonjza/f35a7252babdf77c8421
# and slightly modified
class Netcat:
""" Python 'netcat like' module """
def __init__(self, ip, port):
self.buff = b''
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((ip, port))
def read(self, length=1024):
""" Read 1024 bytes off the socket """
tmp = self.socket.recv(length)
while len(tmp) < length:
received = self.socket.recv(length - len(tmp))
if len(received) == 0:
self.socket.close()
raise IOError('Connection closed')
tmp += received
return tmp
def read_until(self, data):
""" Read data into the buffer until we have data """
while not data in self.buff:
received = self.socket.recv(1024)
if len(received) == 0:
self.socket.close()
raise IOError('Connection closed')
self.buff += received
pos = self.buff.find(data)
rval = self.buff[:pos + len(data)]
self.buff = self.buff[pos + len(data):]
return rval
def write(self, data):
self.socket.send(data)
def close(self):
self.socket.close()
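# A minimal usage sketch, added for illustration only; the host, port and the
# newline-terminated banner below are assumptions, not part of the original
# protocol.
def _netcat_usage_example(host='127.0.0.1', port=4444):
    nc = Netcat(host, port)
    banner = nc.read_until(b'\n')  # block until the first newline arrives
    nc.write(b'hello\n')
    nc.close()
    return banner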
class Class:
def __init__(self, nc, name):
self.name = name
self.nc = nc
def instantiate(self):
return Object(self, self.nc, self.nc.instantiate_class(self.name))
def get_method(self, method):
return self.nc.invoke('core.class.getMethod', [self.name, method], 'ss', 'sss')
class Object:
def __init__(self, cls, nc, handle):
self.cls = cls
self.handle = handle
self.nc = nc
def get_method(self, method):
return self.cls.get_method(method)
def call_method(self, method, ls):
command, arg_types, ret_types = self.get_method(method)
return self.nc.invoke(command, [self.handle] + ls, arg_types, ret_types)
class Modcat(Netcat):
def __init__(self, ip, port, logger):
Netcat.__init__(self, ip, port)
self.logger = logger
self.func_provider_names = {}
self.func_providers = {}
def recv_header(self):
self.logger.vlog('Reading host header')
header = self.read(8).decode()
if header != 'ModBox/M':
raise Exception('Invalid server header')
def send_header(self):
self.logger.vlog('Sending module header')
self.write(b'ModBox/m')
def recv_reverse_header(self):
self.logger.vlog('Reading reverse host header')
header = self.read(8).decode()
if header != 'ModBox/R':
raise Exception('Invalid server header')
def send_reverse_header(self):
self.logger.vlog('Sending reverse module header')
self.write(b'ModBox/r')
def read_str(self):
self.logger.vvlog('{}: Reading string...'.format(id(self)))
s = self.read_until(b'\x00')[:-1].decode()
self.logger.vvlog('{}: Reading string: "{}"'.format(id(self), s))
return s
def write_str(self, s):
if s is None:
raise Exception('Attempted to send a None value')
self.logger.vvlog('{}: Writing string: "{}"'.format(id(self), s))
s = str(s)
s += '\x00'
self.write(bytes(s, 'ascii'))
def unblobify(self, blob):
self.logger.vvlog('unblobify: {}'.format(repr(blob)))
split_blob = blob.split(b'\x00')
# Kludge No. 9124721: drop the trailing empty chunk left by the final NUL byte
split_blob = split_blob[:-1]
raw_members = dict([split_blob[i:i+2] for i in range(0, len(split_blob), 2)])
members = {
key.decode()[:-1]: (
key.decode()[-1], self.type_decode(value, key.decode()[-1])
) for key, value in raw_members.items()
}
return members
def send_arg(self, arg, tp):
self.logger.vvlog('send_arg: self = {}, arg = {}, tp = {}'.format(id(self), arg, tp))
if tp in 'iufs':
self.write_str(arg)
elif tp == 'b':
self.write_str(base64.b64encode(arg.encode() if type(arg) is str else arg).decode())
else:
raise Exception('Unknown type: "{}"'.format(tp))
def recv_arg(self, tp):
if tp in 'iu':
return int(self.read_str())
elif tp == 'f':
return float(self.read_str())
elif tp == 's':
return self.read_str()
elif tp == 'b':
return base64.b64decode(self.read_str()).decode()
else:
raise Exception('Unknown type: "{}"'.format(tp))
def blobify(self, values):
blob = b''
for name, (type, value) in values.items():
blob += bytes(name, 'utf-8') + bytes(type, 'utf-8') + b'\x00'
blob += self.type_encode(value, type) + b'\x00'
self.logger.vvlog('blobify: {}'.format(repr(blob)))
return blob
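# Wire-format sketch (added): blobify() serializes each member as
#   <name bytes><single type char> NUL <type_encode(value, type)> NUL
# so a dict like {'hp': ('i', 42)} becomes roughly b'hpi\x00' + encoded(42) + b'\x00';
# unblobify() splits on NUL bytes and rebuilds the same mapping. type_encode and
# type_decode are assumed to be defined elsewhere in the module (not shown here).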
def invoke(self, func, ls, args, ret):
self.logger.vlog('Invoking {}({})...'.format(func, ', '.join(map(str, ls))))
self.write_str(func)
for arg, tp in zip(ls, args):
self.send_arg(arg, tp)
exit_code = int(self.read_str())
if exit_code == 0:
ret_ls = [self.recv_arg(tp) for tp in ret]
self.logger.vlog('... = {}'.format(ret_ls))
return ret_ls
else:
error = self.read_str()
self.logger.vlog('... error: {}'.format(error))
raise Exception(error)
def register_func_provider(self, storage, func, name, args, ret):
self.logger.vlog('Registering FuncProvider: "{}" ({}) -> {}'.format(name, args, ret))
self.invoke('core.funcProvider.register', [name, args, ret], 'sss', '')
storage.func_providers[name] = func, args, ret
def serve_func(self):
try:
self.logger.vlog('Serving')
while True:
# Wait for a request
name = self.read_str()
if name == '_exit':
# exit
return
try:
# Parse the request
func, arg_types, ret_types = self.func_providers[name]
# Receive function arguments
args = [self.recv_arg(type) for type in arg_types]
# Call the function
ret = func(*args)
except BaseException as e:
# Something has gone wrong, exit code is not 0
self.logger.log('Exception at module function:')
traceback.print_exc()
self.write_str(1)
if self.logger.exit_on_callback_errors:
self.logger.log('Bye-bye')
self.logger.nc.close()
self.logger.rnc.close()
os._exit(1)
continue
# Exit code is 0
self.write_str(0)
for type, val in zip(ret_types, ret):
self.send_arg(val, type)
except BaseException as e:
self.logger.log('Exception occurred at serve_func: ' + str(e))
os._exit(1)
def spawn_serving_thread(self):
self.serving_thread = Thread(target=self.serve_func)
self.serving_thread.start()
def join_serving_thread(self):
self.serving_thread.join()
def get_class(self, classname):
return Class(self, classname)
def add_class(self, classname, members, methods, parent=''):
self.logger.vlog('Adding class "{}"'.format(classname))
members_string = ':'.join(map(lambda pair: ''.join(pair), members))
methods_string = ':'.join(map(lambda tup: ','.join(tup), methods))
self.invoke('core.class.add', [parent, classname, members_string, methods_string], 'ssss', '')
return Class(self, classname)
def instantiate_class(self, classname):
self.logger.vlog('Instantiating class {}'.format(classname))
return self.invoke('core.class.instantiate', [classname], 's', 'u')[0]
class Module:
def __init__(self, module_name):
self.module_name = module_name
self.VERBOSE = True
self.VERY_VERBOSE = False
self.exit_on_callback_errors = False
self.call_lock = Lock()
self.main_port, self.reverse_port = self.portinfo(sys.argv)
self.nc = Modcat('localhost', self.main_port, logger=self)
self.rnc = Modcat('localhost', self.reverse_port, logger=self)
self.nc.recv_header()
self.nc.send_header()
self.nc.write_str(module_name)
self.rnc.recv_reverse_header()
self.rnc.send_reverse_header()
self.rnc.write_str(module_name)
self.rnc.spawn_serving_thread()
def register_func_provider(self, func, command, args, ret):
self.nc.register_func_provider(self.rnc, func.__get__(self), command, args, ret)
def invoke(self, *args, **kwargs):
with self.call_lock:
return self.nc.invoke(*args, **kwargs)
def _get_log_time(self):
return time.strftime('%d.%m.%Y %H:%M:%S')
def log(self, text):
print("[MODLOG ({}) {}] ".format(self.module_name, self._get_log_time()) + str(text))
sys.stdout.flush()
def vlog(self, text):
if self.VERBOSE:
self.log(text)
def vvlog(self, text):
if self.VERY_VERBOSE:
self.log(text)
def portinfo(self, argv):
main_port = None
reverse_port = None
self.vlog(argv)
for arg in argv:
if re.match(r'^--main-port=[0-9]*$', arg) is not None:
main_port = int(arg.split('=')[1])
if re.match(r'^--reverse-port=[0-9]*$', arg) is not None:
reverse_port = int(arg.split('=')[1])
if main_port is None:
raise ValueError('Main port information not provided')
if reverse_port is None:
raise ValueError('Reverse port information not provided')
return main_port, reverse_port
def ready(self):
self.invoke('module.ready', [], '', '')
|
graph-size-circum-trim.py
|
#! /usr/bin/env python
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2013. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt. Contact: ctb@msu.edu
#
import khmer
import sys
import screed
import os.path
import threading
import Queue
import gc
K = 32
HASHTABLE_SIZE = int(1e9)
THRESHOLD = 500
N_HT = 4
WORKER_THREADS = 5
###
RADIUS = 2
MAX_CIRCUM = 4 # 4 seems to eliminate lump in 1m.fa
MAX_VOLUME = 200
incr = 2 * RADIUS
###
GROUPSIZE = 100
###
class SequenceGroup(object):
def __init__(self, order, seqlist):
self.order = order
self.seqlist = seqlist
def is_pair(r1, r2):
a = r1['name'].split('/')[0]
b = r2['name'].split('/')[0]
return (a == b)
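# Added illustration: pairing relies on the "/1" and "/2" read-name convention,
# so records named 'read17/1' and 'read17/2' count as a pair.
def _is_pair_example():
    a = {'name': 'read17/1', 'sequence': 'ACGT'}
    b = {'name': 'read17/2', 'sequence': 'TTGA'}
    return is_pair(a, b) # True: both names share the prefix 'read17'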
def trim_by_circumference(ht, name, seq):
# calculate circumference for every point.
end = len(seq) - K
is_high = False
pos = 0
for pos in range(0, end, incr):
circum = ht.count_kmers_on_radius(seq[pos:pos + K], RADIUS, MAX_VOLUME)
if circum >= MAX_CIRCUM:
is_high = True
break
# ok. sequence has high-radius k-mers; can we trim them off?
if is_high and pos > incr:
pos -= incr
# find last k-mer with a low radius:
i = 1
for i in range(1, incr):
circum = ht.count_kmers_on_radius(seq[pos + i:pos + i + K],
RADIUS, MAX_VOLUME)
if circum >= MAX_CIRCUM:
break
pos += i - 1
# now trim sequence:
seq = seq[:pos + K]
is_high = False
name += "\tTRUNC.%d" % pos
if is_high:
return None, None
else:
return name, seq
def process(inq, outq, ht):
global worker_count
while not done or not inq.empty():
try:
g = inq.get(True, 1)
except Queue.Empty:
continue
x = []
last_record = None
for record in g.seqlist:
kmer = record['sequence'][:K]
size = ht.calc_connected_graph_size(kmer, THRESHOLD)
if size >= THRESHOLD:
# keep pairs together if either is "good"
if last_record and is_pair(last_record, record):
x.append(last_record)
x.append(record)
record = None
last_record = record
y = []
for record in x:
name, seq = trim_by_circumference(ht, record['name'],
record['sequence'])
if name:
y.append((name, seq))
gg = SequenceGroup(g.order, y)
outq.put(gg)
worker_count -= 1
def write(outq, outfp):
global worker_count
groups = {}
next_group = 0
while worker_count > 0 or not outq.empty():
try:
g = outq.get(True, 1)
except Queue.Empty:
continue
groups[g.order] = g
while next_group in groups:
g = groups[next_group]
for name, seq in g.seqlist:
outfp.write('>%s\n%s\n' % (name, seq,))
del groups[next_group]
next_group += 1
gc.collect()
def main():
global done, worker_count
done = False
worker_count = 0
infile = sys.argv[1]
outfile = os.path.basename(infile) + '.graphcirc'
if len(sys.argv) == 3:
outfile = sys.argv[2]
print 'creating ht'
ht = khmer.new_hashbits(K, HASHTABLE_SIZE, N_HT)
print 'eating fa', infile
total_reads, n_consumed = ht.consume_fasta(infile)
outfp = open(outfile, 'w')
inqueue = Queue.Queue(50)
outqueue = Queue.Queue(50)
## worker and writer threads
for i in range(WORKER_THREADS):
t = threading.Thread(target=process, args=(inqueue, outqueue, ht))
worker_count += 1
t.start()
threading.Thread(target=write, args=(outqueue, outfp)).start()
### main thread
x = []
i = 0
group_n = 0
for n, record in enumerate(screed.fasta.fasta_iter(open(infile))):
if n % 10000 == 0:
print '...', n
i += 1
if i > GROUPSIZE:
this_name = record['name'].split('/')[0]
last_name = x[-1]['name'].split('/')[0]
if is_pair(record, x[-1]): # preserve pairs
x.append(record)
g = SequenceGroup(group_n, x)
inqueue.put(g)
x = []
else:
g = SequenceGroup(group_n, x)
inqueue.put(g)
x = [record]
group_n += 1
i = 0
else:
x.append(record)
# submit last set of sequences
g = SequenceGroup(group_n, x)
inqueue.put(g)
done = True
if __name__ == '__main__':
main()
|
miniterm.py
|
#!/usr/bin/env python
# Very simple serial terminal
# (C)2002-2011 Chris Liechti <cliechti@gmx.net>
# Input characters are sent directly (only LF -> CR/LF/CRLF translation is
# done), received characters are displayed as is (or escaped through Python's
# repr, useful for debugging purposes)
import sys, os, serial, threading, atexit
try:
from serial.tools.list_ports import comports
except ImportError:
comports = None
EXITCHARCTER = serial.to_bytes([0x1d]) # GS/CTRL+]
MENUCHARACTER = serial.to_bytes([0x14]) # Menu: CTRL+T
DEFAULT_PORT = None
DEFAULT_BAUDRATE = 9600
DEFAULT_RTS = None
DEFAULT_DTR = None
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+%c' % (ord('@') + ascii_code)
else:
return repr(character)
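# Added examples: key_description(serial.to_bytes([0x1d])) returns 'Ctrl+]' and
# key_description('\x14') returns 'Ctrl+T'; printable characters fall through to
# repr(), so key_description('q') returns "'q'".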
# help text, starts with blank line! it's a function so that the current values
# for the shortcut keys are used and not the values at program start
def get_help_text():
return """
--- pySerial (%(version)s) - miniterm - help
---
--- %(exit)-8s Exit program
--- %(menu)-8s Menu escape key, followed by:
--- Menu keys:
--- %(itself)-7s Send the menu character itself to remote
--- %(exchar)-7s Send the exit character itself to remote
--- %(info)-7s Show info
--- %(upload)-7s Upload file (prompt will be shown)
--- Toggles:
--- %(rts)-7s RTS %(echo)-7s local echo
--- %(dtr)-7s DTR %(break)-7s BREAK
--- %(lfm)-7s line feed %(repr)-7s Cycle repr mode
---
--- Port settings (%(menu)s followed by the following):
--- p change port
--- 7 8 set data bits
--- n e o s m change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""" % {
'version': getattr(serial, 'VERSION', 'unknown version'),
'exit': key_description(EXITCHARCTER),
'menu': key_description(MENUCHARACTER),
'rts': key_description('\x12'),
'repr': key_description('\x01'),
'dtr': key_description('\x04'),
'lfm': key_description('\x0c'),
'break': key_description('\x02'),
'echo': key_description('\x05'),
'info': key_description('\x09'),
'upload': key_description('\x15'),
'itself': key_description(MENUCHARACTER),
'exchar': key_description(EXITCHARCTER),
}
if sys.version_info >= (3, 0):
def character(b):
return b.decode('latin1')
else:
def character(b):
return b
LF = serial.to_bytes([10])
CR = serial.to_bytes([13])
CRLF = serial.to_bytes([13, 10])
X00 = serial.to_bytes([0])
X0E = serial.to_bytes([0x0e])
# first choose a platform dependent way to read single characters from the console
global console
if os.name == 'nt':
import msvcrt
class Console(object):
def __init__(self):
pass
def setup(self):
pass # Do nothing for 'nt'
def cleanup(self):
pass # Do nothing for 'nt'
def getkey(self):
while True:
z = msvcrt.getch()
if z == X00 or z == X0E: # function keys, ignore
msvcrt.getch()
else:
if z == CR:
return LF
return z
console = Console()
elif os.name == 'posix':
import termios, sys, os
class Console(object):
def __init__(self):
self.fd = sys.stdin.fileno()
self.old = None
def setup(self):
self.old = termios.tcgetattr(self.fd)
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = os.read(self.fd, 1)
return c
def cleanup(self):
if self.old is not None:
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
console = Console()
def cleanup_console():
console.cleanup()
atexit.register(cleanup_console) # terminal modes have to be restored on exit...
else:
raise NotImplementedError("Sorry no implementation for your platform (%s) available." % sys.platform)
def dump_port_list():
if comports:
sys.stderr.write('\n--- Available ports:\n')
for port, desc, hwid in sorted(comports()):
#~ sys.stderr.write('--- %-20s %s [%s]\n' % (port, desc, hwid))
sys.stderr.write('--- %-20s %s\n' % (port, desc))
CONVERT_CRLF = 2
CONVERT_CR = 1
CONVERT_LF = 0
NEWLINE_CONVERISON_MAP = (LF, CR, CRLF)
LF_MODES = ('LF', 'CR', 'CR/LF')
REPR_MODES = ('raw', 'some control', 'all control', 'hex')
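# Added note: NEWLINE_CONVERISON_MAP and LF_MODES are indexed by the same
# CONVERT_* constant (e.g. index CONVERT_CRLF selects CRLF and the label
# 'CR/LF'), while REPR_MODES is indexed by the separate repr_mode setting.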
class Miniterm(object):
def __init__(self, port, baudrate, parity, rtscts, xonxoff, echo=False, convert_outgoing=CONVERT_CRLF, repr_mode=0):
try:
self.serial = serial.serial_for_url(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1)
except AttributeError:
# happens when the installed pyserial is older than 2.5. use the
# Serial class directly then.
self.serial = serial.Serial(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1)
self.echo = echo
self.repr_mode = repr_mode
self.convert_outgoing = convert_outgoing
self.newline = NEWLINE_CONVERISON_MAP[self.convert_outgoing]
self.dtr_state = True
self.rts_state = True
self.break_state = False
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader)
self.receiver_thread.setDaemon(True)
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
self.receiver_thread.join()
def start(self):
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer)
self.transmitter_thread.setDaemon(True)
self.transmitter_thread.start()
def stop(self):
self.alive = False
def join(self, transmit_only=False):
self.transmitter_thread.join()
if not transmit_only:
self.receiver_thread.join()
def dump_port_settings(self):
sys.stderr.write("\n--- Settings: %s %s,%s,%s,%s\n" % (
self.serial.portstr,
self.serial.baudrate,
self.serial.bytesize,
self.serial.parity,
self.serial.stopbits))
sys.stderr.write('--- RTS: %-8s DTR: %-8s BREAK: %-8s\n' % (
(self.rts_state and 'active' or 'inactive'),
(self.dtr_state and 'active' or 'inactive'),
(self.break_state and 'active' or 'inactive')))
try:
sys.stderr.write('--- CTS: %-8s DSR: %-8s RI: %-8s CD: %-8s\n' % (
(self.serial.getCTS() and 'active' or 'inactive'),
(self.serial.getDSR() and 'active' or 'inactive'),
(self.serial.getRI() and 'active' or 'inactive'),
(self.serial.getCD() and 'active' or 'inactive')))
except serial.SerialException:
# on RFC 2217 ports it can happen that no modem state notification has
# been received yet. Ignore this error.
pass
sys.stderr.write('--- software flow control: %s\n' % (self.serial.xonxoff and 'active' or 'inactive'))
sys.stderr.write('--- hardware flow control: %s\n' % (self.serial.rtscts and 'active' or 'inactive'))
sys.stderr.write('--- data escaping: %s linefeed: %s\n' % (
REPR_MODES[self.repr_mode],
LF_MODES[self.convert_outgoing]))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
data = character(self.serial.read(1))
if self.repr_mode == 0:
# direct output, just have to care about newline setting
if data == '\r' and self.convert_outgoing == CONVERT_CR:
sys.stdout.write('\n')
else:
sys.stdout.write(data)
elif self.repr_mode == 1:
# escape non-printable, let pass newlines
if self.convert_outgoing == CONVERT_CRLF and data in '\r\n':
if data == '\n':
sys.stdout.write('\n')
elif data == '\r':
pass
elif data == '\n' and self.convert_outgoing == CONVERT_LF:
sys.stdout.write('\n')
elif data == '\r' and self.convert_outgoing == CONVERT_CR:
sys.stdout.write('\n')
else:
sys.stdout.write(repr(data)[1:-1])
elif self.repr_mode == 2:
# escape all non-printable, including newline
sys.stdout.write(repr(data)[1:-1])
elif self.repr_mode == 3:
# escape everything (hexdump)
for c in data:
sys.stdout.write("%s " % c.encode('hex'))
sys.stdout.flush()
except serial.SerialException as e:
self.alive = False
# would be nice if the console reader could be interrupted at this
# point...
raise
def writer(self):
"""\
Loop and copy console->serial until EXITCHARCTER character is
found. When MENUCHARACTER is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
b = console.getkey()
except KeyboardInterrupt:
b = serial.to_bytes([3])
c = character(b)
if menu_active:
if c == MENUCHARACTER or c == EXITCHARCTER: # Menu character again/exit char -> send itself
self.serial.write(b) # send character
if self.echo:
sys.stdout.write(c)
elif c == '\x15': # CTRL+U -> upload file
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
console.cleanup()
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
file = open(filename, 'r')
sys.stderr.write('--- Sending file %s ---\n' % filename)
while True:
line = file.readline().rstrip('\r\n')
if not line:
break
self.serial.write(line)
self.serial.write('\r\n')
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File %s sent ---\n' % filename)
except IOError as e:
sys.stderr.write('--- ERROR opening file %s: %s ---\n' % (filename, e))
console.setup()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.rts_state = not self.rts_state
self.serial.setRTS(self.rts_state)
sys.stderr.write('--- RTS %s ---\n' % (self.rts_state and 'active' or 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.dtr_state = not self.dtr_state
self.serial.setDTR(self.dtr_state)
sys.stderr.write('--- DTR %s ---\n' % (self.dtr_state and 'active' or 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.break_state = not self.break_state
self.serial.setBreak(self.break_state)
sys.stderr.write('--- BREAK %s ---\n' % (self.break_state and 'active' or 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo %s ---\n' % (self.echo and 'active' or 'inactive'))
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
elif c == '\x01': # CTRL+A -> cycle escape mode
self.repr_mode += 1
if self.repr_mode > 3:
self.repr_mode = 0
sys.stderr.write('--- escape data: %s ---\n' % (
REPR_MODES[self.repr_mode],
))
elif c == '\x0c': # CTRL+L -> cycle linefeed mode
self.convert_outgoing += 1
if self.convert_outgoing > 2:
self.convert_outgoing = 0
self.newline = NEWLINE_CONVERISON_MAP[self.convert_outgoing]
sys.stderr.write('--- line feed %s ---\n' % (
LF_MODES[self.convert_outgoing],
))
elif c in 'pP': # P -> change port
dump_port_list()
sys.stderr.write('--- Enter port name: ')
sys.stderr.flush()
console.cleanup()
try:
port = sys.stdin.readline().strip()
except KeyboardInterrupt:
port = None
console.setup()
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
except AttributeError:
# happens when the installed pyserial is older than 2.5. use the
# Serial class directly then.
new_serial = serial.Serial()
new_serial.port = port
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.open()
new_serial.setRTS(self.rts_state)
new_serial.setDTR(self.dtr_state)
new_serial.setBreak(self.break_state)
except Exception as e:
sys.stderr.write('--- ERROR opening new port: %s ---\n' % (e,))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: %s ---\n' % (self.serial.port,))
# and restart the reader thread
self._start_reader()
elif c in 'bB': # B -> change baudrate
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
console.cleanup()
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: %s ---\n' % (e,))
self.serial.baudrate = backup
else:
self.dump_port_settings()
console.setup()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
elif c == '7': # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character %s --\n' % key_description(c))
menu_active = False
elif c == MENUCHARACTER: # next char will be for menu
menu_active = True
elif c == EXITCHARCTER:
self.stop()
break # exit app
elif c == '\n':
self.serial.write(self.newline) # send newline character(s)
if self.echo:
sys.stdout.write(c) # local echo is a real newline in any case
sys.stdout.flush()
else:
self.serial.write(b) # send byte
if self.echo:
sys.stdout.write(c)
sys.stdout.flush()
except:
self.alive = False
raise
def main():
import optparse
parser = optparse.OptionParser(
usage = "%prog [options] [port [baudrate]]",
description = "Miniterm - A simple terminal program for the serial port."
)
group = optparse.OptionGroup(parser, "Port settings")
group.add_option("-p", "--port",
dest = "port",
help = "port, a number or a device name. (deprecated option, use parameter instead)",
default = DEFAULT_PORT
)
group.add_option("-b", "--baud",
dest = "baudrate",
action = "store",
type = 'int',
help = "set baud rate, default %default",
default = DEFAULT_BAUDRATE
)
group.add_option("--parity",
dest = "parity",
action = "store",
help = "set parity, one of [N, E, O, S, M], default=N",
default = 'N'
)
group.add_option("--rtscts",
dest = "rtscts",
action = "store_true",
help = "enable RTS/CTS flow control (default off)",
default = False
)
group.add_option("--xonxoff",
dest = "xonxoff",
action = "store_true",
help = "enable software flow control (default off)",
default = False
)
group.add_option("--rts",
dest = "rts_state",
action = "store",
type = 'int',
help = "set initial RTS line state (possible values: 0, 1)",
default = DEFAULT_RTS
)
group.add_option("--dtr",
dest = "dtr_state",
action = "store",
type = 'int',
help = "set initial DTR line state (possible values: 0, 1)",
default = DEFAULT_DTR
)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Data handling")
group.add_option("-e", "--echo",
dest = "echo",
action = "store_true",
help = "enable local echo (default off)",
default = False
)
group.add_option("--cr",
dest = "cr",
action = "store_true",
help = "do not send CR+LF, send CR only",
default = False
)
group.add_option("--lf",
dest = "lf",
action = "store_true",
help = "do not send CR+LF, send LF only",
default = False
)
group.add_option("-D", "--debug",
dest = "repr_mode",
action = "count",
help = """debug received data (escape non-printable chars)
--debug can be given multiple times:
0: just print what is received
1: escape non-printable characters, do newlines as usual
2: escape non-printable characters, newlines too
3: hex dump everything""",
default = 0
)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Hotkeys")
group.add_option("--exit-char",
dest = "exit_char",
action = "store",
type = 'int',
help = "ASCII code of special character that is used to exit the application",
default = 0x1d
)
group.add_option("--menu-char",
dest = "menu_char",
action = "store",
type = 'int',
help = "ASCII code of special character that is used to control miniterm (menu)",
default = 0x14
)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Diagnostics")
group.add_option("-q", "--quiet",
dest = "quiet",
action = "store_true",
help = "suppress non-error messages",
default = False
)
parser.add_option_group(group)
(options, args) = parser.parse_args()
options.parity = options.parity.upper()
if options.parity not in 'NEOSM':
parser.error("invalid parity")
if options.cr and options.lf:
parser.error("only one of --cr or --lf can be specified")
if options.menu_char == options.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
global EXITCHARCTER, MENUCHARACTER
EXITCHARCTER = chr(options.exit_char)
MENUCHARACTER = chr(options.menu_char)
port = options.port
baudrate = options.baudrate
if args:
if options.port is not None:
parser.error("no arguments are allowed, options only when --port is given")
port = args.pop(0)
if args:
try:
baudrate = int(args[0])
except ValueError:
parser.error("baud rate must be a number, not %r" % args[0])
args.pop(0)
if args:
parser.error("too many arguments")
else:
# no port given on command line -> ask user now
if port is None:
dump_port_list()
port = input('Enter port name:')
convert_outgoing = CONVERT_CRLF
if options.cr:
convert_outgoing = CONVERT_CR
elif options.lf:
convert_outgoing = CONVERT_LF
try:
miniterm = Miniterm(
port,
baudrate,
options.parity,
rtscts=options.rtscts,
xonxoff=options.xonxoff,
echo=options.echo,
convert_outgoing=convert_outgoing,
repr_mode=options.repr_mode,
)
except serial.SerialException as e:
sys.stderr.write("could not open port %r: %s\n" % (port, e))
sys.exit(1)
if not options.quiet:
sys.stderr.write('--- Miniterm on %s: %d,%s,%s,%s ---\n' % (
miniterm.serial.portstr,
miniterm.serial.baudrate,
miniterm.serial.bytesize,
miniterm.serial.parity,
miniterm.serial.stopbits,
))
sys.stderr.write('--- Quit: %s | Menu: %s | Help: %s followed by %s ---\n' % (
key_description(EXITCHARCTER),
key_description(MENUCHARACTER),
key_description(MENUCHARACTER),
key_description('\x08'),
))
if options.dtr_state is not None:
if not options.quiet:
sys.stderr.write('--- forcing DTR %s\n' % (options.dtr_state and 'active' or 'inactive'))
miniterm.serial.setDTR(options.dtr_state)
miniterm.dtr_state = options.dtr_state
if options.rts_state is not None:
if not options.quiet:
sys.stderr.write('--- forcing RTS %s\n' % (options.rts_state and 'active' or 'inactive'))
miniterm.serial.setRTS(options.rts_state)
miniterm.rts_state = options.rts_state
console.setup()
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not options.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
#~ console.cleanup()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
|
generator.py
|
# coding=utf-8
import os
from .batcher import Batcher
from queue import Queue
from threading import Thread
import threading
import numpy as np
import torch
from tqdm import tqdm
import random
script_abs_path = os.path.dirname(__file__)
ROOT_DIR = os.path.join(script_abs_path, '../../../')
DATA_DIR = os.path.join(ROOT_DIR, 'dataset')
RAW_DATA = os.path.join(DATA_DIR, 'raw_data', 'law')
import logging
logger = logging.getLogger("Generator")
logger.setLevel(logging.DEBUG)
PAD_TOKEN = '[PAD]'
UNK_TOKEN = '[UNK]'
SEP_TOKEN = '[SEP]'
CLS_TOKEN = '[CLS]'
class ToBertInput():
def __init__(self, args, tokenizer):
self.args = args
self.tokenizer = tokenizer
def __call__(self, batch_data):
text_input = []
mask_input = []
type_input = []
y = []
for qid, did, query, q_len, text, text_len, _, label in batch_data:
seq = query + text
pad_num = self.args.max_seq_len - q_len - text_len - 3
mask = [1]*(q_len + text_len + 3) + [0]*(pad_num)
type = [0]*(q_len +2) + [1]*(self.args.max_seq_len - q_len -2)
assert len(seq) == len(mask) == len(type) == self.args.max_seq_len
text_input.append(seq)
mask_input.append(mask)
type_input.append(type)
y.append(label)
text_input = np.array(text_input) # [b, seq_len]
mask_input = np.array(mask_input) # [b, seq_len]
type_input = np.array(type_input) # [b, seq_len]
y = np.array(y) # [b]
if self.args.class_num == 1:
y = np.expand_dims(y, axis=-1) # [b, 1]
# y[y > 0.5] = 1.0 # convert labels from 0/1 to -1/1, then feed a sigmoid to get 0/1 outputs
# y[y < 0.5] = -1.0
''' If the batch cannot be split evenly across the GPUs, samples need to be repeated '''
batch_size = y.shape[0]
ratio = 1
if batch_size > self.args.min_bz_per_gpu*self.args.gpu_num:
over_num = batch_size % (self.args.min_bz_per_gpu*self.args.gpu_num)
elif batch_size < self.args.min_bz_per_gpu*self.args.gpu_num:
over_num = batch_size
else:
over_num = 0
if over_num > 0:
assert over_num % self.args.min_bz_per_gpu == 0
remain_num = self.args.min_bz_per_gpu*self.args.gpu_num - over_num
assert remain_num % self.args.min_bz_per_gpu == 0
text_input = np.pad(text_input, ((0, remain_num),(0,0)), 'constant', constant_values=(1,1)) # [(b+remain_num), seq_len]
mask_input = np.pad(mask_input, ((0, remain_num),(0,0)), 'constant', constant_values=(1,1)) # [(b+remain_num), seq_len]
type_input = np.pad(type_input, ((0, remain_num),(0,0)), 'constant', constant_values=(1,1)) # [(b+remain_num), seq_len]
if self.args.class_num == 1:
y = np.pad(y, ((0, remain_num),(0,0)), 'constant', constant_values=(1, 1)) # [(b+remain_num), 1]
else:
y = np.pad(y, (0, remain_num), 'constant', constant_values=(1, 1)) # [(b+remain_num), 1]
padded_batch_size = y.shape[0]
ratio = batch_size/padded_batch_size
text_input = torch.Tensor(text_input).long()
mask_input = torch.Tensor(mask_input).float()
type_input = torch.Tensor(type_input).long()
if self.args.class_num == 1:
y = torch.Tensor(y).float()
else:
y = torch.Tensor(y).long() # [b,]
return (text_input, type_input, mask_input, y, y, y, y), ratio
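# Added sketch of the expected per-sample layout (illustrative numbers only):
# with max_seq_len = 8, q_len = 2 and text_len = 2 the generators below deliver
#   query + text = [CLS] q1 q2 [SEP] d1 d2 [SEP] [PAD]   (already id-encoded)
#   mask         = [1, 1, 1, 1, 1, 1, 1, 0]              (q_len + text_len + 3 ones)
#   type         = [0, 0, 0, 0, 1, 1, 1, 1]              (q_len + 2 zeros, rest ones)
# which is exactly what the assertion in __call__ above checks.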
class TrainDataGenerator():
def __init__(self, dataset_model, dataset_name, tokenizer, args, transform=None):
"""Init."""
self.transform = transform
self.dataset_model = dataset_model
self.dataset_name = dataset_name
self.batch_size = args.batch_size
self.tokenizer = tokenizer
self.args = args
self.max_q_len = self.args.max_q_len
self.max_d_len = self.args.max_d_len
self.max_seq_len = self.args.max_seq_len
self.max_para_num = self.args.max_para_num
self.batcher = Batcher(dataset_name=dataset_name,
args=args,
tokenizer=tokenizer)
self.buffer = Queue(maxsize=self.batch_size)
self.full_sem = threading.Semaphore(0)
self.empty_sem = threading.Semaphore(1)
self.data_buff_thread = Thread(target=self.get_data_worker)
self.data_buff_thread.start()
self.flag_data_out = False
def get_data_worker(self):
self.flag_data_out = False
i = 0
batches = 0
while True:
qid, did, label = self.batcher.qd_pairs_id[i] # get qid, did, label
query = self.batcher.get_query(qid) # fetch the query content
docs = self.batcher.get_doc(did) # fetch the document content
if batches == 0:
self.empty_sem.acquire() # wait for an empty batch slot
q_len = len(query) # query length
query = [CLS_TOKEN] + query + [SEP_TOKEN]
query = self.tokenizer.convert_tokens_to_ids(query) # convert to ids
''' process and buffer q, d, label '''
for doc in docs:
d_len = len(doc) # doc length
''' keep the full query and truncate the document '''
if d_len + q_len + 2 > self.args.max_seq_len - 1:
exceed_num = (d_len + q_len + 2 ) - self.args.max_seq_len + 1
doc = doc[:-exceed_num]
d_len = len(doc)
assert len(doc) + q_len + 2 == self.args.max_seq_len - 1, ' %d %d %d' % (len(doc), q_len , exceed_num)
doc = doc + [SEP_TOKEN]
pad_num = self.args.max_seq_len - q_len - d_len - 3
doc = doc + [PAD_TOKEN] * pad_num
assert len(doc) + len(query) == self.args.max_seq_len == d_len + q_len + 3 + pad_num, ' %d %d %d %d' % (len(doc), len(query), d_len, q_len )
text = self.tokenizer.convert_tokens_to_ids(doc) # convert the text to token ids
self.buffer.put((qid, did, query, q_len, text, d_len, 0, label))
batches += 1
if batches >= self.batch_size: # release one semaphore per completed batch
self.full_sem.release() # release a full-batch semaphore
batches = 0
i += 1
if i >= len(self.batcher.qd_pairs_id): # all data processed even though the last batch is not full
i = 0
self.flag_data_out = True
self.full_sem.release() # last (partial) batch done, release a semaphore
batches = 0
def __getitem__(self, item: int):
self.full_sem.acquire() # wait for a full batch
if self.flag_data_out == True:
self.flag_data_out = False
batch_data = []
remain = self.buffer.qsize()
for _ in range(remain):
batch_data.append(self.buffer.get())
for tran in self.transform:
batch_data = tran(batch_data)
else:
assert self.buffer.qsize() == self.batch_size
batch_data = []
for _ in range(self.batch_size):
batch_data.append(self.buffer.get())
for tran in self.transform:
batch_data = tran(batch_data)
self.empty_sem.release() # release an empty batch slot
return batch_data
def __len__(self) -> int:
num_batch = (len(self.batcher.qd_pairs_id) // self.args.batch_size)
remain = len(self.batcher.qd_pairs_id) % self.args.batch_size
if remain > 0:
num_batch += 1
return num_batch
def on_epoch_end_callback(self, callback=None):
"""Reorganize the data_bert while epoch is ended."""
if callback:
callback()
def get_all(self):
return self.batcher.get_all()
class DevTestGenerator():
def __init__(self, dataset_model, dataset_name, tokenizer, args, transform=None):
"""Init."""
self.transform = transform
self.dataset_model = dataset_model
self.dataset_name = dataset_name
self.batch_size = args.batch_size
self.tokenizer = tokenizer
self.args = args
self.max_q_len = self.args.max_q_len
self.max_d_len = self.args.max_d_len
self.max_seq_len = self.args.max_seq_len
self.max_para_num = self.args.max_para_num
self.batcher = Batcher(dataset_name=dataset_name,
args=args,
tokenizer=tokenizer)
self.buffer = Queue(maxsize=self.batch_size)
self.full_sem = threading.Semaphore(0)
self.empty_sem = threading.Semaphore(1)
self.data_buff_thread = Thread(target=self.get_data_worker)
self.data_buff_thread.start()
self.flag_data_out = False
self.flag_data_out_once = False
def get_data_worker(self):
self.flag_data_out = False
i = 0
batches = 0
while True:
qid, did, label = self.batcher.qd_pairs_id[i]
query = self.batcher.get_query(qid)
docs = self.batcher.get_doc(did)
q_len = len(query) # query length
query = [CLS_TOKEN] + query + [SEP_TOKEN]
query = self.tokenizer.convert_tokens_to_ids(query) # convert to ids
''' process and buffer q, d, label '''
for doc in docs:
if batches == 0:
self.empty_sem.acquire()
d_len = len(doc) # doc length
''' keep the full query and truncate the document '''
if d_len + q_len + 2 > self.args.max_seq_len - 1:
exceed_num = (d_len + q_len + 2 ) - self.args.max_seq_len + 1
doc = doc[:-exceed_num]
d_len = len(doc)
assert len(doc) + q_len + 2 == self.args.max_seq_len - 1, '%s %d %d %d' % (self.dataset_name ,len(doc), q_len, exceed_num )
doc = doc + [SEP_TOKEN]
pad_num = self.args.max_seq_len - q_len - d_len - 3
doc = doc + [PAD_TOKEN]*pad_num
assert len(doc) + len(query) == self.args.max_seq_len == d_len + q_len + 3 + pad_num, ' %d %d %d %d' % (len(doc), len(query), d_len, q_len )
text = self.tokenizer.convert_tokens_to_ids(doc) # convert the text to token ids
self.buffer.put((qid, did, query, q_len, text, d_len, 0, label))
batches += 1
if batches >= self.batch_size: # release one semaphore per completed batch
self.full_sem.release() # release a semaphore
batches = 0
i += 1
if i >= len(self.batcher.qd_pairs_id): # all data processed even though the last batch is not full
i = 0
self.flag_data_out = True
self.full_sem.release() # last (partial) batch done, release a semaphore
batches = 0
def __getitem__(self, item: int):
self.full_sem.acquire() # wait for a full batch
if self.flag_data_out == True:
self.flag_data_out = False
batch_data = []
remain = self.buffer.qsize()
for _ in range(remain):
batch_data.append(self.buffer.get())
for tran in self.transform:
batch_data = tran(batch_data)
else:
assert self.buffer.qsize() == self.batch_size
batch_data = []
for _ in range(self.batch_size):
batch_data.append(self.buffer.get())
for tran in self.transform:
batch_data = tran(batch_data)
self.empty_sem.release() # release an empty batch slot
return batch_data
def __len__(self) -> int:
num_batch = (len(self.batcher.qd_pairs_id) // self.args.batch_size)
remain = len(self.batcher.qd_pairs_id) % self.args.batch_size
if remain > 0:
num_batch += 1
return num_batch
def on_epoch_end_callback(self, callback=None):
"""Reorganize the data_bert while epoch is ended."""
if callback:
callback()
def get_all(self):
return self.batcher.get_all()
|
run.pyw
|
import os
import sys
import webbrowser
from threading import Thread
from tkinter import Tk, Label, CENTER, Button
from api.router import Router
PATH = os.path.dirname(os.path.realpath(sys.argv[0]))
class SimpleServer:
def __init__(self):
self.ui = Tk()
self.init_ui()
self.open_browser() # open the browser once automatically
def init_ui(self):
self.ui.title("AnimeSearcher")
self.ui.wm_iconbitmap(f"{PATH}/logo.ico")
width, height = 350, 200
screen_width = self.ui.winfo_screenwidth()
screen_height = self.ui.winfo_screenheight()
position = '%dx%d+%d+%d' % (width, height, (screen_width - width) / 2, (screen_height - height) / 2)
self.ui.geometry(position)
self.ui.resizable(width=False, height=False)
Label(self.ui, text="后台服务正在运行, 请不要关闭此窗口", justify=CENTER, pady=50).pack()
btn = Button(self.ui, text="[ 打开浏览器 ]", relief="groove", command=self.open_browser)
btn.pack()
def open_browser(self):
"""打开一个浏览器窗口"""
webbrowser.open(f"file://{PATH}/web/index.html")
def run(self):
self.ui.mainloop()
if __name__ == '__main__':
server = SimpleServer()
rt = Router()
rt.listen("127.0.0.1", port=6001, ws_port=6002)
Thread(target=rt.run, daemon=True).start()
server.run()
|
search.py
|
from typing import List, Set
import os
from flask import Flask, json, request
import torch
import clip
from PIL import Image
import requests
from io import BytesIO
from waitress import serve
from dotenv import load_dotenv
import threading, queue
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
load_dotenv()
PORT = os.getenv("PORT")
BACKEND = os.getenv("BASE_ADDRESS")
api = Flask(__name__)
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)
featureDirectory = "features/"
obj = {}
processQueue = queue.Queue()
def getPhoto(imgId, thumb = False):
response = requests.get("http://" + BACKEND + "/media/" + ("thumb_" if thumb else "") + imgId )
image = preprocess(Image.open(BytesIO(response.content))).unsqueeze(0).to(device)
image_features = model.encode_image(image)
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
torch.save(image_features, featureDirectory+ imgId)
def getAllPhotos():
os.makedirs("features/", exist_ok=True)
files = dict.fromkeys(os.listdir("features/"))
response = requests.get("http://" + BACKEND + "/media/all")
usedSet = set()
for img in response.json():
imgId = img["id"]
usedSet.add(imgId)
try:
if not imgId in files:
getPhoto(imgId, thumb = img["type"] != "photo")
obj[imgId] = torch.load(featureDirectory+ imgId)[0]
except Exception as e:
print(f"Loading {imgId} failed because of {e}")
# remove unused
for file in files:
if not file in usedSet:
os.unlink("features/"+file)
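# Added note: each file under features/ holds the normalized CLIP image
# embedding saved by getPhoto() (shape [1, 512] for ViT-B/32), and obj keeps the
# squeezed [512] vector in memory for the similarity searches below.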
def findByText(term, candidates: List[str]):
text = clip.tokenize([term, ""]).to(device)
text_features = model.encode_text(text)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
usedCandidates = list()
tmp = []
for x in candidates:
if(not x in obj):
continue
usedCandidates.append(x)
tmp.append(obj[x])
if len(tmp) == 0:
return []
tmp = torch.stack(tmp)
scores = (tmp @ text_features.t()).t()
#scores = torch.div(scores[0],1)
scores = torch.div(scores[0],scores[1])
accepted = torch.sort(scores)
ind = torch.searchsorted(accepted.values, torch.tensor(1.1))
res = accepted.indices[ind.item():].tolist()
res.reverse()
return [usedCandidates[x] for x in res]
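# Added note on the scoring above: the search term is tokenized together with an
# empty prompt "", so scores[0] / scores[1] is each candidate's similarity to the
# term relative to its similarity to the empty prompt; only candidates whose
# ratio reaches 1.1 are kept and they are returned best-first.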
def findByImage(term, candidates: List[str]):
target_features = torch.squeeze(obj[term])
usedCandidates = list()
tmp = []
for x in candidates:
if(not x in obj):
continue
usedCandidates.append(x)
tmp.append(obj[x])
if len(tmp) == 0:
return []
tmp = torch.stack(tmp)
scores = (tmp @ target_features).t()
accepted = torch.sort(scores)
ind = torch.searchsorted(accepted.values, torch.tensor(0.65))
res = accepted.indices[ind.item():].tolist()
res.reverse()
return [usedCandidates[x] for x in res]
@api.route('/searchByText', methods=['POST'])
def searchByText():
jsonData = request.get_json()
text = jsonData["text"]
candidates = jsonData["candidates"]
return json.dumps(findByText(text, candidates))
@api.route('/searchByImage', methods=['POST'])
def searchByImage():
jsonData = request.get_json()
image = jsonData["image"]
candidates = jsonData["candidates"]
return json.dumps(findByImage(image, candidates))
def worker():
while True:
imgId, thumb = processQueue.get()
try:
getPhoto(imgId, thumb=thumb)
obj[imgId] = torch.load(featureDirectory+ imgId)[0]
except Exception as e:
print(f"Loading {imgId} failed because of {e}")
processQueue.task_done()
@api.route("/upload", methods=["POST"])
def callback():
jsonData = request.get_json()
for imgId, imgType in zip(jsonData["oids"], jsonData["types"]):
processQueue.put((imgId, imgType != "photo"))
return json.dumps(True)
def register():
requests.post("http://" + BACKEND + "/services/register", json = {"names": ["upload", "search"], "port": PORT})
if __name__ == '__main__':
print("Processing all photos")
getAllPhotos()
print("Search starting")
threading.Thread(target=worker, daemon=True).start()
register()
serve(api, listen='*:' + PORT)
|
main.py
|
import argparse
import toml
import json
import re
import functools
import urllib.parse
from . import utils
from . import gitlab
from .utils import lazy_debug
import logging
from threading import Thread, Lock, Timer
import time
import traceback
import sqlite3
import requests
from contextlib import contextmanager
from itertools import chain
from queue import Queue
import os
import sys
from enum import IntEnum
import subprocess
from .git_helper import SSH_KEY_FILE
import shlex
import random
STATUS_TO_PRIORITY = {
'success': 0,
'pending': 1,
'approved': 2,
'': 3,
'error': 4,
'failure': 5,
}
INTERRUPTED_BY_HOMU_FMT = 'Interrupted by Homu ({})'
INTERRUPTED_BY_HOMU_RE = re.compile(r'Interrupted by Homu \((.+?)\)')
DEFAULT_TEST_TIMEOUT = 3600 * 10
global_cfg = {}
global_git_cfg = {}
@contextmanager
def buildbot_sess(repo_cfg):
sess = requests.Session()
sess.post(
repo_cfg['buildbot']['url'] + '/login',
allow_redirects=False,
data={
'username': repo_cfg['buildbot']['username'],
'passwd': repo_cfg['buildbot']['password'],
})
yield sess
sess.get(repo_cfg['buildbot']['url'] + '/logout', allow_redirects=False)
db_query_lock = Lock()
def db_query(db, *args):
with db_query_lock:
db.execute(*args)
class Repository:
treeclosed = -1
gh = None
label = None
db = None
def __init__(self, gh, repo_label, db):
self.gh = gh
self.repo_label = repo_label
self.db = db
db_query(
db,
'SELECT treeclosed FROM repos WHERE repo = ?',
[repo_label]
)
row = db.fetchone()
if row:
self.treeclosed = row[0]
else:
self.treeclosed = -1
def update_treeclosed(self, value):
self.treeclosed = value
db_query(
self.db,
'DELETE FROM repos where repo = ?',
[self.repo_label]
)
if value > 0:
db_query(
self.db,
'INSERT INTO repos (repo, treeclosed) VALUES (?, ?)',
[self.repo_label, value]
)
def __lt__(self, other):
return self.gh < other.gh
class PullReqState:
num = 0
priority = 0
rollup = False
title = ''
body = ''
head_ref = ''
base_ref = ''
assignee = ''
delegate = ''
def __init__(self, id_, num, head_sha, status, db, repo_label, mergeable_que,
gh, owner, name, repos):
self.head_advanced('', use_db=False)
self.num = num
self.id_ = id_
self.head_sha = head_sha
self.status = status
self.db = db
self.repo_label = repo_label
self.mergeable_que = mergeable_que
self.gh = gh
self.owner = owner
self.name = name
self.repos = repos
self.timeout_timer = None
self.test_started = time.time()
def head_advanced(self, head_sha, *, use_db=True):
self.head_sha = head_sha
self.approved_by = ''
self.status = ''
self.merge_sha = ''
self.build_res = {}
self.try_ = False
self.mergeable = None
if use_db:
self.set_status('')
self.set_mergeable(None)
self.init_build_res([])
def __repr__(self):
fmt = 'PullReqState:{}/{}#{}(approved_by={}, priority={}, status={})'
return fmt.format(
self.owner,
self.name,
self.num,
self.approved_by,
self.priority,
self.status,
)
def sort_key(self):
return [
STATUS_TO_PRIORITY.get(self.get_status(), -1),
1 if self.mergeable is False else 0,
0 if self.approved_by else 1,
1 if self.rollup else 0,
-self.priority,
self.num,
]
def __lt__(self, other):
return self.sort_key() < other.sort_key()
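# Added note: sort_key() makes lower tuples sort first, so PRs whose status maps
# to a lower STATUS_TO_PRIORITY value (e.g. 'success' before 'pending') come
# first, known-unmergeable PRs sink, approved PRs precede unapproved ones,
# rollups sort last within a group, higher .priority wins (hence -self.priority),
# and ties finally break on the PR number.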
def get_issue(self):
issue = getattr(self, 'issue', None)
if not issue:
issue = self.issue = self.get_repo().mergerequests.get(self.num)
return issue
def add_comment(self, text):
self.get_issue().notes.create({"body": text})
def set_status(self, status):
self.status = status
if self.timeout_timer:
self.timeout_timer.cancel()
self.timeout_timer = None
db_query(
self.db,
'UPDATE pull SET status = ? WHERE repo = ? AND num = ?',
[self.status, self.repo_label, self.num]
)
# FIXME: self.try_ should also be saved in the database
if not self.try_:
db_query(
self.db,
'UPDATE pull SET merge_sha = ? WHERE repo = ? AND num = ?',
[self.merge_sha, self.repo_label, self.num]
)
def get_status(self):
if self.status == '' and self.approved_by:
if self.mergeable is not False:
return 'approved'
return self.status
def set_mergeable(self, mergeable, *, cause=None, que=True):
if mergeable is not None:
self.mergeable = mergeable
db_query(
self.db,
'INSERT OR REPLACE INTO mergeable (repo, num, mergeable) VALUES (?, ?, ?)', # noqa
[self.repo_label, self.num, self.mergeable]
)
else:
if que:
self.mergeable_que.put([self, cause])
else:
self.mergeable = None
db_query(
self.db,
'DELETE FROM mergeable WHERE repo = ? AND num = ?',
[self.repo_label, self.num]
)
def init_build_res(self, builders, *, use_db=True):
self.build_res = {x: {
'res': None,
'url': '',
} for x in builders}
if use_db:
db_query(
self.db,
'DELETE FROM build_res WHERE repo = ? AND num = ?',
[self.repo_label, self.num]
)
def set_build_res(self, builder, res, url):
if builder not in self.build_res:
raise Exception('Invalid builder: {}'.format(builder))
self.build_res[builder] = {
'res': res,
'url': url,
}
db_query(
self.db,
'INSERT OR REPLACE INTO build_res (repo, num, builder, res, url, merge_sha) VALUES (?, ?, ?, ?, ?, ?)', # noqa
[
self.repo_label,
self.num,
builder,
res,
url,
self.merge_sha,
])
def build_res_summary(self):
return ', '.join('{}: {}'.format(builder, data['res'])
for builder, data in self.build_res.items())
def get_repo(self):
repo = self.repos[self.repo_label].gh
if not repo:
gh = gitlab.login(
global_cfg['gitlab']['host'],
global_cfg['gitlab']['access_token'],
)
repo = gitlab.get_repository(gh, self.owner, self.name)
self.repos[self.repo_label].gh = repo
# assert repo.owner.login == self.owner
# assert repo.name == self.name
return repo
def save(self):
db_query(
self.db,
'INSERT OR REPLACE INTO pull (id, repo, num, status, merge_sha, title, body, head_sha, head_ref, base_ref, assignee, approved_by, priority, try_, rollup, delegate) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', # noqa
[
self.id_,
self.repo_label,
self.num,
self.status,
self.merge_sha,
self.title,
self.body,
self.head_sha,
self.head_ref,
self.base_ref,
self.assignee,
self.approved_by,
self.priority,
self.try_,
self.rollup,
self.delegate,
])
def refresh(self):
issue = self.get_repo().mergerequests.get(self.num)
self.title = issue.title
self.body = issue.description
def fake_merge(self, repo_cfg):
if not repo_cfg.get('linear', False):
return
if repo_cfg.get('autosquash', False):
return
issue = self.get_issue()
title = issue.title
# We tell gitlab to close the PR via the commit message, but it
# doesn't know that constitutes a merge. Edit the title so that it's
# clearer.
merged_prefix = '[merged] '
if not title.startswith(merged_prefix):
title = merged_prefix + title
issue.edit(title=title)
def change_treeclosed(self, value):
self.repos[self.repo_label].update_treeclosed(value)
def blocked_by_closed_tree(self):
treeclosed = self.repos[self.repo_label].treeclosed
return treeclosed if self.priority < treeclosed else None
def start_testing(self, timeout):
self.test_started = time.time() # FIXME: Save in the local database
self.set_status('pending')
timer = Timer(timeout, self.timed_out)
timer.start()
self.timeout_timer = timer
def timed_out(self):
print('* Test timed out: {}'.format(self))
self.merge_sha = ''
self.save()
self.set_status('failure')
desc = 'Test timed out'
gitlab.create_status(
self.get_repo(),
self.head_sha,
'failed',
'',
desc,
context='homu')
self.add_comment(':boom: {}'.format(desc))
def sha_cmp(short, full):
return len(short) >= 4 and short == full[:len(short)]
def sha_or_blank(sha):
return sha if re.match(r'^[0-9a-f]+$', sha) else ''
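# Illustrative examples (a sketch of the two helpers above):
#
#   sha_cmp('1234abc', '1234abcdeadbeef...')  -> True   (>= 4 chars, prefix match)
#   sha_cmp('12', '1234abcdeadbeef...')       -> False  (too short)
#   sha_or_blank('deadbeef')                  -> 'deadbeef'
#   sha_or_blank('not-a-sha')                 -> ''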
class AuthState(IntEnum):
# Higher is more privileged
REVIEWER = 3
TRY = 2
NONE = 1
def verify_auth(username, repo_cfg, state, auth, realtime, my_username):
# In some cases (e.g. non-fully-qualified r+) we recursively talk to
# ourself via a hidden markdown comment in the message. This is so that
# when re-synchronizing after shutdown we can parse these comments and
# still know the SHA for the approval.
#
# So comments from self should always be allowed
if username == my_username:
return True
is_reviewer = False
auth_collaborators = repo_cfg.get('auth_collaborators', False)
if auth_collaborators:
is_reviewer = gitlab.is_collaborator(state.get_repo(), username)
if not is_reviewer:
is_reviewer = username in repo_cfg.get('reviewers', [])
if not is_reviewer:
is_reviewer = username.lower() == state.delegate.lower()
if is_reviewer:
have_auth = AuthState.REVIEWER
elif username in repo_cfg.get('try_users', []):
have_auth = AuthState.TRY
else:
have_auth = AuthState.NONE
if have_auth >= auth:
return True
else:
if realtime:
reply = '@{}: :key: Insufficient privileges: '.format(username)
if auth == AuthState.REVIEWER:
if auth_collaborators:
reply += 'Collaborator required'
else:
reply += 'Not in reviewers'
elif auth == AuthState.TRY:
reply += 'not in try users'
state.add_comment(reply)
return False
PORTAL_TURRET_DIALOG = ["Target acquired", "Activated", "There you are"]
PORTAL_TURRET_IMAGE = "https://cloud.githubusercontent.com/assets/1617736/22222924/c07b2a1c-e16d-11e6-91b3-ac659550585c.png" # noqa
def parse_commands(body, username, repo_cfg, state, my_username, db, states,
*, realtime=False, sha=''):
global global_cfg, global_git_cfg
state_changed = False
_reviewer_auth_verified = functools.partial(
verify_auth,
username,
repo_cfg,
state,
AuthState.REVIEWER,
realtime,
my_username,
)
_try_auth_verified = functools.partial(
verify_auth,
username,
repo_cfg,
state,
AuthState.TRY,
realtime,
my_username,
)
words = list(chain.from_iterable(re.findall(r'\S+', x) for x in body.splitlines() if '@' + my_username in x)) # noqa
if words[1:] == ["are", "you", "still", "there?"] and realtime:
state.add_comment(
":cake: {}\n\n".format(
random.choice(PORTAL_TURRET_DIALOG), PORTAL_TURRET_IMAGE)
)
for i, word in reversed(list(enumerate(words))):
found = True
if word == 'r+' or word.startswith('r='):
if not _reviewer_auth_verified():
continue
if not sha and i + 1 < len(words):
cur_sha = sha_or_blank(words[i + 1])
else:
cur_sha = sha
approver = word[len('r='):] if word.startswith('r=') else username
# Ignore "r=me"
if approver == 'me':
continue
# Ignore WIP PRs
if any(map(state.title.startswith, [
'WIP', 'TODO', '[WIP]', '[TODO]',
])):
if realtime:
state.add_comment(':clipboard: Looks like this PR is still in progress, ignoring approval') # noqa
continue
# Sometimes, GitHub sends the head SHA of a PR as 0000000
# through the webhook. This is called a "null commit", and
# seems to happen when GitHub internally encounters a race
# condition. Last time, it happened when squashing commits
# in a PR. In this case, we just try to retrieve the head
# SHA manually.
if all(x == '0' for x in state.head_sha):
if realtime:
state.add_comment(
':bangbang: Invalid head SHA found, retrying: `{}`'
.format(state.head_sha)
)
state.head_sha = gitlab.get_pull_request_sha(
state.get_repo(),
state.num,
)
state.save()
assert any(x != '0' for x in state.head_sha)
if state.approved_by and realtime and username != my_username:
for _state in states[state.repo_label].values():
if _state.status == 'pending':
break
else:
_state = None
lines = []
if state.status in ['failure', 'error']:
lines.append('- This pull request previously failed. You should add more commits to fix the bug, or use `retry` to trigger a build again.') # noqa
if _state:
if state == _state:
lines.append('- This pull request is currently being tested. If there\'s no response from the continuous integration service, you may use `retry` to trigger a build again.') # noqa
else:
lines.append('- There\'s another pull request that is currently being tested, blocking this pull request: #{}'.format(_state.num)) # noqa
if lines:
lines.insert(0, '')
lines.insert(0, ':bulb: This pull request was already approved, no need to approve it again.') # noqa
state.add_comment('\n'.join(lines))
if sha_cmp(cur_sha, state.head_sha):
state.approved_by = approver
state.try_ = False
state.set_status('')
state.save()
elif realtime and username != my_username:
if cur_sha:
msg = '`{}` is not a valid commit SHA.'.format(cur_sha)
state.add_comment(
':scream_cat: {} Please try again with `{:.7}`.'
.format(msg, state.head_sha)
)
else:
state.add_comment(
':pushpin: Commit {:.7} has been approved by `{}`\n\n<!-- @{} r={} {} -->' # noqa
.format(
state.head_sha,
approver,
my_username,
approver,
state.head_sha,
))
treeclosed = state.blocked_by_closed_tree()
if treeclosed:
state.add_comment(
':evergreen_tree: The tree is currently closed for pull requests below priority {}, this pull request will be tested once the tree is reopened' # noqa
.format(treeclosed)
)
elif word == 'r-':
if not verify_auth(username, repo_cfg, state, AuthState.REVIEWER,
realtime, my_username):
continue
state.approved_by = ''
state.save()
elif word.startswith('p='):
if not verify_auth(username, repo_cfg, state, AuthState.TRY,
realtime, my_username):
continue
try:
pvalue = int(word[len('p='):])
except ValueError:
continue
if pvalue > global_cfg['max_priority']:
if realtime:
state.add_comment(
':stop_sign: Priority higher than {} is ignored.'
.format(global_cfg['max_priority'])
)
continue
state.priority = pvalue
state.save()
elif word.startswith('delegate='):
if not verify_auth(username, repo_cfg, state, AuthState.REVIEWER,
realtime, my_username):
continue
state.delegate = word[len('delegate='):]
state.save()
if realtime:
state.add_comment(
':v: @{} can now approve this pull request'
.format(state.delegate)
)
elif word == 'delegate-':
# TODO: why is this a TRY?
if not _try_auth_verified():
continue
state.delegate = ''
state.save()
elif word == 'delegate+':
if not _reviewer_auth_verified():
continue
state.delegate = gitlab.get_pull_request_user(
state.get_repo(), state.num,
)
state.save()
if realtime:
state.add_comment(
':v: @{} can now approve this pull request'
.format(state.delegate)
)
elif word == 'retry' and realtime:
if not _try_auth_verified():
continue
state.set_status('')
elif word in ['try', 'try-'] and realtime:
if not _try_auth_verified():
continue
state.try_ = word == 'try'
state.merge_sha = ''
state.init_build_res([])
state.save()
elif word in ['rollup', 'rollup-']:
if not _try_auth_verified():
continue
state.rollup = word == 'rollup'
state.save()
elif word == 'force' and realtime:
if not _try_auth_verified():
continue
if 'buildbot' in repo_cfg:
with buildbot_sess(repo_cfg) as sess:
res = sess.post(
repo_cfg['buildbot']['url'] + '/builders/_selected/stopselected', # noqa
allow_redirects=False,
data={
'selected': repo_cfg['buildbot']['builders'],
'comments': INTERRUPTED_BY_HOMU_FMT.format(int(time.time())), # noqa
})
if 'authzfail' in res.text:
err = 'Authorization failed'
else:
mat = re.search('(?s)<div class="error">(.*?)</div>', res.text)
if mat:
err = mat.group(1).strip()
if not err:
err = 'Unknown error'
else:
err = ''
if err:
state.add_comment(
':bomb: Buildbot returned an error: `{}`'.format(err)
)
elif word == 'clean' and realtime:
if not _try_auth_verified():
continue
state.merge_sha = ''
state.init_build_res([])
state.save()
elif (word == 'hello?' or word == 'ping') and realtime:
state.add_comment(":sleepy: I'm awake I'm awake")
elif word.startswith('treeclosed='):
if not _reviewer_auth_verified():
continue
try:
treeclosed = int(word[len('treeclosed='):])
state.change_treeclosed(treeclosed)
except ValueError:
pass
state.save()
elif word == 'treeclosed-':
if not _reviewer_auth_verified():
continue
state.change_treeclosed(-1)
state.save()
elif 'hooks' in global_cfg:
hook_found = False
for hook in global_cfg['hooks']:
hook_cfg = global_cfg['hooks'][hook]
if hook_cfg['realtime'] and not realtime:
continue
if word == hook or word.startswith('%s=' % hook):
if hook_cfg['access'] == "reviewer":
if not _reviewer_auth_verified():
continue
else:
if not _try_auth_verified():
continue
hook_found = True
extra_data = ""
if word.startswith('%s=' % hook):
extra_data = word.split("=")[1]
Thread(
target=handle_hook_response,
args=[state, hook_cfg, body, extra_data]
).start()
if not hook_found:
found = False
else:
found = False
if found:
state_changed = True
words[i] = ''
return state_changed
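# Illustrative sketch of how parse_commands is driven (the bot name and SHA
# below are made up): a review comment such as
#
#   "@homu-bot r+ 1234abc p=5"
#
# addressed to my_username would, for an authorized reviewer whose SHA matches
# the PR head, record the approval, set the priority to 5, save the state and
# return True so the caller knows the queue needs to be re-processed.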
def handle_hook_response(state, hook_cfg, body, extra_data):
post_data = {}
post_data["pull"] = state.num
post_data["body"] = body
post_data["extra_data"] = extra_data
print(post_data)
response = requests.post(hook_cfg['endpoint'], json=post_data)
print(response.text)
    # We only post a response if we're configured to have one;
    # non-realtime hooks cannot post.
if hook_cfg['has_response'] and hook_cfg['realtime']:
state.add_comment(response.text)
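# A minimal sketch of the hook configuration consumed above; the key names
# come from the lookups in parse_commands/handle_hook_response, while the hook
# name and endpoint are made up for illustration (cfg.toml):
#
#   [hooks.do-the-thing]
#   realtime = true
#   access = "reviewer"
#   endpoint = "https://example.com/homu-hook"
#   has_response = true
#
# A comment containing "do-the-thing=abc" would then POST
# {"pull": <num>, "body": <comment body>, "extra_data": "abc"} to the endpoint
# and, because has_response and realtime are both set, echo the response text
# back as a PR comment.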
def git_push(git_cmd, branch, state):
merge_sha = subprocess.check_output(git_cmd('rev-parse', 'HEAD')).decode('ascii').strip() # noqa
if utils.silent_call(git_cmd('push', '-f', 'origin', branch)):
utils.logged_call(git_cmd('branch', '-f', 'homu-tmp', branch))
utils.logged_call(git_cmd('push', '-f', 'origin', 'homu-tmp'))
def inner():
gitlab.create_status(
state.get_repo(),
merge_sha,
'success',
'',
'Branch protection bypassed',
context='homu',
)
def fail(err):
state.add_comment(
':boom: Unable to create a status for {} ({})'
.format(merge_sha, err)
)
utils.retry_until(inner, fail, state)
utils.logged_call(git_cmd('push', '-f', 'origin', branch))
return merge_sha
def init_local_git_cmds(repo_cfg, git_cfg):
fpath = 'cache/{}/{}'.format(repo_cfg['owner'], repo_cfg['name'])
host = urllib.parse.urlparse(global_cfg["gitlab"]["host"]).netloc
url = 'git@{}:{}/{}.git'.format(
host,
repo_cfg['owner'], repo_cfg['name'],
) # noqa
if not os.path.exists(SSH_KEY_FILE):
os.makedirs(os.path.dirname(SSH_KEY_FILE), exist_ok=True)
with open(SSH_KEY_FILE, 'w') as fp:
fp.write(git_cfg['ssh_key'])
os.chmod(SSH_KEY_FILE, 0o600)
if not os.path.exists(fpath):
utils.logged_call(['git', 'init', fpath])
utils.logged_call(['git', '-C', fpath, 'config', 'user.name', git_cfg["name"]]) # noqa
utils.logged_call(['git', '-C', fpath, 'config', 'user.email', git_cfg["email"]]) # noqa
utils.logged_call(['git', '-C', fpath, 'remote', 'add', 'origin', url]) # noqa
return lambda *args: ['git', '-C', fpath] + list(args)
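# Usage sketch: the returned helper only builds argument lists, e.g.
#
#   git_cmd = init_local_git_cmds(repo_cfg, git_cfg)
#   git_cmd('fetch', 'origin', 'master')
#   # -> ['git', '-C', 'cache/<owner>/<name>', 'fetch', 'origin', 'master']
#
# ('master' is just an example ref); callers hand the list to
# utils.logged_call / utils.silent_call or subprocess.check_output.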
def branch_equal_to_merge(git_cmd, state, branch):
utils.logged_call(git_cmd(
'fetch', 'origin',
'merge-requests/{}/merge'.format(state.num),
))
return utils.silent_call(git_cmd('diff', '--quiet', 'FETCH_HEAD', branch)) == 0 # noqa
def create_merge(state, repo_cfg, branch, logger, git_cfg,
ensure_merge_equal=False):
base_sha = gitlab.get_ref_sha(state.get_repo(), 'heads/' + state.base_ref)
state.refresh()
lazy_debug(logger,
lambda: "create_merge: attempting merge {} into {} on {!r}"
.format(state.head_sha, branch, state.get_repo()))
merge_msg = 'Auto merge of !{} - {}, r={}\n\n{}\n\n{}'.format(
state.num,
state.head_ref,
'<try>' if state.try_ else state.approved_by,
state.title,
state.body,
)
desc = 'Merge conflict'
git_cmd = init_local_git_cmds(repo_cfg, git_cfg)
utils.logged_call(
git_cmd(
'fetch', 'origin', state.base_ref,
'merge-requests/{}/head'.format(state.num),
)
)
utils.silent_call(git_cmd('rebase', '--abort'))
utils.silent_call(git_cmd('merge', '--abort'))
if repo_cfg.get('linear', False):
utils.logged_call(
git_cmd('checkout', '-B', branch, state.head_sha))
try:
args = [base_sha]
if repo_cfg.get('autosquash', False):
args += ['-i', '--autosquash']
utils.logged_call(
git_cmd(
'-c',
'user.name=' + git_cfg['name'],
'-c',
'user.email=' + git_cfg['email'],
'rebase',
*args,
)
)
except subprocess.CalledProcessError:
if repo_cfg.get('autosquash', False):
utils.silent_call(git_cmd('rebase', '--abort'))
if utils.silent_call(git_cmd('rebase', base_sha)) == 0:
desc = 'Auto-squashing failed'
else:
ap = '<try>' if state.try_ else state.approved_by
text = '\nCloses: #{}\nApproved by: {}'.format(state.num, ap)
msg_code = 'cat && echo {}'.format(shlex.quote(text))
env_code = 'export GIT_COMMITTER_NAME={} && export GIT_COMMITTER_EMAIL={} && unset GIT_COMMITTER_DATE'.format(shlex.quote(git_cfg['name']), shlex.quote(git_cfg['email'])) # noqa
utils.logged_call(git_cmd('filter-branch', '-f',
'--msg-filter', msg_code,
'--env-filter', env_code,
'{}..'.format(base_sha)))
if ensure_merge_equal:
if not branch_equal_to_merge(git_cmd, state, branch):
return ''
return git_push(git_cmd, branch, state)
else:
utils.logged_call(git_cmd(
'checkout',
'-B',
'homu-tmp',
state.head_sha))
ok = True
if repo_cfg.get('autosquash', False):
try:
merge_base_sha = subprocess.check_output(
git_cmd(
'merge-base',
base_sha,
state.head_sha)).decode('ascii').strip()
utils.logged_call(git_cmd(
'-c',
'user.name=' + git_cfg['name'],
'-c',
'user.email=' + git_cfg['email'],
'rebase',
'-i',
'--autosquash',
'--onto',
merge_base_sha, base_sha))
except subprocess.CalledProcessError:
desc = 'Auto-squashing failed'
ok = False
if ok:
utils.logged_call(git_cmd('checkout', '-B', branch, base_sha))
try:
utils.logged_call(git_cmd(
'-c',
'user.name=' + git_cfg['name'],
'-c',
'user.email=' + git_cfg['email'],
'merge',
'heads/homu-tmp',
'--no-ff',
'-m',
merge_msg))
except subprocess.CalledProcessError:
pass
else:
if ensure_merge_equal:
if not branch_equal_to_merge(git_cmd, state, branch):
return ''
return git_push(git_cmd, branch, state)
state.set_status('error')
gitlab.create_status(
state.get_repo(),
state.head_sha,
'canceled',
'',
desc,
context='homu')
state.add_comment(':lock: ' + desc)
return ''
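# Summary note: create_merge() produces the commit that actually gets tested.
# With repo_cfg['linear'] it rebases the PR head onto the base branch
# (optionally autosquashing) and rewrites the commit messages to append
# "Closes: #N" / "Approved by:", otherwise it builds a conventional --no-ff
# merge of homu-tmp into the auto/try branch. In both cases the result is
# pushed via git_push() and its SHA returned, or '' after marking the state
# as an error when the merge/rebase fails.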
def pull_is_rebased(state, repo_cfg, git_cfg, base_sha):
assert git_cfg['local_git']
git_cmd = init_local_git_cmds(repo_cfg, git_cfg)
utils.logged_call(
git_cmd(
'fetch', 'origin', state.base_ref,
'merge-requests/{}/head'.format(state.num),
)
)
return utils.silent_call(git_cmd('merge-base', '--is-ancestor',
base_sha, state.head_sha)) == 0
def get_gitlab_merge_sha(state, repo_cfg, git_cfg):
assert git_cfg['local_git']
git_cmd = init_local_git_cmds(repo_cfg, git_cfg)
if state.mergeable is not True:
return None
utils.logged_call(git_cmd(
'fetch', 'origin',
'merge-requests/{}/merge'.format(state.num),
))
return subprocess.check_output(git_cmd('rev-parse', 'FETCH_HEAD')).decode('ascii').strip() # noqa
def do_exemption_merge(state, logger, repo_cfg, git_cfg, url, check_merge,
reason):
try:
merge_sha = create_merge(
state,
repo_cfg,
state.base_ref,
logger,
git_cfg,
check_merge)
except subprocess.CalledProcessError:
print('* Unable to create a merge commit for the exempted PR: {}'.format(state)) # noqa
traceback.print_exc()
return False
if not merge_sha:
return False
desc = 'Test exempted'
state.set_status('success')
gitlab.create_status(
state.get_repo(), state.head_sha, 'success',
url, desc, context='homu',
)
state.add_comment(':zap: {}: {}.'.format(desc, reason))
state.merge_sha = merge_sha
state.save()
state.fake_merge(repo_cfg)
return True
def try_travis_exemption(state, logger, repo_cfg, git_cfg):
travis_info = None
for info in gitlab.iter_statuses(state.get_repo(), state.head_sha):
if info.context == 'continuous-integration/travis-ci/pr':
travis_info = info
break
if travis_info is None or travis_info.state != 'success':
return False
mat = re.search('/builds/([0-9]+)$', travis_info.target_url)
if not mat:
return False
url = 'https://api.travis-ci.org/{}/{}/builds/{}'.format(state.owner,
state.name,
mat.group(1))
try:
res = requests.get(url)
except Exception as ex:
print('* Unable to gather build info from Travis CI: {}'.format(ex))
return False
repo = state.get_repo()
travis_sha = json.loads(res.text)['commit']
travis_commit = gitlab.get_commit(state.get_repo(), travis_sha)
if not travis_commit:
return False
base_sha = gitlab.get_ref_sha(state.get_repo(), 'heads/' + state.base_ref)
travis_commit_parent_shas = gitlab.get_parent_shas(repo, travis_sha)
if (travis_commit_parent_shas[0] == base_sha and
travis_commit_parent_shas[1] == state.head_sha):
# make sure we check against the gitlab.merge sha before pushing
return do_exemption_merge(state, logger, repo_cfg, git_cfg,
travis_info.target_url, True,
"merge already tested by Travis CI")
return False
def try_status_exemption(state, logger, repo_cfg, git_cfg):
# If all the builders are status-based, then we can do some checks to
# exempt testing under the following cases:
# 1. The PR head commit has the equivalent statuses set to 'success' and
# it is fully rebased on the HEAD of the target base ref.
# 2. The PR head and merge commits have the equivalent statuses set to
# state 'success' and the merge commit's first parent is the HEAD of
# the target base ref.
if not git_cfg['local_git']:
raise RuntimeError('local_git is required to use status exemption')
statuses_all = set()
# equivalence dict: pr context --> auto context
status_equivalences = {}
for key, value in repo_cfg['status'].items():
context = value.get('context')
pr_context = value.get('pr_context', context)
if context is not None:
statuses_all.add(context)
status_equivalences[pr_context] = context
assert len(statuses_all) > 0
# let's first check that all the statuses we want are set to success
statuses_pass = set()
for info in gitlab.iter_statuses(state.get_repo(), state.head_sha):
if info.context in status_equivalences and info.state == 'success':
statuses_pass.add(status_equivalences[info.context])
if statuses_all != statuses_pass:
return False
# is the PR fully rebased?
base_sha = gitlab.get_ref_sha(state.get_repo(), 'heads/' + state.base_ref)
if pull_is_rebased(state, repo_cfg, git_cfg, base_sha):
return do_exemption_merge(state, logger, repo_cfg, git_cfg, '', False,
"pull fully rebased and already tested")
# check if we can use the gitlab.merge sha as proof
merge_sha = get_gitlab_merge_sha(state, repo_cfg, git_cfg)
if merge_sha is None:
return False
statuses_merge_pass = set()
for info in gitlab.iter_statuses(state.get_repo(), merge_sha):
if info.context in status_equivalences and info.state == 'success':
statuses_merge_pass.add(status_equivalences[info.context])
merge_commit_parent_shas = gitlab.get_parent_shas(
state.get_repo(), merge_sha,
)
if (statuses_all == statuses_merge_pass and
merge_commit_parent_shas[0] == base_sha and
merge_commit_parent_shas[1] == state.head_sha):
# make sure we check against the gitlab.merge sha before pushing
return do_exemption_merge(state, logger, repo_cfg, git_cfg, '', True,
"merge already tested")
return False
def start_build(state, repo_cfgs, buildbot_slots, logger, db, git_cfg):
if buildbot_slots[0]:
return True
lazy_debug(logger, lambda: "start_build on {!r}".format(state.get_repo()))
pull_request_sha = gitlab.get_pull_request_sha(state.get_repo(), state.num)
assert state.head_sha == pull_request_sha
repo_cfg = repo_cfgs[state.repo_label]
builders = []
branch = 'try' if state.try_ else 'auto'
branch = repo_cfg.get('branch', {}).get(branch, branch)
can_try_travis_exemption = False
only_status_builders = True
if 'buildbot' in repo_cfg:
if state.try_:
builders += repo_cfg['buildbot']['try_builders']
else:
builders += repo_cfg['buildbot']['builders']
only_status_builders = False
if 'travis' in repo_cfg:
builders += ['travis']
only_status_builders = False
if 'job' in repo_cfg:
found_travis_context = False
for key, value in repo_cfg['job'].items():
context = value.get('context')
if context is not None:
if state.try_ and not value.get('try', True):
# Skip this builder for tries.
continue
builders += ['job-' + key]
                # We have an optional fast path: if the Travis test passed
                # for a given commit and master is unchanged, we can do
                # a direct push.
if context == 'continuous-integration/travis-ci/push':
found_travis_context = True
if found_travis_context and len(builders) == 1:
can_try_travis_exemption = True
    if len(builders) == 0:
raise RuntimeError('Invalid configuration')
lazy_debug(logger, lambda: "start_build: builders={!r}".format(builders))
if (only_status_builders and state.approved_by and
repo_cfg.get('status_based_exemption', False)):
if can_try_travis_exemption:
if try_travis_exemption(state, logger, repo_cfg, git_cfg):
return True
if try_status_exemption(state, logger, repo_cfg, git_cfg):
return True
merge_sha = create_merge(state, repo_cfg, branch, logger, git_cfg)
lazy_debug(logger, lambda: "start_build: merge_sha={}".format(merge_sha))
if not merge_sha:
return False
state.init_build_res(builders)
state.merge_sha = merge_sha
state.save()
if 'buildbot' in repo_cfg:
buildbot_slots[0] = state.merge_sha
logger.info('Starting build of {}/{}#{} on {}: {}'.format(
state.owner,
state.name,
state.num,
branch,
state.merge_sha))
timeout = repo_cfg.get('timeout', DEFAULT_TEST_TIMEOUT)
state.start_testing(timeout)
desc = '{} commit {} with merge {}...'.format(
'Trying' if state.try_ else 'Testing',
state.head_sha,
state.merge_sha,
)
try:
gitlab.create_status(
state.get_repo(),
state.head_sha,
'running',
'',
desc,
context='homu',
)
except gitlab.CommonError:
        # Setting a duplicate commit status on GitLab raises an error
pass
state.add_comment(':hourglass: ' + desc)
return True
def start_rebuild(state, repo_cfgs):
repo_cfg = repo_cfgs[state.repo_label]
if 'buildbot' not in repo_cfg or not state.build_res:
return False
builders = []
succ_builders = []
for builder, info in state.build_res.items():
if not info['url']:
return False
if info['res']:
succ_builders.append([builder, info['url']])
else:
builders.append([builder, info['url']])
if not builders or not succ_builders:
return False
repo = state.get_repo()
base_sha = gitlab.get_ref_sha(repo, 'heads/' + state.base_ref)
parent_shas = gitlab.get_parent_shas(repo, state.merge_sha)
if base_sha not in parent_shas:
return False
gitlab.set_ref(
state.get_repo(),
'tags/homu-tmp',
state.merge_sha,
force=True)
builders.sort()
succ_builders.sort()
with buildbot_sess(repo_cfg) as sess:
for builder, url in builders:
res = sess.post(url + '/rebuild', allow_redirects=False, data={
'useSourcestamp': 'exact',
'comments': 'Initiated by Homu',
})
if 'authzfail' in res.text:
err = 'Authorization failed'
elif builder in res.text:
err = ''
else:
mat = re.search('<title>(.+?)</title>', res.text)
err = mat.group(1) if mat else 'Unknown error'
if err:
state.add_comment(':bomb: Failed to start rebuilding: `{}`'.format(err)) # noqa
return False
timeout = repo_cfg.get('timeout', DEFAULT_TEST_TIMEOUT)
state.start_testing(timeout)
msg_1 = 'Previous build results'
msg_2 = ' for {}'.format(', '.join('[{}]({})'.format(builder, url) for builder, url in succ_builders)) # noqa
msg_3 = ' are reusable. Rebuilding'
msg_4 = ' only {}'.format(', '.join('[{}]({})'.format(builder, url) for builder, url in builders)) # noqa
gitlab.create_status(
state.get_repo(),
state.head_sha,
'running',
'',
'{}{}...'.format(msg_1, msg_3),
context='homu')
state.add_comment(':zap: {}{}{}{}...'.format(msg_1, msg_2, msg_3, msg_4))
return True
def start_build_or_rebuild(state, repo_cfgs, *args):
if start_rebuild(state, repo_cfgs):
return True
return start_build(state, repo_cfgs, *args)
def process_queue(states, repos, repo_cfgs, logger, buildbot_slots, db,
git_cfg):
for repo_label, repo in repos.items():
repo_states = sorted(states[repo_label].values())
for state in repo_states:
lazy_debug(logger, lambda: "process_queue: state={!r}, building {}"
.format(state, repo_label))
if state.priority < repo.treeclosed:
continue
if state.status == 'pending' and not state.try_:
break
elif state.status == 'success' and hasattr(state, 'fake_merge_sha'): # noqa
break
elif state.status == '' and state.approved_by:
if start_build_or_rebuild(state, repo_cfgs, buildbot_slots,
logger, db, git_cfg):
return
elif state.status == 'success' and state.try_ and state.approved_by: # noqa
state.try_ = False
state.save()
if start_build(state, repo_cfgs, buildbot_slots, logger, db,
git_cfg):
return
for state in repo_states:
if state.status == '' and state.try_:
if start_build(state, repo_cfgs, buildbot_slots, logger, db,
git_cfg):
return
def fetch_mergeability(mergeable_que):
re_pull_num = re.compile('(?i)merge (?:of|pull request) #([0-9]+)')
while True:
try:
state, cause = mergeable_que.get()
if state.status == 'success':
continue
# Wait 10 seconds for mergeable check
for i in range(10):
merge_status = gitlab.get_pull(state.get_repo(), state.num).merge_status # noqa
if merge_status != "unchecked":
break
time.sleep(1)
if merge_status == "unchecked":
continue
mergeable = merge_status == "can_be_merged"
if state.mergeable is True and mergeable is False:
if cause:
mat = re_pull_num.search(cause['title'])
if mat:
issue_or_commit = '#' + mat.group(1)
else:
issue_or_commit = cause['sha']
else:
issue_or_commit = ''
_blame = ''
if issue_or_commit:
_blame = ' (presumably {})'.format(issue_or_commit)
state.add_comment(':umbrella: The latest upstream changes{} made this pull request unmergeable. Please resolve the merge conflicts.'.format( # noqa
_blame
))
state.set_mergeable(mergeable, que=False)
except Exception:
print('* Error while fetching mergeability')
traceback.print_exc()
finally:
mergeable_que.task_done()
def synchronize(repo_label, repo_cfg, logger, gh, states, repos, db, mergeable_que, my_username, repo_labels): # noqa
logger.info('Synchronizing {}...'.format(repo_label))
repo = gitlab.get_repository(gh, repo_cfg['owner'], repo_cfg['name'])
db_query(db, 'DELETE FROM pull WHERE repo = ?', [repo_label])
db_query(db, 'DELETE FROM build_res WHERE repo = ?', [repo_label])
db_query(db, 'DELETE FROM mergeable WHERE repo = ?', [repo_label])
saved_states = {}
for num, state in states[repo_label].items():
saved_states[num] = {
'merge_sha': state.merge_sha,
'build_res': state.build_res,
}
states[repo_label] = {}
repos[repo_label] = Repository(repo, repo_label, db)
for pull in repo.mergerequests.list(state='opened'):
db_query(
db,
'SELECT status FROM pull WHERE repo = ? AND num = ?',
[repo_label, pull.iid])
row = db.fetchone()
if row:
status = row[0]
else:
status = ''
for info in gitlab.iter_statuses(repo, pull.sha):
if info.context == 'homu':
status = info.state
break
state = PullReqState(pull.id, pull.iid, pull.sha, status, db, repo_label, mergeable_que, gh, repo_cfg['owner'], repo_cfg['name'], repos) # noqa
state.title = pull.title
state.body = pull.description
state.head_ref = pull.author["username"] + ':' + pull.source_branch
state.base_ref = pull.target_branch
state.set_mergeable(None)
state.assignee = pull.assignee["username"] if pull.assignee else ''
for comment in gitlab.iter_issue_comments(repo, pull.iid):
parse_commands(
comment.body,
comment.author["username"],
repo_cfg,
state,
my_username,
db,
states,
)
saved_state = saved_states.get(pull.iid)
if saved_state:
for key, val in saved_state.items():
setattr(state, key, val)
state.save()
states[repo_label][pull.iid] = state
logger.info('Done synchronizing {}!'.format(repo_label))
def arguments():
parser = argparse.ArgumentParser(
description='A bot that integrates with GitHub and your favorite '
'continuous integration service')
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='Enable more verbose logging')
parser.add_argument(
'-c',
'--config',
action='store',
help='Path to cfg.toml',
default='cfg.toml')
return parser.parse_args()
def main():
global global_cfg, global_git_cfg
args = arguments()
logger = logging.getLogger('homu')
logger.setLevel(logging.DEBUG if args.verbose else logging.INFO)
logger.addHandler(logging.StreamHandler())
if sys.getfilesystemencoding() == 'ascii':
logger.info('You need to set a locale compatible with unicode or homu will choke on Unicode in PR descriptions/titles. See http://stackoverflow.com/a/27931669') # noqa
try:
with open(args.config) as fp:
cfg = toml.loads(fp.read())
except FileNotFoundError:
# Fall back to cfg.json only if we're using the defaults
if args.config == 'cfg.toml':
with open('cfg.json') as fp:
cfg = json.loads(fp.read())
else:
raise
global_cfg = cfg
gh = gitlab.login(cfg["gitlab"]["host"], cfg['gitlab']['access_token'])
gh.auth()
user = gh.user
cfg_git = cfg.get('git', {})
user_email = cfg_git.get('email')
if user_email is None:
raise RuntimeError('Email not set') # noqa
user_name = cfg_git.get('name', user.name if user.name else user.username)
states = {}
repos = {}
repo_cfgs = {}
buildbot_slots = ['']
my_username = user.username
repo_labels = {}
mergeable_que = Queue()
global_git_cfg = git_cfg = {
'name': user_name,
'email': user_email,
'ssh_key': cfg_git.get('ssh_key', ''),
'local_git': cfg_git.get('local_git', False),
}
db_file = cfg.get('db', {}).get('file', 'main.db')
db_conn = sqlite3.connect(db_file,
check_same_thread=False,
isolation_level=None)
db = db_conn.cursor()
db_query(db, '''CREATE TABLE IF NOT EXISTS pull (
repo TEXT NOT NULL,
id INTEGER NOT NULL,
num INTEGER NOT NULL,
status TEXT NOT NULL,
merge_sha TEXT,
title TEXT,
body TEXT,
head_sha TEXT,
head_ref TEXT,
base_ref TEXT,
assignee TEXT,
approved_by TEXT,
priority INTEGER,
try_ INTEGER,
rollup INTEGER,
delegate TEXT,
UNIQUE (repo, num)
)''')
db_query(db, '''CREATE TABLE IF NOT EXISTS build_res (
repo TEXT NOT NULL,
num INTEGER NOT NULL,
builder TEXT NOT NULL,
res INTEGER,
url TEXT NOT NULL,
merge_sha TEXT NOT NULL,
UNIQUE (repo, num, builder)
)''')
db_query(db, '''CREATE TABLE IF NOT EXISTS mergeable (
repo TEXT NOT NULL,
num INTEGER NOT NULL,
mergeable INTEGER NOT NULL,
UNIQUE (repo, num)
)''')
db_query(db, '''CREATE TABLE IF NOT EXISTS repos (
repo TEXT NOT NULL,
treeclosed INTEGER NOT NULL,
UNIQUE (repo)
)''')
for repo_label, repo_cfg in cfg['repo'].items():
repo_cfgs[repo_label] = repo_cfg
repo_labels[repo_cfg['owner'], repo_cfg['name']] = repo_label
repo_states = {}
repos[repo_label] = Repository(None, repo_label, db)
db_query(
db,
'SELECT id, num, head_sha, status, title, body, head_ref, base_ref, assignee, approved_by, priority, try_, rollup, delegate, merge_sha FROM pull WHERE repo = ?', # noqa
[repo_label])
for id_, num, head_sha, status, title, body, head_ref, base_ref, assignee, approved_by, priority, try_, rollup, delegate, merge_sha in db.fetchall(): # noqa
state = PullReqState(id_, num, head_sha, status, db, repo_label, mergeable_que, gh, repo_cfg['owner'], repo_cfg['name'], repos) # noqa
state.title = title
state.body = body
state.head_ref = head_ref
state.base_ref = base_ref
state.assignee = assignee
state.approved_by = approved_by
state.priority = int(priority)
state.try_ = bool(try_)
state.rollup = bool(rollup)
state.delegate = delegate
builders = []
if merge_sha:
if 'buildbot' in repo_cfg:
builders += repo_cfg['buildbot']['builders']
if 'travis' in repo_cfg:
builders += ['travis']
if 'job' in repo_cfg:
builders += ['job-' + key for key, value in repo_cfg['job'].items() if 'context' in value] # noqa
                if len(builders) == 0:
raise RuntimeError('Invalid configuration')
state.init_build_res(builders, use_db=False)
state.merge_sha = merge_sha
elif state.status == 'pending':
# FIXME: There might be a better solution
state.status = ''
state.save()
repo_states[num] = state
states[repo_label] = repo_states
db_query(
db,
'SELECT repo, num, builder, res, url, merge_sha FROM build_res')
for repo_label, num, builder, res, url, merge_sha in db.fetchall():
try:
state = states[repo_label][num]
if builder not in state.build_res:
raise KeyError
if state.merge_sha != merge_sha:
raise KeyError
except KeyError:
db_query(
db,
'DELETE FROM build_res WHERE repo = ? AND num = ? AND builder = ?', # noqa
[repo_label, num, builder])
continue
state.build_res[builder] = {
'res': bool(res) if res is not None else None,
'url': url,
}
db_query(db, 'SELECT repo, num, mergeable FROM mergeable')
for repo_label, num, mergeable in db.fetchall():
try:
state = states[repo_label][num]
except KeyError:
db_query(
db,
'DELETE FROM mergeable WHERE repo = ? AND num = ?',
[repo_label, num])
continue
state.mergeable = bool(mergeable) if mergeable is not None else None
db_query(db, 'SELECT repo FROM pull GROUP BY repo')
for repo_label, in db.fetchall():
if repo_label not in repos:
db_query(db, 'DELETE FROM pull WHERE repo = ?', [repo_label])
queue_handler_lock = Lock()
def queue_handler():
with queue_handler_lock:
return process_queue(states, repos, repo_cfgs, logger, buildbot_slots, db, git_cfg) # noqa
os.environ['GIT_SSH'] = os.path.join(os.path.dirname(__file__), 'git_helper.py') # noqa
os.environ['GIT_EDITOR'] = 'cat'
from . import server
Thread(
target=server.start,
args=[
cfg,
states,
queue_handler,
repo_cfgs,
repos,
logger,
buildbot_slots,
my_username,
db,
repo_labels,
mergeable_que,
gh,
]).start()
Thread(target=fetch_mergeability, args=[mergeable_que]).start()
queue_handler()
if __name__ == '__main__':
main()
|
main.py
|
"""
main.py
Copyright 2007 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from __future__ import absolute_import
# Now that I know that I have them, import them!
import gtk
import gobject
import shelve
import os
import webbrowser
import time
import sys
from multiprocessing.dummy import Process
import w3af.core.controllers.output_manager as om
import w3af.core.data.kb.config as cf
from w3af.core.controllers.w3afCore import w3afCore
from w3af.core.controllers.misc_settings import MiscSettings
from w3af.core.controllers.exceptions import BaseFrameworkException, ScanMustStopByUserRequest
from w3af.core.controllers.exception_handling.helpers import pprint_plugins, get_versions
from w3af.core.controllers.misc.homeDir import get_home_dir
from w3af.core.controllers.misc.get_w3af_version import get_w3af_version
from w3af.core.ui.gui import GUI_DATA_PATH
from w3af.core.ui.gui.splash import Splash
from w3af.core.ui.gui.disclaimer import DisclaimerController
from w3af.core.ui.gui.exception_handling import unhandled
from w3af.core.ui.gui.exception_handling import user_reports_bug
from w3af.core.ui.gui.constants import W3AF_ICON, MAIN_TITLE, UI_MENU
from w3af.core.ui.gui.output.gtk_output import GtkOutput
from w3af.core.ui.gui.auto_update.gui_updater import GUIUpdater
from w3af.core.ui.gui import scanrun, helpers, profiles, compare
from w3af.core.ui.gui import export_request
from w3af.core.ui.gui import entries, pluginconfig, confpanel
from w3af.core.ui.gui import wizard, guardian
from w3af.core.ui.gui.tools import encdec
from w3af.core.ui.gui.user_help.open_help import open_help
from w3af.core.ui.gui.tabs.log.main_body import LogBody
from w3af.core.ui.gui.tabs.exploit.main_body import ExploitBody
from w3af.core.ui.gui.tools.fuzzy_requests import FuzzyRequests
from w3af.core.ui.gui.tools.manual_requests import ManualRequests
from w3af.core.ui.gui.tools.proxywin import ProxiedRequests
# This is just general info, to help people know their system and report more
# complete bugs
print "Starting w3af, running on:"
print get_versions()
# pylint: disable=E1101
# Threading initializer
if sys.platform == "win32":
gobject.threads_init()
# Load the theme, this fixes bug 2022433: Windows buttons without images
gtk.rc_add_default_file('%USERPROFILE%/.gtkrc-2.0')
else:
gtk.gdk.threads_init()
gtk.gdk.threads_enter()
# pylint: enable=E1101
class FakeShelve(dict):
def close(self):
pass
class AboutDialog(gtk.Dialog):
"""A dialog with the About information.
:author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
"""
def __init__(self, w3af):
super(
AboutDialog, self).__init__(_("About..."), None, gtk.DIALOG_MODAL,
(_("Check the web site"), gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK))
# content
img = gtk.image_new_from_file(os.path.join(GUI_DATA_PATH, 'splash.png'))
self.vbox.pack_start(img)
version = get_w3af_version()
self.label = gtk.Label(version)
#self.label.set_justify(gtk.JUSTIFY_CENTER)
self.vbox.pack_start(self.label)
# the home button
self.butt_home = self.action_area.get_children()[1]
self.butt_home.connect("clicked", self._goWeb)
# the ok button
self.butt_saveas = self.action_area.get_children()[0]
self.butt_saveas.connect("clicked", lambda x: self.destroy())
self.show_all()
def _goWeb(self, w):
"""Opens the web site and closes the dialog."""
try:
webbrowser.open("http://w3af.org/")
except Exception:
#
# This catches bug #2685576
# https://sourceforge.net/tracker2/?func=detail&atid=853652&aid=2685576&group_id=170274
#
# Which seems to be related to:
# http://mail.python.org/pipermail/python-list/2004-July/269513.html
#
pass
self.destroy()
class WindowsCommunication(object):
def __init__(self, w3af, winCreator):
self.w3af = w3af
self.winCreator = winCreator
self.isActive = False
def e(x):
raise RuntimeError(
_("BUG! The communicator was never initialized"))
self.callback = e
self.client = e
def destroy(self):
"""Destroys the window."""
self.isActive = False
return True
def create(self, info=None):
"""Assures the window is shown.
        Creates a new window if not active, or raises the previous one if it
        already exists.
        :param info: info to send initially to the window
"""
if self.isActive:
self.client.present()
else:
self.winCreator(self.w3af, self)
self.isActive = True
if info is not None:
self.send(info)
__call__ = create
def send(self, info):
"""Sends information to the window.
        :param info: info to send initially to the window
"""
if not self.isActive:
self.create()
self.callback(info)
def enable(self, window, callback):
"""Enables the window."""
self.client = window
self.callback = callback
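# Usage sketch (the payload format is only an example): the window created by
# winCreator is expected to call enable() with itself and a callback, after
# which send() either raises the existing window or creates one on demand:
#
#   self.commCompareTool = WindowsCommunication(self.w3af, compare.Compare)
#   ...
#   self.commCompareTool.send((request_id, request, response))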
class MainApp(object):
"""Main GTK application
:author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
"""
def __init__(self, profile, do_upd):
disclaimer = DisclaimerController()
if not disclaimer.accept_disclaimer():
return
# First of all, create the nice splash screen so we can show something
# to the user while all the hard work is done on the background
splash = Splash()
# Create a new window
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_icon_from_file(W3AF_ICON)
self.window.connect("delete_event", self.quit)
self.window.connect('key_press_event', self.help_f1)
        # This is the way we track if the window is currently maximized or not
self.is_maximized = False
self.window.connect("window-state-event", self.on_window_state_event)
splash.push(_("Loading..."))
self.w3af = w3af_core = w3afCore()
# Now we start the error handling
unhandled.set_except_hook(w3af_core)
# Please note that this doesn't block the Splash window since it will
# (hopefully) call splash.push once every time it has made some
# progress, thus calling the splash window mainloop() and handling any
# pending events
gui_upd = GUIUpdater(do_upd, splash.push)
gui_upd.update()
# title and positions
self.window.set_title(MAIN_TITLE)
genconfigfile = os.path.join(get_home_dir(), "gui_config.pkl")
try:
self.generalconfig = shelve.open(genconfigfile)
except Exception, e:
print ("WARNING: something bad happened when trying to open the"
" general config! File: %s. Problem: %s" % (genconfigfile, e))
self.generalconfig = FakeShelve()
window_size = self.generalconfig.get("mainwindow-size", (1024, 768))
window_position = self.generalconfig.get("mainwindow-position", (0, 0))
should_maximize = self.generalconfig.get("should-maximize", True)
self.window.resize(*window_size)
self.window.move(*window_position)
if should_maximize:
self.window.maximize()
mainvbox = gtk.VBox()
self.window.add(mainvbox)
mainvbox.show()
splash.push(_("Initializing core..."))
# This is inited before all, to have a full logging facility.
om.manager.set_output_plugin_inst(GtkOutput())
# status bar
splash.push(_("Building the status bar..."))
guard = guardian.FoundObjectsGuardian(self.w3af)
self.exceptions_sb = guardian.FoundExceptionsStatusBar(self.w3af)
self.sb = entries.StatusBar(_("Program started"), [self.exceptions_sb,
guard])
self.w3af.mainwin = self
self.is_running = False
self.paused = False
self.scan_should = "start"
self.stopped_by_user = False
self.menuViews = {}
# Create a UIManager instance
splash.push(_("Creating menu and toolbar..."))
uimanager = gtk.UIManager()
accelgroup = uimanager.get_accel_group()
self.window.add_accel_group(accelgroup)
self._actiongroup = actiongroup = gtk.ActionGroup('UIManager')
# Create actions
actiongroup.add_actions([
# xml_name, icon, real_menu_text, accelerator, tooltip, callback
('Quit', gtk.STOCK_QUIT, _('_Quit'), None, _(
'Exit the program'), lambda w: self.quit(None, None)),
('New', gtk.STOCK_NEW, _('_New'), None, _(
'Create a new profile'), lambda w: self.profile_action("new")),
('Save', gtk.STOCK_SAVE, _('_Save'), None, _('Save this configuration'), lambda w: self.profile_action("save")),
('SaveAs', gtk.STOCK_SAVE_AS, _('Save _as...'), None, _('Save this configuration in a new profile'), lambda w: self.profile_action("save_as")),
('Revert', gtk.STOCK_REVERT_TO_SAVED, _('_Revert'), None, _('Revert the profile to its saved state'), lambda w: self.profile_action("revert")),
('Delete', gtk.STOCK_DELETE, _('_Delete'), None, _('Delete this profile'), lambda w: self.profile_action("delete")),
('ProfilesMenu', None, _('_Profiles')),
('ViewMenuScan', None, _('_View')),
('ViewMenuExploit', None, _('_View')),
('EditPlugin', gtk.STOCK_EDIT, _('_Edit plugin'),
None, _('Edit selected plugin'), self._edit_selected_plugin),
('EditMenuScan', None, _('_Edit'), None, _('Edit'),
self._editMenu),
('URLconfig', None, _('_HTTP Config'), None, _(
'HTTP configuration'), self.menu_config_http),
('Miscellaneous', None, _('_Miscellaneous'), None,
_('Miscellaneous configuration'), self.menu_config_misc),
('ConfigurationMenu', None, _('_Configuration')),
('ManualRequest', gtk.STOCK_INDEX, _('_Manual Request'), '<Control>m', _('Generate manual HTTP request'), self._manual_request),
('FuzzyRequest', gtk.STOCK_PROPERTIES, _('_Fuzzy Request'), '<Control>u', _('Generate fuzzy HTTP requests'), self._fuzzy_request),
('EncodeDecode', gtk.STOCK_CONVERT, _('Enc_ode/Decode'), '<Control>o', _('Encodes and Decodes in different ways'), self._encode_decode),
('ExportRequest', gtk.STOCK_COPY, _('_Export Request'),
'<Control>e', _('Export HTTP request'), self._export_request),
('Compare', gtk.STOCK_ZOOM_100, _('_Compare'), '<Control>r',
_('Compare different requests and responses'), self._compare),
('Proxy', gtk.STOCK_CONNECT, _('_Proxy'), '<Control>p',
_('Proxies the HTTP requests, allowing their modification'),
self._proxy_tool),
('ToolsMenu', None, _('_Tools')),
('Wizards', gtk.STOCK_SORT_ASCENDING, _('_Wizards'),
None, _('Point & Click Penetration Test'), self._wizards),
('ReportBug', gtk.STOCK_SORT_ASCENDING, _(
'_Report a Bug'), None, _('Report a Bug'), self.report_bug),
('Help', gtk.STOCK_HELP, _('_Help'), None, _(
'Help regarding the framework'), self.menu_help),
('About', gtk.STOCK_ABOUT, _('_About'), None, _(
'About the framework'), self.menu_about),
('HelpMenu', None, _('_Help')),
('StartStop', gtk.STOCK_MEDIA_PLAY, _('_Start'),
None, _('Start scan'), self._scan_director),
('ExploitAll', gtk.STOCK_EXECUTE, _('_Multiple Exploit'),
None, _('Exploit all vulns'), self._exploit_all),
])
actiongroup.add_toggle_actions([
# xml_name, icon, real_menu_text, accelerator, tooltip, callback,
# initial_flag
('Pause', gtk.STOCK_MEDIA_PAUSE, _('_Pause'),
None, _('Pause scan'), self._scan_pause, False),
])
# the view menu for exploit
actiongroup.add_toggle_actions([
# xml_name, icon, real_menu_text, accelerator, tooltip, callback,
# initial_flag
('ExploitVuln', None, '_Plugins', None,
_('Toggle the plugins panel'),
lambda w: self.dyn_panels(w, "exploitvuln"), True),
('Interactive', None, '_Shells and Proxies', None,
_('Toggle the shells and proxies window'),
lambda w: self.dyn_panels(w, "interac"), True),
])
ag = actiongroup.get_action("ViewMenuExploit")
ag.set_sensitive(False)
ag.set_visible(False)
self.menuViews["Exploit"] = ag
# the sensitive options for profiles
self.profile_actions = [actiongroup.get_action(
x) for x in "Save SaveAs Revert Delete".split()]
self.activate_profile_actions([False, True, False, False])
# the sensitive options for edit
ag = actiongroup.get_action("EditPlugin")
ag.set_sensitive(False)
# Add the actiongroup to the uimanager
uimanager.insert_action_group(actiongroup, 0)
uimanager.add_ui_from_string(UI_MENU)
# menubar and toolbar
menubar = uimanager.get_widget('/MenuBar')
mainvbox.pack_start(menubar, False)
toolbar = uimanager.get_widget('/Toolbar')
mainvbox.pack_start(toolbar, False)
# put both start/stop buttons inside the wrapper
self.startstopbtns = helpers.BroadcastWrapper()
# get toolbar items
assert toolbar.get_n_items() == 16
toolbut_startstop = entries.ToolbuttonWrapper(toolbar, 5)
self.startstopbtns.addWidget(toolbut_startstop)
self.toolbut_pause = toolbar.get_nth_item(6)
self.toolbut_pause.set_sensitive(False)
self.scanok = helpers.PropagateBuffer(self.startstopbtns.set_sensitive)
exploitall = toolbar.get_nth_item(8)
self.exploitallsens = helpers.SensitiveAnd(
exploitall, ("stopstart", "tabinfo"))
# tab dependent widgets
self.tabDependant = [(
lambda x: self.exploitallsens.set_sensitive(
x, "tabinfo"), ('Exploit',)),
(actiongroup.get_action("EditMenuScan")
                              .set_sensitive, ('Scan config',)),
]
# the throbber
splash.push(_("Building the throbber..."))
self.throbber = helpers.Throbber()
separat = gtk.SeparatorToolItem()
separat.set_draw(False)
separat.set_expand(True)
separat.show()
toolbar.insert(separat, -1)
toolbar.insert(self.throbber, -1)
# help structure
self.w3af.helpChapters = dict(main="Configuring_the_scan",
scanrun="Browsing_the_Knowledge_Base")
self.helpChapter = ("Configuring_the_scan",
"Running_the_scan", "--RESULTS--", "Exploitation")
# notebook
splash.push(_("Building the main screen..."))
self.nb = gtk.Notebook()
self.nb.connect("switch-page", self.nb_changed_page)
mainvbox.pack_start(self.nb, True)
self.nb.show()
# scan config tab
pan = entries.RememberingHPaned(self.w3af, "pane-scanconfig", 150)
self.pcbody = pluginconfig.PluginConfigBody(self, self.w3af)
try:
self.profiles = profiles.ProfileList(self.w3af, initial=profile)
except ValueError, ve:
# This is raised when the profile doesn't exist
#
# I handle this by creating the profiles without an initial profile
# selected and by reporting it to the user in a toolbar
self.profiles = profiles.ProfileList(self.w3af, initial=None)
self.sb(str(ve))
pan.pack1(self.profiles)
pan.pack2(self.pcbody)
pan.show_all()
label = gtk.Label(_("Scan config"))
self.nb.append_page(pan, label)
self.viewSignalRecipient = self.pcbody
self.notetabs = {}
# dummy tabs creation for notebook, real ones are done in set_tabs
for title in (_("Log"), _("Results")):
dummy = gtk.Label("dummy")
self.notetabs[title] = dummy
self.nb.append_page(dummy, gtk.Label())
self.set_tabs(False)
label = gtk.Label(_("Exploit"))
exploit_tab_body = ExploitBody(self.w3af)
self.nb.append_page(exploit_tab_body, label)
self.notetabs[_("Exploit")] = exploit_tab_body
# status bar
mainvbox.pack_start(self.sb, False)
# communication between different windows
self.commCompareTool = WindowsCommunication(self.w3af, compare.Compare)
# finish it
self.window.show()
splash.destroy()
self.exceptions_sb.hide_all()
# No need to add a try/except here to catch KeyboardInterrupt since
# it is already done in unhandled.handle_crash
gtk.main()
def profile_changed(self, *args, **kwargs):
if hasattr(self, "profiles"):
self.profiles.profile_changed(*args, **kwargs)
def _editMenu(self, widget):
"""
This handles the click action of the user over the edit menu.
The main objective of this function is to disable the "Edit Plugin"
option, if the user isn't focused over a plugin.
:param widget: Not used
"""
treeToUse = None
if self.pcbody.out_plugin_tree.is_focus():
treeToUse = self.pcbody.out_plugin_tree
elif self.pcbody.std_plugin_tree.is_focus():
treeToUse = self.pcbody.std_plugin_tree
else:
# No focus, we should keep the option disabled
return None
# We know that we have focus.... but... is the selection a plugin ?
(path, column) = treeToUse.get_cursor()
if path is not None and len(path) > 1:
# Excellent! it is over a plugin!
# enable the menu option
ag = self._actiongroup.get_action("EditPlugin")
ag.set_sensitive(True)
def _edit_selected_plugin(self, widget):
"""
This is the handler for the "Edit Plugin" menu option.
:param widget: Not used
"""
self.pcbody.edit_selected_plugin()
def on_window_state_event(self, widget, event, data=None):
mask = gtk.gdk.WINDOW_STATE_MAXIMIZED
self.is_maximized = widget.get_window().get_state() & mask == mask
def quit(self, widget, event, data=None):
"""Main quit.
:param widget: who sent the signal.
:param event: the event that happened
:param data: optional data to receive.
"""
msg = _("Do you really want to quit?")
dlg = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_QUESTION,
gtk.BUTTONS_YES_NO, msg)
opt = dlg.run()
dlg.destroy()
if opt != gtk.RESPONSE_YES:
return True
helpers.end_threads()
self.sb.clear()
try:
# saving windows config
w = self.window
self.generalconfig["should-maximize"] = self.is_maximized
self.generalconfig["mainwindow-size"] = w.get_size()
self.generalconfig["mainwindow-position"] = w.get_position()
self.generalconfig.close()
finally:
# We set the generalconfig to a fake shelve just in case other
# windows are still open and want to get some data from it, this
# prevents: "ValueError: invalid operation on closed shelf"
#
# https://github.com/andresriancho/w3af/issues/2691
#
self.generalconfig = FakeShelve()
# Quit the mainloop
gtk.main_quit()
time.sleep(0.5)
self.w3af.quit()
return False
def _scan_director(self, widget):
"""Directs what to do with the Scan."""
action = "_scan_" + self.scan_should
func = getattr(self, action)
func()
def save_state_to_core(self, relaxedTarget=False):
"""Save the actual state to the core.
:param relaxedTarget: if True, return OK even if the target wasn't
successfully saved
:return: True if all went ok
"""
# Clear everything
for plugin_type in self.w3af.plugins.get_plugin_types():
self.w3af.plugins.set_plugins([], plugin_type)
# save the activated plugins
for plugin_type, plugins in self.pcbody.get_activated_plugins():
self.w3af.plugins.set_plugins(plugins, plugin_type)
# save the URL, the rest of the options are saved in the "Advanced"
# dialog
options = self.w3af.target.get_options()
# unicode str needed. pygtk works with 'utf8'
url = self.pcbody.target.get_text().decode('utf8')
target_option = options['target']
if relaxedTarget:
try:
target_option.set_value(url)
self.w3af.target.set_options(options)
except:
pass
return True
else:
try:
helpers.coreWrap(target_option.set_value, url)
helpers.coreWrap(self.w3af.target.set_options, options)
except BaseFrameworkException:
return False
return True
def _scan_start(self):
"""
Starts the actual scanning
"""
if not self.save_state_to_core():
return
def real_scan_start():
# Verify that everything is ready to run
try:
helpers.coreWrap(self.w3af.plugins.init_plugins)
helpers.coreWrap(self.w3af.verify_environment)
except BaseFrameworkException:
return
self.w3af.start()
def start_scan_wrap():
# Just in case, make sure we have a GtkOutput in the output manager
# for the current scan
om.manager.set_output_plugin_inst(GtkOutput())
try:
real_scan_start()
except KeyboardInterrupt:
# FIXME: Confirm: we should never get here because threads
# send the KeyboardInterrupt to the main thread.
pass
except ScanMustStopByUserRequest:
pass
except Exception:
#
# Exceptions generated by plugins are handled in
# ExceptionHandler
#
# The only exceptions that can get here are the ones in the
# framework and UI itself.
#
plugins_str = pprint_plugins(self.w3af)
exc_class, exc_inst, exc_tb = sys.exc_info()
unhandled.handle_crash(self.w3af, exc_class, exc_inst,
exc_tb, plugins=plugins_str)
finally:
gobject.idle_add(self._scan_stopfeedback)
self._scan_finished()
# Starting output manager to try to avoid bug
# https://github.com/andresriancho/w3af/issues/997
om.out.debug('Starting output manager')
# start real work in background, and start supervising if it ends
scanner = Process(target=start_scan_wrap, name='MainGTKScanner')
scanner.daemon = True
scanner.start()
gobject.timeout_add(500, self._scan_superviseStatus)
self.sb(_("The scan has started"))
self.set_tabs(True)
self.throbber.running(True)
self.toolbut_pause.set_sensitive(True)
self.startstopbtns.change_internals("Stop", gtk.STOCK_MEDIA_STOP,
_("Stop scan"))
self.scan_should = "stop"
self.stopped_by_user = False
self.nb.set_current_page(1)
self.exploitallsens.set_sensitive(True, "stopstart")
# Save the target URL to the history
self.pcbody.target.insert_url()
# sets the title
targets = cf.cf.get('targets')
if targets:
target_domain_obj = targets[0]
target_domain = target_domain_obj.get_domain()
self.window.set_title("w3af - " + target_domain)
def _scan_pause(self, widget):
"""Pauses the scan."""
shall_pause = widget.get_active()
# stop/start core and throbber
self.w3af.pause(shall_pause)
self.startstopbtns.set_sensitive(not shall_pause)
self.toolbut_pause.set_sensitive(not shall_pause)
self.throbber.running(not shall_pause)
self.paused = shall_pause
if not shall_pause:
self.sb(_("Resuming the scan..."))
# start the status supervisor
gobject.timeout_add(500, self._scan_superviseStatus)
else:
self.sb(_("The scan is paused"))
def _scan_stop(self):
"""Stops the scanning."""
def stop_scan_wrap():
try:
self.w3af.stop()
except Exception:
#
# Exceptions generated by plugins are handled in
# ExceptionHandler
#
# The only exceptions that can get here are the ones in the
# framework and UI itself.
#
plugins_str = pprint_plugins(self.w3af)
exc_class, exc_inst, exc_tb = sys.exc_info()
unhandled.handle_crash(self.w3af, exc_class, exc_inst,
exc_tb, plugins=plugins_str)
# start real work in background, and start supervising if it ends
scan_stop = Process(target=stop_scan_wrap, name='ScanStopper')
scan_stop.daemon = True
scan_stop.start()
self.startstopbtns.set_sensitive(False)
self.toolbut_pause.set_sensitive(False)
self.sb(_("Stopping the scan..."), 15)
self.stopped_by_user = True
def _scan_stopfeedback(self):
"""Visual elements when stopped.
This is separated because it's called when the process finishes by
itself or by the user click.
"""
self.startstopbtns.change_internals(_("Clear"),
gtk.STOCK_CLEAR,
_("Clear all the obtained results"))
self.throbber.running(False)
self.toolbut_pause.set_sensitive(False)
self.scan_should = "clear"
self.startstopbtns.set_sensitive(True)
if self.stopped_by_user:
self.sb(_("The scan has stopped by user request"))
else:
self.sb(_("The scan has finished"))
def _scan_finished(self):
"""
        This method is called when the scan finishes successfully or because
of an exception.
"""
# After the scan finishes, I want to be able to use the GtkOutput
# features for exploitation
om.manager.set_output_plugin_inst(GtkOutput())
exception_list = self.w3af.exception_handler.get_unique_exceptions()
if exception_list:
# damn...
self.sb(_("Scan finished with exceptions"))
self.exceptions_sb.show_all(len(exception_list))
def _scan_clear(self):
"""Clears core and gui, and fixes button to next step."""
# cleanup
self.nb.set_current_page(0)
self.w3af.cleanup()
self.set_tabs(False)
self.sb(_("Scan results cleared"))
self.exploitallsens.set_sensitive(False, "stopstart")
# put the button in start
self.startstopbtns.change_internals(
_("Start"), gtk.STOCK_MEDIA_PLAY, _("Start scan"))
self.scan_should = "start"
self.window.set_title(MAIN_TITLE)
# This is done here in order to keep the logging facility.
om.manager.set_output_plugin_inst(GtkOutput())
def _scan_superviseStatus(self):
"""Handles the waiting until core finishes the scan.
:return: True to be called again
"""
if self.w3af.status.is_running():
return True
if self.paused:
# stop checking, but don't change any feedback, only
# turn on the pause button
self.toolbut_pause.set_sensitive(True)
return True
# core is stopped, we had it in on, stop all
self._scan_stopfeedback()
return False
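    # A minimal sketch (not part of the original sources) of the polling
    # contract used by _scan_superviseStatus above: gobject.timeout_add()
    # keeps re-invoking its callback every `interval` milliseconds for as
    # long as the callback returns True, and removes the timer once it
    # returns False. The names below are hypothetical:
    #
    #   def _poll():
    #       return core_is_still_running()    # True -> keep polling
    #   gobject.timeout_add(500, _poll)       # False -> timer is removed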
def set_tabs(self, sensit):
"""Set the exploits tabs to real window or dummies labels.
:param sensit: if it's active or not
"""
# the View menu
for menu in self.menuViews.values():
menu.set_sensitive(sensit)
self.is_running = sensit
# ok, the tabs, :p
self._set_tab(sensit, _("Log"), LogBody)
self._set_tab(sensit, _("Results"), scanrun.ScanRunBody)
def _set_tab(self, sensit, title, realWidget):
# create title and window/label
label = gtk.Label(title)
if sensit:
newone = realWidget(self.w3af)
else:
newone = gtk.Label(_("The scan has not started: no info yet"))
newone.show()
label.set_sensitive(False)
newone.set_sensitive(False)
# remove old page and insert this one
pointer = self.notetabs[title]
pos = self.nb.page_num(pointer)
self.nb.remove_page(pos)
self.nb.insert_page(newone, label, pos)
self.notetabs[title] = newone
def menu_config_http(self, action):
"""Configure HTTP options."""
configurable = self.w3af.uri_opener.settings
confpanel.ConfigDialog(_("Configure HTTP settings"), self.w3af,
configurable)
def menu_config_misc(self, action):
"""Configure Misc options."""
configurable = MiscSettings()
confpanel.ConfigDialog(
_("Configure Misc settings"), self.w3af, configurable)
def dyn_panels(self, widget, panel):
"""Turns on and off the Log Panel."""
active = widget.get_active()
if hasattr(self.viewSignalRecipient, 'toggle_panels'):
self.viewSignalRecipient.toggle_panels(panel, active)
def nb_changed_page(self, notebook, page, page_num):
"""Changed the page in the Notebook.
It manages which View will be visible in the Menu, and
to which recipient the signal of that View should be
directed.
"""
ch = notebook.get_nth_page(page_num)
page = notebook.get_tab_label(ch).get_text()
self.w3af.helpChapters["main"] = self.helpChapter[page_num]
self.viewSignalRecipient = None
for name, menu in self.menuViews.items():
if name == page:
menu.set_sensitive(self.is_running)
menu.set_visible(True)
self.viewSignalRecipient = self.notetabs[name]
else:
menu.set_visible(False)
if page not in self.menuViews:
            # even when we don't have a view, we should show
            # one, but disabled
fake = self.menuViews.items()[0][1]
fake.set_sensitive(False)
fake.set_visible(True)
        # generic tab-dependent widgets
for widg, where in self.tabDependant:
widg(page in where)
def profile_action(self, action):
"""Do the action on the profile."""
methname = action + "_profile"
method = getattr(self.profiles, methname)
method()
def activate_profile_actions(self, newstatus):
"""Activate profiles buttons.
:param newstatus: if the profile changed or not.
"""
for opt, stt in zip(self.profile_actions, newstatus):
opt.set_sensitive(stt)
def menu_help(self, action):
"""Shows the help message."""
open_help()
def menu_about(self, action):
"""Shows the about message."""
dlg = AboutDialog(self.w3af)
dlg.run()
def report_bug(self, action):
"""Report bug to Sourceforge"""
user_reports_bug.user_reports_bug()
def _exploit_all(self, action):
"""Exploits all vulns."""
exploitpage = self.notetabs[_("Exploit")]
exploitpage.exploit_all()
def _manual_request(self, action):
"""Generate manual HTTP requests."""
ManualRequests(self.w3af)
def _export_request(self, action):
"""Export HTTP requests to python, javascript, etc."""
export_request.export_request(self.w3af)
def _fuzzy_request(self, action):
"""Generate fuzzy HTTP requests."""
FuzzyRequests(self.w3af)
def _encode_decode(self, action):
"""Generate fuzzy HTTP requests."""
encdec.EncodeDecode(self.w3af)
def _compare(self, action):
"""Generate fuzzy HTTP requests."""
self.commCompareTool.create()
def _proxy_tool(self, action):
"""Proxies the HTTP calls."""
self.set_tabs(True)
ProxiedRequests(self.w3af)
def _wizards(self, action):
"""Execute the wizards machinery."""
wizard.WizardChooser(self.w3af)
def help_f1(self, widget, event):
if event.keyval != 65470: # F1, check: gtk.gdk.keyval_name(event.keyval)
return
chapter = self.w3af.helpChapters["main"]
if chapter == "--RESULTS--":
chapter = self.w3af.helpChapters["scanrun"]
open_help(chapter)
def main(profile, do_upd):
MainApp(profile, do_upd)
|
master.py
|
#
# Copyright Cloudlab URV 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import copy
import time
import json
import uuid
import flask
import queue
import logging
import requests
from pathlib import Path
import concurrent.futures as cf
from gevent.pywsgi import WSGIServer
from threading import Thread
from concurrent.futures import ThreadPoolExecutor
from lithops.constants import LITHOPS_TEMP_DIR, SA_LOG_FILE, JOBS_DIR,\
SA_SERVICE_PORT, SA_CONFIG_FILE, SA_DATA_FILE
from lithops.localhost.localhost import LocalhostHandler
from lithops.standalone.standalone import LithopsValidationError
from lithops.utils import verify_runtime_name, iterchunks, setup_lithops_logger
from lithops.standalone.utils import get_worker_setup_script
from lithops.standalone.keeper import BudgetKeeper
from lithops.version import __version__ as lithops_version
log_format = "%(asctime)s\t[%(levelname)s] %(name)s:%(lineno)s -- %(message)s"
setup_lithops_logger(logging.DEBUG, filename=SA_LOG_FILE, log_format=log_format)
logger = logging.getLogger('lithops.standalone.master')
app = flask.Flask(__name__)
MAX_INSTANCE_CREATE_RETRIES = 2
REUSE_WORK_QUEUE_NAME = 'all'
exec_mode = 'consume'
workers = {}
workers_state = {}
standalone_config = None
standalone_handler = None
budget_keeper = None
work_queues = {}
master_ip = None
# variables for consume mode
localhost_manager_process = None
localhos_handler = None
last_job_key = None
def is_worker_free(worker):
"""
Checks if the Lithops service is ready and free in the worker VM instance
"""
url = f"http://{worker.private_ip}:{SA_SERVICE_PORT}/ping"
r = requests.get(url, timeout=0.5)
if r.status_code == 200:
if r.json()['status'] == 'free':
return True
return False
def setup_worker(worker_info, work_queue_name):
"""
    Set up a worker VM: install all the Lithops dependencies into the
    worker and start the process that runs the job tasks.
"""
global workers, workers_state
worker = standalone_handler.backend.get_instance(**worker_info, public=False)
logger.debug(f'Starting setup for VM instance {worker.name} ({worker.private_ip})')
def wait_worker_ready(worker):
workers_state[worker.name] = {'state': 'starting'}
worker_ready = False
retry = 1
while not worker_ready and retry <= MAX_INSTANCE_CREATE_RETRIES:
try:
worker.wait_ready(verbose=True)
worker_ready = True
except TimeoutError as e: # VM not started in time
if retry == MAX_INSTANCE_CREATE_RETRIES:
raise e
logger.debug(f'Timeout Error. Recreating VM instance {worker.name}')
retry += 1
worker.delete()
worker.create()
wait_worker_ready(worker)
instance_create_retries = 0
max_instance_create_retries = standalone_config.get('worker_create_retries', MAX_INSTANCE_CREATE_RETRIES)
while instance_create_retries < max_instance_create_retries:
try:
logger.debug(f'Validating {worker.name}')
worker.validate_capabilities()
break
except LithopsValidationError as e:
logger.debug(f'{worker.name} validation error {e}')
workers_state[worker.name] = {'state': 'error', 'err': str(e)}
if instance_create_retries + 1 < max_instance_create_retries:
# Continue retrying
logger.warning(f'Worker {worker.name} setup failed with error {e} after {instance_create_retries} retries')
worker.delete()
worker.create()
instance_create_retries += 1
wait_worker_ready(worker)
else:
workers_state[worker.name] = {'state': 'setup', 'err': workers_state[worker.name].get('err')}
break
# upload zip lithops package
logger.debug(f'Uploading lithops files to {worker}')
worker.get_ssh_client().upload_local_file(
'/opt/lithops/lithops_standalone.zip',
'/tmp/lithops_standalone.zip')
logger.debug(f'Executing lithops installation process on {worker}')
vm_data = {'name': worker.name,
'private_ip': worker.private_ip,
'instance_id': worker.instance_id,
'ssh_credentials': worker.ssh_credentials,
'master_ip': master_ip,
'work_queue': work_queue_name}
remote_script = "/tmp/install_lithops.sh"
script = get_worker_setup_script(standalone_config, vm_data)
worker.get_ssh_client().upload_data_to_file(script, remote_script)
cmd = f"chmod 777 {remote_script}; sudo {remote_script};"
worker.get_ssh_client().run_remote_command(cmd, run_async=True)
worker.del_ssh_client()
logger.debug(f'Installation script submitted to {worker}')
workers_state[worker.name] = {'state': 'running', 'err': workers_state[worker.name].get('err')}
logger.debug(f'Appending {worker.name} to Worker list')
workers[worker.name] = worker
def start_workers(job_payload, work_queue_name):
"""
Creates the workers (if any)
"""
workers = job_payload['worker_instances']
if not workers:
return
futures = []
with ThreadPoolExecutor(len(workers)) as executor:
for worker_info in workers:
futures.append(executor.submit(setup_worker, worker_info, work_queue_name))
for future in cf.as_completed(futures):
try:
future.result()
except Exception as e:
# TODO consider to update worker state
logger.error(f"Worker setup produced an exception {e}")
logger.debug(f'All workers set up for work queue "{work_queue_name}"')
def run_job_local(work_queue):
"""
Localhost jobs manager process for consume mode
"""
global localhos_handler
global last_job_key
pull_runtime = standalone_config.get('pull_runtime', False)
def wait_job_completed(job_key):
done = os.path.join(JOBS_DIR, job_key+'.done')
while True:
if os.path.isfile(done):
break
time.sleep(1)
try:
localhos_handler = LocalhostHandler({'pull_runtime': pull_runtime})
while True:
job_payload = work_queue.get()
job_key = job_payload['job_key']
last_job_key = job_key
job_payload['config']['lithops']['backend'] = 'localhost'
localhos_handler.invoke(job_payload)
wait_job_completed(job_key)
except Exception as e:
logger.error(e)
def run_job_worker(job_payload, work_queue):
"""
    Process responsible for waiting for workers to become ready and for
    submitting the individual tasks of the job to them
"""
job_key = job_payload['job_key']
call_ids = job_payload['call_ids']
chunksize = job_payload['chunksize']
for call_ids_range in iterchunks(call_ids, chunksize):
task_payload = copy.deepcopy(job_payload)
dbr = task_payload['data_byte_ranges']
task_payload['call_ids'] = call_ids_range
task_payload['data_byte_ranges'] = [dbr[int(call_id)] for call_id in call_ids_range]
work_queue.put(task_payload)
while not work_queue.empty():
time.sleep(1)
done = os.path.join(JOBS_DIR, job_key+'.done')
Path(done).touch()
logger.debug(f'Job process {job_key} finished')
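# A minimal sketch (not part of the original module) of how the chunking in
# run_job_worker() behaves, assuming iterchunks() simply yields consecutive
# fixed-size slices of the call id list:
#
#   >>> list(iterchunks(['0', '1', '2', '3', '4'], 2))
#   [['0', '1'], ['2', '3'], ['4']]
#
# Each yielded slice becomes one task payload on the work queue, with
# data_byte_ranges filtered down to the byte ranges of those call ids.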
def error(msg):
response = flask.jsonify({'error': msg})
response.status_code = 404
return response
@app.route('/workers', methods=['GET'])
def get_workers():
"""
    Returns the list of currently free workers
"""
global workers
global budget_keeper
# update last_usage_time to prevent race condition when keeper stops the vm
budget_keeper.last_usage_time = time.time()
current_workers = [(worker.name, worker.private_ip) for worker in workers.values()]
logger.debug(f'Current workers: {current_workers}')
free_workers = []
def check_worker(worker):
if is_worker_free(worker):
free_workers.append((
worker.name,
worker.private_ip,
worker.instance_id,
worker.ssh_credentials)
)
if workers:
with ThreadPoolExecutor(len(workers)) as ex:
ex.map(check_worker, workers.values())
logger.debug(f'Total free workers: {len(free_workers)}')
response = flask.jsonify(free_workers)
response.status_code = 200
return response
@app.route('/workers-state', methods=['GET'])
def get_workers_state():
"""
Returns the current workers state
"""
logger.debug(f'Workers state: {workers_state}')
return flask.jsonify(workers_state)
@app.route('/get-task/<work_queue_name>', methods=['GET'])
def get_task(work_queue_name):
"""
Returns a task from the work queue
"""
global work_queues
try:
task_payload = work_queues.setdefault(work_queue_name, queue.Queue()).get(False)
response = flask.jsonify(task_payload)
response.status_code = 200
job_key = task_payload['job_key']
calls = task_payload['call_ids']
worker_ip = flask.request.remote_addr
logger.debug(f'Worker {worker_ip} retrieved Job {job_key} - Calls {calls}')
except queue.Empty:
response = ('', 204)
return response
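# A hypothetical worker-side polling loop (not part of this module) against
# the /get-task endpoint above; it treats HTTP 200 as "task payload in the
# body" and the empty 204 reply as "no work queued right now":
#
#   def poll_for_task(master_ip, queue_name):
#       url = f'http://{master_ip}:{SA_SERVICE_PORT}/get-task/{queue_name}'
#       r = requests.get(url)
#       return r.json() if r.status_code == 200 else None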
def stop_job_process(job_key_list):
"""
Stops a job process
"""
global localhos_handler
global work_queues
for job_key in job_key_list:
logger.debug(f'Received SIGTERM: Stopping job process {job_key}')
if exec_mode == 'consume':
if job_key == last_job_key:
# kill current running job process
localhos_handler.clear()
done = os.path.join(JOBS_DIR, job_key+'.done')
Path(done).touch()
else:
# Delete job_payload from pending queue
work_queue = work_queues['local']
tmp_queue = []
while not work_queue.empty():
try:
job_payload = work_queue.get(False)
if job_payload['job_key'] != job_key:
tmp_queue.append(job_payload)
except Exception:
pass
for job_payload in tmp_queue:
work_queue.put(job_payload)
else:
wqn = job_key if exec_mode == 'create' else REUSE_WORK_QUEUE_NAME
# empty work queue
work_queue = work_queues.setdefault(wqn, queue.Queue())
while not work_queue.empty():
try:
work_queue.get(False)
except Exception:
pass
def stop_task(worker):
private_ip = worker['private_ip']
url = f"http://{private_ip}:{SA_SERVICE_PORT}/stop/{job_key}"
requests.post(url, timeout=0.5)
# Send stop signal to all workers
with ThreadPoolExecutor(len(workers)) as ex:
ex.map(stop_task, workers.values())
@app.route('/stop', methods=['POST'])
def stop():
"""
Stops received job processes
"""
job_key_list = flask.request.get_json(force=True, silent=True)
    # Start a separate thread to do the task in the background,
    # so the client is not kept waiting.
Thread(target=stop_job_process, args=(job_key_list, )).start()
return ('', 204)
@app.route('/run', methods=['POST'])
def run():
"""
    Run a job, either locally (consume mode) or on worker VMs (create/reuse modes)
"""
global budget_keeper
global work_queues
global exec_mode
global localhost_manager_process
job_payload = flask.request.get_json(force=True, silent=True)
if job_payload and not isinstance(job_payload, dict):
return error('The action did not receive a dictionary as an argument.')
try:
runtime = job_payload['runtime_name']
verify_runtime_name(runtime)
except Exception as e:
return error(str(e))
job_key = job_payload['job_key']
logger.debug('Received job {}'.format(job_key))
budget_keeper.last_usage_time = time.time()
budget_keeper.update_config(job_payload['config']['standalone'])
budget_keeper.jobs[job_key] = 'running'
exec_mode = job_payload['config']['standalone'].get('exec_mode', 'consume')
if exec_mode == 'consume':
# Consume mode runs jobs in this master VM
work_queue_name = 'local'
work_queue = work_queues.setdefault(work_queue_name, queue.Queue())
if not localhost_manager_process:
logger.debug('Starting manager process for localhost jobs')
lmp = Thread(target=run_job_local, args=(work_queue, ), daemon=True)
lmp.start()
localhost_manager_process = lmp
logger.debug(f'Putting job {job_key} into master queue')
work_queue.put(job_payload)
elif exec_mode in ['create', 'reuse']:
        # Create and reuse modes run jobs on worker VMs
logger.debug(f'Starting process for job {job_key}')
work_queue_name = job_key if exec_mode == 'create' else REUSE_WORK_QUEUE_NAME
work_queue = work_queues.setdefault(work_queue_name, queue.Queue())
Thread(target=start_workers, args=(job_payload, work_queue_name)).start()
Thread(target=run_job_worker, args=(job_payload, work_queue), daemon=True).start()
act_id = str(uuid.uuid4()).replace('-', '')[:12]
response = flask.jsonify({'activationId': act_id})
response.status_code = 202
return response
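# A minimal sketch (not produced by the real Lithops client, which sends many
# more fields) of the payload shape that the /run handler above reads: it
# looks up 'job_key', 'runtime_name', 'worker_instances', 'call_ids',
# 'chunksize' and the 'standalone' section of 'config' to pick the execution
# mode. All concrete values below are hypothetical:
#
#   job_payload = {
#       'job_key': 'A0001-0',
#       'runtime_name': 'python3',
#       'call_ids': ['00000', '00001'],
#       'chunksize': 1,
#       'data_byte_ranges': [[0, 10], [11, 20]],
#       'config': {'standalone': {'exec_mode': 'consume'}, 'lithops': {}},
#       'worker_instances': [],
#   }
#   requests.post(f'http://{master_ip}:{SA_SERVICE_PORT}/run', json=job_payload)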
@app.route('/ping', methods=['GET'])
def ping():
response = flask.jsonify({'response': lithops_version})
response.status_code = 200
return response
@app.route('/preinstalls', methods=['GET'])
def preinstalls():
payload = flask.request.get_json(force=True, silent=True)
if payload and not isinstance(payload, dict):
return error('The action did not receive a dictionary as an argument.')
try:
runtime = payload['runtime']
verify_runtime_name(runtime)
except Exception as e:
return error(str(e))
pull_runtime = standalone_config.get('pull_runtime', False)
lh = LocalhostHandler({'runtime': runtime, 'pull_runtime': pull_runtime})
runtime_meta = lh.create_runtime(runtime)
if 'lithops_version' in runtime_meta:
logger.debug("Runtime metdata extracted correctly: Lithops "
f"{runtime_meta['lithops_version']}")
response = flask.jsonify(runtime_meta)
response.status_code = 200
return response
def main():
global standalone_config
global standalone_handler
global budget_keeper
global master_ip
os.makedirs(LITHOPS_TEMP_DIR, exist_ok=True)
with open(SA_CONFIG_FILE, 'r') as cf:
standalone_config = json.load(cf)
# Delete ssh_key_filename
backend = standalone_config['backend']
if 'ssh_key_filename' in standalone_config[backend]:
del standalone_config[backend]['ssh_key_filename']
with open(SA_DATA_FILE, 'r') as ad:
master_ip = json.load(ad)['private_ip']
budget_keeper = BudgetKeeper(standalone_config)
budget_keeper.start()
standalone_handler = budget_keeper.sh
server = WSGIServer(('0.0.0.0', SA_SERVICE_PORT), app, log=app.logger)
server.serve_forever()
if __name__ == '__main__':
main()
|
configmanager.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 IBM Corporation
# Copyright 2015-2019 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Ultimately, the design is to handle all the complicated stuff at set
# rather than get time. When something is set on a group, then all
# members of that group are examined and 'inheritedfrom' attributes
# are pushed. As expression defined values are inserted, their
# dependent attributes are added to a private dict to aid in auto
# calculation. When a name is changed, all attributes are re-evaluated.
# On get, it should be a simple read of the value *except* for encrypted
# values, which are only decrypted when explicitly requested
# encrypted fields do not support expressions, either as a source or
# destination
# Note on the cryptography. Default behavior is mostly just to pave the
# way to meaningful security. Root all potentially sensitive data in
# one key. That key is in plain sight, so not meaningfully protected
# However, the key can be protected in the following ways:
# - Passphrase protected (requiring human interaction every restart)
# - TPM sealing (which forgoes the interaction, assuming the risk of
#   physical attack on the TPM is not a concern)
# This module provides cryptographic convenience functions, largely to be
# used by config.py to protect/unlock configuration as appropriate.
# The default behavior provides no meaningful protection, all encrypted
# values are linked to a master key that is stored in the clear.
# meaningful protection comes when the user elects to protect the key
# by passphrase and optionally TPM
import Cryptodome.Protocol.KDF as KDF
from Cryptodome.Cipher import AES
from Cryptodome.Hash import HMAC
from Cryptodome.Hash import SHA256
try:
import anydbm as dbm
except ModuleNotFoundError:
import dbm
import ast
import base64
from binascii import hexlify
import confluent.config.attributes as allattributes
import confluent.config.conf as conf
import confluent.log
import confluent.noderange as noderange
import confluent.util
import confluent.netutil as netutil
import confluent.exceptions as exc
import copy
import crypt
try:
import cPickle
except ModuleNotFoundError:
import pickle as cPickle
import errno
import eventlet
import eventlet.event as event
import eventlet.green.select as select
import eventlet.green.threading as gthread
import fnmatch
import hashlib
import json
import msgpack
import operator
import os
import random
import re
import string
import struct
import sys
import threading
import traceback
try:
unicode
except NameError:
unicode = str
_masterkey = None
_masterintegritykey = None
_dirtylock = threading.RLock()
_leaderlock = gthread.RLock()
_synclock = threading.RLock()
_rpclock = gthread.RLock()
_initlock = gthread.RLock()
_followerlocks = {}
_config_areas = ('nodegroups', 'nodes', 'usergroups', 'users')
tracelog = None
statelessmode = False
_cfgstore = None
_pendingchangesets = {}
_txcount = 0
_hasquorum = True
_attraliases = {
'bmc': 'hardwaremanagement.manager',
'bmcuser': 'secret.hardwaremanagementuser',
'switchuser': 'secret.hardwaremanagementuser',
'bmcpass': 'secret.hardwaremanagementpassword',
'switchpass': 'secret.hardwaremanagementpassword',
}
_validroles = ('Administrator', 'Operator', 'Monitor')
membership_callback = None
def attrib_supports_expression(attrib):
if not isinstance(attrib, str):
attrib = attrib.decode('utf8')
attrib = _attraliases.get(attrib, attrib)
if attrib.startswith('secret.') or attrib.startswith('crypted.'):
return False
return True
def _mkpath(pathname):
try:
os.makedirs(pathname)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(pathname):
pass
else:
raise
def _derive_keys(password, salt):
#implement our specific combination of pbkdf2 transforms to get at
#key. We bump the iterations up because we can afford to
#TODO: WORKERPOOL PBKDF2 is expensive
tmpkey = KDF.PBKDF2(password, salt, 32, 50000,
lambda p, s: HMAC.new(p, s, SHA256).digest())
finalkey = KDF.PBKDF2(tmpkey, salt, 32, 50000,
lambda p, s: HMAC.new(p, s, SHA256).digest())
return finalkey[:16], finalkey[16:]
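# A short usage sketch (not part of the original module): _derive_keys() runs
# two chained PBKDF2 passes over the passphrase and splits the final 32-byte
# output into a 16-byte privacy key and a 16-byte integrity key:
#
#   salt = os.urandom(32)
#   privkey, integkey = _derive_keys('passphrase', salt)
#   assert len(privkey) == 16 and len(integkey) == 16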
def _get_protected_key(keydict, password, paramname):
if password and 'unencryptedvalue' in keydict:
set_global(paramname, _format_key(
keydict['unencryptedvalue'],
password=password))
if 'unencryptedvalue' in keydict:
return keydict['unencryptedvalue']
# TODO(jbjohnso): check for TPM sealing
if 'passphraseprotected' in keydict:
if password is None:
raise exc.LockedCredentials("Passphrase protected secret requires password")
pp = keydict['passphraseprotected']
salt = pp[0]
privkey, integkey = _derive_keys(password, salt)
return decrypt_value(pp[1:], key=privkey, integritykey=integkey)
else:
raise exc.LockedCredentials("No available decryption key")
def _parse_key(keydata, password=None):
if keydata.startswith('*unencrypted:'):
return base64.b64decode(keydata[13:])
elif password:
cryptbits = [base64.b64decode(x)
for x in keydata.split('!')]
salt, iv, crypt, hmac = cryptbits[:4]
privkey, integkey = _derive_keys(password, salt)
if len(cryptbits) > 4:
integkey = None
return decrypt_value(cryptbits[1:], privkey, integkey)
raise(exc.LockedCredentials(
"Passphrase protected secret requires password"))
def _format_key(key, password=None):
if password is not None:
salt = os.urandom(32)
privkey, integkey = _derive_keys(password, salt)
cval = crypt_value(key, key=privkey) # , integritykey=integkey)
return {"passphraseprotected": (salt,) + cval}
else:
return {"unencryptedvalue": key}
def _do_notifier(cfg, watcher, callback):
try:
callback(nodeattribs=watcher['nodeattrs'], configmanager=cfg)
except Exception:
logException()
def _rpc_master_del_usergroup(tenant, name):
ConfigManager(tenant).del_usergroup(name)
def _rpc_del_usergroup(tenant, name):
ConfigManager(tenant)._true_del_usergroup(name)
def _rpc_master_set_usergroup(tenant, name, attributemap):
ConfigManager(tenant).set_usergroup(name, attributemap)
def _rpc_set_usergroup(tenant, name, attributemap):
ConfigManager(tenant)._true_set_usergroup(name, attributemap)
def _rpc_master_set_user(tenant, name, attributemap):
ConfigManager(tenant).set_user(name, attributemap)
def _rpc_set_user(tenant, name, attributemap):
ConfigManager(tenant)._true_set_user(name, attributemap)
def _rpc_master_set_node_attributes(tenant, attribmap, autocreate):
ConfigManager(tenant).set_node_attributes(attribmap, autocreate)
def _rpc_master_rename_nodes(tenant, renamemap):
ConfigManager(tenant).rename_nodes(renamemap)
def _rpc_master_rename_nodegroups(tenant, renamemap):
ConfigManager(tenant).rename_nodegroups(renamemap)
def _rpc_master_clear_node_attributes(tenant, nodes, attributes):
ConfigManager(tenant).clear_node_attributes(nodes, attributes)
def _rpc_clear_node_attributes(tenant, nodes, attributes):
ConfigManager(tenant)._true_clear_node_attributes(nodes, attributes)
def _rpc_master_set_group_attributes(tenant, attribmap, autocreate):
ConfigManager(tenant).set_group_attributes(attribmap, autocreate)
def _rpc_master_clear_group_attributes(tenant, groups, attributes):
ConfigManager(tenant).clear_group_attributes(groups, attributes)
def _rpc_clear_group_attributes(tenant, groups, attributes):
ConfigManager(tenant)._true_clear_group_attributes(groups, attributes)
def _rpc_master_del_user(tenant, name):
ConfigManager(tenant).del_user(name)
def _rpc_del_user(tenant, name):
ConfigManager(tenant)._true_del_user(name)
def _rpc_master_create_user(tenant, *args):
ConfigManager(tenant).create_user(*args)
def _rpc_master_create_usergroup(tenant, *args):
ConfigManager(tenant).create_usergroup(*args)
def _rpc_create_user(tenant, *args):
ConfigManager(tenant)._true_create_user(*args)
def _rpc_create_usergroup(tenant, *args):
ConfigManager(tenant)._true_create_usergroup(*args)
def _rpc_master_del_groups(tenant, groups):
ConfigManager(tenant).del_groups(groups)
def _rpc_del_groups(tenant, groups):
ConfigManager(tenant)._true_del_groups(groups)
def _rpc_master_del_nodes(tenant, nodes):
ConfigManager(tenant).del_nodes(nodes)
def _rpc_del_nodes(tenant, nodes):
ConfigManager(tenant)._true_del_nodes(nodes)
def _rpc_set_node_attributes(tenant, attribmap, autocreate):
ConfigManager(tenant)._true_set_node_attributes(attribmap, autocreate)
def _rpc_rename_nodes(tenant, renamemap):
ConfigManager(tenant)._true_rename_nodes(renamemap)
def _rpc_rename_nodegroups(tenant, renamemap):
ConfigManager(tenant)._true_rename_nodegroups(renamemap)
def _rpc_set_group_attributes(tenant, attribmap, autocreate):
ConfigManager(tenant)._true_set_group_attributes(attribmap, autocreate)
def check_quorum():
if isinstance(cfgleader, bool):
raise exc.DegradedCollective()
if (not cfgleader) and len(cfgstreams) < (len(_cfgstore.get('collective', {})) // 2):
# the leader counts in addition to registered streams
raise exc.DegradedCollective()
if cfgleader and not _hasquorum:
raise exc.DegradedCollective()
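# A worked example (not from the original sources) of the quorum arithmetic
# above: with 5 members recorded under _cfgstore['collective'], the integer
# division gives 5 // 2 == 2, so a leader needs at least 2 connected follower
# streams; counting the leader itself that is 3 of 5 nodes, i.e. a majority.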
def exec_on_leader(function, *args):
if isinstance(cfgleader, bool):
raise exc.DegradedCollective()
xid = confluent.util.stringify(base64.b64encode(os.urandom(8)))
while xid in _pendingchangesets:
xid = confluent.util.stringify(base64.b64encode(os.urandom(8)))
_pendingchangesets[xid] = event.Event()
rpcpayload = msgpack.packb({'function': function, 'args': args,
'xid': xid}, use_bin_type=False)
rpclen = len(rpcpayload)
cfgleader.sendall(struct.pack('!Q', rpclen))
cfgleader.sendall(rpcpayload)
_pendingchangesets[xid].wait()
del _pendingchangesets[xid]
return
def exec_on_followers(fnname, *args):
pushes = eventlet.GreenPool()
# Check health of collective prior to attempting
for _ in pushes.starmap(
_push_rpc, [(cfgstreams[s], b'') for s in cfgstreams]):
pass
if len(cfgstreams) < (len(_cfgstore['collective']) // 2):
# the leader counts in addition to registered streams
raise exc.DegradedCollective()
exec_on_followers_unconditional(fnname, *args)
def exec_on_followers_unconditional(fnname, *args):
global _txcount
pushes = eventlet.GreenPool()
_txcount += 1
payload = msgpack.packb({'function': fnname, 'args': args,
'txcount': _txcount}, use_bin_type=False)
for _ in pushes.starmap(
_push_rpc, [(cfgstreams[s], payload) for s in cfgstreams]):
pass
def logException():
global tracelog
if tracelog is None:
tracelog = confluent.log.Logger('trace')
tracelog.log(traceback.format_exc(),
ltype=confluent.log.DataTypes.event,
event=confluent.log.Events.stacktrace)
def _do_add_watcher(watcher, added, configmanager, renamed=()):
try:
watcher(added=added, deleting=(), renamed=renamed, configmanager=configmanager)
except Exception:
logException()
def init_masterkey(password=None, autogen=True):
global _masterkey
global _masterintegritykey
cfgn = get_global('master_privacy_key')
if cfgn:
_masterkey = _get_protected_key(cfgn, password, 'master_privacy_key')
elif autogen:
_masterkey = os.urandom(32)
set_global('master_privacy_key', _format_key(
_masterkey,
password=password))
cfgn = get_global('master_integrity_key')
if cfgn:
_masterintegritykey = _get_protected_key(cfgn, password,
'master_integrity_key')
#elif autogen:
# _masterintegritykey = os.urandom(64)
# set_global('master_integrity_key', _format_key(
# _masterintegritykey,
# password=password))
def _push_rpc(stream, payload):
with _rpclock:
try:
stream.sendall(struct.pack('!Q', len(payload)))
if len(payload):
stream.sendall(payload)
return True
except Exception:
logException()
try:
del cfgstreams[stream]
except KeyError:
pass
if membership_callback:
membership_callback()
stream.close()
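# The wire format used by _push_rpc() and its readers (relay_slaved_requests,
# follow_channel) is an 8-byte big-endian length prefix followed by a msgpack
# body; a zero-length frame is just a keepalive. A minimal sketch of the
# framing, with a hypothetical payload:
#
#   body = msgpack.packb({'function': '_rpc_set_user', 'args': ()},
#                        use_bin_type=False)
#   frame = struct.pack('!Q', len(body)) + body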
def decrypt_value(cryptvalue,
key=None,
integritykey=None):
# for future reference, if cryptvalue len == 3, then cbc+hmac, 4 includes version
iv, cipherdata, hmac = cryptvalue[:3]
if key is None and integritykey is None:
if _masterkey is None:
init_masterkey(autogen=False)
key = _masterkey
integritykey = _masterintegritykey
if len(cryptvalue) == 3:
check_hmac = HMAC.new(integritykey, cipherdata, SHA256).digest()
if hmac != check_hmac:
check_hmac = HMAC.new(integritykey, cipherdata + iv, SHA256).digest()
if hmac != check_hmac:
raise Exception("bad HMAC value on crypted value")
decrypter = AES.new(key, AES.MODE_CBC, iv)
value = decrypter.decrypt(cipherdata)
padsize = bytearray(value)[-1]
pad = value[-padsize:]
# Note that I cannot grasp what could be done with a subliminal
# channel in padding in this case, but check the padding anyway
for padbyte in bytearray(pad):
if padbyte != padsize:
raise Exception("bad padding in encrypted value")
return value[0:-padsize]
else:
decrypter = AES.new(key, AES.MODE_GCM, nonce=iv)
value = decrypter.decrypt(cipherdata)
decrypter.verify(hmac)
return value
def fixup_attribute(attrname, attrval):
# Normalize some data, for example strings and numbers to bool
attrname = _get_valid_attrname(attrname)
if attrname not in allattributes.node: # no fixup possible
return attrval
if 'type' in allattributes.node[attrname] and not isinstance(attrval, allattributes.node[attrname]['type']):
if (allattributes.node[attrname]['type'] == bool and
(isinstance(attrval, str) or isinstance(attrval, unicode))):
return attrval.lower() in ('true', '1', 'y', 'yes', 'enable', 'enabled')
return attrval
def attribute_is_invalid(attrname, attrval):
if attrname.startswith('custom.'):
# No type checking or name checking is provided for custom,
# it's not possible
return False
attrname = _get_valid_attrname(attrname)
if attrname not in allattributes.node:
# Otherwise, it must be in the allattributes key list
return True
if 'type' in allattributes.node[attrname]:
if not isinstance(attrval, allattributes.node[attrname]['type']):
# it is valid if it is {'value': actualvalue}
if (isinstance(attrval, dict) and 'value' in attrval and
isinstance(attrval['value'],
allattributes.node[attrname]['type'])):
return False
# provide type checking for attributes with a specific type
return True
return False
def _get_valid_attrname(attrname):
if attrname.startswith('net.'):
        # For net.* attributes, split on the dots and put back together
# longer term we might want a generic approach, but
# right now it's just net. attributes
netattrparts = attrname.split('.')
attrname = netattrparts[0] + '.' + netattrparts[-1]
return attrname
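# A short illustration (not part of the original module) of the collapsing
# performed above: only the first and last components of a net.* attribute
# name are kept, so e.g.
#
#   _get_valid_attrname('net.pxe.hwaddr')  ->  'net.hwaddr'
#   _get_valid_attrname('console.method')  ->  'console.method'  (unchanged)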
def grub_hashcrypt_value(value):
salt = os.urandom(64)
algo = 'sha512'
rounds = 10000
if not isinstance(value, bytes):
value = value.encode('utf8')
crypted = hexlify(hashlib.pbkdf2_hmac(algo, value, salt, rounds))
crypted = crypted.upper()
salt = hexlify(salt).upper()
if not isinstance(salt, str):
salt = salt.decode('utf8')
if not isinstance(crypted, str):
crypted = crypted.decode('utf8')
ret = 'grub.pbkdf2.{0}.{1}.{2}.{3}'.format(algo, rounds, salt, crypted)
return ret
def hashcrypt_value(value):
salt = confluent.util.stringify(base64.b64encode(os.urandom(12),
altchars=b'./'))
salt = '$6${0}'.format(salt)
return crypt.crypt(value, salt)
def crypt_value(value,
key=None,
integritykey=None):
# encrypt given value
    # PKCS7 is the padding scheme to employ; if no padding is needed, pad with 16
# check HMAC prior to attempting decrypt
hmac = None
if key is None:
if _masterkey is None:
init_masterkey()
key = _masterkey
iv = os.urandom(12)
crypter = AES.new(key, AES.MODE_GCM, nonce=iv)
if not isinstance(value, bytes):
value = value.encode('utf-8')
cryptval, hmac = crypter.encrypt_and_digest(value)
return iv, cryptval, hmac, b'\x02'
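# A minimal round-trip sketch (not part of the original module), assuming an
# explicitly supplied 256-bit AES key rather than the stored master key:
#
#   k = os.urandom(32)
#   sealed = crypt_value(b'secret', key=k)    # (iv, ciphertext, tag, b'\x02')
#   assert decrypt_value(sealed, key=k) == b'secret'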
def _load_dict_from_dbm(dpath, tdb):
try:
dbe = dbm.open(tdb, 'r')
currdict = _cfgstore
for elem in dpath:
elem = confluent.util.stringify(elem)
if elem not in currdict:
currdict[elem] = {}
currdict = currdict[elem]
try:
for tk in dbe.keys():
tks = confluent.util.stringify(tk)
currdict[tks] = cPickle.loads(dbe[tk])
except AttributeError:
tk = dbe.firstkey()
while tk != None:
tks = confluent.util.stringify(tk)
currdict[tks] = cPickle.loads(dbe[tk])
tk = dbe.nextkey(tk)
except dbm.error:
return
def is_tenant(tenant):
try:
return tenant in _cfgstore['tenant']
except KeyError:
return False
def get_global(globalname):
"""Get a global variable
:param globalname: The global parameter name to read
"""
if _cfgstore is None:
init()
try:
return _cfgstore['globals'][globalname]
except KeyError:
return None
def set_global(globalname, value, sync=True):
"""Set a global variable.
    Globals should rarely be used. Almost everything should be under a
tenant scope. Some things like master key and socket numbers/paths can be
reasonably considered global in nature.
:param globalname: The global parameter name to store
:param value: The value to set the global parameter to.
"""
if _cfgstore is None:
init(not sync)
globalname = confluent.util.stringify(globalname)
with _dirtylock:
if 'dirtyglobals' not in _cfgstore:
_cfgstore['dirtyglobals'] = set()
_cfgstore['dirtyglobals'].add(globalname)
if 'globals' not in _cfgstore:
_cfgstore['globals'] = {globalname: value}
else:
_cfgstore['globals'][globalname] = value
if sync:
ConfigManager._bg_sync_to_file()
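# A short usage sketch (the parameter name is hypothetical and not part of the
# module); set_global() marks the value dirty and schedules a background sync:
#
#   set_global('api_listen_port', 4005)
#   assert get_global('api_listen_port') == 4005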
cfgstreams = {}
def relay_slaved_requests(name, listener):
global cfgleader
global _hasquorum
pushes = eventlet.GreenPool()
if name not in _followerlocks:
_followerlocks[name] = gthread.RLock()
with _followerlocks[name]:
try:
stop_following()
if name in cfgstreams:
try:
cfgstreams[name].close()
except Exception:
pass
del cfgstreams[name]
if membership_callback:
membership_callback()
cfgstreams[name] = listener
lh = StreamHandler(listener)
_hasquorum = len(cfgstreams) >= (
len(_cfgstore['collective']) // 2)
_newquorum = None
while _hasquorum != _newquorum:
if _newquorum is not None:
_hasquorum = _newquorum
payload = msgpack.packb({'quorum': _hasquorum}, use_bin_type=False)
for _ in pushes.starmap(
_push_rpc,
[(cfgstreams[s], payload) for s in cfgstreams]):
pass
_newquorum = len(cfgstreams) >= (
len(_cfgstore['collective']) // 2)
_hasquorum = _newquorum
if _hasquorum and _pending_collective_updates:
apply_pending_collective_updates()
msg = lh.get_next_msg()
while msg:
if name not in cfgstreams:
raise Exception("Unexpected loss of node in followers: " + name)
sz = struct.unpack('!Q', msg)[0]
if sz != 0:
rpc = b''
while len(rpc) < sz:
nrpc = listener.recv(sz - len(rpc))
if not nrpc:
raise Exception('Truncated client error')
rpc += nrpc
rpc = msgpack.unpackb(rpc, raw=False)
exc = None
if not (rpc['function'].startswith('_rpc_') or rpc['function'].endswith('_collective_member')):
raise Exception('Unsupported function {0} called'.format(rpc['function']))
try:
globals()[rpc['function']](*rpc['args'])
except ValueError as ve:
exc = ['ValueError', str(ve)]
except Exception as e:
logException()
exc = ['Exception', str(e)]
if 'xid' in rpc:
res = _push_rpc(listener, msgpack.packb({'xid': rpc['xid'],
'exc': exc}, use_bin_type=False))
if not res:
break
try:
msg = lh.get_next_msg()
except Exception:
msg = None
finally:
try:
listener.close()
except Exception:
pass
try:
del cfgstreams[name]
except KeyError:
pass # May have already been closed/deleted...
if cfgstreams:
_hasquorum = len(cfgstreams) >= (
len(_cfgstore['collective']) // 2)
payload = msgpack.packb({'quorum': _hasquorum}, use_bin_type=False)
for _ in pushes.starmap(
_push_rpc,
[(cfgstreams[s], payload) for s in cfgstreams]):
pass
if membership_callback:
membership_callback()
if not cfgstreams and not cfgleader: # last one out, set cfgleader to boolean to mark dead collective
stop_following(True)
return False
return True
class StreamHandler(object):
def __init__(self, sock):
self.sock = sock
self.keepalive = confluent.util.monotonic_time() + 20
self.expiry = self.keepalive + 40
def get_next_msg(self):
r = (False,)
try:
while not r[0]:
r = select.select(
(self.sock,), (), (),
self.keepalive - confluent.util.monotonic_time())
if confluent.util.monotonic_time() > self.expiry:
return None
if confluent.util.monotonic_time() > self.keepalive:
res = _push_rpc(self.sock, b'') # nulls are a keepalive
if not res:
return None
self.keepalive = confluent.util.monotonic_time() + 20
self.expiry = confluent.util.monotonic_time() + 60
msg = self.sock.recv(8)
except Exception:
msg = None
return msg
def close(self):
self.sock = None
def stop_following(replacement=None):
with _leaderlock:
global cfgleader
if cfgleader and not isinstance(cfgleader, bool):
try:
cfgleader.close()
except Exception:
pass
cfgleader = replacement
def stop_leading(newleader=None):
rpcpayload = None
if newleader is not None:
rpcpayload = msgpack.packb({'newleader': newleader}, use_bin_type=False)
for stream in list(cfgstreams):
try:
if rpcpayload is not None:
_push_rpc(cfgstreams[stream], rpcpayload)
cfgstreams[stream].close()
except Exception:
pass
try:
del cfgstreams[stream]
except KeyError:
pass # may have already been deleted..
if membership_callback:
membership_callback()
_oldcfgstore = None
_oldtxcount = 0
def rollback_clear():
global _cfgstore
global _txcount
global _oldcfgstore
global _oldtxcount
_txcount = _oldtxcount
_cfgstore = _oldcfgstore
_oldtxcount = 0
_oldcfgstore = None
ConfigManager.wait_for_sync(True)
def clear_configuration():
global _cfgstore
global _txcount
global _oldcfgstore
global _oldtxcount
stop_leading()
stop_following()
_oldcfgstore = _cfgstore
_oldtxcount = _txcount
_cfgstore = {}
_txcount = 0
def commit_clear():
global _oldtxcount
global _oldcfgstore
# first, copy over old non-key globals, as those are
# currently defined as local to each collective member
# currently just 'autosense' which is intended to be active
# per collective member
for globvar in _oldcfgstore.get('globals', ()):
if globvar.endswith('_key') or globvar == 'confluent_uuid':
continue
_cfgstore['globals'][globvar] = _oldcfgstore['globals'][globvar]
_oldcfgstore = None
_oldtxcount = 0
with _synclock:
todelete = ('transactioncount', 'globals', 'collective') + _config_areas
for cfg in todelete:
try:
os.remove(os.path.join(ConfigManager._cfgdir, cfg))
except OSError as oe:
pass
ConfigManager.wait_for_sync(True)
ConfigManager._bg_sync_to_file()
cfgleader = None
def follow_channel(channel):
global _txcount
global _hasquorum
try:
stop_leading()
stop_following(channel)
lh = StreamHandler(channel)
msg = lh.get_next_msg()
while msg:
sz = struct.unpack('!Q', msg)[0]
if sz != 0:
rpc = b''
while len(rpc) < sz:
nrpc = channel.recv(sz - len(rpc))
if not nrpc:
raise Exception('Truncated message error')
rpc += nrpc
rpc = msgpack.unpackb(rpc, raw=False)
if 'txcount' in rpc:
_txcount = rpc['txcount']
if 'newleader' in rpc:
return rpc
if 'function' in rpc:
if not (rpc['function'].startswith('_true') or rpc['function'].startswith('_rpc')):
raise Exception("Received unsupported function call: {0}".format(rpc['function']))
try:
globals()[rpc['function']](*rpc['args'])
except Exception as e:
print(repr(e))
if 'xid' in rpc and rpc['xid']:
if rpc.get('exc', None):
exctype, excstr = rpc['exc']
if exctype == 'ValueError':
exc = ValueError(excstr)
else:
exc = Exception(excstr)
_pendingchangesets[rpc['xid']].send_exception(exc)
else:
_pendingchangesets[rpc['xid']].send()
if 'quorum' in rpc:
_hasquorum = rpc['quorum']
res = _push_rpc(channel, b'') # use null as ACK
if not res:
break
msg = lh.get_next_msg()
finally:
# mark the connection as broken
if cfgstreams:
stop_following(None)
else:
stop_following(True)
return {}
def add_collective_member(name, address, fingerprint):
if cfgleader:
return exec_on_leader('add_collective_member', name, address, fingerprint)
if cfgstreams:
exec_on_followers('_true_add_collective_member', name, address, fingerprint)
_true_add_collective_member(name, address, fingerprint)
def del_collective_member(name):
if cfgleader and not isinstance(cfgleader, bool):
return exec_on_leader('del_collective_member', name)
if cfgstreams:
exec_on_followers_unconditional('_true_del_collective_member', name)
_true_del_collective_member(name)
def _true_del_collective_member(name, sync=True):
global cfgleader
name = confluent.util.stringify(name)
if _cfgstore is None:
return
if 'collective' not in _cfgstore:
return
if name not in _cfgstore['collective']:
return
del _cfgstore['collective'][name]
with _dirtylock:
if 'collectivedirty' not in _cfgstore:
_cfgstore['collectivedirty'] = set([])
_cfgstore['collectivedirty'].add(name)
if len(_cfgstore['collective']) < 2:
del _cfgstore['collective']
cfgleader = None
if sync:
ConfigManager._bg_sync_to_file()
_pending_collective_updates = {}
def update_collective_address(name, address):
fprint = _cfgstore['collective'][name]['fingerprint']
oldaddress = _cfgstore['collective'][name]['address']
if oldaddress == address:
return
try:
check_quorum()
add_collective_member(name, address, fprint)
except exc.DegradedCollective:
_pending_collective_updates[name] = address
def apply_pending_collective_updates():
for name in list(_pending_collective_updates):
fprint = _cfgstore['collective'][name]['fingerprint']
address = _pending_collective_updates[name]
add_collective_member(name, address, fprint)
del _pending_collective_updates[name]
def _true_add_collective_member(name, address, fingerprint, sync=True):
name = confluent.util.stringify(name)
if _cfgstore is None:
init(not sync) # use not sync to avoid read from disk
if 'collective' not in _cfgstore:
_cfgstore['collective'] = {}
_cfgstore['collective'][name] = {'name': name, 'address': address,
'fingerprint': fingerprint}
with _dirtylock:
if 'collectivedirty' not in _cfgstore:
_cfgstore['collectivedirty'] = set([])
_cfgstore['collectivedirty'].add(name)
if sync:
ConfigManager._bg_sync_to_file()
def list_collective():
if _cfgstore is None:
init()
return iter(_cfgstore.get('collective', ()))
def get_collective_member(name):
if _cfgstore is None:
init()
return _cfgstore.get('collective', {}).get(name, None)
def get_collective_member_by_address(address):
if _cfgstore is None:
init()
for name in _cfgstore.get('collective', {}):
currdrone = _cfgstore['collective'][name]
if netutil.addresses_match(address, currdrone['address']):
return currdrone
def _mark_dirtykey(category, key, tenant=None):
key = confluent.util.stringify(key)
with _dirtylock:
if 'dirtykeys' not in _cfgstore:
_cfgstore['dirtykeys'] = {}
if tenant not in _cfgstore['dirtykeys']:
_cfgstore['dirtykeys'][tenant] = {}
if category not in _cfgstore['dirtykeys'][tenant]:
_cfgstore['dirtykeys'][tenant][category] = set()
_cfgstore['dirtykeys'][tenant][category].add(key)
def _generate_new_id():
# generate a random id outside the usual ranges used for normal users in
# /etc/passwd. Leave an equivalent amount of space near the end disused,
# just in case
uid = str(confluent.util.securerandomnumber(65537, 4294901759))
if 'idmap' not in _cfgstore['main']:
return uid
while uid in _cfgstore['main']['idmap']:
uid = str(confluent.util.securerandomnumber(65537, 4294901759))
return uid
class _ExpressionFormat(string.Formatter):
# This class is used to extract the literal value from an expression
# in the db
# This is made easier by subclassing one of the 'fprintf' mechanisms
# baked into python
posmatch = re.compile('^n([0-9]*)$')
nummatch = re.compile('[0-9]+')
_supported_ops = {
ast.Mult: operator.mul,
ast.Div: operator.floordiv,
ast.Mod: operator.mod,
ast.Add: operator.add,
ast.Sub: operator.sub,
ast.LShift: operator.lshift,
ast.RShift: operator.rshift,
ast.BitAnd: operator.and_,
ast.BitXor: operator.xor,
ast.BitOr: operator.or_,
}
def __init__(self, nodeobj, nodename):
self._nodeobj = nodeobj
self._nodename = nodename
self._numbers = None
def get_field(self, field_name, args, kwargs):
parsed = ast.parse(field_name)
return self._handle_ast_node(parsed.body[0].value), field_name
def _handle_ast_node(self, node):
if isinstance(node, ast.Num):
return node.n
elif isinstance(node, ast.Attribute):
#ok, we have something with a dot
left = node
key = ''
while isinstance(left, ast.Attribute):
# Loop through, to handle multi dot expressions
# such as 'net.pxe.hwaddr'
key = '.' + left.attr + key
left = left.value
key = left.id + key
if (not key.startswith('custom.') and
_get_valid_attrname(key) not in allattributes.node):
raise ValueError(
'{0} is not a valid attribute name'.format(key))
val = self._expand_attribute(key)
return val['value'] if val and 'value' in val else ""
elif isinstance(node, ast.Name):
var = node.id
if var in ('node', 'nodename'):
return self._nodename
if var in _attraliases:
val = self._expand_attribute(_attraliases[var])
return val['value'] if 'value' in val else ""
mg = re.match(self.posmatch, var)
if mg:
idx = int(mg.group(1))
if self._numbers is None:
self._numbers = re.findall(self.nummatch, self._nodename)
return int(self._numbers[idx - 1])
else:
if var in self._nodeobj:
val = self._expand_attribute(var)
return val['value'] if val and 'value' in val else ""
elif (not var.startswith('custom.') and
_get_valid_attrname(var) not in allattributes.node):
raise ValueError(
'{0} is not a valid attribute name'.format(var))
elif isinstance(node, ast.BinOp):
optype = type(node.op)
if optype not in self._supported_ops:
raise Exception("Unsupported operation")
op = self._supported_ops[optype]
return op(int(self._handle_ast_node(node.left)),
int(self._handle_ast_node(node.right)))
def _expand_attribute(self, key):
if '_expressionkeys' not in self._nodeobj:
self._nodeobj['_expressionkeys'] = set([key])
else:
self._nodeobj['_expressionkeys'].add(key)
val = _decode_attribute(key, self._nodeobj,
formatter=self)
return val
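# A small illustration (not part of the original module) of how the formatter
# above evaluates expressions, assuming a node named 'node13' and an empty
# attribute dictionary:
#
#   fmt = _ExpressionFormat({}, 'node13')
#   fmt.format('{n1}')        # -> '13'   (first number found in the node name)
#   fmt.format('{n1+100}')    # -> '113'  (BinOp arithmetic on that number)
#   fmt.format('{nodename}')  # -> 'node13'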
def _decode_attribute(attribute, nodeobj, formatter=None, decrypt=False):
if attribute not in nodeobj:
return None
# if we have an expression and a formatter, that overrides 'value'
# which may be out of date
# get methods will skip the formatter allowing value to come on through
# set methods induce recalculation as appropriate to get a cached value
if 'expression' in nodeobj[attribute] and formatter is not None:
retdict = copy.deepcopy(nodeobj[attribute])
if 'value' in retdict:
del retdict['value']
try:
retdict['value'] = formatter.format(retdict['expression'])
except Exception as e:
retdict['broken'] = str(e)
return retdict
elif 'value' in nodeobj[attribute]:
return nodeobj[attribute]
elif 'cryptvalue' in nodeobj[attribute] and decrypt:
retdict = copy.deepcopy(nodeobj[attribute])
retdict['value'] = decrypt_value(nodeobj[attribute]['cryptvalue'])
return retdict
return nodeobj[attribute]
# my thinking at this point is that noderange and configdata objects
# will be constructed and passed as part of a context object to plugins
# reasoning being that the main program will handle establishing the
# tenant context and then modules need not consider the current tenant
# most of the time as things are automatic
def _addchange(changeset, node, attrname):
if node not in changeset:
changeset[node] = {attrname: 1}
else:
changeset[node][attrname] = 1
def hook_new_configmanagers(callback):
"""Register callback for new tenants
From the point when this function is called until the end,
callback may be invoked to indicate a new tenant and
callback is notified to perform whatever tasks appropriate for
a new tenant
:param callback: Function to call for each possible config manager
:returns: identifier that can be used to cancel this registration
"""
#TODO(jbjohnso): actually live up to the promise of ongoing callbacks
callback(ConfigManager(None))
try:
for tenant in _cfgstore['tenant']:
callback(ConfigManager(tenant))
except KeyError:
pass
class ConfigManager(object):
if os.name == 'nt':
_cfgdir = os.path.join(
os.getenv('SystemDrive'), '\\ProgramData', 'confluent', 'cfg')
else:
_cfgdir = "/etc/confluent/cfg"
_cfgwriter = None
_writepending = False
_syncrunning = False
_syncstate = threading.RLock()
_attribwatchers = {}
_nodecollwatchers = {}
_notifierids = {}
@property
def _cfgstore(self):
if self.tenant is None:
return _cfgstore['main']
return _cfgstore['tenant'][self.tenant]
def __init__(self, tenant, decrypt=False, username=None):
self.clientfiles = {}
global _cfgstore
with _initlock:
if _cfgstore is None:
init()
self.decrypt = decrypt
self.current_user = username
if tenant is None:
self.tenant = None
if 'main' not in _cfgstore:
_cfgstore['main'] = {}
self._bg_sync_to_file()
            if 'nodegroups' not in self._cfgstore:  # This can happen during a clear... it seems... and if so it messes up...
self._cfgstore['nodegroups'] = {'everything': {'nodes': set()}}
_mark_dirtykey('nodegroups', 'everything', self.tenant)
self._bg_sync_to_file()
if 'nodes' not in self._cfgstore:
self._cfgstore['nodes'] = {}
self._bg_sync_to_file()
return
elif 'tenant' not in _cfgstore:
_cfgstore['tenant'] = {tenant: {}}
self._bg_sync_to_file()
elif tenant not in _cfgstore['tenant']:
_cfgstore['tenant'][tenant] = {}
self._bg_sync_to_file()
self.tenant = tenant
if 'nodegroups' not in self._cfgstore:
self._cfgstore['nodegroups'] = {'everything': {}}
_mark_dirtykey('nodegroups', 'everything', self.tenant)
if 'nodes' not in self._cfgstore:
self._cfgstore['nodes'] = {}
self._bg_sync_to_file()
self.wait_for_sync()
def add_client_file(self, clientfile):
self.clientfiles[clientfile.filename] = clientfile.fileobject
def close_client_files(self):
for f in self.clientfiles:
self.clientfiles[f].close()
def get_collective_member(self, name):
return get_collective_member(name)
@classmethod
def check_quorum(cls):
return check_quorum()
def filter_node_attributes(self, expression, nodes=None):
"""Filtered nodelist according to expression
expression may be:
attribute.name=value
attribute.name==value
attribute.name=~value
attribute.name!=value
attribute.name!~value
== and != do strict equality. The ~ operators do a regular expression.
! negates the sense of the match
:param expression: The expression containing the criteria to match
:param nodes: Optional iterable set of nodes to limit the check
"""
exmatch = None
yieldmatches = True
if nodes is None:
nodes = self._cfgstore['nodes']
if '==' in expression:
attribute, match = expression.split('==')
elif '!=' in expression:
attribute, match = expression.split('!=')
yieldmatches = False
elif '=~' in expression:
attribute, match = expression.split('=~')
exmatch = re.compile(match)
elif '!~' in expression:
attribute, match = expression.split('!~')
exmatch = re.compile(match)
yieldmatches = False
elif '=' in expression:
attribute, match = expression.split('=')
else:
raise Exception('Invalid Expression')
if attribute.startswith('secret.'):
raise Exception('Filter by secret attributes is not supported')
for node in nodes:
try:
currvals = [self._cfgstore['nodes'][node][attribute]['value']]
except KeyError:
# Let's treat 'not set' as being an empty string for this path
currvals = list(
[self._cfgstore['nodes'][node][x].get('value', '')
for x in fnmatch.filter(self._cfgstore['nodes'][node], attribute)])
currvals.append('')
for currval in currvals:
if exmatch:
if yieldmatches:
if exmatch.search(currval):
yield node
break
else:
if not exmatch.search(currval):
yield node
break
else:
if yieldmatches:
if match == currval:
yield node
break
else:
if match != currval:
yield node
break
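    # A usage sketch (the attribute values are hypothetical, not part of the
    # original module) of the expression grammar documented above:
    #
    #   cfm = ConfigManager(None)
    #   list(cfm.filter_node_attributes('net.switch=switch1'))        # exact match
    #   list(cfm.filter_node_attributes('net.switch=~switch[0-9]+'))  # regex match
    #   list(cfm.filter_node_attributes('hardwaremanagement.method!=ipmi'))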
def filter_nodenames(self, expression, nodes=None):
"""Filter nodenames by regular expression
:param expression: Regular expression for matching nodenames
:param nodes: Optional iterable of candidates
"""
if nodes is None:
nodes = self._cfgstore['nodes']
expression = re.compile(expression)
for node in nodes:
if expression.search(node):
yield node
def watch_attributes(self, nodes, attributes, callback):
"""
Watch a list of attributes for changes on a list of nodes. The
attributes may be literal, or a filename style wildcard like
'net*.switch'
:param nodes: An iterable of node names to be watching
:param attributes: An iterable of attribute names to be notified about
:param callback: A callback to process a notification
Returns an identifier that can be used to unsubscribe from these
notifications using remove_watcher
"""
notifierid = random.randint(0, sys.maxsize)
while notifierid in self._notifierids:
notifierid = random.randint(0, sys.maxsize)
self._notifierids[notifierid] = {'attriblist': []}
if self.tenant not in self._attribwatchers:
self._attribwatchers[self.tenant] = {}
attribwatchers = self._attribwatchers[self.tenant]
for node in nodes:
if node not in attribwatchers:
attribwatchers[node] = {}
for attribute in attributes:
self._notifierids[notifierid]['attriblist'].append(
(node, attribute))
if attribute not in attribwatchers[node]:
attribwatchers[node][attribute] = {
notifierid: callback
}
else:
attribwatchers[node][attribute][notifierid] = callback
if '*' in attribute:
currglobs = attribwatchers[node].get('_attrglobs', set([]))
currglobs.add(attribute)
attribwatchers[node]['_attrglobs'] = currglobs
return notifierid
def watch_nodecollection(self, callback):
"""
Watch the nodecollection for addition or removal of nodes.
        A watcher is notified after a node has been added and before a node
        is actually removed.
:param callback: Function to call when a node is added or removed
Returns an identifier that can be used to unsubscribe from these
notifications using remove_watcher
"""
# first provide an identifier for the calling code to
# use in case of cancellation.
# I anticipate no more than a handful of watchers of this sort, so
# this loop should not have to iterate too many times
notifierid = random.randint(0, sys.maxsize)
while notifierid in self._notifierids:
notifierid = random.randint(0, sys.maxsize)
# going to track that this is a nodecollection type watcher,
# but there is no additional data associated.
self._notifierids[notifierid] = set(['nodecollection'])
if self.tenant not in self._nodecollwatchers:
self._nodecollwatchers[self.tenant] = {}
self._nodecollwatchers[self.tenant][notifierid] = callback
return notifierid
def remove_watcher(self, watcher):
# identifier of int would be a collection watcher
if watcher not in self._notifierids:
raise Exception("Invalid")
# return
if 'attriblist' in self._notifierids[watcher]:
attribwatchers = self._attribwatchers[self.tenant]
for nodeattrib in self._notifierids[watcher]['attriblist']:
node, attrib = nodeattrib
del attribwatchers[node][attrib][watcher]
elif 'nodecollection' in self._notifierids[watcher]:
del self._nodecollwatchers[self.tenant][watcher]
else:
raise Exception("Completely not a valid place to be")
del self._notifierids[watcher]
def list_users(self):
try:
return list(self._cfgstore['users'])
except KeyError:
return []
def list_usergroups(self):
try:
return list(self._cfgstore['usergroups'])
except KeyError:
return []
def get_user(self, name):
"""Get user information from DB
:param name: Name of the user
Returns a dictionary describing parameters of a user. These parameters
may include numeric id (id), certificate thumbprint (certthumb),
        password hash (passhash, which currently is going to be PBKDF2 derived),
        full name (displayname), and so on.
"""
try:
return copy.deepcopy(self._cfgstore['users'][name])
except KeyError:
return None
def get_usergroup(self, groupname):
"""Get user group information from DB
:param groupname: Name of the group
Returns a dictionary describing parameters of a user group.
This may include the role for users in the group to receive
if no more specific information is found.
"""
try:
return copy.deepcopy(self._cfgstore['usergroups'][groupname])
except KeyError:
return None
def set_usergroup(self, groupname, attributemap):
"""Set usergroup attribute(s)
        :param groupname: the name of the group to modify
:param attributemap: The mapping of keys to values to set
"""
if cfgleader:
return exec_on_leader('_rpc_master_set_usergroup', self.tenant,
groupname, attributemap)
if cfgstreams:
exec_on_followers('_rpc_set_usergroup', self.tenant, groupname,
attributemap)
self._true_set_usergroup(groupname, attributemap)
def _true_set_usergroup(self, groupname, attributemap):
for attribute in attributemap:
if attribute == 'role':
therole = None
for candrole in _validroles:
if candrole.lower().startswith(attributemap[attribute].lower()):
therole = candrole
if therole not in _validroles:
raise ValueError(
'Unrecognized role "{0}" (valid roles: {1})'.format(attributemap[attribute], ','.join(_validroles)))
attributemap[attribute] = therole
self._cfgstore['usergroups'][groupname][attribute] = attributemap[attribute]
_mark_dirtykey('usergroups', groupname, self.tenant)
self._bg_sync_to_file()
def create_usergroup(self, groupname, role="Administrator"):
"""Create a new user
:param groupname: The name of the user group
:param role: The role the user should be considered. Can be
"Administrator" or "Technician", defaults to
"Administrator"
"""
if cfgleader:
return exec_on_leader('_rpc_master_create_usergroup', self.tenant,
groupname, role)
if cfgstreams:
exec_on_followers('_rpc_create_usergroup', self.tenant, groupname,
role)
self._true_create_usergroup(groupname, role)
def _true_create_usergroup(self, groupname, role="Administrator"):
if 'usergroups' not in self._cfgstore:
self._cfgstore['usergroups'] = {}
groupname = confluent.util.stringify(groupname)
if groupname in self._cfgstore['usergroups']:
raise Exception("Duplicate groupname requested")
for candrole in _validroles:
if candrole.lower().startswith(role.lower()):
role = candrole
break
if role not in _validroles:
raise ValueError(
'Unrecognized role "{0}" (valid roles: {1})'.format(
role, ','.join(_validroles)))
self._cfgstore['usergroups'][groupname] = {'role': role}
_mark_dirtykey('usergroups', groupname, self.tenant)
self._bg_sync_to_file()
def del_usergroup(self, name):
if cfgleader:
return exec_on_leader('_rpc_master_del_usergroup', self.tenant, name)
if cfgstreams:
exec_on_followers('_rpc_del_usergroup', self.tenant, name)
self._true_del_usergroup(name)
def _true_del_usergroup(self, name):
if name in self._cfgstore['usergroups']:
del self._cfgstore['usergroups'][name]
_mark_dirtykey('usergroups', name, self.tenant)
self._bg_sync_to_file()
def set_user(self, name, attributemap):
"""Set user attribute(s)
:param name: The login name of the user
:param attributemap: A dict of key values to set
"""
if cfgleader:
return exec_on_leader('_rpc_master_set_user', self.tenant, name,
attributemap)
if cfgstreams:
exec_on_followers('_rpc_set_user', self.tenant, name, attributemap)
self._true_set_user(name, attributemap)
def _true_set_user(self, name, attributemap):
user = self._cfgstore['users'][name]
for attribute in attributemap:
if attribute == 'role':
therole = None
for candrole in _validroles:
if candrole.lower().startswith(attributemap[attribute].lower()):
therole = candrole
if therole not in _validroles:
raise ValueError(
'Unrecognized role "{0}" (valid roles: {1})'.format(attributemap[attribute], ','.join(_validroles)))
attributemap[attribute] = therole
if attribute == 'password':
salt = os.urandom(8)
#TODO: WORKERPOOL, offload password set to a worker
crypted = KDF.PBKDF2(
attributemap[attribute], salt, 32, 10000,
lambda p, s: HMAC.new(p, s, SHA256).digest()
)
user['cryptpass'] = (salt, crypted)
else:
user[attribute] = attributemap[attribute]
_mark_dirtykey('users', name, self.tenant)
self._bg_sync_to_file()
def del_user(self, name):
if cfgleader:
return exec_on_leader('_rpc_master_del_user', self.tenant, name)
if cfgstreams:
exec_on_followers('_rpc_del_user', self.tenant, name)
self._true_del_user(name)
def _true_del_user(self, name):
if name in self._cfgstore['users']:
del self._cfgstore['users'][name]
_mark_dirtykey('users', name, self.tenant)
self._bg_sync_to_file()
def create_user(self, name,
role="Administrator", uid=None, displayname=None,
attributemap=None):
"""Create a new user
:param name: The login name of the user
:param role: The role the user should be considered. Can be
"Administrator" or "Technician", defaults to
"Administrator"
:param uid: Custom identifier number if desired. Defaults to random.
:param displayname: Optional long format name for UI consumption
"""
if cfgleader:
return exec_on_leader('_rpc_master_create_user', self.tenant,
name, role, uid, displayname, attributemap)
if cfgstreams:
exec_on_followers('_rpc_create_user', self.tenant, name,
role, uid, displayname, attributemap)
self._true_create_user(name, role, uid, displayname, attributemap)
def _true_create_user(self, name, role="Administrator", uid=None,
displayname=None, attributemap=None):
if 'idmap' not in _cfgstore['main']:
_cfgstore['main']['idmap'] = {}
if uid is None:
uid = _generate_new_id()
else:
if uid in _cfgstore['main']['idmap']:
raise Exception("Duplicate id requested")
if 'users' not in self._cfgstore:
self._cfgstore['users'] = {}
name = confluent.util.stringify(name)
if name in self._cfgstore['users']:
raise Exception("Duplicate username requested")
for candrole in _validroles:
if candrole.lower().startswith(role.lower()):
role = candrole
break
if role not in _validroles:
raise ValueError(
'Unrecognized role "{0}" (valid roles: {1})'.format(
role, ','.join(_validroles)))
self._cfgstore['users'][name] = {'id': uid, 'role': role}
if displayname is not None:
self._cfgstore['users'][name]['displayname'] = displayname
_cfgstore['main']['idmap'][uid] = {
'tenant': self.tenant,
'username': name,
'role': role,
}
if attributemap:
self._true_set_user(name, attributemap)
_mark_dirtykey('users', name, self.tenant)
_mark_dirtykey('idmap', uid)
self._bg_sync_to_file()
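    # Illustrative usage sketch (not part of the original source); the user
    # name and password are hypothetical:
    #     cfgmgr.create_user('operator1', role='Technician',
    #                        attributemap={'password': 'examplesecret'})
    #     cfgmgr.set_user('operator1', {'displayname': 'Operator One'})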
def is_node(self, node):
return node in self._cfgstore['nodes']
def is_nodegroup(self, nodegroup):
return nodegroup in self._cfgstore['nodegroups']
def get_groups(self, sizesort=False):
if sizesort:
return reversed(
sorted(self._cfgstore['nodegroups'], key=lambda x: len(
self._cfgstore['nodegroups'][x]['nodes'])))
return iter(self._cfgstore['nodegroups'])
def list_nodes(self):
try:
return iter(self._cfgstore['nodes'])
except KeyError:
return []
def get_nodegroup_attributes(self, nodegroup, attributes=()):
cfgnodeobj = self._cfgstore['nodegroups'][nodegroup]
if not attributes:
attributes = cfgnodeobj
nodeobj = {}
for attribute in attributes:
if attribute.startswith('_'):
continue
if attribute not in cfgnodeobj:
continue
nodeobj[attribute] = _decode_attribute(attribute, cfgnodeobj,
decrypt=self.decrypt)
return nodeobj
def expand_attrib_expression(self, nodelist, expression):
if type(nodelist) in (unicode, str):
nodelist = (nodelist,)
for node in nodelist:
cfgobj = self._cfgstore['nodes'][node]
fmt = _ExpressionFormat(cfgobj, node)
yield (node, fmt.format(expression))
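    # Illustrative usage sketch (not part of the original source); the
    # '{nodename}' substitution is an assumption about what _ExpressionFormat
    # accepts:
    #     for node, value in cfgmgr.expand_attrib_expression(
    #             ['n1', 'n2'], '/var/log/confluent/{nodename}'):
    #         print(node, value)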
def get_node_attributes(self, nodelist, attributes=(), decrypt=None):
if decrypt is None:
decrypt = self.decrypt
retdict = {}
if isinstance(nodelist, str) or isinstance(nodelist, unicode):
nodelist = [nodelist]
if isinstance(attributes, str) or isinstance(attributes, unicode):
attributes = [attributes]
relattribs = attributes
for node in nodelist:
if node not in self._cfgstore['nodes']:
continue
cfgnodeobj = self._cfgstore['nodes'][node]
nodeobj = {}
if len(attributes) == 0:
relattribs = cfgnodeobj
for attribute in relattribs:
if attribute.startswith('_'):
# skip private things
continue
if '*' in attribute:
for attr in fnmatch.filter(list(cfgnodeobj), attribute):
nodeobj[attr] = _decode_attribute(attr, cfgnodeobj,
decrypt=decrypt)
if attribute not in cfgnodeobj:
continue
# since the formatter is not passed in, the calculator is
# skipped. The decryption, however, we want to do only on
# demand
nodeobj[attribute] = _decode_attribute(attribute, cfgnodeobj,
decrypt=decrypt)
retdict[node] = nodeobj
return retdict
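    # Illustrative usage sketch (not part of the original source); attribute
    # names may be literal or wildcards as handled above:
    #     attribs = cfgmgr.get_node_attributes(['n1'], ['net*.switch'])
    #     for node in attribs:
    #         for attr in attribs[node]:
    #             print(node, attr, attribs[node][attr].get('value'))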
def _node_added_to_group(self, node, group, changeset):
try:
nodecfg = self._cfgstore['nodes'][node]
groupcfg = self._cfgstore['nodegroups'][group]
except KeyError: # something did not exist, nothing to do
return
for attrib in groupcfg:
self._do_inheritance(nodecfg, attrib, node, changeset)
_addchange(changeset, node, attrib)
def _node_removed_from_group(self, node, group, changeset):
try:
nodecfg = self._cfgstore['nodes'][node]
except KeyError: # node did not exist, nothing to do
return
for attrib in list(nodecfg.keys()):
if attrib.startswith("_"):
continue
if attrib == 'groups':
continue
try:
if nodecfg[attrib]['inheritedfrom'] == group:
del nodecfg[attrib] # remove invalid inherited data
self._do_inheritance(nodecfg, attrib, node, changeset)
_addchange(changeset, node, attrib)
_mark_dirtykey('nodes', node, self.tenant)
except KeyError: # inheritedfrom not set, move on
pass
def _do_inheritance(self, nodecfg, attrib, nodename, changeset,
srcgroup=None):
# for now, just do single inheritance
# TODO: concatenating inheritance if requested
if attrib in ('nodes', 'groups'):
#not attributes that should be considered here
return
if attrib in nodecfg and 'inheritedfrom' not in nodecfg[attrib]:
return # already has a non-inherited value set, nothing to do
# if the attribute is not set, this will search for a candidate
# if it is set, but inheritedfrom, search for a replacement, just
# in case
        if 'groups' not in nodecfg:
return
for group in nodecfg['groups']:
if attrib in self._cfgstore['nodegroups'][group]:
if srcgroup is not None and group != srcgroup:
# skip needless deepcopy
return
nodecfg[attrib] = \
copy.deepcopy(self._cfgstore['nodegroups'][group][attrib])
nodecfg[attrib]['inheritedfrom'] = group
self._refresh_nodecfg(nodecfg, attrib, nodename,
changeset=changeset)
_mark_dirtykey('nodes', nodename, self.tenant)
return
if srcgroup is not None and group == srcgroup:
# break out
return
def _sync_groups_to_node(self, groups, node, changeset):
for group in self._cfgstore['nodegroups']:
if group not in groups:
if node in self._cfgstore['nodegroups'][group]['nodes']:
self._cfgstore['nodegroups'][group]['nodes'].discard(node)
self._node_removed_from_group(node, group, changeset)
_mark_dirtykey('nodegroups', group, self.tenant)
for group in groups:
if group not in self._cfgstore['nodegroups']:
self._cfgstore['nodegroups'][group] = {'nodes': set([node])}
_mark_dirtykey('nodegroups', group, self.tenant)
elif node not in self._cfgstore['nodegroups'][group]['nodes']:
self._cfgstore['nodegroups'][group]['nodes'].add(node)
_mark_dirtykey('nodegroups', group, self.tenant)
            # node was not already in given group, perform inheritance fixup
self._node_added_to_group(node, group, changeset)
def _sync_nodes_to_group(self, nodes, group, changeset):
for node in self._cfgstore['nodes']:
if node not in nodes and 'groups' in self._cfgstore['nodes'][node]:
if group in self._cfgstore['nodes'][node]['groups']:
self._cfgstore['nodes'][node]['groups'].remove(group)
self._node_removed_from_group(node, group, changeset)
_mark_dirtykey('nodes', node, self.tenant)
for node in nodes:
if node not in self._cfgstore['nodes']:
self._cfgstore['nodes'][node] = {'groups': [group]}
_mark_dirtykey('nodes', node, self.tenant)
elif group not in self._cfgstore['nodes'][node]['groups']:
self._cfgstore['nodes'][node]['groups'].insert(0, group)
_mark_dirtykey('nodes', node, self.tenant)
else:
continue # next node, this node already in
self._node_added_to_group(node, group, changeset)
def add_group_attributes(self, attribmap):
self.set_group_attributes(attribmap, autocreate=True)
def set_group_attributes(self, attribmap, autocreate=False):
for group in attribmap:
curr = attribmap[group]
for attrib in curr:
if attrib.startswith('crypted.'):
if not isinstance(curr[attrib], dict):
curr[attrib] = {'value': curr[attrib]}
if 'hashvalue' not in curr[attrib] and curr[attrib].get('value', None):
curr[attrib]['hashvalue'] = hashcrypt_value(
curr[attrib]['value'])
if 'grubhashvalue' not in curr[attrib]:
curr[attrib]['grubhashvalue'] = grub_hashcrypt_value(
curr[attrib]['value'])
if 'value' in curr[attrib]:
del curr[attrib]['value']
if cfgleader: # currently config slave to another
return exec_on_leader('_rpc_master_set_group_attributes',
self.tenant, attribmap, autocreate)
if cfgstreams:
exec_on_followers('_rpc_set_group_attributes', self.tenant,
attribmap, autocreate)
self._true_set_group_attributes(attribmap, autocreate)
def _true_set_group_attributes(self, attribmap, autocreate=False):
changeset = {}
for group in attribmap:
if group == '':
raise ValueError('"{0}" is not a valid group name'.format(
group))
if autocreate:
try:
noderange._parser.parseString(
'({0})'.format(group)).asList()
except noderange.pp.ParseException as pe:
raise ValueError('"{0}" is not a valid group name'.format(
group))
if not autocreate and group not in self._cfgstore['nodegroups']:
raise ValueError("{0} group does not exist".format(group))
for attr in attribmap[group]:
# first do a pass to normalize out any aliased attribute names
if attr in _attraliases:
newattr = _attraliases[attr]
attribmap[group][newattr] = attribmap[group][attr]
del attribmap[group][attr]
if 'noderange' in attribmap[group]:
if len(attribmap[group]) > 1:
raise ValueError('noderange attribute must be set by itself')
for attr in attribmap[group]:
if attr in _attraliases:
newattr = _attraliases[attr]
attribmap[group][newattr] = attribmap[group][attr]
del attribmap[group][attr]
if attr not in ('nodes', 'noderange'):
attrval = fixup_attribute(attr, attribmap[group][attr])
if attribute_is_invalid(attr, attrval):
errstr = "{0} attribute is invalid".format(attr)
raise ValueError(errstr)
attribmap[group][attr] = attrval
if attr == 'nodes':
if isinstance(attribmap[group][attr], dict):
currnodes = list(self.get_nodegroup_attributes(
group, ['nodes']).get('nodes', []))
if attribmap[group][attr].get('prepend', False):
newnodes = noderange.NodeRange(attribmap[group][attr][
'prepend'], config=self).nodes
attribmap[group][attr] = list(
newnodes) + currnodes
elif attribmap[group][attr].get('remove', False):
delnodes = noderange.NodeRange(
attribmap[group][attr]['remove'],
config=self).nodes
attribmap[group][attr] = [
x for x in currnodes if x not in delnodes]
if not isinstance(attribmap[group][attr], list):
if type(attribmap[group][attr]) is unicode or type(attribmap[group][attr]) is str:
attribmap[group][attr] = noderange.NodeRange(
attribmap[group][attr], config=self).nodes
else:
raise ValueError("nodes attribute on group must be list")
for node in attribmap[group]['nodes']:
if node not in self._cfgstore['nodes']:
raise ValueError(
"{0} node does not exist to add to {1}".format(
node, group))
for group in attribmap:
group = confluent.util.stringify(group)
if group not in self._cfgstore['nodegroups']:
self._cfgstore['nodegroups'][group] = {'nodes': set()}
cfgobj = self._cfgstore['nodegroups'][group]
if 'noderange' in attribmap[group] and attribmap[group]['noderange']:
if cfgobj['nodes']:
raise ValueError('Cannot set both nodes and noderange on group')
if set(cfgobj) - set(['noderange', 'nodes']):
raise ValueError('Cannot set noderange on a group with attributes')
elif 'noderange' in cfgobj and cfgobj['noderange']:
raise ValueError('Attributes cannot be set on a group with a noderange')
for attr in attribmap[group]:
if attr == 'nodes':
newdict = set(attribmap[group][attr])
elif (isinstance(attribmap[group][attr], str) or
isinstance(attribmap[group][attr], unicode) or
isinstance(attribmap[group][attr], bool)):
newdict = {'value': attribmap[group][attr]}
else:
newdict = attribmap[group][attr]
if 'value' in newdict and attr.startswith("secret."):
newdict['cryptvalue'] = crypt_value(newdict['value'])
del newdict['value']
if 'value' in newdict and attr.startswith("crypted."):
newdict['hashvalue'] = hashcrypt_value(newdict['value'])
del newdict['value']
cfgobj[attr] = newdict
if attr == 'nodes':
self._sync_nodes_to_group(group=group,
nodes=attribmap[group]['nodes'],
changeset=changeset)
                elif attr != 'noderange': # update inheritance
for node in cfgobj['nodes']:
nodecfg = self._cfgstore['nodes'][node]
self._do_inheritance(nodecfg, attr, node, changeset,
srcgroup=group)
_addchange(changeset, node, attr)
_mark_dirtykey('nodegroups', group, self.tenant)
self._notif_attribwatchers(changeset)
self._bg_sync_to_file()
def clear_group_attributes(self, groups, attributes):
if cfgleader:
return exec_on_leader('_rpc_master_clear_group_attributes',
self.tenant, groups, attributes)
if cfgstreams:
exec_on_followers('_rpc_clear_group_attributes', self.tenant,
groups, attributes)
self._true_clear_group_attributes(groups, attributes)
def _true_clear_group_attributes(self, groups, attributes):
changeset = {}
realattributes = []
for attrname in list(attributes):
if attrname in _attraliases:
realattributes.append(_attraliases[attrname])
else:
realattributes.append(attrname)
attributes = realattributes
if type(groups) in (str, unicode):
groups = (groups,)
for group in groups:
group = confluent.util.stringify(group)
try:
groupentry = self._cfgstore['nodegroups'][group]
except KeyError:
continue
for attrib in attributes:
if attrib == 'nodes':
groupentry['nodes'] = set()
self._sync_nodes_to_group(
group=group, nodes=(), changeset=changeset)
else:
try:
del groupentry[attrib]
except KeyError:
pass
for node in groupentry['nodes']:
nodecfg = self._cfgstore['nodes'][node]
try:
delnodeattrib = (
nodecfg[attrib]['inheritedfrom'] == group)
except KeyError:
delnodeattrib = False
if delnodeattrib:
del nodecfg[attrib]
self._do_inheritance(nodecfg, attrib, node,
changeset)
_addchange(changeset, node, attrib)
_mark_dirtykey('nodes', node, self.tenant)
_mark_dirtykey('nodegroups', group, self.tenant)
self._notif_attribwatchers(changeset)
self._bg_sync_to_file()
def _refresh_nodecfg(self, cfgobj, attrname, node, changeset):
exprmgr = None
if 'expression' in cfgobj[attrname]: # evaluate now
if exprmgr is None:
exprmgr = _ExpressionFormat(cfgobj, node)
cfgobj[attrname] = _decode_attribute(attrname, cfgobj,
formatter=exprmgr)
if ('_expressionkeys' in cfgobj and
attrname in cfgobj['_expressionkeys']):
if exprmgr is None:
exprmgr = _ExpressionFormat(cfgobj, node)
self._recalculate_expressions(cfgobj, formatter=exprmgr, node=node,
changeset=changeset)
def _notif_attribwatchers(self, nodeattrs):
if self.tenant not in self._attribwatchers:
return
notifdata = {}
attribwatchers = self._attribwatchers[self.tenant]
for node in nodeattrs:
if node not in attribwatchers:
continue
attribwatcher = attribwatchers[node]
# usually, we will only look at the specific attribute keys that
# have had change flagged, so set up to iterate through only those
checkattrs = nodeattrs[node]
if '_nodedeleted' in nodeattrs[node]:
# in the case of a deleted node, we want to iterate through
# *all* attributes that the node might have had set prior
# to deletion, to make all watchers aware of the removed
# node and take appropriate action
checkattrs = attribwatcher
globattrs = {}
for attrglob in attribwatcher.get('_attrglobs', []):
for matched in fnmatch.filter(list(checkattrs), attrglob):
globattrs[matched] = attrglob
for attrname in checkattrs:
if attrname == '_attrglobs':
continue
watchkey = attrname
# the attrib watcher could still have a glob
if attrname not in attribwatcher:
if attrname in globattrs:
watchkey = globattrs[attrname]
else:
continue
for notifierid in attribwatcher[watchkey]:
if notifierid in notifdata:
if node in notifdata[notifierid]['nodeattrs']:
notifdata[notifierid]['nodeattrs'][node].append(
attrname)
else:
notifdata[notifierid]['nodeattrs'][node] = [
attrname]
else:
notifdata[notifierid] = {
'nodeattrs': {node: [attrname]},
'callback': attribwatcher[watchkey][notifierid]
}
for watcher in notifdata:
watcher = notifdata[watcher]
callback = watcher['callback']
eventlet.spawn_n(_do_notifier, self, watcher, callback)
def del_nodes(self, nodes):
if isinstance(nodes, set):
nodes = list(nodes) # msgpack can't handle set
if cfgleader: # slaved to a collective
return exec_on_leader('_rpc_master_del_nodes', self.tenant,
nodes)
if cfgstreams:
exec_on_followers('_rpc_del_nodes', self.tenant, nodes)
self._true_del_nodes(nodes)
def _true_del_nodes(self, nodes):
if self.tenant in self._nodecollwatchers:
for watcher in self._nodecollwatchers[self.tenant]:
watcher = self._nodecollwatchers[self.tenant][watcher]
watcher(added=(), deleting=nodes, renamed=(), configmanager=self)
changeset = {}
for node in nodes:
# set a reserved attribute for the sake of the change notification
# framework to trigger on
changeset[node] = {'_nodedeleted': 1}
node = confluent.util.stringify(node)
if node in self._cfgstore['nodes']:
self._sync_groups_to_node(node=node, groups=[],
changeset=changeset)
del self._cfgstore['nodes'][node]
_mark_dirtykey('nodes', node, self.tenant)
self._notif_attribwatchers(changeset)
self._bg_sync_to_file()
def del_groups(self, groups):
if cfgleader:
return exec_on_leader('_rpc_master_del_groups', self.tenant,
groups)
if cfgstreams:
exec_on_followers('_rpc_del_groups', self.tenant, groups)
self._true_del_groups(groups)
def _true_del_groups(self, groups):
changeset = {}
for group in groups:
if group in self._cfgstore['nodegroups']:
self._sync_nodes_to_group(group=group, nodes=[],
changeset=changeset)
del self._cfgstore['nodegroups'][group]
_mark_dirtykey('nodegroups', group, self.tenant)
self._notif_attribwatchers(changeset)
self._bg_sync_to_file()
def clear_node_attributes(self, nodes, attributes):
if cfgleader:
return exec_on_leader('_rpc_master_clear_node_attributes',
self.tenant, nodes, attributes)
if cfgstreams:
exec_on_followers('_rpc_clear_node_attributes', self.tenant,
nodes, attributes)
self._true_clear_node_attributes(nodes, attributes)
def _true_clear_node_attributes(self, nodes, attributes):
# accumulate all changes into a changeset and push in one go
changeset = {}
realattributes = []
for attrname in list(attributes):
if attrname in _attraliases:
realattributes.append(_attraliases[attrname])
else:
realattributes.append(attrname)
attributes = realattributes
for node in nodes:
node = confluent.util.stringify(node)
try:
nodek = self._cfgstore['nodes'][node]
except KeyError:
continue
recalcexpressions = False
for attrib in attributes:
if attrib in nodek and 'inheritedfrom' not in nodek[attrib]:
# if the attribute is set and not inherited,
                    # delete it and check for inheritance to backfill data
del nodek[attrib]
self._do_inheritance(nodek, attrib, node, changeset)
_addchange(changeset, node, attrib)
_mark_dirtykey('nodes', node, self.tenant)
if ('_expressionkeys' in nodek and
attrib in nodek['_expressionkeys']):
recalcexpressions = True
if recalcexpressions:
exprmgr = _ExpressionFormat(nodek, node)
self._recalculate_expressions(nodek, formatter=exprmgr,
node=node, changeset=changeset)
self._notif_attribwatchers(changeset)
self._bg_sync_to_file()
def add_node_attributes(self, attribmap):
for node in attribmap:
if 'groups' not in attribmap[node]:
attribmap[node]['groups'] = []
self.set_node_attributes(attribmap, autocreate=True)
def rename_nodes(self, renamemap):
if cfgleader:
return exec_on_leader('_rpc_master_rename_nodes', self.tenant,
renamemap)
if cfgstreams:
exec_on_followers('_rpc_rename_nodes', self.tenant, renamemap)
self._true_rename_nodes(renamemap)
def _true_rename_nodes(self, renamemap):
oldnames = set(renamemap)
exprmgr = None
currnodes = set(self._cfgstore['nodes'])
missingnodes = oldnames - currnodes
if missingnodes:
raise ValueError(
'The following nodes to rename do not exist: {0}'.format(
','.join(missingnodes)))
newnames = set([])
for name in renamemap:
newnames.add(renamemap[name])
if newnames & currnodes:
raise ValueError(
'The following requested new names conflict with existing nodes: {0}'.format(
','.join(newnames & currnodes)))
for name in renamemap:
self._cfgstore['nodes'][renamemap[name]] = self._cfgstore['nodes'][name]
del self._cfgstore['nodes'][name]
_mark_dirtykey('nodes', name, self.tenant)
_mark_dirtykey('nodes', renamemap[name], self.tenant)
for group in self._cfgstore['nodes'][renamemap[name]].get('groups', []):
self._cfgstore['nodegroups'][group]['nodes'].discard(name)
self._cfgstore['nodegroups'][group]['nodes'].add(renamemap[name])
_mark_dirtykey('nodegroups', group, self.tenant)
cfgobj = self._cfgstore['nodes'][renamemap[name]]
node = renamemap[name]
changeset = {}
if exprmgr is None:
exprmgr = _ExpressionFormat(cfgobj, node)
self._recalculate_expressions(cfgobj, formatter=exprmgr, node=renamemap[name], changeset=changeset)
if self.tenant in self._nodecollwatchers:
nodecollwatchers = self._nodecollwatchers[self.tenant]
for watcher in nodecollwatchers:
watcher = nodecollwatchers[watcher]
eventlet.spawn_n(_do_add_watcher, watcher, (), self, renamemap)
self._bg_sync_to_file()
def rename_nodegroups(self, renamemap):
if cfgleader:
return exec_on_leader('_rpc_master_rename_nodegroups', self.tenant, renamemap)
if cfgstreams:
exec_on_followers('_rpc_rename_nodegroups', self.tenant, renamemap)
self._true_rename_groups(renamemap)
def _true_rename_groups(self, renamemap):
oldnames = set(renamemap)
currgroups = set(self._cfgstore['nodegroups'])
missinggroups = oldnames - currgroups
if missinggroups:
raise ValueError(
'The following groups to rename do not exist: {0}'.format(
','.join(missinggroups)))
newnames = set([])
for name in renamemap:
newnames.add(renamemap[name])
if newnames & currgroups:
raise ValueError(
'The following requested new names conflict with existing groups: {0}'.format(
','.join(newnames & currgroups)))
for name in renamemap:
self._cfgstore['nodegroups'][renamemap[name]] = self._cfgstore['nodegroups'][name]
del self._cfgstore['nodegroups'][name]
_mark_dirtykey('nodegroups', name, self.tenant)
_mark_dirtykey('nodegroups', renamemap[name], self.tenant)
for node in self._cfgstore['nodegroups'][renamemap[name]].get('nodes', []):
lidx = self._cfgstore['nodes'][node]['groups'].index(name)
self._cfgstore['nodes'][node]['groups'][lidx] = renamemap[name]
_mark_dirtykey('nodes', node, self.tenant)
self._bg_sync_to_file()
def set_node_attributes(self, attribmap, autocreate=False):
for node in attribmap:
curr = attribmap[node]
for attrib in curr:
if attrib.startswith('crypted.'):
if not isinstance(curr[attrib], dict):
curr[attrib] = {'value': curr[attrib]}
if 'hashvalue' not in curr[attrib] and curr[attrib].get('value', None):
curr[attrib]['hashvalue'] = hashcrypt_value(
curr[attrib]['value'])
if 'grubhashvalue' not in curr[attrib]:
curr[attrib]['grubhashvalue'] = grub_hashcrypt_value(
curr[attrib]['value'])
if 'value' in curr[attrib]:
del curr[attrib]['value']
if cfgleader: # currently config slave to another
return exec_on_leader('_rpc_master_set_node_attributes',
self.tenant, attribmap, autocreate)
if cfgstreams:
exec_on_followers('_rpc_set_node_attributes',
self.tenant, attribmap, autocreate)
self._true_set_node_attributes(attribmap, autocreate)
def _true_set_node_attributes(self, attribmap, autocreate):
# TODO(jbjohnso): multi mgr support, here if we have peers,
# pickle the arguments and fire them off in eventlet
# flows to peers, all should have the same result
newnodes = []
changeset = {}
# first do a sanity check of the input upfront
# this mitigates risk of arguments being partially applied
for node in attribmap:
node = confluent.util.stringify(node)
if node == '':
raise ValueError('"{0}" is not a valid node name'.format(node))
if autocreate:
try:
noderange._parser.parseString(
'({0})'.format(node)).asList()
except noderange.pp.ParseException as pe:
raise ValueError(
'"{0}" is not a valid node name'.format(node))
if autocreate is False and node not in self._cfgstore['nodes']:
raise ValueError("node {0} does not exist".format(node))
for attrname in list(attribmap[node]):
if attrname in _attraliases:
truename = _attraliases[attrname]
attribmap[node][truename] = attribmap[node][attrname]
del attribmap[node][attrname]
for attrname in attribmap[node]:
attrval = attribmap[node][attrname]
try:
if (allattributes.node[attrname]['type'] == 'list' and
type(attrval) in (str, unicode)):
attrval = attrval.split(",")
except KeyError:
pass
if attrname == 'groups':
if isinstance(attribmap[node]['groups'], dict):
currgroups = self.get_node_attributes(
node, 'groups').get(node, {}).get('groups', [])
if attribmap[node]['groups'].get('prepend', False):
newgroups = attribmap[node]['groups'][
'prepend'].split(',')
attribmap[node]['groups'] = newgroups + currgroups
elif attribmap[node]['groups'].get('remove', False):
delgroups = attribmap[node]['groups'][
'remove'].split(',')
newgroups = [
x for x in currgroups if x not in delgroups]
attribmap[node]['groups'] = newgroups
elif type(attribmap[node]['groups']) != list:
attribmap[node]['groups']=attribmap[node]['groups'].split(",")
for group in attribmap[node]['groups']:
if group not in self._cfgstore['nodegroups']:
raise ValueError(
"group {0} does not exist".format(group))
if ('everything' in self._cfgstore['nodegroups'] and
'everything' not in attribmap[node]['groups']):
attribmap[node]['groups'].append('everything')
else:
attrval = fixup_attribute(attrname, attrval)
if attribute_is_invalid(attrname, attrval):
errstr = "{0} attribute on node {1} is invalid".format(
attrname, node)
raise ValueError(errstr)
attribmap[node][attrname] = attrval
for node in attribmap:
node = confluent.util.stringify(node)
exprmgr = None
if node not in self._cfgstore['nodes']:
newnodes.append(node)
self._cfgstore['nodes'][node] = {}
cfgobj = self._cfgstore['nodes'][node]
recalcexpressions = False
for attrname in attribmap[node]:
if (isinstance(attribmap[node][attrname], str) or
isinstance(attribmap[node][attrname], unicode) or
isinstance(attribmap[node][attrname], bool)):
newdict = {'value': attribmap[node][attrname]}
else:
newdict = attribmap[node][attrname]
# add check here, skip None attributes
if newdict is None:
continue
if 'value' in newdict and attrname.startswith("secret."):
newdict['cryptvalue'] = crypt_value(newdict['value'])
del newdict['value']
if 'value' in newdict and attrname.startswith("crypted."):
newdict['hashvalue'] = hashcrypt_value(newdict['value'])
newdict['grubhashvalue'] = grub_hashcrypt_value(
newdict['value'])
del newdict['value']
cfgobj[attrname] = newdict
if attrname == 'groups':
self._sync_groups_to_node(node=node,
groups=attribmap[node]['groups'],
changeset=changeset)
if ('_expressionkeys' in cfgobj and
attrname in cfgobj['_expressionkeys']):
recalcexpressions = True
if 'expression' in cfgobj[attrname]: # evaluate now
if exprmgr is None:
exprmgr = _ExpressionFormat(cfgobj, node)
cfgobj[attrname] = _decode_attribute(attrname, cfgobj,
formatter=exprmgr)
# if any code is watching these attributes, notify
# them of the change
_addchange(changeset, node, attrname)
_mark_dirtykey('nodes', node, self.tenant)
if recalcexpressions:
if exprmgr is None:
exprmgr = _ExpressionFormat(cfgobj, node)
self._recalculate_expressions(cfgobj, formatter=exprmgr,
node=node, changeset=changeset)
self._notif_attribwatchers(changeset)
if newnodes:
if self.tenant in self._nodecollwatchers:
nodecollwatchers = self._nodecollwatchers[self.tenant]
for watcher in nodecollwatchers:
watcher = nodecollwatchers[watcher]
eventlet.spawn_n(_do_add_watcher, watcher, newnodes, self)
self._bg_sync_to_file()
        #TODO: wait for synchronization to succeed/fail?
def _load_from_json(self, jsondata, sync=True):
"""Load fresh configuration data from jsondata
:param jsondata: String of jsondata
:return:
"""
dumpdata = json.loads(jsondata)
tmpconfig = {}
for confarea in _config_areas:
if confarea not in dumpdata:
continue
tmpconfig[confarea] = {}
for element in dumpdata[confarea]:
newelement = copy.deepcopy(dumpdata[confarea][element])
try:
noderange._parser.parseString(
'({0})'.format(element)).asList()
except noderange.pp.ParseException as pe:
raise ValueError(
'"{0}" is not a supported name, it must be renamed or '
'removed from backup to restore'.format(element))
for attribute in dumpdata[confarea][element]:
if newelement[attribute] == '*REDACTED*':
raise Exception(
"Unable to restore from redacted backup")
elif attribute == 'cryptpass':
passparts = newelement[attribute].split('!')
newelement[attribute] = tuple([base64.b64decode(x)
for x in passparts])
elif 'cryptvalue' in newelement[attribute]:
bincrypt = newelement[attribute]['cryptvalue']
bincrypt = tuple([base64.b64decode(x)
for x in bincrypt.split('!')])
newelement[attribute]['cryptvalue'] = bincrypt
elif attribute in ('nodes', '_expressionkeys'):
# A group with nodes
# delete it and defer until nodes are being added
# which will implicitly fill this up
# Or _expressionkeys attribute, which will similarly
# be rebuilt
del newelement[attribute]
tmpconfig[confarea][element] = newelement
# We made it through above section without an exception, go ahead and
# replace
# Start by erasing the dbm files if present
for confarea in _config_areas:
try:
os.unlink(os.path.join(self._cfgdir, confarea))
except OSError as e:
if e.errno == 2:
pass
# Now we have to iterate through each fixed up element, using the
        # set attribute to flesh out inheritance and expressions
_cfgstore['main']['idmap'] = {}
for confarea in _config_areas:
self._cfgstore[confarea] = {}
if confarea not in tmpconfig:
continue
if confarea == 'nodes':
self.set_node_attributes(tmpconfig[confarea], True)
elif confarea == 'nodegroups':
self.set_group_attributes(tmpconfig[confarea], True)
elif confarea == 'usergroups':
for usergroup in tmpconfig[confarea]:
self.create_usergroup(usergroup)
elif confarea == 'users':
for user in tmpconfig[confarea]:
                    uid = tmpconfig[confarea][user].get('id', None)
                    displayname = tmpconfig[confarea][user].get('displayname', None)
self.create_user(user, uid=uid, displayname=displayname)
if 'cryptpass' in tmpconfig[confarea][user]:
self._cfgstore['users'][user]['cryptpass'] = \
tmpconfig[confarea][user]['cryptpass']
_mark_dirtykey('users', user, self.tenant)
if sync:
self._bg_sync_to_file()
def _dump_to_json(self, redact=None):
"""Dump the configuration in json form to output
        password is used to protect the 'secret' attributes in lieu of the
        actual in-configuration master key (which will have no clear form
        in the dump).
:param redact: If True, then sensitive password data will be redacted.
Other values may be used one day to redact in more
complex and interesting ways for non-secret
data.
"""
dumpdata = {}
for confarea in _config_areas:
if confarea not in self._cfgstore:
continue
dumpdata[confarea] = {}
for element in self._cfgstore[confarea]:
dumpdata[confarea][element] = \
copy.deepcopy(self._cfgstore[confarea][element])
for attribute in self._cfgstore[confarea][element]:
if 'inheritedfrom' in dumpdata[confarea][element][attribute]:
del dumpdata[confarea][element][attribute]
elif (attribute == 'cryptpass' or
'cryptvalue' in
dumpdata[confarea][element][attribute]):
if redact is not None:
dumpdata[confarea][element][attribute] = '*REDACTED*'
else:
if attribute == 'cryptpass':
target = dumpdata[confarea][element][attribute]
else:
target = dumpdata[confarea][element][attribute]['cryptvalue']
cryptval = []
for value in target:
cryptval.append(confluent.util.stringify(base64.b64encode(value)))
if attribute == 'cryptpass':
dumpdata[confarea][element][attribute] = '!'.join(cryptval)
else:
dumpdata[confarea][element][attribute]['cryptvalue'] = '!'.join(cryptval)
elif isinstance(dumpdata[confarea][element][attribute], set):
dumpdata[confarea][element][attribute] = \
list(dumpdata[confarea][element][attribute])
return json.dumps(
dumpdata, sort_keys=True, indent=4, separators=(',', ': '))
@classmethod
def _read_from_path(cls):
global _cfgstore
global _txcount
_cfgstore = {}
rootpath = cls._cfgdir
try:
with open(os.path.join(rootpath, 'transactioncount'), 'rb') as f:
txbytes = f.read()
if len(txbytes) == 8:
_txcount = struct.unpack('!Q', txbytes)[0]
except IOError:
pass
_load_dict_from_dbm(['collective'], os.path.join(rootpath,
"collective"))
_load_dict_from_dbm(['globals'], os.path.join(rootpath, "globals"))
for confarea in _config_areas:
_load_dict_from_dbm(['main', confarea], os.path.join(rootpath, confarea))
try:
for tenant in os.listdir(os.path.join(rootpath, 'tenants')):
for confarea in _config_areas:
_load_dict_from_dbm(
['main', tenant, confarea],
os.path.join(rootpath, tenant, confarea))
except OSError:
pass
@classmethod
def wait_for_sync(cls, fullsync=False):
if cls._cfgwriter is not None:
cls._cfgwriter.join()
cls._bg_sync_to_file(fullsync)
if cls._cfgwriter is not None:
cls._cfgwriter.join()
@classmethod
def shutdown(cls):
cls.wait_for_sync()
sys.exit(0)
@classmethod
def _bg_sync_to_file(cls, fullsync=False):
if statelessmode:
return
with cls._syncstate:
if (cls._syncrunning and cls._cfgwriter is not None and
cls._cfgwriter.isAlive()):
cls._writepending = True
return
if cls._syncrunning: # This suggests an unclean write attempt,
# do a fullsync as a recovery
fullsync = True
cls._syncrunning = True
# if the thread is exiting, join it to let it close, just in case
if cls._cfgwriter is not None:
cls._cfgwriter.join()
cls._cfgwriter = threading.Thread(target=cls._sync_to_file, args=(fullsync,))
cls._cfgwriter.start()
@classmethod
def _sync_to_file(cls, fullsync=False):
with _synclock:
if statelessmode:
return
_mkpath(cls._cfgdir)
with open(os.path.join(cls._cfgdir, 'transactioncount'), 'wb') as f:
f.write(struct.pack('!Q', _txcount))
if (fullsync or 'dirtyglobals' in _cfgstore and
'globals' in _cfgstore):
if fullsync: # globals is not a given to be set..
dirtyglobals = _cfgstore['globals']
else:
with _dirtylock:
dirtyglobals = copy.deepcopy(_cfgstore['dirtyglobals'])
del _cfgstore['dirtyglobals']
globalf = dbm.open(os.path.join(cls._cfgdir, "globals"), 'c', 384) # 0600
try:
for globalkey in dirtyglobals:
if globalkey in _cfgstore['globals']:
globalf[globalkey] = \
cPickle.dumps(_cfgstore['globals'][globalkey], protocol=cPickle.HIGHEST_PROTOCOL)
else:
if globalkey in globalf:
del globalf[globalkey]
finally:
globalf.close()
if fullsync or 'collectivedirty' in _cfgstore:
if len(_cfgstore.get('collective', ())) > 1:
collectivef = dbm.open(os.path.join(cls._cfgdir, "collective"),
'c', 384)
try:
if fullsync:
colls = _cfgstore['collective']
else:
with _dirtylock:
colls = copy.deepcopy(_cfgstore['collectivedirty'])
del _cfgstore['collectivedirty']
for coll in colls:
if coll in _cfgstore['collective']:
collectivef[coll] = cPickle.dumps(
_cfgstore['collective'][coll], protocol=cPickle.HIGHEST_PROTOCOL)
else:
if coll in collectivef:
del collectivef[coll]
finally:
collectivef.close()
else:
try:
os.remove(os.path.join(cls._cfgdir, "collective"))
except OSError:
pass
if fullsync:
pathname = cls._cfgdir
currdict = _cfgstore['main']
for category in currdict:
_mkpath(pathname)
dbf = dbm.open(os.path.join(pathname, category), 'c', 384) # 0600
try:
for ck in currdict[category]:
dbf[ck] = cPickle.dumps(currdict[category][ck], protocol=cPickle.HIGHEST_PROTOCOL)
finally:
dbf.close()
elif 'dirtykeys' in _cfgstore:
with _dirtylock:
currdirt = copy.deepcopy(_cfgstore['dirtykeys'])
del _cfgstore['dirtykeys']
for tenant in currdirt:
dkdict = currdirt[tenant]
if tenant is None:
pathname = cls._cfgdir
currdict = _cfgstore['main']
else:
pathname = os.path.join(cls._cfgdir, 'tenants', tenant)
currdict = _cfgstore['tenant'][tenant]
for category in dkdict:
_mkpath(pathname)
dbf = dbm.open(os.path.join(pathname, category), 'c', 384) # 0600
try:
for ck in dkdict[category]:
if ck not in currdict[category]:
if ck in dbf:
del dbf[ck]
else:
dbf[ck] = cPickle.dumps(currdict[category][ck], protocol=cPickle.HIGHEST_PROTOCOL)
finally:
dbf.close()
willrun = False
with cls._syncstate:
if cls._writepending:
cls._writepending = False
willrun = True
else:
cls._syncrunning = False
if willrun:
return cls._sync_to_file()
def _recalculate_expressions(self, cfgobj, formatter, node, changeset):
for key in cfgobj:
if not isinstance(cfgobj[key], dict):
continue
if 'expression' in cfgobj[key]:
cfgobj[key] = _decode_attribute(key, cfgobj,
formatter=formatter)
_addchange(changeset, node, key)
elif ('cryptvalue' not in cfgobj[key] and
'value' not in cfgobj[key]):
# recurse for nested structures, with some hint that
# it might indeed be a nested structure
self._recalculate_expressions(cfgobj[key], formatter, node,
changeset)
def _restore_keys(jsond, password, newpassword=None, sync=True):
# the jsond from the restored file, password (if any) used to protect
# the file, and newpassword to use, (also check the service.cfg file)
global _masterkey
global _masterintegritykey
if isinstance(jsond, dict):
keydata = jsond
else:
keydata = json.loads(jsond)
cryptkey = _parse_key(keydata['cryptkey'], password)
integritykey = None
if 'integritykey' in keydata:
integritykey = _parse_key(keydata['integritykey'], password)
conf.init_config()
cfg = conf.get_config()
if cfg.has_option('security', 'externalcfgkey'):
keyfilename = cfg.get('security', 'externalcfgkey')
with open(keyfilename, 'r') as keyfile:
newpassword = keyfile.read()
set_global('master_privacy_key', _format_key(cryptkey,
password=newpassword), sync)
if integritykey:
set_global('master_integrity_key', _format_key(integritykey,
password=newpassword), sync)
_masterkey = cryptkey
_masterintegritykey = integritykey
if sync:
ConfigManager.wait_for_sync()
def _dump_keys(password, dojson=True):
if _masterkey is None:
init_masterkey()
cryptkey = _format_key(_masterkey, password=password)
if 'passphraseprotected' in cryptkey:
cryptkey = '!'.join(
[confluent.util.stringify(base64.b64encode(x))
for x in cryptkey['passphraseprotected']])
else:
cryptkey = '*unencrypted:{0}'.format(confluent.util.stringify(base64.b64encode(
cryptkey['unencryptedvalue'])))
keydata = {'cryptkey': cryptkey}
if _masterintegritykey is not None:
integritykey = _format_key(_masterintegritykey, password=password)
if 'passphraseprotected' in integritykey:
integritykey = '!'.join([confluent.util.stringify(base64.b64encode(x)) for x in
integritykey['passphraseprotected']])
else:
integritykey = '*unencrypted:{0}'.format(confluent.util.stringify(base64.b64encode(
integritykey['unencryptedvalue'])))
keydata['integritykey'] = integritykey
if dojson:
return json.dumps(keydata, sort_keys=True, indent=4, separators=(',', ': '))
return keydata
def restore_db_from_directory(location, password):
try:
with open(os.path.join(location, 'keys.json'), 'r') as cfgfile:
keydata = cfgfile.read()
json.loads(keydata)
_restore_keys(keydata, password)
except IOError as e:
if e.errno == 2:
raise Exception("Cannot restore without keys, this may be a "
"redacted dump")
try:
moreglobals = json.load(open(os.path.join(location, 'globals.json')))
for globvar in moreglobals:
set_global(globvar, moreglobals[globvar])
except IOError as e:
if e.errno != 2:
raise
try:
collective = json.load(open(os.path.join(location, 'collective.json')))
_cfgstore['collective'] = {}
for coll in collective:
add_collective_member(coll, collective[coll]['address'],
collective[coll]['fingerprint'])
except IOError as e:
if e.errno != 2:
raise
with open(os.path.join(location, 'main.json'), 'r') as cfgfile:
cfgdata = cfgfile.read()
ConfigManager(tenant=None)._load_from_json(cfgdata)
ConfigManager.wait_for_sync(True)
def dump_db_to_directory(location, password, redact=None, skipkeys=False):
if not redact and not skipkeys:
with open(os.path.join(location, 'keys.json'), 'w') as cfgfile:
cfgfile.write(_dump_keys(password))
cfgfile.write('\n')
with open(os.path.join(location, 'main.json'), 'w') as cfgfile:
cfgfile.write(ConfigManager(tenant=None)._dump_to_json(redact=redact))
cfgfile.write('\n')
if 'collective' in _cfgstore:
with open(os.path.join(location, 'collective.json'), 'w') as cfgfile:
cfgfile.write(json.dumps(_cfgstore['collective']))
cfgfile.write('\n')
bkupglobals = get_globals()
if bkupglobals:
json.dump(bkupglobals, open(os.path.join(location, 'globals.json'),
'w'))
try:
for tenant in os.listdir(
os.path.join(ConfigManager._cfgdir, '/tenants/')):
with open(os.path.join(location, 'tenants', tenant,
'main.json'), 'w') as cfgfile:
cfgfile.write(ConfigManager(tenant=tenant)._dump_to_json(
redact=redact))
cfgfile.write('\n')
except OSError:
pass
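# Illustrative backup/restore sketch (not part of the original source); the
# directory and passphrase are hypothetical, and the restoring process is
# assumed to have called init() beforehand:
#     dump_db_to_directory('/var/tmp/confluent-backup', 'backup-passphrase')
#     ...
#     restore_db_from_directory('/var/tmp/confluent-backup', 'backup-passphrase')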
def get_globals():
bkupglobals = {}
for globvar in _cfgstore['globals']:
if globvar.endswith('_key'):
continue
bkupglobals[globvar] = _cfgstore['globals'][globvar]
return bkupglobals
def init(stateless=False):
global _cfgstore
if stateless:
_cfgstore = {}
return
try:
ConfigManager._read_from_path()
except IOError:
_cfgstore = {}
# some unit tests worth implementing:
# set group attribute on lower priority group, result is that node should not
# change
# after that point, then unset on the higher priority group, lower priority
# group should get it then
# rinse and repeat for set on node versus set on group
# clear group attribute and assure that it becomes unset on all nodes
# set various expressions
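# A rough sketch of the first test described above (not part of the original
# module and not wired into any test runner). The group names are arbitrary
# and 'custom.example' is a hypothetical attribute name that would need to be
# accepted by the attribute validator.
def _sketch_test_group_priority():
    cfg = ConfigManager(tenant=None)
    cfg.set_group_attributes({'lowpri': {'custom.example': 'fromlow'},
                              'highpri': {'custom.example': 'fromhigh'}},
                             autocreate=True)
    # group order on the node determines inheritance priority
    cfg.set_node_attributes({'n1': {'groups': ['highpri', 'lowpri']}},
                            autocreate=True)
    assert cfg.get_node_attributes('n1', 'custom.example')[
        'n1']['custom.example']['value'] == 'fromhigh'
    # clearing the attribute on the higher priority group should let the
    # lower priority value take over
    cfg.clear_group_attributes('highpri', ['custom.example'])
    assert cfg.get_node_attributes('n1', 'custom.example')[
        'n1']['custom.example']['value'] == 'fromlow'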
|
tello_control_ui.py
|
from PIL import Image
from PIL import ImageTk
import tkinter as tki
from tkinter import Toplevel, Scale
import threading
import datetime
import cv2
import os
import time
import platform
class TelloUI:
"""Wrapper class to enable the GUI."""
def __init__(self,tello,outputpath):
"""
        Initialize all the elements of the GUI, supported by Tkinter
        :param tello: class that interacts with the Tello drone.
Raises:
RuntimeError: If the Tello rejects the attempt to enter command mode.
"""
self.tello = tello # videostream device
        self.outputPath = outputpath # the path used to save pictures created by clicking the takeSnapshot button
self.frame = None # frame read from h264decoder and used for pose recognition
self.thread = None # thread of the Tkinter mainloop
self.stopEvent = None
# control variables
self.distance = 0.1 # default distance for 'move' cmd
self.degree = 30 # default degree for 'cw' or 'ccw' cmd
        # if the flag is True, the auto-takeoff thread will stop waiting for the response from tello
self.quit_waiting_flag = False
# initialize the root window and image panel
self.root = tki.Tk()
self.panel = None
# create buttons
self.btn_snapshot = tki.Button(self.root, text="Snapshot!",
command=self.takeSnapshot)
self.btn_snapshot.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_pause = tki.Button(self.root, text="Pause", relief="raised", command=self.pauseVideo)
self.btn_pause.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_landing = tki.Button(
self.root, text="Open Command Panel", relief="raised", command=self.openCmdWindow)
self.btn_landing.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
        # start a thread that constantly polls the video sensor for
# the most recently read frame
self.stopEvent = threading.Event()
self.thread = threading.Thread(target=self.videoLoop, args=())
self.thread.start()
# set a callback to handle when the window is closed
self.root.wm_title("TELLO Controller")
self.root.wm_protocol("WM_DELETE_WINDOW", self.onClose)
        # the sending_command thread will send a command to tello every 5 seconds
self.sending_command_thread = threading.Thread(target = self._sendingCommand)
def videoLoop(self):
"""
The mainloop thread of Tkinter
Raises:
RuntimeError: To get around a RunTime error that Tkinter throws due to threading.
"""
try:
            # start the thread that gets the GUI image and draws the skeleton
time.sleep(0.5)
self.sending_command_thread.start()
while not self.stopEvent.is_set():
system = platform.system()
# read the frame for GUI show
self.frame = self.tello.read()
if self.frame is None or self.frame.size == 0:
continue
                # convert the frame array into a PIL image
image = Image.fromarray(self.frame)
                # we found a compatibility problem between Tkinter, PIL and macOS, and it
                # sometimes makes the "ImageTk.PhotoImage" call take a very long time,
                # so for macOS we start a new thread to execute the _updateGUIImage function.
if system =="Windows" or system =="Linux":
self._updateGUIImage(image)
else:
thread_tmp = threading.Thread(target=self._updateGUIImage,args=(image,))
thread_tmp.start()
time.sleep(0.03)
except RuntimeError as e:
print("[INFO] caught a RuntimeError")
def _updateGUIImage(self,image):
"""
        Main operation to initialize the image object and update the GUI panel
"""
image = ImageTk.PhotoImage(image)
        # if the panel is None, we need to initialize it
if self.panel is None:
self.panel = tki.Label(image=image)
self.panel.image = image
self.panel.pack(side="left", padx=10, pady=10)
# otherwise, simply update the panel
else:
self.panel.configure(image=image)
self.panel.image = image
def _sendingCommand(self):
"""
        start a while loop that sends 'command' to tello every 5 seconds
"""
while True:
self.tello.send_command('command')
time.sleep(5)
def _setQuitWaitingFlag(self):
"""
        set the variable to True; it will stop the computer waiting for a response from tello
"""
self.quit_waiting_flag = True
def openCmdWindow(self):
"""
        open the command window and initialize all the buttons and text
"""
panel = Toplevel(self.root)
panel.wm_title("Command Panel")
# create text input entry
text0 = tki.Label(panel,
                          text='This controller maps keyboard inputs to Tello control commands\n'
                               'Adjust the trackbars to reset the distance and degree parameters',
font='Helvetica 10 bold'
)
text0.pack(side='top')
text1 = tki.Label(panel, text=
'W - Move Tello Up\t\t\tArrow Up - Move Tello Forward\n'
'S - Move Tello Down\t\t\tArrow Down - Move Tello Backward\n'
'A - Rotate Tello Counter-Clockwise\tArrow Left - Move Tello Left\n'
'D - Rotate Tello Clockwise\t\tArrow Right - Move Tello Right',
justify="left")
text1.pack(side="top")
self.btn_landing = tki.Button(
panel, text="Land", relief="raised", command=self.telloLanding)
self.btn_landing.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_takeoff = tki.Button(
panel, text="Takeoff", relief="raised", command=self.telloTakeOff)
self.btn_takeoff.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
# binding arrow keys to drone control
self.tmp_f = tki.Frame(panel, width=100, height=2)
self.tmp_f.bind('<KeyPress-w>', self.on_keypress_w)
self.tmp_f.bind('<KeyPress-s>', self.on_keypress_s)
self.tmp_f.bind('<KeyPress-a>', self.on_keypress_a)
self.tmp_f.bind('<KeyPress-d>', self.on_keypress_d)
self.tmp_f.bind('<KeyPress-Up>', self.on_keypress_up)
self.tmp_f.bind('<KeyPress-Down>', self.on_keypress_down)
self.tmp_f.bind('<KeyPress-Left>', self.on_keypress_left)
self.tmp_f.bind('<KeyPress-Right>', self.on_keypress_right)
self.tmp_f.pack(side="bottom")
self.tmp_f.focus_set()
self.btn_landing = tki.Button(
panel, text="Flip", relief="raised", command=self.openFlipWindow)
self.btn_landing.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.distance_bar = Scale(panel, from_=0.02, to=5, tickinterval=0.01, digits=3, label='Distance(m)',
resolution=0.01)
self.distance_bar.set(0.2)
self.distance_bar.pack(side="left")
self.btn_distance = tki.Button(panel, text="Reset Distance", relief="raised",
command=self.updateDistancebar,
)
self.btn_distance.pack(side="left", fill="both",
expand="yes", padx=10, pady=5)
self.degree_bar = Scale(panel, from_=1, to=360, tickinterval=10, label='Degree')
self.degree_bar.set(30)
self.degree_bar.pack(side="right")
self.btn_distance = tki.Button(panel, text="Reset Degree", relief="raised", command=self.updateDegreebar)
self.btn_distance.pack(side="right", fill="both",
expand="yes", padx=10, pady=5)
def openFlipWindow(self):
"""
        open the flip window and initialize all the buttons and text
"""
panel = Toplevel(self.root)
panel.wm_title("Gesture Recognition")
self.btn_flipl = tki.Button(
panel, text="Flip Left", relief="raised", command=self.telloFlip_l)
self.btn_flipl.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_flipr = tki.Button(
panel, text="Flip Right", relief="raised", command=self.telloFlip_r)
self.btn_flipr.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_flipf = tki.Button(
panel, text="Flip Forward", relief="raised", command=self.telloFlip_f)
self.btn_flipf.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_flipb = tki.Button(
panel, text="Flip Backward", relief="raised", command=self.telloFlip_b)
self.btn_flipb.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
def takeSnapshot(self):
"""
save the current frame of the video as a jpg file and put it into outputpath
"""
# grab the current timestamp and use it to construct the filename
ts = datetime.datetime.now()
filename = "{}.jpg".format(ts.strftime("%Y-%m-%d_%H-%M-%S"))
p = os.path.sep.join((self.outputPath, filename))
# save the file
cv2.imwrite(p, cv2.cvtColor(self.frame, cv2.COLOR_RGB2BGR))
print("[INFO] saved {}".format(filename))
def pauseVideo(self):
"""
        Toggle the freeze/unfreeze of video
"""
if self.btn_pause.config('relief')[-1] == 'sunken':
self.btn_pause.config(relief="raised")
self.tello.video_freeze(False)
else:
self.btn_pause.config(relief="sunken")
self.tello.video_freeze(True)
def telloTakeOff(self):
return self.tello.takeoff()
def telloLanding(self):
return self.tello.land()
def telloFlip_l(self):
return self.tello.flip('l')
def telloFlip_r(self):
return self.tello.flip('r')
def telloFlip_f(self):
return self.tello.flip('f')
def telloFlip_b(self):
return self.tello.flip('b')
def telloCW(self, degree):
return self.tello.rotate_cw(degree)
def telloCCW(self, degree):
return self.tello.rotate_ccw(degree)
def telloMoveForward(self, distance):
return self.tello.move_forward(distance)
def telloMoveBackward(self, distance):
return self.tello.move_backward(distance)
def telloMoveLeft(self, distance):
return self.tello.move_left(distance)
def telloMoveRight(self, distance):
return self.tello.move_right(distance)
def telloUp(self, dist):
return self.tello.move_up(dist)
def telloDown(self, dist):
return self.tello.move_down(dist)
def updateTrackBar(self):
self.my_tello_hand.setThr(self.hand_thr_bar.get())
def updateDistancebar(self):
self.distance = self.distance_bar.get()
print ('reset distance to %.1f' % self.distance)
def updateDegreebar(self):
self.degree = self.degree_bar.get()
        print ('reset degree to %d' % self.degree)
def on_keypress_w(self, event):
print ("up %d m" % self.distance)
self.telloUp(self.distance)
def on_keypress_s(self, event):
print ("down %d m" % self.distance)
self.telloDown(self.distance)
def on_keypress_a(self, event):
print ("ccw %d degree" % self.degree)
self.tello.rotate_ccw(self.degree)
def on_keypress_d(self, event):
print ("cw %d m" % self.degree)
self.tello.rotate_cw(self.degree)
def on_keypress_up(self, event):
print ("forward %d m" % self.distance)
self.telloMoveForward(self.distance)
def on_keypress_down(self, event):
print ("backward %d m" % self.distance)
self.telloMoveBackward(self.distance)
def on_keypress_left(self, event):
print ("left %d m" % self.distance)
self.telloMoveLeft(self.distance)
def on_keypress_right(self, event):
print ("right %d m" % self.distance)
self.telloMoveRight(self.distance)
def on_keypress_enter(self, event):
if self.frame is not None:
self.registerFace()
self.tmp_f.focus_set()
def onClose(self):
"""
set the stop event, cleanup the camera, and allow the rest of
the quit process to continue
"""
print("[INFO] closing...")
self.stopEvent.set()
del self.tello
self.root.quit()
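# A minimal usage sketch (not part of the original file). It assumes the
# accompanying "tello" module exposes a Tello class compatible with the calls
# used above (read(), send_command(), takeoff(), ...); the constructor
# arguments and output directory are assumptions.
def _example_run_ui():
    import tello  # assumed companion module
    drone = tello.Tello('', 8889)
    ui = TelloUI(drone, './img/')
    ui.root.mainloop()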
|
process_controller.py
|
import multiprocessing
import logging
import time
from inspect import getfullargspec
import sys
import os
import signal
import atexit
from collections import deque
from subprocess import Popen, DETACHED_PROCESS, CREATE_NEW_PROCESS_GROUP
import tcp_log_socket
"""The default test logger and logging server are globally implemented.
Future changes may change this to a class-based implementation."""
logger = logging.getLogger()
log_server_pid = None
log_server_dir = "tcp_log_server.py" #The name of the log file to be output; created relative to the directory of the logging server
"""Method to spawn the included test log server; uses globals at the current time due to pickling restrictions on class-implemented loggers."""
def use_included_logger():
global log_server
global logger
global log_server_pid
logging_socket = tcp_log_socket.local_logging_socket(__name__)
logger = logging_socket.logger
log_server = Popen([sys.executable, log_server_dir], close_fds=True, shell=True, creationflags=DETACHED_PROCESS | CREATE_NEW_PROCESS_GROUP)
log_server_pid = log_server.pid
"""Terminate the included logger and close its window."""
def kill_included_logger():
global log_server_pid
Popen("TASKKILL /F /PID {} /T".format(log_server_pid))
class ProcessController(object):
def __init__(self, target_method, included_logger=True):
"""[NOTE: GLOBAL REFERENCE] If included_logger is True, use the included logger and log server.
Reference to a single global which tracks if the included logging server has been started."""
self.included_logger = included_logger
if self.included_logger:
use_included_logger() #Reference to a global function; loggers belonging to the same class implementation cannot be pickled using multiprocessing.Process
global log_server_pid
self.log_server_pid = log_server_pid
"""Exit handlers for normal and abnormal termination"""
atexit.register(self.quit)
signal.signal(signal.SIGABRT, self.exit)
signal.signal(signal.SIGTERM, self.exit)
"""Target method to run with a pool of workers or in another process. If the method has no arguments, apply a wrapper to allow it to be used by worker processes."""
self.target_method = target_method #method to assign to a process or pool
self.target_method_no_args = False
self.req_args = getfullargspec(self.target_method).args #Get list of required positional arguments
if not len(self.req_args):
self.target_method_no_args = True
"""Initialization for pool functionality"""
self.pool = None #Contains a persistent pool; no pool initialized by default
self.pool_batch_id = 0
self.pool_cache = deque([]) #Cache which holds results of pending and recently finished batches of jobs
self.pool_results = deque([]) #Stores all results that have been created by a controller instance by order of worker completion
"""Initialization for process functionality"""
self.processes = deque([]) #List of created processes
self.process_queue = None #Multiprocessing queue used to get results from worker process
self.process_results = deque([]) #Stores all worker process results for processing
#Creates a new persistent pool with a number of processes or replaces an existing one
def create_new_pool(self, num_processes):
if self.pool is not None:
logger.info("Pool exists; waiting for existing jobs to finish before closing pool.")
self.pool.close() #Stop accepting new jobs on the old pool
self.pool.join() #Block until existing jobs finish before replacing the pool
self.pool = multiprocessing.Pool(num_processes)
logger.info("Pool with {} available processes created.".format(num_processes))
#Runs jobs for a given list of input parameters using the assigned target method and an existing pool
def use_pool(self, jobs, **kwargs):
if self.pool is None:
logger.warning("No pool exists; create a pool to run jobs.")
return #Without a pool the dispatch calls below would fail on None
if self.target_method_no_args: #if target method uses no positional arguments, use apply_async
results = []
for job in jobs:
result = self.pool.apply_async(self.target_method, (), **kwargs)
results.append(result)
elif any(isinstance(entry, list) for entry in jobs): #if any job entry is a nested argument list, use starmap
results = self.pool.starmap_async(self.target_method, jobs)
else:
results = self.pool.map_async(self.target_method, jobs)
logger.info("Created worker processes; running processes: {}".format(self.pool._processes))
self.pool_cache.appendleft(results)
logger.info("Caching pending batch of jobs in temporary storage.")
#Get unretrieved results from pool temporary cache and retrieve them. The method stores all results retrieved in a pool results queue and returns all unretrieved results to that point to the user.
def get_pool_results(self):
results = []
while len(self.pool_cache):
logger.info("Unretrieved results in pool cache: {} batches. Attempting to retrieve a batch.".format(len(self.pool_cache)))
result = self.pool_cache.pop()
if isinstance(result, list): #if any result is a list (if target_method does not use positional args)
batch = []
for entry in result:
try:
entry = entry.get()
batch.append(entry)
except Exception as e:
logger.warning("Result could not be retrieved; Pool Batch ID: {}".format(self.pool_batch_id))
logger.error("Specific cause for failure: {}".format(e))
result = batch
else:
try:
result = result.get()
logger.info("Result successfully retrieved for Pool Batch ID: {}".format(self.pool_batch_id))
except Exception as e:
logger.warning("Result could not be retrieved; Pool Batch ID: {}".format(self.pool_batch_id))
logger.error("Specific cause for failure: {}".format(e))
result = [result, "Pool Batch ID: {}".format(self.pool_batch_id)]
results.append(result)
self.pool_batch_id += 1
logger.info("Appending result to pool results queue.")
self.pool_results.appendleft(result)
logger.info("All retrieved results returned; {} batches retrieved.".format(len(results)))
return results
#Check process list for dead processes
def clean_process_list(self):
logger.info("Checking for dead or orphaned processes.")
for _ in range(len(self.processes)): #Check each process exactly once; re-queueing live processes in a while loop would never terminate
process = self.processes.pop()
if not process.is_alive():
logger.info("{} is no longer alive; terminating and removing process.".format(process.name))
process.terminate()
else:
self.processes.appendleft(process)
#Worker method which puts results from a target method into a queue, if any exist. Self-terminates on completion.
def worker(self, args, **kwargs):
worker_name = multiprocessing.current_process().name
if self.included_logger:
logging_socket = tcp_log_socket.local_logging_socket(worker_name)
logger = logging_socket.logger
else:
logger = logging.getLogger(worker_name)
logger.info("Running process {}; waiting for results.".format(worker_name))
print("kwargs: {}".format(kwargs))
if self.target_method_no_args:
results = self.target_method(**kwargs)
elif not isinstance(args, list):
results = self.target_method(args, **kwargs)
else:
results = self.target_method(*args, **kwargs)
results_queue = self.process_queue
logger.info("Ran target method, storing results and name of finished process.")
results_queue.put([results, worker_name])
logger.info("Process {} completed, exiting.".format(worker_name))
sys.exit(0)
#Creates and uses a process to run a job using the assigned target method.
def use_process(self, args, **kwargs):
if self.pool:
logger.warning("""Pool exists; close pool before using individual workers.""")
else:
self.clean_process_list()
if self.process_queue is None:
self.process_queue = multiprocessing.Queue()
process = multiprocessing.Process(target=self.worker, args=(args,), kwargs=kwargs)
logger.info("Created process; process name is {}".format(process.name))
self.processes.append(process)
process.start()
logger.info("Process {} started.".format(process.name))
#Dump the results from worker processes to a sorted deque. Return the results as well.
def get_process_results(self):
results = []
if self.process_queue is None:
logging.info("No worker results queue; use a process to retrieve results.")
else:
while self.process_queue.qsize() > 0:
logging.info("Worker results queue is not empty: {} entries. Getting result from queue.".format(self.process_queue.qsize()))
result = self.process_queue.get()
results.append(result)
logging.info("Storing result to controller process results queue.")
self.process_results.appendleft(result)
logging.info("Worker results queue is empty, returning retrieved results.")
return results
#Waits for workers to finish pending jobs and signals them to exit. If the included test logger is used, the logger is closed and its process is killed.
def quit(self):
if len(self.processes):
self.clean_process_list()
if self.pool is not None:
self.pool.close()
if self.included_logger:
kill_included_logger() #Reference to global (module-level) method which terminates the included test logger
#Clears the controller as well as signaling all pending jobs and workers to exit
def clear(self):
self.quit()
self = None
#Quick cleanup called in the event of interruptions or unexpected terminations. Pending jobs and results will be lost!
def exit(self, signal, frame):
if len(self.processes):
for process in self.processes:
process.terminate()
if self.pool is not None:
self.pool.terminate()
if self.included_logger:
kill_included_logger() #Reference to global (module-level) method
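# A minimal usage sketch (not part of the original module), assuming a hypothetical
# module-level job function "square"; included_logger is disabled so the example does
# not depend on the bundled TCP log server being available.
def square(x):
    return x * x
if __name__ == "__main__":
    controller = ProcessController(square, included_logger=False)
    controller.create_new_pool(2)
    controller.use_pool([1, 2, 3, 4]) #Flat inputs are dispatched with map_async
    print(controller.get_pool_results()) #Each batch is returned together with its Pool Batch ID
    controller.quit()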
|
test_single_exception.py
|
"""
All tests should fail.
"""
from threading import Thread
def test_reraise(reraise):
def run():
with reraise:
assert False
Thread(target=run).start()
def test_reraise_wrap(reraise):
def run():
assert False
Thread(target=reraise.wrap(run)).start()
def test_reraise_wrap_decorator(reraise):
@reraise.wrap
def run():
assert False
Thread(target=run).start()
def test_exception(reraise):
assert False
|
conftest.py
|
# -*- coding: utf-8 -*-
import pytest
import subprocess
import threading
from urllib.parse import urlparse
from .prepare import make_specs, default_appname, default_sha, default_network_name, default_podname, default_cpu_quota, default_memory, default_git, make_specs_text, default_combo_name, default_env_name, default_env, core_online
from citadel.app import create_app
from citadel.config import BUILD_ZONE
from citadel.ext import db, rds
from citadel.libs.utils import logger
from citadel.models.app import App, Release, Combo
from citadel.rpc import core_pb2 as pb
from citadel.rpc.client import get_core
json_headers = {'Content-Type': 'application/json'}
@pytest.fixture
def app(request):
app = create_app()
app.config['DEBUG'] = True
ctx = app.app_context()
ctx.push()
def tear_down():
ctx.pop()
request.addfinalizer(tear_down)
return app
@pytest.fixture
def client(app):
with app.test_client() as client:
yield client
@pytest.fixture
def test_db(request, app):
def check_service_host(uri):
"""只能在本地或者容器里跑测试"""
u = urlparse(uri)
return u.hostname in ('localhost', '127.0.0.1') or 'hub.ricebook.net__ci__' in u.hostname
if not (check_service_host(app.config['SQLALCHEMY_DATABASE_URI']) and check_service_host(app.config['REDIS_URL'])):
raise Exception('Need to run test on localhost or in container')
db.create_all()
app = App.get_or_create(default_appname, git=default_git)
app.add_env_set(default_env_name, default_env)
Release.create(app, default_sha, make_specs_text())
Combo.create(default_appname, default_combo_name, 'web', default_podname,
networks=[default_network_name], cpu_quota=default_cpu_quota,
memory=default_memory, count=1, envname=default_env_name)
def teardown():
db.session.remove()
db.drop_all()
rds.flushdb()
request.addfinalizer(teardown)
@pytest.fixture(scope='session')
def test_app_image():
if not core_online:
pytest.skip(msg='one or more eru-core is offline, skip core-related tests')
specs = make_specs()
appname = default_appname
builds_map = {stage_name: pb.Build(**build) for stage_name, build in specs.builds.items()}
core_builds = pb.Builds(stages=specs.stages, builds=builds_map)
opts = pb.BuildImageOptions(name=appname,
user=appname,
uid=12345,
tag=default_sha,
builds=core_builds)
core = get_core(BUILD_ZONE)
build_image_messages = list(core.build_image(opts))
image_tag = ''
for m in build_image_messages:
assert not m.error
image_tag = m.progress
assert '{}:{}'.format(default_appname, default_sha) in image_tag
return image_tag
@pytest.fixture
def watch_etcd(request, test_db):
p = subprocess.Popen(
'bin/run-etcd-watcher --zone test-zone --sync'.split(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
logger.info('Starting watch_etcd process %s', p)
def async_thread_output(p):
while p.poll() is None:
# A None value indicates that the process hasn't terminated yet.
print(p.stdout.readline())
t = threading.Thread(target=async_thread_output, args=(p, ), daemon=True)
t.start()
def teardown():
logger.info('Terminating watch_etcd process %s', p)
p.terminate()
t.join(10)
request.addfinalizer(teardown)
return p
|
__init__.py
|
"""Support for the Fibaro devices."""
from collections import defaultdict
import logging
from typing import Optional
from fiblary3.client.v4.client import Client as FibaroClient, StateHandler
import voluptuous as vol
from homeassistant.const import (
ATTR_ARMED,
ATTR_BATTERY_LEVEL,
CONF_DEVICE_CLASS,
CONF_EXCLUDE,
CONF_ICON,
CONF_PASSWORD,
CONF_URL,
CONF_USERNAME,
CONF_WHITE_VALUE,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import convert, slugify
_LOGGER = logging.getLogger(__name__)
ATTR_CURRENT_ENERGY_KWH = "current_energy_kwh"
ATTR_CURRENT_POWER_W = "current_power_w"
CONF_COLOR = "color"
CONF_DEVICE_CONFIG = "device_config"
CONF_DIMMING = "dimming"
CONF_GATEWAYS = "gateways"
CONF_PLUGINS = "plugins"
CONF_RESET_COLOR = "reset_color"
DOMAIN = "fibaro"
FIBARO_CONTROLLERS = "fibaro_controllers"
FIBARO_DEVICES = "fibaro_devices"
FIBARO_COMPONENTS = [
"binary_sensor",
"climate",
"cover",
"light",
"scene",
"sensor",
"lock",
"switch",
]
FIBARO_TYPEMAP = {
"com.fibaro.multilevelSensor": "sensor",
"com.fibaro.binarySwitch": "switch",
"com.fibaro.multilevelSwitch": "switch",
"com.fibaro.FGD212": "light",
"com.fibaro.FGR": "cover",
"com.fibaro.doorSensor": "binary_sensor",
"com.fibaro.doorWindowSensor": "binary_sensor",
"com.fibaro.FGMS001": "binary_sensor",
"com.fibaro.heatDetector": "binary_sensor",
"com.fibaro.lifeDangerSensor": "binary_sensor",
"com.fibaro.smokeSensor": "binary_sensor",
"com.fibaro.remoteSwitch": "switch",
"com.fibaro.sensor": "sensor",
"com.fibaro.colorController": "light",
"com.fibaro.securitySensor": "binary_sensor",
"com.fibaro.hvac": "climate",
"com.fibaro.setpoint": "climate",
"com.fibaro.FGT001": "climate",
"com.fibaro.thermostatDanfoss": "climate",
"com.fibaro.doorLock": "lock",
}
DEVICE_CONFIG_SCHEMA_ENTRY = vol.Schema(
{
vol.Optional(CONF_DIMMING): cv.boolean,
vol.Optional(CONF_COLOR): cv.boolean,
vol.Optional(CONF_WHITE_VALUE): cv.boolean,
vol.Optional(CONF_RESET_COLOR): cv.boolean,
vol.Optional(CONF_DEVICE_CLASS): cv.string,
vol.Optional(CONF_ICON): cv.string,
}
)
FIBARO_ID_LIST_SCHEMA = vol.Schema([cv.string])
GATEWAY_CONFIG = vol.Schema(
{
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_URL): cv.url,
vol.Optional(CONF_PLUGINS, default=False): cv.boolean,
vol.Optional(CONF_EXCLUDE, default=[]): FIBARO_ID_LIST_SCHEMA,
vol.Optional(CONF_DEVICE_CONFIG, default={}): vol.Schema(
{cv.string: DEVICE_CONFIG_SCHEMA_ENTRY}
),
},
extra=vol.ALLOW_EXTRA,
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{vol.Required(CONF_GATEWAYS): vol.All(cv.ensure_list, [GATEWAY_CONFIG])}
)
},
extra=vol.ALLOW_EXTRA,
)
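# A minimal sketch (not part of the original component) of the structure CONFIG_SCHEMA accepts;
# the URL and credentials below are hypothetical placeholders.
_EXAMPLE_FIBARO_CONFIG = {
    DOMAIN: {
        CONF_GATEWAYS: [
            {
                CONF_URL: "http://192.168.1.2/api/",
                CONF_USERNAME: "admin",
                CONF_PASSWORD: "secret",
                # Optional keys: CONF_PLUGINS, CONF_EXCLUDE, CONF_DEVICE_CONFIG
            }
        ]
    }
}
# CONFIG_SCHEMA(_EXAMPLE_FIBARO_CONFIG) would return the validated config with defaults filled in.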
class FibaroController:
"""Initiate Fibaro Controller Class."""
def __init__(self, config):
"""Initialize the Fibaro controller."""
self._client = FibaroClient(
config[CONF_URL], config[CONF_USERNAME], config[CONF_PASSWORD]
)
self._scene_map = None
# Whether to import devices from plugins
self._import_plugins = config[CONF_PLUGINS]
self._device_config = config[CONF_DEVICE_CONFIG]
self._room_map = None # Mapping roomId to room object
self._device_map = None # Mapping deviceId to device object
self.fibaro_devices = None # List of devices by type
self._callbacks = {} # Update value callbacks by deviceId
self._state_handler = None # Fiblary's StateHandler object
self._excluded_devices = config[CONF_EXCLUDE]
self.hub_serial = None # Unique serial number of the hub
def connect(self):
"""Start the communication with the Fibaro controller."""
try:
login = self._client.login.get()
info = self._client.info.get()
self.hub_serial = slugify(info.serialNumber)
except AssertionError:
_LOGGER.error("Can't connect to Fibaro HC. Please check URL")
return False
if login is None or login.status is False:
_LOGGER.error(
"Invalid login for Fibaro HC. Please check username and password"
)
return False
self._room_map = {room.id: room for room in self._client.rooms.list()}
self._read_devices()
self._read_scenes()
return True
def enable_state_handler(self):
"""Start StateHandler thread for monitoring updates."""
self._state_handler = StateHandler(self._client, self._on_state_change)
def disable_state_handler(self):
"""Stop StateHandler thread used for monitoring updates."""
self._state_handler.stop()
self._state_handler = None
def _on_state_change(self, state):
"""Handle change report received from the HomeCenter."""
callback_set = set()
for change in state.get("changes", []):
try:
dev_id = change.pop("id")
if dev_id not in self._device_map.keys():
continue
device = self._device_map[dev_id]
for property_name, value in change.items():
if property_name == "log":
if value and value != "transfer OK":
_LOGGER.debug("LOG %s: %s", device.friendly_name, value)
continue
if property_name == "logTemp":
continue
if property_name in device.properties:
device.properties[property_name] = value
_LOGGER.debug(
"<- %s.%s = %s", device.ha_id, property_name, str(value)
)
else:
_LOGGER.warning("%s.%s not found", device.ha_id, property_name)
if dev_id in self._callbacks:
callback_set.add(dev_id)
except (ValueError, KeyError):
pass
for item in callback_set:
self._callbacks[item]()
def register(self, device_id, callback):
"""Register device with a callback for updates."""
self._callbacks[device_id] = callback
def get_children(self, device_id):
"""Get a list of child devices."""
return [
device
for device in self._device_map.values()
if device.parentId == device_id
]
def get_children2(self, device_id, endpoint_id):
"""Get a list of child devices for the same endpoint."""
return [
device
for device in self._device_map.values()
if device.parentId == device_id
and (
"endPointId" not in device.properties
or device.properties.endPointId == endpoint_id
)
]
def get_siblings(self, device):
"""Get the siblings of a device."""
if "endPointId" in device.properties:
return self.get_children2(
self._device_map[device.id].parentId,
self._device_map[device.id].properties.endPointId,
)
return self.get_children(self._device_map[device.id].parentId)
@staticmethod
def _map_device_to_type(device):
"""Map device to HA device type."""
# Use our lookup table to identify device type
device_type = None
if "type" in device:
device_type = FIBARO_TYPEMAP.get(device.type)
if device_type is None and "baseType" in device:
device_type = FIBARO_TYPEMAP.get(device.baseType)
# We can also identify device type by its capabilities
if device_type is None:
if "setBrightness" in device.actions:
device_type = "light"
elif "turnOn" in device.actions:
device_type = "switch"
elif "open" in device.actions:
device_type = "cover"
elif "secure" in device.actions:
device_type = "lock"
elif "value" in device.properties:
if device.properties.value in ("true", "false"):
device_type = "binary_sensor"
else:
device_type = "sensor"
# Switches that control lights should show up as lights
if device_type == "switch" and device.properties.get("isLight", False):
device_type = "light"
return device_type
def _read_scenes(self):
scenes = self._client.scenes.list()
self._scene_map = {}
for device in scenes:
if "name" not in device or "id" not in device:
continue
device.fibaro_controller = self
if "roomID" not in device or device.roomID == 0:
room_name = "Unknown"
else:
room_name = self._room_map[device.roomID].name
device.room_name = room_name
device.friendly_name = f"{room_name} {device.name}"
device.ha_id = (
f"scene_{slugify(room_name)}_{slugify(device.name)}_{device.id}"
)
device.unique_id_str = f"{self.hub_serial}.scene.{device.id}"
self._scene_map[device.id] = device
self.fibaro_devices["scene"].append(device)
_LOGGER.debug("%s scene -> %s", device.ha_id, device)
def _read_devices(self):
"""Read and process the device list."""
devices = self._client.devices.list()
self._device_map = {}
self.fibaro_devices = defaultdict(list)
last_climate_parent = None
last_endpoint = None
for device in devices:
try:
if "name" not in device or "id" not in device:
continue
device.fibaro_controller = self
if "roomID" not in device or device.roomID == 0:
room_name = "Unknown"
else:
room_name = self._room_map[device.roomID].name
device.room_name = room_name
device.friendly_name = f"{room_name} {device.name}"
device.ha_id = (
f"{slugify(room_name)}_{slugify(device.name)}_{device.id}"
)
if (
device.enabled
and (
"isPlugin" not in device
or (not device.isPlugin or self._import_plugins)
)
and device.ha_id not in self._excluded_devices
):
device.mapped_type = self._map_device_to_type(device)
device.device_config = self._device_config.get(device.ha_id, {})
else:
device.mapped_type = None
dtype = device.mapped_type
if dtype is None:
continue
device.unique_id_str = f"{self.hub_serial}.{device.id}"
self._device_map[device.id] = device
_LOGGER.debug(
"%s (%s, %s) -> %s %s",
device.ha_id,
device.type,
device.baseType,
dtype,
str(device),
)
if dtype != "climate":
self.fibaro_devices[dtype].append(device)
continue
# We group climate devices into groups with the same
# endPointID belonging to the same parent device.
if "endPointId" in device.properties:
_LOGGER.debug(
"climate device: %s, endPointId: %s",
device.ha_id,
device.properties.endPointId,
)
else:
_LOGGER.debug("climate device: %s, no endPointId", device.ha_id)
# If a sibling of this device has been added, skip this one
# otherwise add the first visible device in the group
# which is a hack, but solves a problem with FGT having
# hidden compatibility devices before the real device
if last_climate_parent != device.parentId or (
"endPointId" in device.properties
and last_endpoint != device.properties.endPointId
):
_LOGGER.debug("Handle separately")
self.fibaro_devices[dtype].append(device)
last_climate_parent = device.parentId
if "endPointId" in device.properties:
last_endpoint = device.properties.endPointId
else:
last_endpoint = 0
else:
_LOGGER.debug("not handling separately")
except (KeyError, ValueError):
pass
def setup(hass, base_config):
"""Set up the Fibaro Component."""
if DOMAIN not in base_config:
# AIS new config_flow way
hass.data[DOMAIN] = {}
hass.data[DOMAIN][CONF_GATEWAYS] = {}
hass.data[FIBARO_CONTROLLERS] = {}
return True
# old configuration.yaml way
gateways = base_config[DOMAIN][CONF_GATEWAYS]
hass.data[FIBARO_CONTROLLERS] = {}
def stop_fibaro(event):
"""Stop Fibaro Thread."""
_LOGGER.info("Shutting down Fibaro connection")
for controller in hass.data[FIBARO_CONTROLLERS].values():
controller.disable_state_handler()
hass.data[FIBARO_DEVICES] = {}
for component in FIBARO_COMPONENTS:
hass.data[FIBARO_DEVICES][component] = []
for gateway in gateways:
controller = FibaroController(gateway)
if controller.connect():
hass.data[FIBARO_CONTROLLERS][controller.hub_serial] = controller
for component in FIBARO_COMPONENTS:
hass.data[FIBARO_DEVICES][component].extend(
controller.fibaro_devices[component]
)
if hass.data[FIBARO_CONTROLLERS]:
for component in FIBARO_COMPONENTS:
discovery.load_platform(hass, component, DOMAIN, {}, base_config)
for controller in hass.data[FIBARO_CONTROLLERS].values():
controller.enable_state_handler()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_fibaro)
return True
return False
class FibaroDevice(Entity):
"""Representation of a Fibaro device entity."""
def __init__(self, fibaro_device):
"""Initialize the device."""
self.fibaro_device = fibaro_device
self.controller = fibaro_device.fibaro_controller
self._name = fibaro_device.friendly_name
self.ha_id = fibaro_device.ha_id
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.controller.register(self.fibaro_device.id, self._update_callback)
def _update_callback(self):
"""Update the state."""
self.schedule_update_ha_state(True)
@property
def device_info(self):
sw_version = 1
manufacturer = "Fibaro"
if "properties" in self.fibaro_device:
if "zwaveVersion" in self.fibaro_device.properties:
sw_version = self.fibaro_device.properties.zwaveVersion
if "zwaveCompany" in self.fibaro_device.properties:
manufacturer = self.fibaro_device.properties.zwaveCompany
return {
"identifiers": {(DOMAIN, self.ha_id)},
"name": self._name,
"manufacturer": manufacturer,
"model": self.fibaro_device.type,
"sw_version": sw_version,
"via_device": None,
}
@property
def level(self):
"""Get the level of Fibaro device."""
if "value" in self.fibaro_device.properties:
return self.fibaro_device.properties.value
return None
@property
def level2(self):
"""Get the tilt level of Fibaro device."""
if "value2" in self.fibaro_device.properties:
return self.fibaro_device.properties.value2
return None
def dont_know_message(self, action):
"""Make a warning in case we don't know how to perform an action."""
_LOGGER.warning(
"Not sure how to setValue: %s (available actions: %s)",
str(self.ha_id),
str(self.fibaro_device.actions),
)
def set_level(self, level):
"""Set the level of Fibaro device."""
self.action("setValue", level)
if "value" in self.fibaro_device.properties:
self.fibaro_device.properties.value = level
if "brightness" in self.fibaro_device.properties:
self.fibaro_device.properties.brightness = level
def set_level2(self, level):
"""Set the level2 of Fibaro device."""
self.action("setValue2", level)
if "value2" in self.fibaro_device.properties:
self.fibaro_device.properties.value2 = level
def call_turn_on(self):
"""Turn on the Fibaro device."""
self.action("turnOn")
def call_turn_off(self):
"""Turn off the Fibaro device."""
self.action("turnOff")
def call_set_color(self, red, green, blue, white):
"""Set the color of Fibaro device."""
red = int(max(0, min(255, red)))
green = int(max(0, min(255, green)))
blue = int(max(0, min(255, blue)))
white = int(max(0, min(255, white)))
color_str = f"{red},{green},{blue},{white}"
self.fibaro_device.properties.color = color_str
self.action("setColor", str(red), str(green), str(blue), str(white))
def action(self, cmd, *args):
"""Perform an action on the Fibaro HC."""
if cmd in self.fibaro_device.actions:
getattr(self.fibaro_device, cmd)(*args)
_LOGGER.debug("-> %s.%s%s called", str(self.ha_id), str(cmd), str(args))
else:
self.dont_know_message(cmd)
@property
def current_power_w(self):
"""Return the current power usage in W."""
if "power" in self.fibaro_device.properties:
power = self.fibaro_device.properties.power
if power:
return convert(power, float, 0.0)
else:
return None
@property
def current_binary_state(self):
"""Return the current binary state."""
if self.fibaro_device.properties.value == "false":
return False
if (
self.fibaro_device.properties.value == "true"
or int(self.fibaro_device.properties.value) > 0
):
return True
return False
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self.fibaro_device.unique_id_str
@property
def name(self) -> Optional[str]:
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""Get polling requirement from fibaro device."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {"fibaro_id": self.fibaro_device.id}
try:
if "battery" in self.fibaro_device.interfaces:
attr[ATTR_BATTERY_LEVEL] = int(
self.fibaro_device.properties.batteryLevel
)
if "fibaroAlarmArm" in self.fibaro_device.interfaces:
attr[ATTR_ARMED] = bool(self.fibaro_device.properties.armed)
if "power" in self.fibaro_device.interfaces:
attr[ATTR_CURRENT_POWER_W] = convert(
self.fibaro_device.properties.power, float, 0.0
)
if "energy" in self.fibaro_device.interfaces:
attr[ATTR_CURRENT_ENERGY_KWH] = convert(
self.fibaro_device.properties.energy, float, 0.0
)
except (ValueError, KeyError):
pass
return attr
# AIS
async def async_setup_entry(hass, config_entry):
"""Set up config entry."""
import threading
# discover_devices is a sync function.
t = threading.Thread(target=discover_devices, args=(hass, config_entry))
t.start()
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
# TODO await hass.config_entries.async_forward_entry_unload(config_entry, "xxx")
return True
def discover_devices(hass, config_entry):
"""
Run periodically to discover new devices.
Currently it's only run at startup.
"""
# ------------
gateway = {
CONF_URL: config_entry.data[CONF_URL],
CONF_USERNAME: config_entry.data[CONF_USERNAME],
CONF_PASSWORD: config_entry.data[CONF_PASSWORD],
CONF_PLUGINS: False,
CONF_DEVICE_CONFIG: {},
CONF_EXCLUDE: [],
}
def stop_fibaro(event):
"""Stop Fibaro Thread."""
_LOGGER.info("Shutting down Fibaro connection")
for controller in hass.data[FIBARO_CONTROLLERS].values():
controller.disable_state_handler()
hass.data[FIBARO_DEVICES] = {}
for component in FIBARO_COMPONENTS:
hass.data[FIBARO_DEVICES][component] = []
controller = FibaroController(gateway)
if controller.connect():
hass.data[FIBARO_CONTROLLERS][controller.hub_serial] = controller
for component in FIBARO_COMPONENTS:
hass.data[FIBARO_DEVICES][component].extend(
controller.fibaro_devices[component]
)
if hass.data[FIBARO_CONTROLLERS]:
for component in FIBARO_COMPONENTS:
# discovery.load_platform(hass, component, DOMAIN, {}, config_entry)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, component)
)
for controller in hass.data[FIBARO_CONTROLLERS].values():
controller.enable_state_handler()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_fibaro)
return True
return False
|
test_content.py
|
from __future__ import print_function
import os
import re
import sys
import json
import time
import argparse
import threading
import subprocess
import traceback
from time import sleep
from datetime import datetime
from distutils.version import LooseVersion
import urllib3
import requests
import demisto_client.demisto_api
from slackclient import SlackClient
from Tests.mock_server import MITMProxy, AMIConnection
from Tests.test_integration import Docker, test_integration, disable_all_integrations
from Tests.test_dependencies import get_used_integrations, get_tests_allocation_for_threads
from demisto_sdk.commands.common.constants import RUN_ALL_TESTS_FORMAT, FILTER_CONF, PB_Status
from demisto_sdk.commands.common.tools import print_color, print_error, print_warning, \
LOG_COLORS, str2bool
# Disable insecure warnings
urllib3.disable_warnings()
SERVER_URL = "https://{}"
INTEGRATIONS_CONF = "./Tests/integrations_file.txt"
FAILED_MATCH_INSTANCE_MSG = "{} Failed to run.\n There are {} instances of {}, please select one of them by using " \
"the instance_name argument in conf.json. The options are:\n{}"
SERVICE_RESTART_TIMEOUT = 300
SERVICE_RESTART_POLLING_INTERVAL = 5
SLACK_MEM_CHANNEL_ID = 'CM55V7J8K'
def options_handler():
parser = argparse.ArgumentParser(description='Utility for batch action on incidents')
parser.add_argument('-k', '--apiKey', help='The Demisto API key for the server', required=True)
parser.add_argument('-s', '--server', help='The server URL to connect to')
parser.add_argument('-c', '--conf', help='Path to conf file', required=True)
parser.add_argument('-e', '--secret', help='Path to secret conf file')
parser.add_argument('-n', '--nightly', type=str2bool, help='Run nightly tests')
parser.add_argument('-t', '--slack', help='The token for slack', required=True)
parser.add_argument('-a', '--circleci', help='The token for circleci', required=True)
parser.add_argument('-b', '--buildNumber', help='The build number', required=True)
parser.add_argument('-g', '--buildName', help='The build name', required=True)
parser.add_argument('-i', '--isAMI', type=str2bool, help='is AMI build or not', default=False)
parser.add_argument('-m', '--memCheck', type=str2bool,
help='Should trigger memory checks or not. The slack channel to check the data is: '
'dmst_content_nightly_memory_data', default=False)
parser.add_argument('-d', '--serverVersion', help='Which server version to run the '
'tests on(Valid only when using AMI)', default="NonAMI")
parser.add_argument('-l', '--testsList', help='List of specific, comma separated '
'tests to run')
options = parser.parse_args()
tests_settings = TestsSettings(options)
return tests_settings
class TestsSettings:
def __init__(self, options):
self.api_key = options.apiKey
self.server = options.server
self.conf_path = options.conf
self.secret_conf_path = options.secret
self.nightly = options.nightly
self.slack = options.slack
self.circleci = options.circleci
self.buildNumber = options.buildNumber
self.buildName = options.buildName
self.isAMI = options.isAMI
self.memCheck = options.memCheck
self.serverVersion = options.serverVersion
self.serverNumericVersion = None
self.specific_tests_to_run = self.parse_tests_list_arg(options.testsList)
self.is_local_run = (self.server is not None)
@staticmethod
def parse_tests_list_arg(tests_list):
tests_to_run = tests_list.split(",") if tests_list else []
return tests_to_run
class PrintJob:
def __init__(self, message_to_print, print_function_to_execute, message_color=None):
self.print_function_to_execute = print_function_to_execute
self.message_to_print = message_to_print
self.message_color = message_color
def execute_print(self):
if self.message_color:
self.print_function_to_execute(self.message_to_print, self.message_color)
else:
self.print_function_to_execute(self.message_to_print)
class ParallelPrintsManager:
def __init__(self, number_of_threads):
self.threads_print_jobs = [[] for i in range(number_of_threads)]
self.print_lock = threading.Lock()
self.threads_last_update_times = [time.time() for i in range(number_of_threads)]
def should_update_thread_status(self, thread_index):
current_time = time.time()
thread_last_update = self.threads_last_update_times[thread_index]
return current_time - thread_last_update > 300
def add_print_job(self, message_to_print, print_function_to_execute, thread_index, message_color=None,
include_timestamp=False):
if include_timestamp:
message_to_print = f'[{datetime.now()}] {message_to_print}'
print_job = PrintJob(message_to_print, print_function_to_execute, message_color=message_color)
self.threads_print_jobs[thread_index].append(print_job)
if self.should_update_thread_status(thread_index):
print("Thread {} is still running.".format(thread_index))
self.threads_last_update_times[thread_index] = time.time()
def execute_thread_prints(self, thread_index):
self.print_lock.acquire()
prints_to_execute = self.threads_print_jobs[thread_index]
for print_job in prints_to_execute:
print_job.execute_print()
self.print_lock.release()
self.threads_print_jobs[thread_index] = []
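# A minimal sketch (not part of the original module) of how ParallelPrintsManager batches
# output per thread and flushes it atomically; thread index 0 is an arbitrary example.
#   prints_manager = ParallelPrintsManager(number_of_threads=1)
#   prints_manager.add_print_job('starting test', print, 0, include_timestamp=True)
#   prints_manager.add_print_job('test passed', print_color, 0, message_color=LOG_COLORS.GREEN)
#   prints_manager.execute_thread_prints(0)  # prints both jobs under the shared lock, then clears the queue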
class TestsDataKeeper:
def __init__(self):
self.succeeded_playbooks = []
self.failed_playbooks = []
self.skipped_tests = []
self.skipped_integrations = []
self.rerecorded_tests = []
self.empty_files = []
self.unmockable_integrations = {}
def add_tests_data(self, succeed_playbooks, failed_playbooks, skipped_tests, skipped_integration,
unmockable_integrations):
# Using multiple appends and not extend since append is guaranteed to be thread safe
for playbook in succeed_playbooks:
self.succeeded_playbooks.append(playbook)
for playbook in failed_playbooks:
self.failed_playbooks.append(playbook)
for playbook in skipped_tests:
self.skipped_tests.append(playbook)
for playbook in skipped_integration:
self.skipped_integrations.append(playbook)
for playbook_id, reason in unmockable_integrations.items():
self.unmockable_integrations[playbook_id] = reason
def add_proxy_related_test_data(self, proxy):
# Using multiple appends and not extend since append is guaranteed to be thread safe
for playbook_id in proxy.rerecorded_tests:
self.rerecorded_tests.append(playbook_id)
for playbook_id in proxy.empty_files:
self.empty_files.append(playbook_id)
def print_test_summary(tests_data_keeper, is_ami=True):
succeed_playbooks = tests_data_keeper.succeeded_playbooks
failed_playbooks = tests_data_keeper.failed_playbooks
skipped_tests = tests_data_keeper.skipped_tests
unmockable_integrations = tests_data_keeper.unmockable_integrations
skipped_integration = tests_data_keeper.skipped_integrations
rerecorded_tests = tests_data_keeper.rerecorded_tests
empty_files = tests_data_keeper.empty_files
succeed_count = len(succeed_playbooks)
failed_count = len(failed_playbooks)
skipped_count = len(skipped_tests)
rerecorded_count = len(rerecorded_tests) if is_ami else 0
empty_mocks_count = len(empty_files) if is_ami else 0
unmockable_integrations_count = len(unmockable_integrations)
print('\nTEST RESULTS:')
tested_playbooks_message = '\t Number of playbooks tested - ' + str(succeed_count + failed_count)
print(tested_playbooks_message)
succeeded_playbooks_message = '\t Number of succeeded tests - ' + str(succeed_count)
print_color(succeeded_playbooks_message, LOG_COLORS.GREEN)
if failed_count > 0:
failed_tests_message = '\t Number of failed tests - ' + str(failed_count) + ':'
print_error(failed_tests_message)
for playbook_id in failed_playbooks:
print_error('\t - ' + playbook_id)
if rerecorded_count > 0:
recording_warning = '\t Tests with failed playback and successful re-recording - ' + str(rerecorded_count) + ':'
print_warning(recording_warning)
for playbook_id in rerecorded_tests:
print_warning('\t - ' + playbook_id)
if empty_mocks_count > 0:
empty_mock_successes_msg = '\t Successful tests with empty mock files - ' + str(empty_mocks_count) + ':'
print(empty_mock_successes_msg)
proxy_explanation = '\t (either there were no http requests or no traffic is passed through the proxy.\n'\
'\t Investigate the playbook and the integrations.\n'\
'\t If the integration has no http traffic, add to unmockable_integrations in conf.json)'
print(proxy_explanation)
for playbook_id in empty_files:
print('\t - ' + playbook_id)
if len(skipped_integration) > 0:
skipped_integrations_warning = '\t Number of skipped integrations - ' + str(len(skipped_integration)) + ':'
print_warning(skipped_integrations_warning)
for playbook_id in skipped_integration:
print_warning('\t - ' + playbook_id)
if skipped_count > 0:
skipped_tests_warning = '\t Number of skipped tests - ' + str(skipped_count) + ':'
print_warning(skipped_tests_warning)
for playbook_id in skipped_tests:
print_warning('\t - ' + playbook_id)
if unmockable_integrations_count > 0:
unmockable_warning = '\t Number of unmockable integrations - ' + str(unmockable_integrations_count) + ':'
print_warning(unmockable_warning)
for playbook_id, reason in unmockable_integrations.items():
print_warning('\t - ' + playbook_id + ' - ' + reason)
def update_test_msg(integrations, test_message):
if integrations:
integrations_names = [integration['name'] for integration in
integrations]
test_message = test_message + ' with integration(s): ' + ','.join(
integrations_names)
return test_message
def turn_off_telemetry(server, demisto_api_key):
"""
Turn off telemetry on the AMI instance
:param server: demisto server to connect to
:param demisto_api_key: api key to use for connection
:return: None
"""
client = demisto_client.configure(base_url=server, api_key=demisto_api_key, verify_ssl=False)
body, status_code, _ = demisto_client.generic_request_func(self=client, method='POST',
path='/telemetry?status=notelemetry')
if status_code != 200:
print_error('Request to turn off telemetry failed with status code "{}"\n{}'.format(status_code, body))
sys.exit(1)
def reset_containers(server, demisto_api_key, prints_manager, thread_index):
prints_manager.add_print_job('Resetting containers', print, thread_index)
client = demisto_client.configure(base_url=server, api_key=demisto_api_key, verify_ssl=False)
body, status_code, _ = demisto_client.generic_request_func(self=client, method='POST',
path='/containers/reset')
if status_code != 200:
error_msg = 'Request to reset containers failed with status code "{}"\n{}'.format(status_code, body)
prints_manager.add_print_job(error_msg, print_error, thread_index)
prints_manager.execute_thread_prints(thread_index)
sys.exit(1)
sleep(10)
def has_unmockable_integration(integrations, unmockable_integrations):
return list(set(x['name'] for x in integrations).intersection(unmockable_integrations.keys()))
def get_docker_limit():
process = subprocess.Popen(['cat', '/sys/fs/cgroup/memory/memory.limit_in_bytes'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout, stderr
def get_docker_processes_data():
process = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout, stderr
def get_docker_memory_data():
process = subprocess.Popen(['cat', '/sys/fs/cgroup/memory/memory.usage_in_bytes'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout, stderr
def send_slack_message(slack, channel, text, user_name, as_user):
sc = SlackClient(slack)
sc.api_call(
"chat.postMessage",
channel=channel,
username=user_name,
as_user=as_user,
text=text,
mrkdwn='true'
)
def run_test_logic(tests_settings, c, failed_playbooks, integrations, playbook_id, succeed_playbooks,
test_message, test_options, slack, circle_ci, build_number, server_url, build_name,
prints_manager, thread_index=0, is_mock_run=False):
status, inc_id = test_integration(c, server_url, integrations, playbook_id, prints_manager, test_options,
is_mock_run, thread_index=thread_index)
# c.api_client.pool.close()
if status == PB_Status.COMPLETED:
prints_manager.add_print_job('PASS: {} succeed'.format(test_message), print_color, thread_index,
message_color=LOG_COLORS.GREEN)
succeed_playbooks.append(playbook_id)
elif status == PB_Status.NOT_SUPPORTED_VERSION:
not_supported_version_message = 'PASS: {} skipped - not supported version'.format(test_message)
prints_manager.add_print_job(not_supported_version_message, print, thread_index)
succeed_playbooks.append(playbook_id)
else:
error_message = 'Failed: {} failed'.format(test_message)
prints_manager.add_print_job(error_message, print_error, thread_index)
playbook_id_with_mock = playbook_id
if not is_mock_run:
playbook_id_with_mock += " (Mock Disabled)"
failed_playbooks.append(playbook_id_with_mock)
if not tests_settings.is_local_run:
notify_failed_test(slack, circle_ci, playbook_id, build_number, inc_id, server_url, build_name)
succeed = status in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION)
return succeed
# run the test using a real instance, record traffic.
def run_and_record(tests_settings, c, proxy, failed_playbooks, integrations, playbook_id, succeed_playbooks,
test_message, test_options, slack, circle_ci, build_number, server_url, build_name,
prints_manager, thread_index=0):
proxy.set_tmp_folder()
proxy.start(playbook_id, record=True, thread_index=thread_index, prints_manager=prints_manager)
succeed = run_test_logic(tests_settings, c, failed_playbooks, integrations, playbook_id, succeed_playbooks,
test_message, test_options, slack, circle_ci, build_number, server_url, build_name,
prints_manager, thread_index=thread_index, is_mock_run=True)
proxy.stop(thread_index=thread_index, prints_manager=prints_manager)
proxy.clean_mock_file(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
if succeed:
proxy.move_mock_file_to_repo(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
proxy.set_repo_folder()
return succeed
def mock_run(tests_settings, c, proxy, failed_playbooks, integrations, playbook_id, succeed_playbooks,
test_message, test_options, slack, circle_ci, build_number, server_url, build_name, start_message,
prints_manager, thread_index=0):
rerecord = False
if proxy.has_mock_file(playbook_id):
start_mock_message = '{} (Mock: Playback)'.format(start_message)
prints_manager.add_print_job(start_mock_message, print, thread_index, include_timestamp=True)
proxy.start(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
# run test
status, _ = test_integration(c, server_url, integrations, playbook_id, prints_manager, test_options,
is_mock_run=True, thread_index=thread_index)
# use results
proxy.stop(thread_index=thread_index, prints_manager=prints_manager)
if status == PB_Status.COMPLETED:
succeed_message = 'PASS: {} succeed'.format(test_message)
prints_manager.add_print_job(succeed_message, print_color, thread_index, LOG_COLORS.GREEN)
succeed_playbooks.append(playbook_id)
end_mock_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
return
if status == PB_Status.NOT_SUPPORTED_VERSION:
not_supported_version_message = 'PASS: {} skipped - not supported version'.format(test_message)
prints_manager.add_print_job(not_supported_version_message, print, thread_index)
succeed_playbooks.append(playbook_id)
end_mock_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
return
if status == PB_Status.FAILED_DOCKER_TEST:
error_message = 'Failed: {} failed'.format(test_message)
prints_manager.add_print_job(error_message, print_error, thread_index)
failed_playbooks.append(playbook_id)
end_mock_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
return
mock_failed_message = "Test failed with mock, recording new mock file. (Mock: Recording)"
prints_manager.add_print_job(mock_failed_message, print, thread_index)
rerecord = True
else:
mock_recording_message = start_message + ' (Mock: Recording)'
prints_manager.add_print_job(mock_recording_message, print, thread_index, include_timestamp=True)
# Mock recording - no mock file or playback failure.
succeed = run_and_record(tests_settings, c, proxy, failed_playbooks, integrations, playbook_id, succeed_playbooks,
test_message, test_options, slack, circle_ci, build_number, server_url, build_name,
prints_manager, thread_index=thread_index)
if rerecord and succeed:
proxy.rerecorded_tests.append(playbook_id)
test_end_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(test_end_message, print, thread_index, include_timestamp=True)
def run_test(tests_settings, demisto_api_key, proxy, failed_playbooks, integrations, unmockable_integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, prints_manager, is_ami=True, thread_index=0):
start_message = f'------ Test {test_message} start ------'
client = demisto_client.configure(base_url=server_url, api_key=demisto_api_key, verify_ssl=False)
if not is_ami or (not integrations or has_unmockable_integration(integrations, unmockable_integrations)):
prints_manager.add_print_job(start_message + ' (Mock: Disabled)', print, thread_index, include_timestamp=True)
run_test_logic(tests_settings, client, failed_playbooks, integrations, playbook_id, succeed_playbooks,
test_message, test_options, slack, circle_ci, build_number, server_url, build_name,
prints_manager, thread_index=thread_index)
prints_manager.add_print_job('------ Test %s end ------\n' % (test_message,), print, thread_index,
include_timestamp=True)
return
mock_run(tests_settings, client, proxy, failed_playbooks, integrations, playbook_id, succeed_playbooks,
test_message, test_options, slack, circle_ci, build_number, server_url, build_name, start_message,
prints_manager, thread_index=thread_index)
def http_request(url, params_dict=None):
try:
res = requests.request("GET",
url,
verify=True,
params=params_dict,
)
res.raise_for_status()
return res.json()
except Exception as e:
raise e
def get_user_name_from_circle(circleci_token, build_number):
url = "https://circleci.com/api/v1.1/project/github/demisto/content/{0}?circle-token={1}".format(build_number,
circleci_token)
res = http_request(url)
user_details = res.get('user', {})
return user_details.get('name', '')
def notify_failed_test(slack, circle_ci, playbook_id, build_number, inc_id, server_url, build_name):
circle_user_name = get_user_name_from_circle(circle_ci, build_number)
sc = SlackClient(slack)
user_id = retrieve_id(circle_user_name, sc)
text = "{0} - {1} Failed\n{2}".format(build_name, playbook_id, server_url) if inc_id == -1 \
else "{0} - {1} Failed\n{2}/#/WorkPlan/{3}".format(build_name, playbook_id, server_url, inc_id)
if user_id:
sc.api_call(
"chat.postMessage",
channel=user_id,
username="Content CircleCI",
as_user="False",
text=text
)
def retrieve_id(circle_user_name, sc):
user_id = ''
res = sc.api_call('users.list')
user_list = res.get('members', [])
for user in user_list:
profile = user.get('profile', {})
name = profile.get('real_name_normalized', '')
if name == circle_user_name:
user_id = user.get('id', '')
return user_id
def create_result_files(tests_data_keeper):
failed_playbooks = tests_data_keeper.failed_playbooks
skipped_integration = tests_data_keeper.skipped_integrations
skipped_tests = tests_data_keeper.skipped_tests
with open("./Tests/failed_tests.txt", "w") as failed_tests_file:
failed_tests_file.write('\n'.join(failed_playbooks))
with open('./Tests/skipped_tests.txt', "w") as skipped_tests_file:
skipped_tests_file.write('\n'.join(skipped_tests))
with open('./Tests/skipped_integrations.txt', "w") as skipped_integrations_file:
skipped_integrations_file.write('\n'.join(skipped_integration))
def change_placeholders_to_values(placeholders_map, config_item):
"""Replaces placeholders in the object to their real values
Args:
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
config_item: (json object)
Integration configuration object.
Returns:
dict. json object with the real configuration.
"""
item_as_string = json.dumps(config_item)
for key, value in placeholders_map.items():
item_as_string = item_as_string.replace(key, value)
return json.loads(item_as_string)
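# A minimal illustration (not part of the original module): with placeholders_map
# {'%%SERVER_HOST%%': '1.2.3.4'}, a config item such as {'params': {'url': 'https://%%SERVER_HOST%%'}}
# comes back as {'params': {'url': 'https://1.2.3.4'}}. Substitution happens on the JSON string
# form, so a placeholder is replaced wherever it appears in the nested object.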
def set_integration_params(demisto_api_key, integrations, secret_params, instance_names, playbook_id,
prints_manager, placeholders_map, thread_index=0):
for integration in integrations:
integration_params = [change_placeholders_to_values(placeholders_map, item) for item
in secret_params if item['name'] == integration['name']]
if integration_params:
matched_integration_params = integration_params[0]
if len(integration_params) != 1:
found_matching_instance = False
for item in integration_params:
if item.get('instance_name', 'Not Found') in instance_names:
matched_integration_params = item
found_matching_instance = True
if not found_matching_instance:
optional_instance_names = [optional_integration.get('instance_name', 'None')
for optional_integration in integration_params]
error_msg = FAILED_MATCH_INSTANCE_MSG.format(playbook_id, len(integration_params),
integration['name'],
'\n'.join(optional_instance_names))
prints_manager.add_print_job(error_msg, print_error, thread_index)
return False
integration['params'] = matched_integration_params.get('params', {})
integration['byoi'] = matched_integration_params.get('byoi', True)
integration['instance_name'] = matched_integration_params.get('instance_name', integration['name'])
integration['validate_test'] = matched_integration_params.get('validate_test', True)
elif integration['name'] == 'Demisto REST API':
integration['params'] = {
'url': 'https://localhost',
'apikey': demisto_api_key,
'insecure': True,
}
return True
def collect_integrations(integrations_conf, skipped_integration, skipped_integrations_conf, nightly_integrations):
integrations = []
is_nightly_integration = False
test_skipped_integration = []
for integration in integrations_conf:
if integration in skipped_integrations_conf.keys():
skipped_integration.add("{0} - reason: {1}".format(integration, skipped_integrations_conf[integration]))
test_skipped_integration.append(integration)
if integration in nightly_integrations:
is_nightly_integration = True
# register the integration with an empty params dict to be filled in later
integrations.append({
'name': integration,
'params': {}
})
return test_skipped_integration, integrations, is_nightly_integration
def extract_filtered_tests(is_nightly):
if is_nightly:
# TODO: verify this response
return [], False, True
with open(FILTER_CONF, 'r') as filter_file:
filtered_tests = filter_file.readlines()
filtered_tests = [line.strip('\n') for line in filtered_tests]
is_filter_configured = bool(filtered_tests)
run_all = RUN_ALL_TESTS_FORMAT in filtered_tests
return filtered_tests, is_filter_configured, run_all
def load_conf_files(conf_path, secret_conf_path):
with open(conf_path) as data_file:
conf = json.load(data_file)
secret_conf = None
if secret_conf_path:
with open(secret_conf_path) as data_file:
secret_conf = json.load(data_file)
return conf, secret_conf
def run_test_scenario(tests_settings, t, proxy, default_test_timeout, skipped_tests_conf, nightly_integrations,
skipped_integrations_conf, skipped_integration, is_nightly, run_all_tests, is_filter_configured,
filtered_tests, skipped_tests, secret_params, failed_playbooks, playbook_skipped_integration,
unmockable_integrations, succeed_playbooks, slack, circle_ci, build_number, server, build_name,
server_numeric_version, demisto_api_key, prints_manager, thread_index=0, is_ami=True):
playbook_id = t['playbookID']
nightly_test = t.get('nightly', False)
integrations_conf = t.get('integrations', [])
instance_names_conf = t.get('instance_names', [])
test_message = 'playbook: ' + playbook_id
test_options = {
'timeout': t.get('timeout', default_test_timeout),
'memory_threshold': t.get('memory_threshold', Docker.DEFAULT_CONTAINER_MEMORY_USAGE),
'pid_threshold': t.get('pid_threshold', Docker.DEFAULT_CONTAINER_PIDS_USAGE)
}
if not isinstance(integrations_conf, list):
integrations_conf = [integrations_conf, ]
if not isinstance(instance_names_conf, list):
instance_names_conf = [instance_names_conf, ]
test_skipped_integration, integrations, is_nightly_integration = collect_integrations(
integrations_conf, skipped_integration, skipped_integrations_conf, nightly_integrations)
if playbook_id in filtered_tests:
playbook_skipped_integration.update(test_skipped_integration)
skip_nightly_test = (nightly_test or is_nightly_integration) and not is_nightly
# Skip nightly test
if skip_nightly_test:
prints_manager.add_print_job(f'\n------ Test {test_message} start ------', print, thread_index,
include_timestamp=True)
prints_manager.add_print_job('Skip test', print, thread_index)
prints_manager.add_print_job(f'------ Test {test_message} end ------\n', print, thread_index,
include_timestamp=True)
return
if not run_all_tests:
# Skip filtered test
if is_filter_configured and playbook_id not in filtered_tests:
return
# Skip bad test
if playbook_id in skipped_tests_conf:
skipped_tests.add(f'{playbook_id} - reason: {skipped_tests_conf[playbook_id]}')
return
# Skip integration
if test_skipped_integration:
return
# Skip version mismatch test
test_from_version = t.get('fromversion', '0.0.0')
test_to_version = t.get('toversion', '99.99.99')
if not (LooseVersion(test_from_version) <= LooseVersion(server_numeric_version) <= LooseVersion(test_to_version)):
prints_manager.add_print_job(f'\n------ Test {test_message} start ------', print, thread_index,
include_timestamp=True)
warning_message = 'Test {} ignored due to version mismatch (test versions: {}-{})'.format(test_message,
test_from_version,
test_to_version)
prints_manager.add_print_job(warning_message, print_warning, thread_index)
prints_manager.add_print_job(f'------ Test {test_message} end ------\n', print, thread_index,
include_timestamp=True)
return
placeholders_map = {'%%SERVER_HOST%%': server}
are_params_set = set_integration_params(demisto_api_key, integrations, secret_params, instance_names_conf,
playbook_id, prints_manager, placeholders_map, thread_index=thread_index)
if not are_params_set:
failed_playbooks.append(playbook_id)
return
test_message = update_test_msg(integrations, test_message)
options = options_handler()
stdout, stderr = get_docker_memory_data()
text = 'Memory Usage: {}'.format(stdout) if not stderr else stderr
if options.nightly and options.memCheck and not tests_settings.is_local_run:
send_slack_message(slack, SLACK_MEM_CHANNEL_ID, text, 'Content CircleCI', 'False')
stdout, stderr = get_docker_processes_data()
text = stdout if not stderr else stderr
send_slack_message(slack, SLACK_MEM_CHANNEL_ID, text, 'Content CircleCI', 'False')
run_test(tests_settings, demisto_api_key, proxy, failed_playbooks, integrations, unmockable_integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci,
build_number, server, build_name, prints_manager, is_ami, thread_index=thread_index)
def get_server_numeric_version(ami_env, is_local_run=False):
"""
Gets the current server version
Arguments:
ami_env: (str)
AMI version name.
is_local_run: (bool)
when running locally, assume latest version.
Returns:
(str) Server numeric version
"""
default_version = '99.99.98'
env_results_path = './env_results.json'
if is_local_run:
print_color(f'Local run, assuming server version is {default_version}', LOG_COLORS.GREEN)
return default_version
if not os.path.isfile(env_results_path):
print_warning(f'Did not find {env_results_path} file, assuming server version is {default_version}.')
return default_version
with open(env_results_path, 'r') as json_file:
env_results = json.load(json_file)
instances_ami_names = set([env.get('AmiName') for env in env_results if ami_env in env.get('Role', '')])
if len(instances_ami_names) != 1:
print_warning(f'Did not get one AMI Name, got {instances_ami_names}.'
f' Assuming server version is {default_version}')
return default_version
instances_ami_name = list(instances_ami_names)[0]
extracted_version = re.findall(r'Demisto-(?:Circle-CI|MarketPlace)-Content-[\w-]+-([\d.]+)-[\d]{5}',
instances_ami_name)
if extracted_version:
server_numeric_version = extracted_version[0]
else:
server_numeric_version = default_version
# make sure version is three-part version
if server_numeric_version.count('.') == 1:
server_numeric_version += ".0"
print_color(f'Server version: {server_numeric_version}', LOG_COLORS.GREEN)
return server_numeric_version
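# Illustrative sketch (not part of the original script): the AMI-name parsing above can be
# checked in isolation. The AMI name used here is a hypothetical example of the expected pattern.
#
#   >>> import re
#   >>> ami_name = 'Demisto-Circle-CI-Content-AMI-6.0-12345'
#   >>> re.findall(r'Demisto-(?:Circle-CI|MarketPlace)-Content-[\w-]+-([\d.]+)-[\d]{5}', ami_name)
#   ['6.0']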
def get_instances_ips_and_names(tests_settings):
if tests_settings.server:
return [tests_settings.server]
with open('./Tests/instance_ips.txt', 'r') as instance_file:
instance_ips = instance_file.readlines()
instance_ips = [line.strip('\n').split(":") for line in instance_ips]
return instance_ips
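# Note (assumption, not from the original file): each line of ./Tests/instance_ips.txt is expected
# to look like "<AmiRoleName>:<ip-address>", e.g. "Server Master:10.0.0.12", so the split(":") above
# yields [name, ip] pairs that are later unpacked as (ami_instance_name, ami_instance_ip).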
def get_test_records_of_given_test_names(tests_settings, tests_names_to_search):
conf, secret_conf = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
tests_records = conf['tests']
test_records_with_supplied_names = []
for test_record in tests_records:
test_name = test_record.get("playbookID")
if test_name and test_name in tests_names_to_search:
test_records_with_supplied_names.append(test_record)
return test_records_with_supplied_names
def execute_testing(tests_settings, server_ip, mockable_tests_names, unmockable_tests_names,
tests_data_keeper, prints_manager, thread_index=0, is_ami=True):
server = SERVER_URL.format(server_ip)
server_numeric_version = tests_settings.serverNumericVersion
start_message = "Executing tests with the server {} - and the server ip {}".format(server, server_ip)
prints_manager.add_print_job(start_message, print, thread_index)
is_nightly = tests_settings.nightly
is_memory_check = tests_settings.memCheck
slack = tests_settings.slack
circle_ci = tests_settings.circleci
build_number = tests_settings.buildNumber
build_name = tests_settings.buildName
conf, secret_conf = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
demisto_api_key = tests_settings.api_key
default_test_timeout = conf.get('testTimeout', 30)
tests = conf['tests']
skipped_tests_conf = conf['skipped_tests']
nightly_integrations = conf['nightly_integrations']
skipped_integrations_conf = conf['skipped_integrations']
unmockable_integrations = conf['unmockable_integrations']
secret_params = secret_conf['integrations'] if secret_conf else []
filtered_tests, is_filter_configured, run_all_tests = extract_filtered_tests(tests_settings.nightly)
if is_filter_configured and not run_all_tests:
is_nightly = True
if not tests or len(tests) == 0:
prints_manager.add_print_job('no tests are configured for this build', print, thread_index)
prints_manager.execute_thread_prints(thread_index)
return
# turn off telemetry
turn_off_telemetry(server, demisto_api_key)
proxy = None
if is_ami:
ami = AMIConnection(server_ip)
ami.clone_mock_data()
proxy = MITMProxy(server_ip)
failed_playbooks = []
succeed_playbooks = []
skipped_tests = set([])
skipped_integration = set([])
playbook_skipped_integration = set([])
disable_all_integrations(demisto_api_key, server, prints_manager, thread_index=thread_index)
prints_manager.execute_thread_prints(thread_index)
mockable_tests = get_test_records_of_given_test_names(tests_settings, mockable_tests_names)
unmockable_tests = get_test_records_of_given_test_names(tests_settings, unmockable_tests_names)
if is_nightly and is_memory_check:
mem_lim, err = get_docker_limit()
send_slack_message(slack, SLACK_MEM_CHANNEL_ID,
f'Build Number: {build_number}\n Server Address: {server}\nMemory Limit: {mem_lim}',
'Content CircleCI', 'False')
try:
# first run the mock tests to avoid mockless side effects in container
if is_ami and mockable_tests:
proxy.configure_proxy_in_demisto(demisto_api_key, server, proxy.ami.docker_ip + ':' + proxy.PROXY_PORT)
for t in mockable_tests:
run_test_scenario(tests_settings, t, proxy, default_test_timeout, skipped_tests_conf,
nightly_integrations, skipped_integrations_conf, skipped_integration, is_nightly,
run_all_tests, is_filter_configured, filtered_tests,
skipped_tests, secret_params, failed_playbooks, playbook_skipped_integration,
unmockable_integrations, succeed_playbooks, slack, circle_ci, build_number, server,
build_name, server_numeric_version, demisto_api_key, prints_manager,
thread_index=thread_index)
proxy.configure_proxy_in_demisto(demisto_api_key, server, '')
# reset containers after clearing the proxy server configuration
reset_containers(server, demisto_api_key, prints_manager, thread_index)
prints_manager.add_print_job("\nRunning mock-disabled tests", print, thread_index)
for t in unmockable_tests:
run_test_scenario(tests_settings, t, proxy, default_test_timeout, skipped_tests_conf, nightly_integrations,
skipped_integrations_conf, skipped_integration, is_nightly, run_all_tests,
is_filter_configured, filtered_tests, skipped_tests, secret_params, failed_playbooks,
playbook_skipped_integration,
unmockable_integrations, succeed_playbooks, slack, circle_ci, build_number, server,
build_name, server_numeric_version, demisto_api_key,
prints_manager, thread_index, is_ami)
prints_manager.execute_thread_prints(thread_index)
except Exception as exc:
prints_manager.add_print_job(f'~~ Thread {thread_index + 1} failed ~~\n{str(exc)}\n{traceback.format_exc()}',
print_error, thread_index)
prints_manager.execute_thread_prints(thread_index)
failed_playbooks.append(f'~~ Thread {thread_index + 1} failed ~~')
raise
finally:
tests_data_keeper.add_tests_data(succeed_playbooks, failed_playbooks, skipped_tests,
skipped_integration, unmockable_integrations)
if is_ami:
tests_data_keeper.add_proxy_related_test_data(proxy)
if build_name == 'master':
updating_mocks_msg = "Pushing new/updated mock files to mock git repo."
prints_manager.add_print_job(updating_mocks_msg, print, thread_index)
ami.upload_mock_files(build_name, build_number)
if playbook_skipped_integration and build_name == 'master':
comment = 'The following integrations are skipped and critical for the test:\n {}'.\
format('\n- '.join(playbook_skipped_integration))
add_pr_comment(comment)
def get_unmockable_tests(tests_settings):
conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
unmockable_integrations = conf['unmockable_integrations']
tests = conf['tests']
unmockable_tests = []
for test_record in tests:
test_name = test_record.get("playbookID")
integrations_used_in_test = get_used_integrations(test_record)
unmockable_integrations_used = [integration_name for integration_name in integrations_used_in_test if
integration_name in unmockable_integrations]
if test_name and (not integrations_used_in_test or unmockable_integrations_used):
unmockable_tests.append(test_name)
return unmockable_tests
def get_all_tests(tests_settings):
conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
tests_records = conf['tests']
all_tests = []
for test_record in tests_records:
test_name = test_record.get("playbookID")
if test_name:
all_tests.append(test_name)
return all_tests
def manage_tests(tests_settings):
"""
This function manages the execution of Demisto's tests.
Args:
tests_settings (TestsSettings): An object containing all the relevant data regarding how the tests should be run
"""
tests_settings.serverNumericVersion = get_server_numeric_version(tests_settings.serverVersion,
tests_settings.is_local_run)
instances_ips = get_instances_ips_and_names(tests_settings)
is_nightly = tests_settings.nightly
number_of_instances = len(instances_ips)
prints_manager = ParallelPrintsManager(number_of_instances)
tests_data_keeper = TestsDataKeeper()
if tests_settings.server:
# If the user supplied a server - all tests will be done on that server.
server_ip = tests_settings.server
print_color("Starting tests for {}".format(server_ip), LOG_COLORS.GREEN)
print("Starts tests with server url - https://{}".format(server_ip))
all_tests = get_all_tests(tests_settings)
mockable_tests = []
print(tests_settings.specific_tests_to_run)
unmockable_tests = tests_settings.specific_tests_to_run if tests_settings.specific_tests_to_run else all_tests
execute_testing(tests_settings, server_ip, mockable_tests, unmockable_tests, tests_data_keeper, prints_manager,
thread_index=0, is_ami=False)
elif tests_settings.isAMI:
# Running tests in AMI configuration.
# This is the way we run most tests, including running Circle for PRs and nightly.
if is_nightly:
# If the build is a nightly build, run tests in parallel.
test_allocation = get_tests_allocation_for_threads(number_of_instances, tests_settings.conf_path)
current_thread_index = 0
all_unmockable_tests_list = get_unmockable_tests(tests_settings)
threads_array = []
for ami_instance_name, ami_instance_ip in instances_ips:
if ami_instance_name == tests_settings.serverVersion: # Only run tests for given AMI Role
current_instance = ami_instance_ip
tests_allocation_for_instance = test_allocation[current_thread_index]
unmockable_tests = [test for test in all_unmockable_tests_list
if test in tests_allocation_for_instance]
mockable_tests = [test for test in tests_allocation_for_instance if test not in unmockable_tests]
print_color("Starting tests for {}".format(ami_instance_name), LOG_COLORS.GREEN)
print("Starts tests with server url - https://{}".format(ami_instance_ip))
if number_of_instances == 1:
execute_testing(tests_settings, current_instance, mockable_tests, unmockable_tests,
tests_data_keeper, prints_manager, thread_index=0, is_ami=True)
else:
thread_kwargs = {
"tests_settings": tests_settings,
"server_ip": current_instance,
"mockable_tests_names": mockable_tests,
"unmockable_tests_names": unmockable_tests,
"thread_index": current_thread_index,
"prints_manager": prints_manager,
"tests_data_keeper": tests_data_keeper,
}
t = threading.Thread(target=execute_testing, kwargs=thread_kwargs)
threads_array.append(t)
t.start()
current_thread_index += 1
for t in threads_array:
t.join()
else:
for ami_instance_name, ami_instance_ip in instances_ips:
if ami_instance_name == tests_settings.serverVersion:
print_color("Starting tests for {}".format(ami_instance_name), LOG_COLORS.GREEN)
print("Starts tests with server url - https://{}".format(ami_instance_ip))
all_tests = get_all_tests(tests_settings)
unmockable_tests = get_unmockable_tests(tests_settings)
mockable_tests = [test for test in all_tests if test not in unmockable_tests]
execute_testing(tests_settings, ami_instance_ip, mockable_tests, unmockable_tests,
tests_data_keeper, prints_manager, thread_index=0, is_ami=True)
sleep(8)
else:
# TODO: understand better when this occurs and what will be the settings
# This case is rare, and usually occurs in one of two cases:
# 1. When someone from Server wants to trigger a content build on their branch.
# 2. When someone from content wants to run tests on a specific build.
server_numeric_version = '99.99.98' # assume latest
print("Using server version: {} (assuming latest for non-ami)".format(server_numeric_version))
instance_ip = instances_ips[0][1]
all_tests = get_all_tests(tests_settings)
execute_testing(tests_settings, instance_ip, [], all_tests,
tests_data_keeper, prints_manager, thread_index=0, is_ami=False)
print_test_summary(tests_data_keeper, tests_settings.isAMI)
create_result_files(tests_data_keeper)
if tests_data_keeper.failed_playbooks:
tests_failed_msg = "Some tests have failed. Not destroying instances."
print(tests_failed_msg)
sys.exit(1)
else:
file_path = "./Tests/is_build_passed_{}.txt".format(tests_settings.serverVersion.replace(' ', ''))
with open(file_path, "w") as is_build_passed_file:
is_build_passed_file.write('Build passed')
def add_pr_comment(comment):
token = os.environ['CONTENT_GITHUB_TOKEN']
branch_name = os.environ['CIRCLE_BRANCH']
sha1 = os.environ['CIRCLE_SHA1']
query = '?q={}+repo:demisto/content+org:demisto+is:pr+is:open+head:{}+is:open'.format(sha1, branch_name)
url = 'https://api.github.com/search/issues'
headers = {'Authorization': 'Bearer ' + token}
try:
res = requests.get(url + query, headers=headers, verify=False)
res = handle_github_response(res)
if res and res.get('total_count', 0) == 1:
issue_url = res['items'][0].get('comments_url') if res.get('items', []) else None
if issue_url:
res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False)
handle_github_response(res)
else:
print_warning('Add pull request comment failed: could not find exactly one open pull request for branch {}.'
.format(branch_name))
except Exception as e:
print_warning('Add pull request comment failed: {}'.format(e))
def handle_github_response(response):
res_dict = response.json()
if not response.ok:
print_warning('Add pull request comment failed: {}'.
format(res_dict.get('message')))
return res_dict
def main():
print("Time is: {}\n\n\n".format(datetime.now()))
tests_settings = options_handler()
# should be removed after solving: https://github.com/demisto/etc/issues/21383
# -------------
if 'master' in tests_settings.serverVersion.lower():
print('[{}] sleeping for 45 secs'.format(datetime.now()))
sleep(45)
# -------------
manage_tests(tests_settings)
if __name__ == '__main__':
main()
|
server.py
|
import click
import questionary as q
import docker
import os
import time
import subprocess
from threading import Thread
from functools import wraps
from colorama import (Fore, Style)
from sqlalchemy.engine.url import make_url
from docker.client import DockerClient
from vantage6.common import (info, warning, error, debug as debug_msg,
check_config_write_permissions)
from vantage6.common.docker.addons import (
pull_if_newer, check_docker_running, remove_container_if_exists,
get_server_config_name
)
from vantage6.common.docker.network_manager import NetworkManager
from vantage6.common.globals import (
APPNAME,
STRING_ENCODING,
DEFAULT_DOCKER_REGISTRY,
DEFAULT_SERVER_IMAGE
)
from vantage6.cli.globals import (DEFAULT_SERVER_ENVIRONMENT,
DEFAULT_SERVER_SYSTEM_FOLDERS)
from vantage6.cli.context import ServerContext
from vantage6.cli.configuration_wizard import (
select_configuration_questionaire,
configuration_wizard
)
from vantage6.cli.utils import check_config_name_allowed
from vantage6.cli.rabbitmq.queue_manager import RabbitMQManager
from vantage6.cli import __version__
def click_insert_context(func):
# add option decorators
@click.option('-n', '--name', default=None,
help="name of the configuration you want to use.")
@click.option('-c', '--config', default=None,
help='absolute path to configuration-file; overrides NAME')
@click.option('-e', '--environment',
default=DEFAULT_SERVER_ENVIRONMENT,
help='configuration environment to use')
@click.option('--system', 'system_folders', flag_value=True)
@click.option('--user', 'system_folders', flag_value=False,
default=DEFAULT_SERVER_SYSTEM_FOLDERS)
@wraps(func)
def func_with_context(name, config, environment, system_folders,
*args, **kwargs):
# select configuration if none supplied
if config:
ctx = ServerContext.from_external_config_file(
config,
environment,
system_folders
)
else:
if name:
name, environment = (name, environment)
else:
try:
name, environment = select_configuration_questionaire(
"server", system_folders
)
except Exception:
error("No configurations could be found!")
exit(1)
# raise error if config could not be found
if not ServerContext.config_exists(
name,
environment,
system_folders
):
scope = "system" if system_folders else "user"
error(
f"Configuration {Fore.RED}{name}{Style.RESET_ALL} with "
f"{Fore.RED}{environment}{Style.RESET_ALL} does not exist "
f"in the {Fore.RED}{scope}{Style.RESET_ALL} folders!"
)
exit(1)
# create server context, and initialize db
ServerContext.LOGGING_ENABLED = False
ctx = ServerContext(
name,
environment=environment,
system_folders=system_folders
)
return func(ctx, *args, **kwargs)
return func_with_context
@click.group(name='server')
def cli_server():
"""Subcommand `vserver`."""
pass
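# Illustrative sketch (hypothetical subcommand, not part of this file): any new `vserver`
# subcommand can reuse click_insert_context to receive a fully initialized ServerContext.
#
# @cli_server.command(name='ping')
# @click_insert_context
# def cli_server_ping(ctx):
#     """Print which configuration file the CLI resolved."""
#     info(f"Configuration '{ctx.name}' uses {ctx.config_file}")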
#
# start
#
@cli_server.command(name='start')
@click.option('--ip', default=None, help='ip address to listen on')
@click.option('-p', '--port', default=None, type=int, help='port to listen on')
@click.option('-i', '--image', default=None, help="Server Docker image to use")
@click.option('--rabbitmq-image', default=None,
help="RabbitMQ docker image to use")
@click.option('--keep/--auto-remove', default=False,
help="Keep image after finishing")
@click.option('--mount-src', default='',
help="mount vantage6-master package source")
@click.option('--attach/--detach', default=False,
help="Attach server logs to the console after start")
@click_insert_context
def cli_server_start(ctx, ip, port, image, rabbitmq_image, keep, mount_src,
attach):
"""Start the server."""
info("Starting server...")
info("Finding Docker daemon.")
docker_client = docker.from_env()
# will print an error if not
check_docker_running()
# check if name is allowed for docker volume, else exit
check_config_name_allowed(ctx.name)
# check that this server is not already running
running_servers = docker_client.containers.list(
filters={"label": f"{APPNAME}-type=server"})
for server in running_servers:
if server.name == f"{APPNAME}-{ctx.name}-{ctx.scope}-server":
error(f"Server {Fore.RED}{ctx.name}{Style.RESET_ALL} "
"is already running")
exit(1)
# Determine image-name. First we check if the option --image has been used.
# Then we check if the image has been specified in the config file, and
# finally we use the default settings from the package.
if image is None:
image = ctx.config.get(
"image",
f"{DEFAULT_DOCKER_REGISTRY}/{DEFAULT_SERVER_IMAGE}"
)
info(f"Pulling latest server image '{image}'.")
try:
pull_if_newer(docker.from_env(), image)
# docker_client.images.pull(image)
except Exception:
warning("... alas, no dice!")
else:
info(" ... success!")
info("Creating mounts")
config_file = "/mnt/config.yaml"
mounts = [
docker.types.Mount(
config_file, str(ctx.config_file), type="bind"
)
]
if mount_src:
mount_src = os.path.abspath(mount_src)
mounts.append(docker.types.Mount("/vantage6", mount_src, type="bind"))
# FIXME: code duplication with cli_server_import()
# try to mount database
uri = ctx.config['uri']
url = make_url(uri)
environment_vars = None
# If host is None, we're dealing with a file-based DB, like SQLite
if (url.host is None):
db_path = url.database
if not os.path.isabs(db_path):
# We're dealing with a relative path here -> make it absolute
db_path = ctx.data_dir / url.database
basename = os.path.basename(db_path)
dirname = os.path.dirname(db_path)
os.makedirs(dirname, exist_ok=True)
# we're mounting the entire folder that contains the database
mounts.append(docker.types.Mount(
"/mnt/database/", dirname, type="bind"
))
environment_vars = {
"VANTAGE6_DB_URI": f"sqlite:////mnt/database/{basename}",
"VANTAGE6_CONFIG_NAME": ctx.config_file_name
}
else:
warning(f"Database could not be transfered, make sure {url.host} "
"is reachable from the Docker container")
info("Consider using the docker-compose method to start a server")
# Create a docker network for the server and other services like RabbitMQ
# to reside in
server_network_mgr = NetworkManager(
network_name=f"{APPNAME}-{ctx.name}-{ctx.scope}-network"
)
server_network_mgr.create_network(is_internal=False)
# Note that ctx.data_dir has been created at this point, which is required
# for putting some RabbitMQ configuration files inside
info('Starting RabbitMQ container')
_start_rabbitmq(ctx, rabbitmq_image, server_network_mgr)
# The `ip` and `port` refer here to the ip and port within the container.
# So we do not really care that it is listening on all interfaces.
internal_port = 5000
cmd = (
f'uwsgi --http :{internal_port} --gevent 1000 --http-websockets '
'--master --callable app --disable-logging '
'--wsgi-file /vantage6/vantage6-server/vantage6/server/wsgi.py '
f'--pyargv {config_file}'
)
info(cmd)
info("Run Docker container")
port_ = str(port or ctx.config["port"] or 5000)
container = docker_client.containers.run(
image,
command=cmd,
mounts=mounts,
detach=True,
labels={
f"{APPNAME}-type": "server",
"name": ctx.config_file_name
},
environment=environment_vars,
ports={f"{internal_port}/tcp": (ip, port_)},
name=ctx.docker_container_name,
auto_remove=not keep,
tty=True,
network=server_network_mgr.network_name
)
info(f"Success! container id = {container}")
if attach:
logs = container.attach(stream=True, logs=True, stdout=True)
Thread(target=print_log_worker, args=(logs,), daemon=True).start()
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
info("Closing log file. Keyboard Interrupt.")
exit(0)
def _start_rabbitmq(ctx: ServerContext, rabbitmq_image: str,
network_mgr: NetworkManager) -> None:
""" Starts a RabbitMQ container """
if not ctx.config.get('rabbitmq'):
warning('Message queue disabled! This means that the server '
'application cannot scale horizontally!')
else:
# kick off RabbitMQ container
rabbit_mgr = RabbitMQManager(
ctx=ctx, network_mgr=network_mgr, image=rabbitmq_image)
rabbit_mgr.start()
#
# list
#
@cli_server.command(name='list')
def cli_server_configuration_list():
"""Print the available configurations."""
client = docker.from_env()
check_docker_running()
running_server = client.containers.list(
filters={"label": f"{APPNAME}-type=server"})
running_node_names = []
for node in running_server:
running_node_names.append(node.name)
header = \
"\nName"+(21*" ") + \
"Environments"+(20*" ") + \
"Status"+(10*" ") + \
"System/User"
click.echo(header)
click.echo("-"*len(header))
running = Fore.GREEN + "Online" + Style.RESET_ALL
stopped = Fore.RED + "Offline" + Style.RESET_ALL
# system folders
configs, f1 = ServerContext.available_configurations(
system_folders=True)
for config in configs:
status = running if f"{APPNAME}-{config.name}-system-server" in \
running_node_names else stopped
click.echo(
f"{config.name:25}"
f"{str(config.available_environments):32}"
f"{status:25} System "
)
# user folders
configs, f2 = ServerContext.available_configurations(
system_folders=False)
for config in configs:
status = running if f"{APPNAME}-{config.name}-user-server" in \
running_node_names else stopped
click.echo(
f"{config.name:25}"
f"{str(config.available_environments):32}"
f"{status:25} User "
)
click.echo("-"*85)
if len(f1)+len(f2):
warning(
f"{Fore.RED}Failed imports: {len(f1)+len(f2)}{Style.RESET_ALL}")
#
# files
#
@cli_server.command(name='files')
@click_insert_context
def cli_server_files(ctx):
"""List files locations of a server instance."""
info(f"Configuration file = {ctx.config_file}")
info(f"Log file = {ctx.log_file}")
info(f"Database = {ctx.get_database_uri()}")
#
# new
#
@cli_server.command(name='new')
@click.option('-n', '--name', default=None,
help="name of the configutation you want to use.")
@click.option('-e', '--environment', default=DEFAULT_SERVER_ENVIRONMENT,
help='configuration environment to use')
@click.option('--system', 'system_folders', flag_value=True)
@click.option('--user', 'system_folders', flag_value=False,
default=DEFAULT_SERVER_SYSTEM_FOLDERS)
def cli_server_new(name, environment, system_folders):
"""Create new configuration."""
if not name:
name = q.text("Please enter a configuration-name:").ask()
name_new = name.replace(" ", "-")
if name != name_new:
info(f"Replaced spaces from configuration name: {name}")
name = name_new
# check if name is allowed for docker volume, else exit
check_config_name_allowed(name)
# check that this config does not exist
try:
if ServerContext.config_exists(name, environment, system_folders):
error(
f"Configuration {Fore.RED}{name}{Style.RESET_ALL} with "
f"environment {Fore.RED}{environment}{Style.RESET_ALL} "
f"already exists!"
)
exit(1)
except Exception as e:
print(e)
exit(1)
# Check that we can write in this folder
if not check_config_write_permissions(system_folders):
error("Your user does not have write access to all folders. Exiting")
info(f"Create a new server using '{Fore.GREEN}vserver new "
"--user{Style.RESET_ALL}' instead!")
exit(1)
# create config in ctx location
cfg_file = configuration_wizard(
"server",
name,
environment=environment,
system_folders=system_folders
)
info(f"New configuration created: {Fore.GREEN}{cfg_file}{Style.RESET_ALL}")
# info(f"root user created.")
flag = "" if system_folders else "--user"
info(
f"You can start the server by running "
f"{Fore.GREEN}vserver start {flag}{Style.RESET_ALL}"
)
#
# import
#
# TODO this method has a lot of duplicated code from `start`
@cli_server.command(name='import')
@click.argument('file_', type=click.Path(exists=True))
@click.option('--drop-all', is_flag=True, default=False)
@click.option('-i', '--image', default=None, help="Node Docker image to use")
@click.option('--keep/--auto-remove', default=False,
help="Keep image after finishing")
@click_insert_context
def cli_server_import(ctx, file_, drop_all, image, keep):
""" Import organizations/collaborations/users and tasks.
Especially useful for testing purposes.
"""
info("Starting server...")
info("Finding Docker daemon.")
docker_client = docker.from_env()
# will print an error if not
check_docker_running()
# check if name is allowed for docker volume, else exit
check_config_name_allowed(ctx.name)
# pull latest Docker image
if image is None:
image = ctx.config.get(
"image",
f"{DEFAULT_DOCKER_REGISTRY}/{DEFAULT_SERVER_IMAGE}"
)
info(f"Pulling latest server image '{image}'.")
try:
docker_client.images.pull(image)
except Exception:
warning("... alas, no dice!")
else:
info(" ... success!")
info("Creating mounts")
mounts = [
docker.types.Mount(
"/mnt/config.yaml", str(ctx.config_file), type="bind"
),
docker.types.Mount(
"/mnt/import.yaml", str(file_), type="bind"
)
]
# FIXME: code duplication with cli_server_start()
# try to mount database
uri = ctx.config['uri']
url = make_url(uri)
environment_vars = None
# If host is None, we're dealing with a file-based DB, like SQLite
if (url.host is None):
db_path = url.database
if not os.path.isabs(db_path):
# We're dealing with a relative path here -> make it absolute
db_path = ctx.data_dir / url.database
basename = os.path.basename(db_path)
dirname = os.path.dirname(db_path)
os.makedirs(dirname, exist_ok=True)
# we're mounting the entire folder that contains the database
mounts.append(docker.types.Mount(
"/mnt/database/", dirname, type="bind"
))
environment_vars = {
"VANTAGE6_DB_URI": f"sqlite:////mnt/database/{basename}"
}
else:
warning(f"Database could not be transfered, make sure {url.host} "
"is reachable from the Docker container")
info("Consider using the docker-compose method to start a server")
drop_all_ = "--drop-all" if drop_all else ""
cmd = f'vserver-local import -c /mnt/config.yaml -e {ctx.environment} ' \
f'{drop_all_} /mnt/import.yaml'
info(cmd)
info("Run Docker container")
container = docker_client.containers.run(
image,
command=cmd,
mounts=mounts,
detach=True,
labels={
f"{APPNAME}-type": "server",
"name": ctx.config_file_name
},
environment=environment_vars,
auto_remove=not keep,
tty=True
)
logs = container.logs(stream=True, stdout=True)
Thread(target=print_log_worker, args=(logs,), daemon=False).start()
info(f"Success! container id = {container.id}")
# print_log_worker(container.logs(stream=True))
# for log in container.logs(stream=True):
# print(log.decode("utf-8"))
# info(f"Check logs files using {Fore.GREEN}docker logs {container.id}"
# f"{Style.RESET_ALL}")
# info("Reading yaml file.")
# with open(file_) as f:
# entities = yaml.safe_load(f.read())
# info("Adding entities to database.")
# fixture.load(entities, drop_all=drop_all)
#
# shell
#
@cli_server.command(name='shell')
@click_insert_context
def cli_server_shell(ctx):
""" Run a iPython shell. """
docker_client = docker.from_env()
# will print an error if not
check_docker_running()
running_servers = docker_client.containers.list(
filters={"label": f"{APPNAME}-type=server"})
if ctx.docker_container_name not in [s.name for s in running_servers]:
error(f"Server {Fore.RED}{ctx.name}{Style.RESET_ALL} is not running?")
return
try:
subprocess.run(['docker', 'exec', '-it', ctx.docker_container_name,
'vserver-local', 'shell', '-c', '/mnt/config.yaml'])
except Exception as e:
info("Failed to start subprocess...")
debug_msg(e)
#
# stop
#
@cli_server.command(name='stop')
@click.option("-n", "--name", default=None, help="Configuration name")
@click.option('--system', 'system_folders', flag_value=True)
@click.option('--user', 'system_folders', flag_value=False,
default=DEFAULT_SERVER_SYSTEM_FOLDERS)
@click.option('--all', 'all_servers', flag_value=True, help="Stop all servers")
def cli_server_stop(name, system_folders, all_servers):
""" Stop a running server """
client = docker.from_env()
check_docker_running()
running_servers = client.containers.list(
filters={"label": f"{APPNAME}-type=server"})
if not running_servers:
warning("No servers are currently running.")
return
running_server_names = [server.name for server in running_servers]
if all_servers:
for container_name in running_server_names:
_stop_server_containers(client, container_name, system_folders)
else:
if not name:
container_name = q.select("Select the server you wish to stop:",
choices=running_server_names).ask()
else:
post_fix = "system" if system_folders else "user"
container_name = f"{APPNAME}-{name}-{post_fix}-server"
if container_name in running_server_names:
_stop_server_containers(client, container_name, system_folders)
else:
error(f"{Fore.RED}{name}{Style.RESET_ALL} is not running!")
def _stop_server_containers(client: DockerClient, container_name: str,
system_folders: bool) -> None:
"""
Given a server's name, kill its docker container and related (RabbitMQ)
containers.
"""
# kill the server
container = client.containers.get(container_name)
container.kill()
info(f"Stopped the {Fore.GREEN}{container_name}{Style.RESET_ALL} server.")
# find the configuration name from the docker container name
# server name is formatted as f"{APPNAME}-{self.name}-{self.scope}-server"
scope = "system" if system_folders else "user"
config_name = get_server_config_name(container_name, scope)
# kill the RabbitMQ container (if it exists)
rabbit_container_name = f'{APPNAME}-{config_name}-rabbitmq'
remove_container_if_exists(client, name=rabbit_container_name)
info(f"Stopped the {Fore.GREEN}{rabbit_container_name}{Style.RESET_ALL} "
"container.")
#
# attach
#
@cli_server.command(name='attach')
@click.option("-n", "--name", default=None, help="configuration name")
@click.option('--system', 'system_folders', flag_value=True)
@click.option('--user', 'system_folders', flag_value=False,
default=DEFAULT_SERVER_SYSTEM_FOLDERS)
def cli_server_attach(name, system_folders):
"""Attach the logs from the docker container to the terminal."""
client = docker.from_env()
check_docker_running()
running_servers = client.containers.list(
filters={"label": f"{APPNAME}-type=server"})
running_server_names = [node.name for node in running_servers]
if not name:
name = q.select("Select the server you wish to inspect:",
choices=running_server_names).ask()
else:
post_fix = "system" if system_folders else "user"
name = f"{APPNAME}-{name}-{post_fix}-server"
if name in running_server_names:
container = client.containers.get(name)
logs = container.attach(stream=True, logs=True, stdout=True)
Thread(target=print_log_worker, args=(logs,), daemon=True).start()
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
info("Closing log file. Keyboard Interrupt.")
exit(0)
else:
error(f"{Fore.RED}{name}{Style.RESET_ALL} was not running!?")
#
# version
#
@cli_server.command(name='version')
@click.option("-n", "--name", default=None, help="configuration name")
@click.option('--system', 'system_folders', flag_value=True)
@click.option('--user', 'system_folders', flag_value=False,
default=DEFAULT_SERVER_SYSTEM_FOLDERS)
def cli_server_version(name, system_folders):
"""Returns current version of vantage6 services installed."""
client = docker.from_env()
check_docker_running()
running_servers = client.containers.list(
filters={"label": f"{APPNAME}-type=server"})
running_server_names = [server.name for server in running_servers]
if not name:
if not running_server_names:
error("No servers are running! You can only check the version for "
"servers that are running")
exit(1)
name = q.select("Select the server you wish to inspect:",
choices=running_server_names).ask()
else:
post_fix = "system" if system_folders else "user"
name = f"{APPNAME}-{name}-{post_fix}"
if name in running_server_names:
container = client.containers.get(name)
version = container.exec_run(cmd='vserver-local version',
stdout=True)
click.echo({"server": version.output.decode('utf-8'),
"cli": __version__})
else:
error(f"Server {name} is not running! Cannot provide version...")
def print_log_worker(logs_stream):
for log in logs_stream:
print(log.decode(STRING_ENCODING), end="")
|
flocklab_serial.py
|
#! /usr/bin/env python3
"""
Copyright (c) 2020, ETH Zurich, Computer Engineering Group
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import os, sys, getopt, signal, socket, time, subprocess, errno, queue, serial, select, multiprocessing, threading, traceback, struct
import lib.daemon as daemon
import lib.flocklab as flocklab
### Global variables ###
pidfile = None
config = None
isdaemon = False
proc_list = [] # List with all running processes
dbbuf_proc = [] # Dbbuf process
msgQueueDbBuf = None # Queue used to send data to the DB buffer
##############################################################################
#
# Usage
#
##############################################################################
def usage():
print("Usage: %s --output=<string> [--port=<string>] [--baudrate=<int>] [--socketport=<int>] [--stop] [--daemon] [--debug] [--help]" %sys.argv[0])
print("Options:")
print(" --output=<string>\t\tOutput filename.")
print(" --port=<string>\t\tOptional. Port over which serial communication is done. Default is serial.")
print("\t\t\t\tPossible values are: %s" % (str(flocklab.tg_port_types)))
print(" --baudrate=<int>\t\tOptional. Baudrate of serial device. Default is 115200.")
print("\t\t\t\tPossible values are: %s" % (" ".join([str(x) for x in flocklab.tg_baud_rates])))
print(" --socketport=<int>\t\tOptional. If set, a server socket will be created on the specified port.")
print(" --stop\t\t\tOptional. Causes the program to stop a possibly running instance of the serial reader service.")
print(" --daemon\t\t\tOptional. If set, program will run as a daemon. If not specified, all output will be written to STDOUT and STDERR.")
print(" --debug\t\t\tOptional. Print debug messages to log.")
print(" --help\t\t\tOptional. Print this help.")
return(0)
### END usage()
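# Example invocation (illustrative only; the output path and port numbers are hypothetical):
#   ./flocklab_serial.py --output=/home/flocklab/results --port=serial --baudrate=115200 \
#                        --socketport=50100 --daemon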
##############################################################################
#
# sigterm_handler
#
##############################################################################
def sigterm_handler(signum, frame):
"""If the program is terminated by sending it the signal SIGTERM
(e.g. by executing 'kill') or SIGINT (pressing ctrl-c),
this signal handler is invoked for cleanup."""
flocklab.log_info("Main process received SIGTERM signal")
# Close serial forwarder object:
retval = stop_on_sig(flocklab.SUCCESS)
sys.exit(retval)
### END sigterm_handler()
##############################################################################
#
# ServerSockets class
#
##############################################################################
class ServerSockets():
def __init__(self, port):
self.sock = None
self.sock_host = ''
self.sock_port = port
self.sock_rx_waittime = 5.0
self.sock_rx_bufsize = 4096
self.sock_listen_timeout = 0.2
self.connection = None
self.address = None
def start(self):
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.sock_host, self.sock_port))
self.sock.settimeout(self.sock_listen_timeout)
flocklab.log_info("Started socket %s:%d" % (self.sock_host, self.sock_port))
except:
self.sock = None
flocklab.log_error("Encountered error: %s, %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))
def stop(self):
if self.sock != None:
try:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
flocklab.log_info("Stopped socket %s:%d" % (self.sock_host, self.sock_port))
except:
flocklab.log_error("Could not stop socket %s:%d due to error: %s, %s" % (self.sock_host, self.sock_port, str(sys.exc_info()[0]), str(sys.exc_info()[1])))
finally:
self.connection = None
self.address = None
self.sock = None
def waitForClient(self):
if self.sock != None:
# flocklab.log_info("Waiting for clients on socket %s:%d" % (self.sock_host, self.sock_port))
try:
self.sock.listen(1)
self.connection, self.address = self.sock.accept()
self.connection.setblocking(0)
self.connection.settimeout(self.sock_rx_waittime)
flocklab.log_info("Client %s:%d connected to socket %s:%d" % (self.address[0], self.address[1], self.sock_host, self.sock_port))
except socket.timeout:
self.connection = None
return self.connection
else:
raise socket.error
def disconnectClient(self):
if self.connection != None:
flocklab.log_info("Disconnect client %s:%d from socket %s:%d" % (self.address[0], self.address[1], self.sock_host, self.sock_port))
self.connection.close()
self.connection = None
self.address = None
def send(self, data):
if self.connection != None:
return self.connection.send(data)
else:
raise socket.error
def recv(self, bufsize=None):
if ((self.sock != None) and (self.connection != None)):
if bufsize == None:
bufsize = self.sock_rx_bufsize
return self.connection.recv(bufsize)
else:
raise socket.error
def isRunning(self):
if self.sock != None:
return True
return False
def clientConnected(self):
if self.connection != None:
return True
return False
### END ServerSockets()
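def _example_socket_client(host, port, payload=b"hello\n"):
    # Illustrative sketch only (not part of the original service and never called here):
    # shows how an external tool could talk to the server socket opened by ServerSockets.
    # host/port are whatever was configured via --socketport on the observer.
    with socket.create_connection((host, port), timeout=5) as conn:
        conn.sendall(payload)    # forwarded to the target's serial port by ThreadSocketProxy
        return conn.recv(4096)   # serial output read by ThreadSerialReader and relayed back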
##############################################################################
#
# SerialForwarder class
#
##############################################################################
class SerialForwarder():
def __init__(self, slotnr, serialdev, baudrate):
self.ser = serial.Serial()
self.ser.port = serialdev
self.ser.baudrate = baudrate
self.num_elements_rcv = 0
self.num_elements_snd = 0
# If it breaks try the below
#self.serConf() # Uncomment lines here till it works
self.addr = None
def cmd(self, cmd_str):
self.ser.write(cmd_str + "\n")
time.sleep(0.5)
return self.ser.readline()
def serConf(self):
# baudrate and port are already set in __init__
self.ser.bytesize = serial.EIGHTBITS
self.ser.parity = serial.PARITY_NONE
self.ser.stopbits = serial.STOPBITS_ONE
self.ser.timeout = 0 # Non-Block reading
self.ser.xonxoff = False # Disable Software Flow Control
self.ser.rtscts = False # Disable (RTS/CTS) flow Control
self.ser.dsrdtr = False # Disable (DSR/DTR) flow Control
self.ser.writeTimeout = 2
def open(self):
try:
self.ser.open()
flocklab.log_info("SerialForwarder started for device %s with baudrate %d" % (self.ser.port, self.ser.baudrate))
except(Exception) as err:
flocklab.log_error("SerialForwarder could not start because: %s" % (str(sys.exc_info()[1])))
return None
flocklab.log_info("SerialForwarder opened.")
def close(self):
self.ser.close()
flocklab.log_info("SerialForwarder stopped")
def isRunning(self):
if self.ser.is_open:
return True
return False
def read(self):
ret = None
# Get data from serialdump:
try:
data = self.ser.readline()
if (data != ''):
# Useful data was retrieved, insert it into queue:
timestamp = time.time()
self.num_elements_rcv = self.num_elements_rcv +1
ret = [data, timestamp]
except(select.error) as err:
if (err.errno == 4):
flocklab.log_info("SerialForwarder interrupted due to caught stop signal.")
except:
flocklab.log_error("SerialForwarder encountered error: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))
return ret
def write(self, data):
try:
rs = self.ser.write(data)
if rs != len(data):
flocklab.log_error("SerialForwarder error while writing: no of bytes written (%d) != no of bytes in data (%d)." %str(rs, len(data)))
self.num_elements_snd = self.num_elements_snd + 1
except(socket.error) as err:
flocklab.log_error("SerialForwarder error while writing to serial forwarder: %s" %str(err))
self.close()
except:
flocklab.log_error("SerialForwarder encountered error: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))
return None
### END SerialForwarder
##############################################################################
#
# ThreadSerialReader thread
#
##############################################################################
def ThreadSerialReader(sf, msgQueueDbBuf, msgQueueSockBuf, stopLock):
data = ''
timestamp = time.time()
sf_err_back_init = 0.5 # Initial time to wait after error on opening serial port
sf_err_back_step = 0.5 # Time to increase backoff time to wait after error on opening serial port
sf_err_back_max = 5.0 # Maximum backoff time to wait after error on opening serial port
sf_err_backoff = sf_err_back_init # Time to wait after error on opening serial port
flocklab.log_info("ThreadSerialReader started.")
while stopLock.acquire(False):
stopLock.release()
if not sf.isRunning():
sf.open()
if not sf.isRunning():
# There was an error opening the serial device. Wait some time before trying again:
time.sleep(sf_err_backoff)
# Increase backoff time to wait
sf_err_backoff = sf_err_backoff + sf_err_back_step
if sf_err_backoff > sf_err_back_max:
sf_err_backoff = sf_err_back_max
else:
sf_err_backoff = sf_err_back_init
data = ''
timestamp = 0
if sf.isRunning():
# Read data:
try:
[data, timestamp] = sf.read()
if data != None:
# Data has been received.
if len(data) > 0:
try:
# Data is put directly onto the buffer queue for the socket:
msgQueueSockBuf.put(data, False)
# Data is written directly into the DB buffer queue:
msgQueueDbBuf.put([0,data,timestamp], False)
#flocklab.log_debug("[0,%s,%s]" %(str(data), str(timestamp)))
except queue.Full:
flocklab.log_error("Queue msgQueueSockBuf full in ThreadSerialReader, dropping data.")
except:
flocklab.log_error("ThreadSerialReader could not insert data into queues because: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))
data = ''
timestamp = lastTimestamp = 0
except:
flocklab.log_error("ThreadSerialReader encountered error: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))
sf.close()
# Stop thread:
flocklab.log_error("ThreadSerialReader stopping...")
if sf.isRunning():
sf.close()
flocklab.log_error("ThreadSerialReader stopped.")
### END ThreadSerialReader()
##############################################################################
#
# ThreadSocketProxy thread
#
##############################################################################
def ThreadSocketProxy(msgQueueSockBuf, ServerSock, sf, msgQueueDbBuf, stopLock):
poll_timeout = 1000
READ_ONLY = select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR
READ_WRITE = READ_ONLY | select.POLLOUT
message_queues = {}
connection = None
fd_to_socket = {}
try:
flocklab.log_info("ThreadSocketProxy started")
# Initialize poller:
poller = select.poll()
poller.register(msgQueueSockBuf._reader, READ_ONLY)
fd_to_socket[msgQueueSockBuf._reader.fileno()] = msgQueueSockBuf
# Let thread run until stopLock is acquired:
while stopLock.acquire(False):
stopLock.release()
try:
if not ServerSock.isRunning():
ServerSock.start()
if not ServerSock.clientConnected():
connection = ServerSock.waitForClient()
if ServerSock.clientConnected():
fd_to_socket[connection.fileno()] = connection
poller.register(connection, READ_ONLY)
# Wait for data:
# drop data if client is not connected
events = poller.poll(poll_timeout)
for fd, flag in events:
# Retrieve the actual socket from its file descriptor
s = fd_to_socket[fd]
# Handle inputs
if flag & (select.POLLIN | select.POLLPRI):
if s is connection:
data = ServerSock.recv()
timestamp = time.time()
#flocklab.log_debug("---> Received data from socket: %s: >%s<" % (str(timestamp), str(data)))
if not data:
# That can only mean that the socket has been closed.
poller.unregister(s)
if ServerSock.isRunning():
ServerSock.disconnectClient()
continue
# Send received data to serial forwarder and the DB buffer
if not sf.isRunning():
sf.close()
sf.open()
if sf.isRunning():
sf.write(data)
#flocklab.log_debug("<--- Wrote data to SF: >%s<" % (str(data)))
# Signal with 1, that data is from writer (use 0 for reader):
try:
dataSanList = data.replace(b'\r', b'').split(b'\n')
for i, dataSan in enumerate(dataSanList):
ts = timestamp + i * 0.000001 # slightly different timestamps make sure that ordering is preserved
if(len(dataSan) > 0):
msgQueueDbBuf.put([1, dataSan, ts], False)
except queue.Full:
flocklab.log_error("Queue msgQueueDbBuf full in ThreadSocketProxy, dropping data.")
except Exception:
flocklab.log_error("An error occurred, serial data dropped (%s, %s)." % (str(sys.exc_info()[1]), traceback.format_exc()))
elif s is msgQueueSockBuf:
# Retrieve element from queue:
item = msgQueueSockBuf.get()
# Forward element to socket:
if ((ServerSock.isRunning()) and (ServerSock.clientConnected())):
try:
rs = ServerSock.send(item)
#flocklab.log_debug("<--- Sent data to socket (rs: %s, len: %d)" % (str(rs), len(item)))
if rs != len(item):
raise socket.error
except(socket.error) as err:
flocklab.log_warning("ThreadSocketProxy could not send data to socket because of encountered error: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))
poller.unregister(connection)
if ServerSock.clientConnected():
ServerSock.disconnectClient()
continue
elif flag & select.POLLHUP:
# The POLLHUP flag indicates a client that "hung up" the connection without closing it cleanly.
if ((s is connection) and (ServerSock.isRunning())):
poller.unregister(s)
if ServerSock.clientConnected():
ServerSock.disconnectClient()
continue
elif flag & select.POLLERR:
if ((s is connection) and (ServerSock.isRunning())):
poller.unregister(s)
ServerSock.disconnectClient()
ServerSock.stop()
continue
except:
flocklab.log_error("ThreadSocketProxy encountered error: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))
# Stop the thread
flocklab.log_debug("ThreadSocketProxy stopping...")
try:
if ServerSock.isRunning():
ServerSock.disconnectClient()
ServerSock.stop()
except:
flocklab.log_error("Error in ServerSock.disconnectClient(): %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))
except:
flocklab.log_error("ThreadSocketProxy encountered error: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))
flocklab.log_info("ThreadSocketProxy stopped.")
### END ThreadSocketProxy()
##############################################################################
#
# ProcDbBuf
#
##############################################################################
def ProcDbBuf(msgQueueDbBuf, stopLock, resultsfile):
_num_elements = 0
_dbfile = None
_dbfile_creation_time = 0
_dbflushinterval = 300
_obsresfolder = resultsfile
def _get_db_file_name():
return "%s/serial_%s.db" % (_obsresfolder, time.strftime("%Y%m%d%H%M%S", time.gmtime()))
try:
flocklab.log_info("ProcDbBuf started")
# set lower priority
os.nice(1)
# Let process run until stoplock is acquired:
while stopLock.acquire(False):
stopLock.release()
try:
# Wait for data in the queue:
_waittime = _dbfile_creation_time + _dbflushinterval - time.time()
if _waittime <= 0:
if _dbfile is not None:
_dbfile.close()
flocklab.log_info("ProcDbBuf closed dbfile %s" % _dbfilename)
_dbfilename = _get_db_file_name()
_dbfile = open(_dbfilename, "wb+")
_dbfile_creation_time = time.time()
_waittime = _dbflushinterval
flocklab.log_info("ProcDbBuf opened dbfile %s" % _dbfilename)
_service, _data, _ts = msgQueueDbBuf.get(True, _waittime)
try:
_len = len(_data)
except:
continue
if _len > 0:
_ts_sec = int(_ts)
# Write to dbfile:
if _dbfile is None:
_dbfilename = _get_db_file_name()
_dbfile = open(_dbfilename, "wb+")
_dbfile_creation_time = time.time()
flocklab.log_info("ProcDbBuf opened dbfile %s" % _dbfilename)
# Record layout '<Illl%ds': total length of the rest of the record (12 header bytes + payload),
# then service ID, timestamp seconds and microseconds (read back with '<iii' when decoding), then the payload.
#flocklab.log_debug("SERVICE: %s - DATA: %s" % (str(_service), str(_data)))
packet = struct.pack("<Illl%ds" % _len,_len + 12, _service, _ts_sec, int((_ts - _ts_sec) * 1e6), _data)
_dbfile.write(packet)
_num_elements = _num_elements + 1
except queue.Empty:
continue
except(IOError) as err:
if (err.errno == 4):
flocklab.log_info("ProcDbBuf interrupted due to caught stop signal.")
continue
else:
raise
# Stop the process
flocklab.log_debug("ProcDbBuf stopping... %d elements received " % _num_elements)
except KeyboardInterrupt:
pass
except:
flocklab.log_error("ProcDbBuf encountered error: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))
# flush and close the dbfile
try:
if _dbfile is not None:
_dbfile.close()
flocklab.log_debug("ProcDbBuf closed dbfile %s" % _dbfilename)
flocklab.log_info("ProcDbBuf stopped.")
except:
flocklab.log_error("ProcDbBuf encountered error: %s: %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))
### END ProcDbBuf
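def _example_read_serial_db(path):
    # Illustrative sketch only (not part of the original service and never called here):
    # reads back the records written by ProcDbBuf above. The record layout is taken from the
    # struct.pack() call: a 4-byte little-endian unsigned length (12 header bytes + payload size),
    # three 4-byte signed ints (service, seconds, microseconds), then the payload bytes.
    records = []
    with open(path, "rb") as f:
        while True:
            header = f.read(4)
            if len(header) < 4:
                break
            (length,) = struct.unpack("<I", header)
            body = f.read(length)
            if len(body) < length:
                break  # truncated record at the end of the file
            service, sec, usec = struct.unpack("<lll", body[:12])
            records.append((service, sec + usec / 1e6, body[12:]))
    return records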
##############################################################################
#
# stop_on_sig
#
##############################################################################
def stop_on_sig(ret_val=flocklab.SUCCESS):
"""Stop all serial forwarder threads and the output socket
and exit the application.
Arguments:
ret_val: Return value to exit the program with.
"""
global proc_list
# Close all threads:
flocklab.log_debug("Closing %d processes/threads..." % len(proc_list))
for (proc,stopLock) in proc_list:
try:
stopLock.acquire()
except:
flocklab.log_error("Could not acquire stop lock for process/thread.")
flocklab.log_debug("Joining %d processes/threads..." % len(proc_list))
for (proc,stopLock) in proc_list:
try:
proc.join(10)
except:
flocklab.log_warning("Could not stop process/thread.")
if proc.is_alive():
flocklab.log_error("Could not stop process/thread.")
# Stop dbbuf process:
flocklab.log_debug("Closing ProcDbBuf process...")
try:
dbbuf_proc[1].acquire()
except:
flocklab.log_error("Could not acquire stoplock for ProcDbBuf process.")
# Send some dummy data to the queue of the DB buffer to wake it up:
msgQueueDbBuf.put([None, None, None])
flocklab.log_debug("Joining ProcDbBuf process...")
try:
dbbuf_proc[0].join(30)
except:
flocklab.log_error("Could not stop ProcDbBuf process.")
if dbbuf_proc and dbbuf_proc[0].is_alive():
flocklab.log_error("Could not stop ProcDbBuf process.")
# Remove the PID file if it exists:
if os.path.exists(pidfile):
try:
os.remove(pidfile)
except:
flocklab.log_warning("Could not remove pid file.")
flocklab.log_info("FlockLab serial service stopped.")
return ret_val
### END stop_on_sig()
##############################################################################
#
# stop_on_api
#
##############################################################################
def stop_on_api():
"""Stop all already running serial reader processes
"""
# Get PID of running serial reader (if any) from pidfile and send it the terminate signal.
try:
pid = int(open(pidfile, 'r').read())
# Signal the process to stop:
if (pid > 0):
flocklab.log_info("Sending SIGTERM signal to process %d" %pid)
try:
os.kill(pid, signal.SIGTERM)
except OSError:
os.remove(pidfile)
raise
try:
os.waitpid(pid, 0)
except OSError:
pass # can occur, no need to print a warning
return flocklab.SUCCESS
except (IOError, OSError):
# The pid file was most probably not present. This can have two causes:
# 1) The serial reader service is not running.
# 2) The serial reader service did not shut down correctly the last time.
# As a consequence, try to kill all remaining serial reader service threads (handles 1)) and if that
# was not successful (meaning cause 2) takes effect), return ENOPKG.
try:
patterns = [os.path.basename(__file__),]
ownpid = str(os.getpid())
for pattern in patterns:
p = subprocess.Popen(['pgrep', '-f', pattern], stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate(None)
if (out != None):
for pid in out.split('\n'):
if ((pid != '') and (pid != ownpid)):
flocklab.log_info("Trying to kill process %s" %pid)
os.kill(int(pid), signal.SIGKILL)
return flocklab.SUCCESS
return errno.ENOPKG
except (OSError, ValueError):
flocklab.log_error("Error while trying to kill serial service threads: %s, %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))
return errno.EINVAL
### END stop_on_api()
##############################################################################
#
# Main
#
##############################################################################
def main(argv):
global proc_list
global isdaemon
global dbbuf_proc
global pidfile
global config
global msgQueueDbBuf
debug = False
port = 'serial' # Standard port. Can be overwritten by the user.
serialdev = None
baudrate = 115200 # Standard baudrate. Can be overwritten by the user.
slotnr = None
socketport = None
output = None
stop = False
# Get config:
config = flocklab.get_config()
if not config:
flocklab.error_logandexit("Could not read configuration file.")
# Get command line parameters.
try:
opts, args = getopt.getopt(argv, "ehqdt:p:m:b:o:l:", ["stop", "help", "daemon", "debug", "port=", "baudrate=", "output=", "socketport="])
except(getopt.GetoptError) as err:
flocklab.error_logandexit(str(err), errno.EINVAL)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit(flocklab.SUCCESS)
elif opt in ("-d", "--debug"):
debug = True
elif opt in ("-q", "--daemon"):
isdaemon = True
elif opt in ("-e", "--stop"):
stop = True
elif opt in ("-b", "--baudrate"):
if int(arg) not in flocklab.tg_baud_rates:
flocklab.error_logandexit("Baudrate not valid. Check help for possible baud rates.", errno.EINVAL)
else:
baudrate = int(arg)
elif opt in ("-p", "--port"):
if arg not in flocklab.tg_port_types:
flocklab.error_logandexit("Port not valid. Possible values are: %s" % (str(flocklab.tg_port_types)), errno.EINVAL)
else:
port = arg
elif opt in ("-o", "--output"):
output = arg
elif opt in ("-l", "--socketport"):
socketport = int(arg)
else:
flocklab.error_logandexit("Unknown option '%s'." % (opt), errno.EINVAL)
# Check if the mandatory parameter is set:
if not stop:
if not output:
flocklab.error_logandexit("No output file specified.", errno.EINVAL)
# Check if folder exists
if not os.path.isdir(os.path.dirname(output)):
flocklab.error_logandexit("Output directory '%s' does not exist." % (os.path.dirname(output)))
pidfile = "%s/flocklab_serial.pid" % (config.get("observer", "pidfolder"))
if stop:
logger = flocklab.get_logger(debug=debug)
rs = stop_on_api()
sys.exit(rs)
# If the daemon option is on, later on the process will be daemonized.
if isdaemon:
daemon.daemonize(pidfile=pidfile, closedesc=True)
else:
open(pidfile, 'w').write("%d" % (os.getpid()))
# init logger AFTER daemonizing the process
logger = flocklab.get_logger(debug=debug)
if not logger:
flocklab.error_logandexit("Could not get logger.")
# Find out which target interface is currently activated.
slotnr = flocklab.tg_get_selected()
if not slotnr:
flocklab.error_logandexit("Could not determine slot number.")
logger.debug("Selected slot number is %d." % slotnr)
# Set the serial path:
if port == 'usb':
serialdev = flocklab.tg_usb_port
else:
serialdev = flocklab.tg_serial_port
# Initialize message queues ---
msgQueueDbBuf = multiprocessing.Queue()
msgQueueSockBuf = multiprocessing.Queue()
# Initialize socket ---
if not socketport is None:
ServerSock = ServerSockets(socketport)
# Initialize serial forwarder ---
sf = SerialForwarder(slotnr, serialdev, baudrate)
# Start process for DB buffer ---
stopLock = multiprocessing.Lock()
p = multiprocessing.Process(target=ProcDbBuf, args=(msgQueueDbBuf, stopLock, output), name="ProcDbBuf")
try:
p.daemon = True
p.start()
time.sleep(1)
if p.is_alive():
dbbuf_proc = [p, stopLock]
logger.debug("DB buffer process running.")
else:
flocklab.error_logandexit("DB buffer process is not running.", errno.ESRCH)
except:
stop_on_sig(flocklab.SUCCESS)
flocklab.error_logandexit("Error when starting DB buffer process: %s, %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])), errno.ECONNABORTED)
# Start thread for serial reader ---
stopLock = multiprocessing.Lock()
p = threading.Thread(target=ThreadSerialReader, args=(sf,msgQueueDbBuf,msgQueueSockBuf,stopLock))
try:
p.daemon = True
p.start()
time.sleep(1)
if p.is_alive():
proc_list.append((p, stopLock))
logger.debug("Serial reader thread running.")
else:
flocklab.error_logandexit("Serial reader thread is not running.", errno.ESRCH)
except:
stop_on_sig(flocklab.SUCCESS)
flocklab.error_logandexit("Error when starting serial reader thread: %s, %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])), errno.ECONNABORTED)
# Start thread for socket proxy ---
    if socketport is not None:
stopLock = multiprocessing.Lock()
p = threading.Thread(target=ThreadSocketProxy, args=(msgQueueSockBuf,ServerSock,
sf,msgQueueDbBuf,stopLock))
try:
p.daemon = True
p.start()
time.sleep(1)
if p.is_alive():
proc_list.append((p, stopLock))
logger.debug("Socket proxy thread running.")
else:
flocklab.error_logandexit("Socket proxy thread is not running.", errno.ESRCH)
except:
stop_on_sig(flocklab.SUCCESS)
            flocklab.error_logandexit("Error when starting socket proxy thread: %s, %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1])), errno.ECONNABORTED)
# Catch kill signal and ctrl-c
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGINT, sigterm_handler)
logger.debug("Signal handler registered.")
logger.info("FlockLab serial service started.")
""" Enter an infinite loop which hinders the program from exiting.
This is needed as otherwise the thread list would get lost which would make it
impossible to stop all threads when the service is stopped.
The loop is stopped as soon as the program receives a stop signal.
"""
while True:
# Wake up once every now and then:
time.sleep(10)
sys.exit(flocklab.SUCCESS)
### END main()
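# Example invocations (a sketch for illustration only, not part of the original script;
# the script name, output path and socket port below are assumptions, option names are
# taken from the getopt call in main()):
#
#   flocklab_serial.py --daemon --port=serial --baudrate=115200 --output=/tmp/serial_output.csv --socketport=50100
#   flocklab_serial.py --stop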
if __name__ == "__main__":
try:
main(sys.argv[1:])
except Exception:
flocklab.error_logandexit("Encountered error: %s\n%s\nCommandline was: %s" % (str(sys.exc_info()[1]), traceback.format_exc(), str(sys.argv)))
|
experiments.py
|
from __future__ import print_function
from .. import datasets
from . import metrics
from . import models
from . import methods
from .. import __version__
import numpy as np
import sklearn
import os
import pickle
import sys
import time
import subprocess
from multiprocessing import Pool
import itertools
import copy
import random
try:
from queue import Queue
except ImportError:
from Queue import Queue
from threading import Thread, Lock
regression_metrics = [
"runtime",
"local_accuracy",
"consistency_guarantees",
"keep_positive_mask",
"keep_positive_resample",
"keep_positive_impute",
"keep_negative_mask",
"keep_negative_resample",
"keep_negative_impute",
"keep_absolute_mask__r2",
"keep_absolute_resample__r2",
"keep_absolute_impute__r2",
"remove_positive_mask",
"remove_positive_resample",
"remove_positive_impute",
"remove_negative_mask",
"remove_negative_resample",
"remove_negative_impute",
"remove_absolute_mask__r2",
"remove_absolute_resample__r2",
"remove_absolute_impute__r2"
]
binary_classification_metrics = [
"runtime",
"local_accuracy",
"consistency_guarantees",
"keep_positive_mask",
"keep_positive_resample",
"keep_positive_impute",
"keep_negative_mask",
"keep_negative_resample",
"keep_negative_impute",
"keep_absolute_mask__roc_auc",
"keep_absolute_resample__roc_auc",
"keep_absolute_impute__roc_auc",
"remove_positive_mask",
"remove_positive_resample",
"remove_positive_impute",
"remove_negative_mask",
"remove_negative_resample",
"remove_negative_impute",
"remove_absolute_mask__roc_auc",
"remove_absolute_resample__roc_auc",
"remove_absolute_impute__roc_auc"
]
human_metrics = [
"human_and_00",
"human_and_01",
"human_and_11",
"human_or_00",
"human_or_01",
"human_or_11",
"human_xor_00",
"human_xor_01",
"human_xor_11",
"human_sum_00",
"human_sum_01",
"human_sum_11"
]
linear_regress_methods = [
"linear_shap_corr",
"linear_shap_ind",
"coef",
"random",
"kernel_shap_1000_meanref",
#"kernel_shap_100_meanref",
#"sampling_shap_10000",
"sampling_shap_1000",
#"lime_tabular_regression_1000"
#"sampling_shap_100"
]
linear_classify_methods = [
# NEED LIME
"linear_shap_corr",
"linear_shap_ind",
"coef",
"random",
"kernel_shap_1000_meanref",
#"kernel_shap_100_meanref",
#"sampling_shap_10000",
"sampling_shap_1000",
#"lime_tabular_regression_1000"
#"sampling_shap_100"
]
tree_regress_methods = [
# NEED tree_shap_ind
# NEED split_count?
"tree_shap_tree_path_dependent",
"tree_shap_independent_200",
"saabas",
"random",
"tree_gain",
"kernel_shap_1000_meanref",
"mean_abs_tree_shap",
#"kernel_shap_100_meanref",
#"sampling_shap_10000",
"sampling_shap_1000",
#"lime_tabular_regression_1000"
#"sampling_shap_100"
]
tree_classify_methods = [
# NEED tree_shap_ind
# NEED split_count?
"tree_shap_tree_path_dependent",
"tree_shap_independent_200",
"saabas",
"random",
"tree_gain",
"kernel_shap_1000_meanref",
"mean_abs_tree_shap",
#"kernel_shap_100_meanref",
#"sampling_shap_10000",
"sampling_shap_1000",
#"lime_tabular_regression_1000"
#"sampling_shap_100"
]
deep_regress_methods = [
"deep_shap",
"expected_gradients",
"random",
"kernel_shap_1000_meanref",
"sampling_shap_1000",
#"lime_tabular_regression_1000"
]
deep_classify_methods = [
"deep_shap",
"expected_gradients",
"random",
"kernel_shap_1000_meanref",
"sampling_shap_1000",
#"lime_tabular_regression_1000"
]
_experiments = []
_experiments += [["corrgroups60", "lasso", m, s] for s in regression_metrics for m in linear_regress_methods]
_experiments += [["corrgroups60", "ridge", m, s] for s in regression_metrics for m in linear_regress_methods]
_experiments += [["corrgroups60", "decision_tree", m, s] for s in regression_metrics for m in tree_regress_methods]
_experiments += [["corrgroups60", "random_forest", m, s] for s in regression_metrics for m in tree_regress_methods]
_experiments += [["corrgroups60", "gbm", m, s] for s in regression_metrics for m in tree_regress_methods]
_experiments += [["corrgroups60", "ffnn", m, s] for s in regression_metrics for m in deep_regress_methods]
_experiments += [["independentlinear60", "lasso", m, s] for s in regression_metrics for m in linear_regress_methods]
_experiments += [["independentlinear60", "ridge", m, s] for s in regression_metrics for m in linear_regress_methods]
_experiments += [["independentlinear60", "decision_tree", m, s] for s in regression_metrics for m in tree_regress_methods]
_experiments += [["independentlinear60", "random_forest", m, s] for s in regression_metrics for m in tree_regress_methods]
_experiments += [["independentlinear60", "gbm", m, s] for s in regression_metrics for m in tree_regress_methods]
_experiments += [["independentlinear60", "ffnn", m, s] for s in regression_metrics for m in deep_regress_methods]
_experiments += [["cric", "lasso", m, s] for s in binary_classification_metrics for m in linear_classify_methods]
_experiments += [["cric", "ridge", m, s] for s in binary_classification_metrics for m in linear_classify_methods]
_experiments += [["cric", "decision_tree", m, s] for s in binary_classification_metrics for m in tree_classify_methods]
_experiments += [["cric", "random_forest", m, s] for s in binary_classification_metrics for m in tree_classify_methods]
_experiments += [["cric", "gbm", m, s] for s in binary_classification_metrics for m in tree_classify_methods]
_experiments += [["cric", "ffnn", m, s] for s in binary_classification_metrics for m in deep_classify_methods]
_experiments += [["human", "decision_tree", m, s] for s in human_metrics for m in tree_regress_methods]
def experiments(dataset=None, model=None, method=None, metric=None):
for experiment in _experiments:
if dataset is not None and dataset != experiment[0]:
continue
if model is not None and model != experiment[1]:
continue
if method is not None and method != experiment[2]:
continue
if metric is not None and metric != experiment[3]:
continue
yield experiment
def run_experiment(experiment, use_cache=True, cache_dir="/tmp"):
dataset_name, model_name, method_name, metric_name = experiment
# see if we have a cached version
cache_id = __gen_cache_id(experiment)
cache_file = os.path.join(cache_dir, cache_id + ".pickle")
if use_cache and os.path.isfile(cache_file):
with open(cache_file, "rb") as f:
#print(cache_id.replace("__", " ") + " ...loaded from cache.")
return pickle.load(f)
# compute the scores
print(cache_id.replace("__", " ", 4) + " ...")
sys.stdout.flush()
start = time.time()
X,y = getattr(datasets, dataset_name)()
score = getattr(metrics, metric_name)(
X, y,
getattr(models, dataset_name+"__"+model_name),
method_name
)
print("...took %f seconds.\n" % (time.time() - start))
# cache the scores
with open(cache_file, "wb") as f:
pickle.dump(score, f)
return score
def run_experiments_helper(args):
experiment, cache_dir = args
return run_experiment(experiment, cache_dir=cache_dir)
def run_experiments(dataset=None, model=None, method=None, metric=None, cache_dir="/tmp", nworkers=1):
experiments_arr = list(experiments(dataset=dataset, model=model, method=method, metric=metric))
if nworkers == 1:
out = list(map(run_experiments_helper, zip(experiments_arr, itertools.repeat(cache_dir))))
else:
with Pool(nworkers) as pool:
out = pool.map(run_experiments_helper, zip(experiments_arr, itertools.repeat(cache_dir)))
return list(zip(experiments_arr, out))
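# Example usage (a sketch, not part of the original module; it assumes run_experiments
# is re-exported by the shap.benchmark package): run all lasso experiments on the
# corrgroups60 dataset locally with 4 worker processes and print the scores.
#
#   from shap.benchmark import run_experiments
#   results = run_experiments(dataset="corrgroups60", model="lasso", nworkers=4, cache_dir="/tmp")
#   for (dataset, model, method, metric), score in results:
#       print(dataset, model, method, metric, score)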
nexperiments = 0
total_sent = 0
total_done = 0
total_failed = 0
host_records = {}
worker_lock = Lock()
ssh_conn_per_min_limit = 0 # set as an argument to run_remote_experiments
def __thread_worker(q, host):
global total_sent, total_done
hostname, python_binary = host.split(":")
while True:
# make sure we are not sending too many ssh connections to the host
        # (if we send too many connections, ssh throttling will lock us out)
while True:
all_clear = False
worker_lock.acquire()
try:
if hostname not in host_records:
host_records[hostname] = []
if len(host_records[hostname]) < ssh_conn_per_min_limit:
all_clear = True
elif time.time() - host_records[hostname][-ssh_conn_per_min_limit] > 61:
all_clear = True
finally:
worker_lock.release()
# if we are clear to send a new ssh connection then break
if all_clear:
break
# if we are not clear then we sleep and try again
time.sleep(5)
experiment = q.get()
# if we are not loading from the cache then we note that we have called the host
cache_dir = "/tmp"
cache_file = os.path.join(cache_dir, __gen_cache_id(experiment) + ".pickle")
if not os.path.isfile(cache_file):
worker_lock.acquire()
try:
host_records[hostname].append(time.time())
finally:
worker_lock.release()
        # record how many we have sent off for execution
worker_lock.acquire()
try:
total_sent += 1
__print_status()
finally:
worker_lock.release()
__run_remote_experiment(experiment, hostname, cache_dir=cache_dir, python_binary=python_binary)
# record how many are finished
worker_lock.acquire()
try:
total_done += 1
__print_status()
finally:
worker_lock.release()
q.task_done()
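# A minimal standalone sketch (not in the original module) of the per-host ssh
# rate-limit check used in __thread_worker above: a new connection is allowed only
# if fewer than `limit` connections were opened to the host in roughly the last minute.
def _host_rate_limit_ok(timestamps, limit, now=None):
    """timestamps: connection times for one host, oldest first; limit: assumed >= 1."""
    now = time.time() if now is None else now
    return len(timestamps) < limit or now - timestamps[-limit] > 61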
def __print_status():
print("Benchmark task %d of %d done (%d failed, %d running)" % (total_done, nexperiments, total_failed, total_sent - total_done), end="\r")
sys.stdout.flush()
def run_remote_experiments(experiments, thread_hosts, rate_limit=10):
""" Use ssh to run the experiments on remote machines in parallel.
Parameters
----------
experiments : iterable
Output of shap.benchmark.experiments(...).
thread_hosts : list of strings
Each host has the format "host_name:path_to_python_binary" and can appear multiple times
in the list (one for each parallel execution you want on that machine).
rate_limit : int
How many ssh connections we make per minute to each host (to avoid throttling issues).
"""
global ssh_conn_per_min_limit
ssh_conn_per_min_limit = rate_limit
# first we kill any remaining workers from previous runs
# note we don't check_call because pkill kills our ssh call as well
thread_hosts = copy.copy(thread_hosts)
random.shuffle(thread_hosts)
for host in set(thread_hosts):
hostname,_ = host.split(":")
try:
subprocess.run(["ssh", hostname, "pkill -f shap.benchmark.run_experiment"], timeout=15)
except subprocess.TimeoutExpired:
print("Failed to connect to", hostname, "after 15 seconds! Exiting.")
return
experiments = copy.copy(list(experiments))
random.shuffle(experiments) # this way all the hard experiments don't get put on one machine
global nexperiments, total_sent, total_done, total_failed, host_records
nexperiments = len(experiments)
total_sent = 0
total_done = 0
total_failed = 0
host_records = {}
q = Queue()
for host in thread_hosts:
worker = Thread(target=__thread_worker, args=(q, host))
        worker.daemon = True
worker.start()
for experiment in experiments:
q.put(experiment)
q.join()
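# Example usage (a sketch, not part of the original module; the host names and python
# binary paths are placeholders, listed twice to get two parallel workers per host):
#
#   todo = experiments(dataset="cric", model="gbm")
#   hosts = ["worker1:/usr/bin/python3", "worker1:/usr/bin/python3", "worker2:/usr/bin/python3"]
#   run_remote_experiments(todo, hosts, rate_limit=10)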
def __run_remote_experiment(experiment, remote, cache_dir="/tmp", python_binary="python"):
global total_failed
dataset_name, model_name, method_name, metric_name = experiment
# see if we have a cached version
cache_id = __gen_cache_id(experiment)
cache_file = os.path.join(cache_dir, cache_id + ".pickle")
if os.path.isfile(cache_file):
with open(cache_file, "rb") as f:
return pickle.load(f)
# this is just so we don't dump everything at once on a machine
time.sleep(random.uniform(0,5))
# run the benchmark on the remote machine
#start = time.time()
cmd = "CUDA_VISIBLE_DEVICES=\"\" "+python_binary+" -c \"import shap; shap.benchmark.run_experiment(['%s', '%s', '%s', '%s'], cache_dir='%s')\" &> %s/%s.output" % (
dataset_name, model_name, method_name, metric_name, cache_dir, cache_dir, cache_id
)
try:
subprocess.check_output(["ssh", remote, cmd])
except subprocess.CalledProcessError as e:
print("The following command failed on %s:" % remote, file=sys.stderr)
print(cmd, file=sys.stderr)
total_failed += 1
print(e)
return
# copy the results back
subprocess.check_output(["scp", remote+":"+cache_file, cache_file])
if os.path.isfile(cache_file):
with open(cache_file, "rb") as f:
#print(cache_id.replace("__", " ") + " ...loaded from remote after %f seconds" % (time.time() - start))
return pickle.load(f)
else:
raise Exception("Remote benchmark call finished but no local file was found!")
def __gen_cache_id(experiment):
dataset_name, model_name, method_name, metric_name = experiment
return "v" + "__".join([__version__, dataset_name, model_name, method_name, metric_name])
|
ducolib.py
|
#!/usr/bin/env python3
import socket
import hashlib
import urllib.request
import time
import os
import logging
import sys
import multiprocessing
logging.basicConfig(filename='ducolib.log', level=logging.DEBUG,
format='%(asctime)s -> %(levelname)s :: %(message)s')
class Miner:
def __init__(self, username, UseLowerDiff, rigname):
self.username = username
self.UseLowerDiff = UseLowerDiff
self.minerName = 'Glukhov Miner'
self.rigname = rigname
def mine(self):
current_buffer = ''
if self.UseLowerDiff:
self.soc.send(
bytes("JOB," + str(self.username) + ",MEDIUM", encoding="utf8")
) # Send job request for lower difficulty
else:
self.soc.send(
bytes("JOB," + str(self.username), encoding="utf8")
) # Send job request
job = self.soc.recv(1024).decode() # Get work from pool
# Split received data to job (job and difficulty)
job = job.split(",")
difficulty = job[2]
# Calculate hash with difficulty
for result in range(100 * int(difficulty) + 1):
ducos1 = hashlib.sha1(
str(job[0] + str(result)).encode("utf-8")
).hexdigest() # Generate hash
            if job[1] == ducos1:  # If the computed hash matches the one sent by the pool
self.soc.send(
bytes(str(result) +
f",,{self.minerName},{self.rigname}", encoding="utf8")
) # Send result of hashing algorithm to pool
# Get feedback about the result
feedback = self.soc.recv(1024).decode()
if feedback == "GOOD": # If result was good
current_buffer = "Accepted share: " + \
str(result)+' '+"Difficulty: "+str(difficulty)
break
elif feedback == "BAD": # If result was bad
current_buffer = "Rejected share: " + \
str(result)+' '+"Difficulty: "+str(difficulty)
break
return current_buffer
def requestAndMine(self):
while True:
try:
self.soc = socket.socket()
self.soc.settimeout(15)
# This sections grabs pool adress and port from Duino-Coin GitHub file
serverip = "https://raw.githubusercontent.com/revoxhere/duino-coin/gh-pages/serverip.txt" # Serverip file
with urllib.request.urlopen(serverip) as content:
content = (
content.read().decode().splitlines()
) # Read content and split into lines
pool_address = content[0] # Line 1 = pool address
pool_port = content[1] # Line 2 = pool port
# This section connects and logs user to the server
# Connect to the server
self.soc.connect((str(pool_address), int(pool_port)))
server_version = self.soc.recv(
3).decode() # Get server version
logging.info("Server is on version: "+str(server_version))
# Mining section
while True:
buff = self.mine()
try:
if 'Accepted' in buff:
logging.info(buff)
elif 'Rejected' in buff:
logging.warning(buff)
else:
logging.warning('Empty buffer, likely error')
except Exception:
pass
try:
self.soc.close()
except Exception as e:
logging.warning(str(e))
except Exception as e:
try:
self.soc.close()
except Exception as e:
logging.warning(str(e))
logging.error(str(e)+' Restarting...')
time.sleep(10)
def start_mining(self):
"""Starts mining as a process"""
try:
self.proc.terminate() # pylint: disable=access-member-before-definition
except Exception:
logging.info('No previously running threads, OK!')
finally:
self.proc = multiprocessing.Process( # pylint: disable=attribute-defined-outside-init
target=self.requestAndMine, args=())
self.proc.start()
def stop_mining(self):
"""Stops mining as a process"""
try:
self.proc.terminate() # pylint: disable=access-member-before-definition
except Exception as e:
logging.error(str(e))
def check_status(self):
"""Returs a copy of the current mine() method buffer."""
with open('ducolib.log') as f:
return f.readlines()[-1]
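# Minimal standalone sketch (not part of the original file) of the DUCO-S1 check
# performed in Miner.mine(): the pool sends the previous hash, the expected hash and a
# difficulty, and the miner brute-forces the nonce whose sha1 matches the expected hash.
def ducos1_solve(last_hash, expected_hash, difficulty):
    """Return the matching nonce, or None if no nonce in range solves the job."""
    for nonce in range(100 * int(difficulty) + 1):
        if hashlib.sha1((last_hash + str(nonce)).encode("utf-8")).hexdigest() == expected_hash:
            return nonce
    return None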
class MinerCrewChief:
def __init__(self, username, UseLowerDiff, threads, rigname):
self.miners = []
self.username = username
self.UseLowerDiff = UseLowerDiff
self.threads = threads
self.rigname = rigname
logging.info('Mining DUCO for {} with Glukhov Miner :)'.format(
self.username))
logging.info('Using Lower Mining Difficulty: {}. On rig: {}'.format(
self.UseLowerDiff, self.rigname))
def start_mining(self):
if self.threads == 'auto':
self.threads = os.cpu_count()
for i in range(int(self.threads)):
m = Miner(self.username, self.UseLowerDiff, self.rigname)
m.start_mining()
logging.info('Mining Started on Thread {}!'.format(i))
self.miners.append(m)
def stop_mining(self):
try:
csr = 0
for m in self.miners:
m.stop_mining()
logging.info('Mining thread {} stopped!'.format(csr))
csr += 1
except NameError:
            logging.warning('Tried stopping non-existent miners.')
def check_status(self):
"""For every miner:
returs a copy of the current mine() method buffer."""
states = []
try:
for m in self.miners:
s = m.check_status()
s = s.strip()
states.append(s)
except NameError:
            logging.warning('Tried checking non-existent miners.')
return states
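# Example library usage (a sketch, not part of the original file; the username and
# rig name are placeholders):
#
#   chief = MinerCrewChief('my_duco_username', UseLowerDiff=True, threads=2, rigname='TestRig')
#   chief.start_mining()
#   time.sleep(600)
#   print(chief.check_status())
#   chief.stop_mining()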
if __name__ == "__main__":
try:
print('Welcome to the Glukhov Miner :)')
prog = sys.argv[0]
user = sys.argv[1]
try:
diff = sys.argv[2]
if diff.lower() in ['true', 'easy']:
diff = True
elif diff.lower() in ['false', 'net', 'network']:
diff = False
            else:
                print('Invalid input, setting difficulty to default.')
                diff = False
except IndexError:
diff = False
print('-> No difficulty specified, using default')
try:
threads = sys.argv[3]
except IndexError:
threads = 'auto' # Full power default
try:
rigname = sys.argv[4]
except IndexError:
rigname = 'Ducolib'
try:
seshDuration = sys.argv[5]
seshDuration = int(seshDuration)*3600
except IndexError:
seshDuration = 3600 * 8
print('-> Mining for: ', user)
print('-> Using lower difficulty:', diff)
print('-> Threads:', threads, 'With Rig:', rigname)
print('-> Mining session duration(seconds):', seshDuration)
#workers = MinerCrewChief('Alicia426', True, 'auto')
workers = MinerCrewChief(user, diff, threads, rigname)
workers.start_mining()
print("-> Now mining, don't close the terminal :)")
time.sleep(seshDuration)
workers.stop_mining()
except IndexError as e:
msg = 'Attempted to run as CLI tool, but no correct arguments were given'
logging.warning(msg)
print(msg)
|